hip_filename     string, length 5 to 84
hip_content      string, length 79 to 9.69M
cuda_filename    string, length 4 to 83
cuda_content     string, length 19 to 9.69M
1f39a625c05f47105e3bb42f8b31a1b742cbb038.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Compile this file with clang to see how CUDA
// is translated into NVVM IR.

__device__ int cube(int x) {
  int y;
  asm(".reg .u32 t1;\n\t"            // temp reg t1
      " mul.lo.u32 t1, %1, %1;\n\t"  // t1 = x * x
      " mul.lo.u32 %0, t1, %1;"      // y = t1 * x
      : "=r"(y) : "r"(x));
  return y + clock64();
}

__global__ void __launch_bounds__(1024, 2) test_ldg(float *a, float *b) {
  unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
  a[i] = cube(1);
  auto c = __ldg((float4 *)a);
  a[i] = __ldg(&b[i]) + __ldg((double *)&b[i]) + __ldg((char *)&b[i]);
}

int main() {
  float *a, *b;
  hipLaunchKernelGGL(( test_ldg), dim3(1), dim3(1), 0, 0, a, b);
}
1f39a625c05f47105e3bb42f8b31a1b742cbb038.cu
// Compile this file with clang to see how CUDA
// is translated into NVVM IR.

__device__ int cube(int x) {
  int y;
  asm(".reg .u32 t1;\n\t"            // temp reg t1
      " mul.lo.u32 t1, %1, %1;\n\t"  // t1 = x * x
      " mul.lo.u32 %0, t1, %1;"      // y = t1 * x
      : "=r"(y) : "r"(x));
  return y + clock64();
}

__global__ void __launch_bounds__(1024, 2) test_ldg(float *a, float *b) {
  unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
  a[i] = cube(1);
  auto c = __ldg((float4 *)a);
  a[i] = __ldg(&b[i]) + __ldg((double *)&b[i]) + __ldg((char *)&b[i]);
}

int main() {
  float *a, *b;
  test_ldg<<<1, 1>>>(a, b);
}
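The pair above isolates the construct hipify rewrites most visibly: the triple-chevron kernel launch becomes a hipLaunchKernelGGL call whose grid, block, shared-memory and stream arguments are spelled out explicitly before the kernel arguments. A minimal sketch of that mapping, using a hypothetical `scale` kernel rather than the test_ldg file above:

#include "hip/hip_runtime.h"

__global__ void scale(float *a, float s) { a[threadIdx.x] *= s; }

int main() {
  float *a;
  hipMalloc((void **)&a, 256 * sizeof(float));
  // CUDA source:   scale<<<1, 256, 0, 0>>>(a, 2.0f);
  // hipify output: grid, block, shared-memory bytes and stream become
  // explicit macro arguments, followed by the kernel arguments.
  hipLaunchKernelGGL(scale, dim3(1), dim3(256), 0, 0, a, 2.0f);
  hipDeviceSynchronize();
  hipFree(a);
  return 0;
}

hipcc still accepts the chevron syntax itself, so the unconverted `add << <1, 1 >> >` launch that appears in a later pair of this dataset is not necessarily an error.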
68da72c9a30fbbebdf40898c4f006c759f8a5def.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <iostream> #include <math.h> #include <ctime> #include <cmath> #include <stdlib.h> #include <fstream> #include <sstream> #include "Payoff.h" #include "enum_header.h" __host__ __device__ double* three_dim_index(double* matrix, int i, int j, int k, double m, int b, int num_assets); __host__ __device__ double* two_dim_index(double* vector, int i, int j, double m, int b); __device__ double GeometricPayOffCallM(double* X, int i, int j, double m, int b, int num_assets, double Strike); __device__ double GeometricPayOffPutM(double* X, int i, int j, double m, int b, int num_assets, double Strike); __device__ double inner_control_meshME(int i, int j, int b, double r, double delta_t, double m, double* W_device, double* X_device, double* V_device, int num_assets ); //this function returns the high bias mesh price __global__ void MeshEstimatorKernel(double strike, double r, double delta_t, int b, double m, double* X_device, double* W_device, double* V_device, double* asset_amount_device, int num_assets, int ker){ double H; //payoff variable double C; //continuation value variable int idx =blockDim.x*blockIdx.x + threadIdx.x; if(idx<b){ if(ker==0){ H=GeometricPayOffCallM( X_device, m-1-ker, idx, m, b, num_assets, strike)*exp(-r*delta_t*(m-ker)); *two_dim_index(V_device, ker, idx, m, b)=H; } else{ //the inner control function calculate the continuation value using a control variate C=inner_control_meshME(ker, idx, b, r, delta_t, m, W_device, X_device, V_device, num_assets); H=GeometricPayOffCallM( X_device, m-1-ker, idx, m, b, num_assets, strike)*exp(-r*delta_t*(m-ker)); if(H>=C){ *two_dim_index(V_device, ker, idx, m, b)=H; } else{ *two_dim_index(V_device, ker, idx, m, b)=C; } } } //end of if statement } //this function allocates the gpu memory copies data to the gpu double MeshEstimator(double strike, double r, double delta_t, int b, double m, double* X, double* W, double* V, double asset_amount[], int num_assets ){ double V_0; int m_int=(int)m; double* asset_amount_host; asset_amount_host =asset_amount; int X_N=(m_int) * b * (num_assets); int asset_amount_N=num_assets; int W_N=(m_int) * b*b; int V_N=(m_int) * b; double* X_device; double* V_device; double* asset_amount_device; double* W_device; hipMalloc((void**) &X_device, X_N*sizeof(double) ); hipMemcpy(X_device, X, X_N*sizeof(double), hipMemcpyHostToDevice); hipMalloc((void**) &asset_amount_device, asset_amount_N*sizeof(double) ); hipMemcpy(asset_amount_device, asset_amount_host, asset_amount_N*sizeof(double), hipMemcpyHostToDevice); hipMalloc((void**) &V_device, V_N*sizeof(double) ); hipMemcpy(V_device, V, V_N*sizeof(double), hipMemcpyHostToDevice); hipMalloc((void**) &W_device, W_N*sizeof(double) ); hipMemcpy(W_device, W, W_N*sizeof(double), hipMemcpyHostToDevice); //set the number of threads dim3 gridDim((int)ceil(b/512.0)); dim3 blockDim(512.0); hipError_t error = hipGetLastError(); if( error != hipSuccess ) { std::cout << hipGetErrorString(error) << std::endl; printf("found at line %d\n", __LINE__); exit(1); } //we loop of the high bias kernel for each time step for(int ker=0; ker<m; ker++){hipLaunchKernelGGL(( MeshEstimatorKernel), dim3(gridDim), dim3(blockDim), 0, 0, strike, r, delta_t, b, m, X_device, W_device, V_device, asset_amount_device, num_assets, ker); hipDeviceSynchronize(); error = hipGetLastError(); if( error != hipSuccess ) { std::cout << hipGetErrorString(error) << std::endl; printf("found at line %d\n", __LINE__); exit(1); } 
hipMemcpy(V, V_device, sizeof(double)*V_N, hipMemcpyDeviceToHost); if(ker<m-1){ hipMemcpy(V_device, V, V_N*sizeof(double), hipMemcpyHostToDevice); } } hipFree(X_device); hipFree(asset_amount_device); hipFree(V_device); hipFree(W_device); double sum=0; for(int k=0; k<b; k++){ sum+=*two_dim_index(V, (m_int-1), k, m, b); } //this is the high bias option value at time 0 V_0=(1/((double)b))*sum; return V_0; }
68da72c9a30fbbebdf40898c4f006c759f8a5def.cu
#include <cuda.h> #include <iostream> #include <math.h> #include <ctime> #include <cmath> #include <stdlib.h> #include <fstream> #include <sstream> #include "Payoff.h" #include "enum_header.h" __host__ __device__ double* three_dim_index(double* matrix, int i, int j, int k, double m, int b, int num_assets); __host__ __device__ double* two_dim_index(double* vector, int i, int j, double m, int b); __device__ double GeometricPayOffCallM(double* X, int i, int j, double m, int b, int num_assets, double Strike); __device__ double GeometricPayOffPutM(double* X, int i, int j, double m, int b, int num_assets, double Strike); __device__ double inner_control_meshME(int i, int j, int b, double r, double delta_t, double m, double* W_device, double* X_device, double* V_device, int num_assets ); //this function returns the high bias mesh price __global__ void MeshEstimatorKernel(double strike, double r, double delta_t, int b, double m, double* X_device, double* W_device, double* V_device, double* asset_amount_device, int num_assets, int ker){ double H; //payoff variable double C; //continuation value variable int idx =blockDim.x*blockIdx.x + threadIdx.x; if(idx<b){ if(ker==0){ H=GeometricPayOffCallM( X_device, m-1-ker, idx, m, b, num_assets, strike)*exp(-r*delta_t*(m-ker)); *two_dim_index(V_device, ker, idx, m, b)=H; } else{ //the inner control function calculate the continuation value using a control variate C=inner_control_meshME(ker, idx, b, r, delta_t, m, W_device, X_device, V_device, num_assets); H=GeometricPayOffCallM( X_device, m-1-ker, idx, m, b, num_assets, strike)*exp(-r*delta_t*(m-ker)); if(H>=C){ *two_dim_index(V_device, ker, idx, m, b)=H; } else{ *two_dim_index(V_device, ker, idx, m, b)=C; } } } //end of if statement } //this function allocates the gpu memory copies data to the gpu double MeshEstimator(double strike, double r, double delta_t, int b, double m, double* X, double* W, double* V, double asset_amount[], int num_assets ){ double V_0; int m_int=(int)m; double* asset_amount_host; asset_amount_host =asset_amount; int X_N=(m_int) * b * (num_assets); int asset_amount_N=num_assets; int W_N=(m_int) * b*b; int V_N=(m_int) * b; double* X_device; double* V_device; double* asset_amount_device; double* W_device; cudaMalloc((void**) &X_device, X_N*sizeof(double) ); cudaMemcpy(X_device, X, X_N*sizeof(double), cudaMemcpyHostToDevice); cudaMalloc((void**) &asset_amount_device, asset_amount_N*sizeof(double) ); cudaMemcpy(asset_amount_device, asset_amount_host, asset_amount_N*sizeof(double), cudaMemcpyHostToDevice); cudaMalloc((void**) &V_device, V_N*sizeof(double) ); cudaMemcpy(V_device, V, V_N*sizeof(double), cudaMemcpyHostToDevice); cudaMalloc((void**) &W_device, W_N*sizeof(double) ); cudaMemcpy(W_device, W, W_N*sizeof(double), cudaMemcpyHostToDevice); //set the number of threads dim3 gridDim((int)ceil(b/512.0)); dim3 blockDim(512.0); cudaError_t error = cudaGetLastError(); if( error != cudaSuccess ) { std::cout << cudaGetErrorString(error) << std::endl; printf("found at line %d\n", __LINE__); exit(1); } //we loop of the high bias kernel for each time step for(int ker=0; ker<m; ker++){ MeshEstimatorKernel<<<gridDim, blockDim>>>(strike, r, delta_t, b, m, X_device, W_device, V_device, asset_amount_device, num_assets, ker); cudaDeviceSynchronize(); error = cudaGetLastError(); if( error != cudaSuccess ) { std::cout << cudaGetErrorString(error) << std::endl; printf("found at line %d\n", __LINE__); exit(1); } cudaMemcpy(V, V_device, sizeof(double)*V_N, cudaMemcpyDeviceToHost); if(ker<m-1){ 
cudaMemcpy(V_device, V, V_N*sizeof(double), cudaMemcpyHostToDevice); } } cudaFree(X_device); cudaFree(asset_amount_device); cudaFree(V_device); cudaFree(W_device); double sum=0; for(int k=0; k<b; k++){ sum+=*two_dim_index(V, (m_int-1), k, m, b); } //this is the high bias option value at time 0 V_0=(1/((double)b))*sum; return V_0; }
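Both rows of this pair repeat the same get-last-error / print / exit block after the allocations and after every kernel launch. A common way to keep that behaviour without the repetition is a status-checking macro; the sketch below is ours (the `HIP_CHECK` name does not appear in the file) and only illustrates the pattern:

#include "hip/hip_runtime.h"
#include <cstdio>
#include <cstdlib>

// Wraps any call returning hipError_t and aborts with file/line on failure.
#define HIP_CHECK(call)                                               \
  do {                                                                \
    hipError_t err_ = (call);                                         \
    if (err_ != hipSuccess) {                                         \
      std::fprintf(stderr, "%s at %s:%d\n",                           \
                   hipGetErrorString(err_), __FILE__, __LINE__);      \
      std::exit(1);                                                   \
    }                                                                 \
  } while (0)

int main() {
  double *X_device;
  HIP_CHECK(hipMalloc((void **)&X_device, 1024 * sizeof(double)));
  // Kernel launches return no status, so the sticky error is fetched
  // afterwards, as the file already does: HIP_CHECK(hipGetLastError());
  HIP_CHECK(hipFree(X_device));
  return 0;
}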
e5e34e3109293c4784bf0de56314c72f17fbba44.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ /* * C code for creating the Q data structure for fast convolution-based * Hessian multiplication for arbitrary k-space trajectories. * * Inputs: * kx - VECTOR of kx values, same length as ky and kz * ky - VECTOR of ky values, same length as kx and kz * kz - VECTOR of kz values, same length as kx and ky * x - VECTOR of x values, same length as y and z * y - VECTOR of y values, same length as x and z * z - VECTOR of z values, same length as x and y * phi - VECTOR of the Fourier transform of the spatial basis * function, evaluated at [kx, ky, kz]. Same length as kx, ky, and kz. * * recommended g++ options: * -O3 -lm -ffast-math -funroll-all-loops */ #include <stdio.h> #include <math.h> #include <stdlib.h> #include <sys/time.h> #include <malloc.h> #include "parboil.h" #include "file.h" #include "computeQ.hip" static void setupMemoryGPU(int num, int size, float*& dev_ptr, float*& host_ptr) { hipMalloc ((void **) &dev_ptr, num * size); CUDA_ERRCK; hipMemcpy (dev_ptr, host_ptr, num * size, hipMemcpyHostToDevice); CUDA_ERRCK; } static void cleanupMemoryGPU(int num, int size, float *& dev_ptr, float * host_ptr) { hipMemcpy (host_ptr, dev_ptr, num * size, hipMemcpyDeviceToHost); CUDA_ERRCK; hipFree(dev_ptr); CUDA_ERRCK; } int main (int argc, char *argv[]) { int numX, numK; /* Number of X and K values */ int original_numK; /* Number of K values in input file */ float *kx, *ky, *kz; /* K trajectory (3D vectors) */ float *x, *y, *z; /* X coordinates (3D vectors) */ float *phiR, *phiI; /* Phi values (complex) */ float *phiMag; /* Magnitude of Phi */ float *Qr, *Qi; /* Q signal (complex) */ struct kValues* kVals; struct pb_Parameters *params; struct pb_TimerSet timers; pb_InitializeTimerSet(&timers); /* Read command line */ params = pb_ReadParameters(&argc, argv); if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] != NULL)) { fprintf(stderr, "Expecting one input filename\n"); exit(-1); } /* Read in data */ pb_SwitchToTimer(&timers, pb_TimerID_IO); inputData(params->inpFiles[0], &original_numK, &numX, &kx, &ky, &kz, &x, &y, &z, &phiR, &phiI); /* Reduce the number of k-space samples if a number is given * on the command line */ if (argc < 2) numK = original_numK; else { int inputK; char *end; inputK = strtol(argv[1], &end, 10); if (end == argv[1]) { fprintf(stderr, "Expecting an integer parameter\n"); exit(-1); } numK = MIN(inputK, original_numK); } printf("%d pixels in output; %d samples in trajectory; using %d samples\n", numX, original_numK, numK); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); /* Create CPU data structures */ createDataStructsCPU(numK, numX, &phiMag, &Qr, &Qi); /* GPU section 1 (precompute PhiMag) */ { /* Mirror several data structures on the device */ float *phiR_d, *phiI_d; float *phiMag_d; pb_SwitchToTimer(&timers, pb_TimerID_COPY); setupMemoryGPU(numK, sizeof(float), phiR_d, phiR); setupMemoryGPU(numK, sizeof(float), phiI_d, phiI); hipMalloc((void **)&phiMag_d, numK * sizeof(float)); CUDA_ERRCK; hipDeviceSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_KERNEL); computePhiMag_GPU(numK, phiR_d, phiI_d, phiMag_d); hipDeviceSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_COPY); cleanupMemoryGPU(numK, sizeof(float), phiMag_d, phiMag); hipFree(phiR_d); 
hipFree(phiI_d); } pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); kVals = (struct kValues*)calloc(numK, sizeof (struct kValues)); for (int k = 0; k < numK; k++) { kVals[k].Kx = kx[k]; kVals[k].Ky = ky[k]; kVals[k].Kz = kz[k]; kVals[k].PhiMag = phiMag[k]; } free(phiMag); /* GPU section 2 */ { float *x_d, *y_d, *z_d; float *Qr_d, *Qi_d; pb_SwitchToTimer(&timers, pb_TimerID_COPY); setupMemoryGPU(numX, sizeof(float), x_d, x); setupMemoryGPU(numX, sizeof(float), y_d, y); setupMemoryGPU(numX, sizeof(float), z_d, z); hipMalloc((void **)&Qr_d, numX * sizeof(float)); CUDA_ERRCK; hipMemset((void *)Qr_d, 0, numX * sizeof(float)); hipMalloc((void **)&Qi_d, numX * sizeof(float)); CUDA_ERRCK; hipMemset((void *)Qi_d, 0, numX * sizeof(float)); hipDeviceSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_KERNEL); computeQ_GPU(numK, numX, x_d, y_d, z_d, kVals, Qr_d, Qi_d); hipDeviceSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_COPY); hipFree(x_d); hipFree(y_d); hipFree(z_d); cleanupMemoryGPU(numX, sizeof(float), Qr_d, Qr); cleanupMemoryGPU(numX, sizeof(float), Qi_d, Qi); } pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); if (params->outFile) { /* Write Q to file */ pb_SwitchToTimer(&timers, pb_TimerID_IO); outputData(params->outFile, Qr, Qi, numX); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); } free (kx); free (ky); free (kz); free (x); free (y); free (z); free (phiR); free (phiI); free (kVals); free (Qr); free (Qi); pb_SwitchToTimer(&timers, pb_TimerID_NONE); pb_PrintTimerSet(&timers); pb_FreeParameters(params); return 0; }
e5e34e3109293c4784bf0de56314c72f17fbba44.cu
/*************************************************************************** *cr *cr (C) Copyright 2007 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ /* * C code for creating the Q data structure for fast convolution-based * Hessian multiplication for arbitrary k-space trajectories. * * Inputs: * kx - VECTOR of kx values, same length as ky and kz * ky - VECTOR of ky values, same length as kx and kz * kz - VECTOR of kz values, same length as kx and ky * x - VECTOR of x values, same length as y and z * y - VECTOR of y values, same length as x and z * z - VECTOR of z values, same length as x and y * phi - VECTOR of the Fourier transform of the spatial basis * function, evaluated at [kx, ky, kz]. Same length as kx, ky, and kz. * * recommended g++ options: * -O3 -lm -ffast-math -funroll-all-loops */ #include <stdio.h> #include <math.h> #include <stdlib.h> #include <sys/time.h> #include <malloc.h> #include "parboil.h" #include "file.h" #include "computeQ.cu" static void setupMemoryGPU(int num, int size, float*& dev_ptr, float*& host_ptr) { cudaMalloc ((void **) &dev_ptr, num * size); CUDA_ERRCK; cudaMemcpy (dev_ptr, host_ptr, num * size, cudaMemcpyHostToDevice); CUDA_ERRCK; } static void cleanupMemoryGPU(int num, int size, float *& dev_ptr, float * host_ptr) { cudaMemcpy (host_ptr, dev_ptr, num * size, cudaMemcpyDeviceToHost); CUDA_ERRCK; cudaFree(dev_ptr); CUDA_ERRCK; } int main (int argc, char *argv[]) { int numX, numK; /* Number of X and K values */ int original_numK; /* Number of K values in input file */ float *kx, *ky, *kz; /* K trajectory (3D vectors) */ float *x, *y, *z; /* X coordinates (3D vectors) */ float *phiR, *phiI; /* Phi values (complex) */ float *phiMag; /* Magnitude of Phi */ float *Qr, *Qi; /* Q signal (complex) */ struct kValues* kVals; struct pb_Parameters *params; struct pb_TimerSet timers; pb_InitializeTimerSet(&timers); /* Read command line */ params = pb_ReadParameters(&argc, argv); if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] != NULL)) { fprintf(stderr, "Expecting one input filename\n"); exit(-1); } /* Read in data */ pb_SwitchToTimer(&timers, pb_TimerID_IO); inputData(params->inpFiles[0], &original_numK, &numX, &kx, &ky, &kz, &x, &y, &z, &phiR, &phiI); /* Reduce the number of k-space samples if a number is given * on the command line */ if (argc < 2) numK = original_numK; else { int inputK; char *end; inputK = strtol(argv[1], &end, 10); if (end == argv[1]) { fprintf(stderr, "Expecting an integer parameter\n"); exit(-1); } numK = MIN(inputK, original_numK); } printf("%d pixels in output; %d samples in trajectory; using %d samples\n", numX, original_numK, numK); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); /* Create CPU data structures */ createDataStructsCPU(numK, numX, &phiMag, &Qr, &Qi); /* GPU section 1 (precompute PhiMag) */ { /* Mirror several data structures on the device */ float *phiR_d, *phiI_d; float *phiMag_d; pb_SwitchToTimer(&timers, pb_TimerID_COPY); setupMemoryGPU(numK, sizeof(float), phiR_d, phiR); setupMemoryGPU(numK, sizeof(float), phiI_d, phiI); cudaMalloc((void **)&phiMag_d, numK * sizeof(float)); CUDA_ERRCK; cudaThreadSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_KERNEL); computePhiMag_GPU(numK, phiR_d, phiI_d, phiMag_d); cudaThreadSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_COPY); cleanupMemoryGPU(numK, sizeof(float), phiMag_d, phiMag); cudaFree(phiR_d); cudaFree(phiI_d); } pb_SwitchToTimer(&timers, 
pb_TimerID_COMPUTE); kVals = (struct kValues*)calloc(numK, sizeof (struct kValues)); for (int k = 0; k < numK; k++) { kVals[k].Kx = kx[k]; kVals[k].Ky = ky[k]; kVals[k].Kz = kz[k]; kVals[k].PhiMag = phiMag[k]; } free(phiMag); /* GPU section 2 */ { float *x_d, *y_d, *z_d; float *Qr_d, *Qi_d; pb_SwitchToTimer(&timers, pb_TimerID_COPY); setupMemoryGPU(numX, sizeof(float), x_d, x); setupMemoryGPU(numX, sizeof(float), y_d, y); setupMemoryGPU(numX, sizeof(float), z_d, z); cudaMalloc((void **)&Qr_d, numX * sizeof(float)); CUDA_ERRCK; cudaMemset((void *)Qr_d, 0, numX * sizeof(float)); cudaMalloc((void **)&Qi_d, numX * sizeof(float)); CUDA_ERRCK; cudaMemset((void *)Qi_d, 0, numX * sizeof(float)); cudaThreadSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_KERNEL); computeQ_GPU(numK, numX, x_d, y_d, z_d, kVals, Qr_d, Qi_d); cudaThreadSynchronize(); pb_SwitchToTimer(&timers, pb_TimerID_COPY); cudaFree(x_d); cudaFree(y_d); cudaFree(z_d); cleanupMemoryGPU(numX, sizeof(float), Qr_d, Qr); cleanupMemoryGPU(numX, sizeof(float), Qi_d, Qi); } pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); if (params->outFile) { /* Write Q to file */ pb_SwitchToTimer(&timers, pb_TimerID_IO); outputData(params->outFile, Qr, Qi, numX); pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE); } free (kx); free (ky); free (kz); free (x); free (y); free (z); free (phiR); free (phiI); free (kVals); free (Qr); free (Qi); pb_SwitchToTimer(&timers, pb_TimerID_NONE); pb_PrintTimerSet(&timers); pb_FreeParameters(params); return 0; }
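Besides the cudaMalloc/cudaMemcpy renames, this pair shows hipify rewriting the deprecated cudaThreadSynchronize() calls to hipDeviceSynchronize() and the #include "computeQ.cu" to "computeQ.hip". The benchmark's setupMemoryGPU/cleanupMemoryGPU helpers hard-code float; a type-generic sketch of the same mirror-allocate, copy-up, copy-back idea (an illustration, not part of Parboil) could look like this:

#include "hip/hip_runtime.h"
#include <cstddef>

// Allocate a device mirror of a host array and copy the host data up.
template <typename T>
void setupMemoryGPU(std::size_t count, T*& dev_ptr, const T* host_ptr) {
  hipMalloc((void**)&dev_ptr, count * sizeof(T));
  hipMemcpy(dev_ptr, host_ptr, count * sizeof(T), hipMemcpyHostToDevice);
}

// Copy results back to the host and release the device mirror.
template <typename T>
void cleanupMemoryGPU(std::size_t count, T* dev_ptr, T* host_ptr) {
  hipMemcpy(host_ptr, dev_ptr, count * sizeof(T), hipMemcpyDeviceToHost);
  hipFree(dev_ptr);
}

int main() {
  float host[256] = {};
  float* dev = nullptr;
  setupMemoryGPU(256, dev, host);
  cleanupMemoryGPU(256, dev, host);
  return 0;
}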
c8f62af0f4ce0ff8ed717cd4be3051bae5684018.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include "graph.h" #include "wtime.h" #include "util.h" #include <hipcub/hipcub.hpp> #include <fstream> #include<vector> #include <cub/util_type.cuh> #include "countingAlgorithm/partitionRecCounting.cuh" #define blocknumber 128 #define blocksize 1024 #define hash_blocksize 1024 using namespace std; int FR(graph* G,int bound ) { hipSetDeviceFlags (hipDeviceMapHost);//zerocopy long long vertexCount=G->uCount+G->vCount; int *hashTable; float *time; unsigned long long butterfly_num=0; vector<int> par_vertex_id;//each partition.i start from par_vertex_id[i] //partition int par_id=0; long long par_sum=0; par_vertex_id.push_back(0); if(vertexCount>1) { par_sum=G->beginPos[1]-G->beginPos[0]; } else{ cout<<"ERROR: less than one vertex"<<endl; } for(int vertex=1;vertex<G->uCount+G->vCount;vertex++) { long long deg=G->beginPos[vertex+1]-G->beginPos[vertex]; if(par_sum+deg+1>bound)//deg>bound { par_vertex_id.push_back(vertex); cout<<"push back vertex "<<vertex<<endl; par_sum=deg+1; } else { par_sum+=(deg+1); } } par_vertex_id.push_back(G->uCount+G->vCount); long long largest_par=0; for(int i=0;i<par_vertex_id.size()-1;i++) { if(par_vertex_id[i+1]-par_vertex_id[i]>largest_par) largest_par=par_vertex_id[i+1]-par_vertex_id[i]; } hipHostMalloc((void**) &hashTable, largest_par*largest_par*sizeof(int), hipHostMallocWriteCombined | hipHostMallocMapped); memset(hashTable, 0, largest_par*largest_par*sizeof(int)); int *D_hashTable; hipHostGetDevicePointer(&D_hashTable, hashTable, 0); //intra_partition //Memory Allocating and Data transferring for(int par_id=0;par_id<par_vertex_id.size()-1;par_id++) { //definition int* D_beginPos,*H_beginPos; int* D_edgeList; int vertex_n=par_vertex_id[par_id+1]-par_vertex_id[par_id]; int edge_n=G->beginPos[par_vertex_id[par_id+1]]-G->beginPos[par_vertex_id[par_id]]; //memory allocation HRR(hipMalloc(&D_beginPos,sizeof(int)*vertex_n)); HRR(hipMalloc(&D_edgeList,sizeof(int)*edge_n)); H_beginPos=new int[vertex_n]; //initial H_beginPos long long ->int for(int v=par_vertex_id[par_id];v<par_vertex_id[par_id+1];v++) { H_beginPos[v-par_vertex_id[par_id]]=G->beginPos[v]-G->beginPos[par_vertex_id[par_id]]; } HRR(hipMemcpy(D_beginPos,H_beginPos,sizeof(int)*vertex_n,hipMemcpyHostToDevice)); HRR(hipMemcpy(D_edgeList,G->edgeList+G->beginPos[par_vertex_id[par_id]],sizeof(int)*edge_n,hipMemcpyHostToDevice)); hipLaunchKernelGGL(( Intra_Partition_Counting), dim3(blocknumber),dim3(hash_blocksize), 0, 0, D_beginPos,D_edgeList,G->uCount,G->vCount,D_hashTable,par_vertex_id[par_id],vertex_n,edge_n); HRR(hipDeviceSynchronize()); HRR(hipFree(D_beginPos)); HRR(hipFree(D_edgeList)); for(int i=0;i<vertex_n*vertex_n;i++) { int ht=hashTable[i]; butterfly_num+=(ht*(ht-1)/2); } memset(hashTable, 0, largest_par*largest_par*sizeof(int)); } cout<<"intrra butterfly num="<<butterfly_num<<endl; //inter_partition for(int par_i=0;par_i<par_vertex_id.size()-1;par_i++) { for(int par_j=par_i+1;par_j<par_vertex_id.size()-1;par_j++) { printf("iiiii"); int *beginPos_i,*beginPos_j,*H_beginPos_i,*H_beginPos_j; int *edgeList_i,*edgeList_j; int vertex_n_i=par_vertex_id[par_i+1]-par_vertex_id[par_i]; int vertex_n_j=par_vertex_id[par_j+1]-par_vertex_id[par_j]; int edge_n_i=G->beginPos[par_vertex_id[par_i+1]]-G->beginPos[par_vertex_id[par_i]]; int edge_n_j=G->beginPos[par_vertex_id[par_j+1]]-G->beginPos[par_vertex_id[par_j]]; HRR(hipMalloc(&beginPos_i,sizeof(int)*vertex_n_i)); 
HRR(hipMalloc(&beginPos_j,sizeof(int)*vertex_n_j)); HRR(hipMalloc(&edgeList_i,sizeof(int)*edge_n_i)); HRR(hipMalloc(&edgeList_j,sizeof(int)*edge_n_j)); H_beginPos_i=new int[vertex_n_i]; H_beginPos_j=new int[vertex_n_j]; for(int v=par_vertex_id[par_i];v<par_vertex_id[par_i+1];v++) { H_beginPos_i[v-par_vertex_id[par_i]]=G->beginPos[v]-G->beginPos[par_vertex_id[par_i]]; } for(int v=par_vertex_id[par_j];v<par_vertex_id[par_j+1];v++) { H_beginPos_j[v-par_vertex_id[par_i]]=G->beginPos[v]-G->beginPos[par_vertex_id[par_j]]; } HRR(hipMemcpy(beginPos_i,H_beginPos_i,sizeof(int)*vertex_n_i,hipMemcpyHostToDevice)); HRR(hipMemcpy(edgeList_i,G->edgeList+G->beginPos[par_vertex_id[par_i]],sizeof(int)*edge_n_i,hipMemcpyHostToDevice)); HRR(hipMemcpy(beginPos_j,H_beginPos_j,sizeof(int)*vertex_n_j,hipMemcpyHostToDevice)); HRR(hipMemcpy(edgeList_j,G->edgeList+G->beginPos[par_vertex_id[par_j]],sizeof(int)*edge_n_j,hipMemcpyHostToDevice)); printf("222"); hipLaunchKernelGGL(( Inter_Partition_Counting), dim3(blocknumber),dim3(hash_blocksize), 0, 0, beginPos_i,beginPos_j,edgeList_i,edgeList_j,G->uCount,G->vCount,D_hashTable,par_vertex_id[par_i],par_vertex_id[par_j],vertex_n_i,vertex_n_j,edge_n_i,edge_n_j); HRR(hipDeviceSynchronize()); HRR(hipFree(beginPos_i)); HRR(hipFree(edgeList_i)); HRR(hipFree(beginPos_j)); HRR(hipFree(edgeList_j)); for(int i=0;i<vertex_n_i*vertex_n_j;i++) { int ht=hashTable[i]; butterfly_num+=(ht*(ht-1)/2); } memset(hashTable, 0, largest_par*largest_par*sizeof(int)); } } // for(long long i=0;i<vertexCount*vertexCount;i++) // { // int ht=hashTable[i]; // butterfly_num+=(ht*(ht-1)/2); // } cout<<"total butterfly num="<<butterfly_num<<endl; hipHostFree(hashTable); return 0; }
c8f62af0f4ce0ff8ed717cd4be3051bae5684018.cu
#include <iostream> #include "graph.h" #include "wtime.h" #include "util.h" #include <cub/cub.cuh> #include <fstream> #include<vector> #include <cub/util_type.cuh> #include "countingAlgorithm/partitionRecCounting.cuh" #define blocknumber 128 #define blocksize 1024 #define hash_blocksize 1024 using namespace std; int FR(graph* G,int bound ) { cudaSetDeviceFlags (cudaDeviceMapHost);//启用zerocopy long long vertexCount=G->uCount+G->vCount; int *hashTable; float *time; unsigned long long butterfly_num=0; vector<int> par_vertex_id;//each partition.i start from par_vertex_id[i] //partition int par_id=0; long long par_sum=0; par_vertex_id.push_back(0); if(vertexCount>1) { par_sum=G->beginPos[1]-G->beginPos[0]; } else{ cout<<"ERROR: less than one vertex"<<endl; } for(int vertex=1;vertex<G->uCount+G->vCount;vertex++) { long long deg=G->beginPos[vertex+1]-G->beginPos[vertex]; if(par_sum+deg+1>bound)//是否考虑deg>bound { par_vertex_id.push_back(vertex); cout<<"push back vertex "<<vertex<<endl; par_sum=deg+1; } else { par_sum+=(deg+1); } } par_vertex_id.push_back(G->uCount+G->vCount); long long largest_par=0; for(int i=0;i<par_vertex_id.size()-1;i++) { if(par_vertex_id[i+1]-par_vertex_id[i]>largest_par) largest_par=par_vertex_id[i+1]-par_vertex_id[i]; } cudaHostAlloc((void**) &hashTable, largest_par*largest_par*sizeof(int), cudaHostAllocWriteCombined | cudaHostAllocMapped); memset(hashTable, 0, largest_par*largest_par*sizeof(int)); int *D_hashTable; cudaHostGetDevicePointer(&D_hashTable, hashTable, 0); //intra_partition //Memory Allocating and Data transferring for(int par_id=0;par_id<par_vertex_id.size()-1;par_id++) { //definition int* D_beginPos,*H_beginPos; int* D_edgeList; int vertex_n=par_vertex_id[par_id+1]-par_vertex_id[par_id]; int edge_n=G->beginPos[par_vertex_id[par_id+1]]-G->beginPos[par_vertex_id[par_id]]; //memory allocation HRR(cudaMalloc(&D_beginPos,sizeof(int)*vertex_n)); HRR(cudaMalloc(&D_edgeList,sizeof(int)*edge_n)); H_beginPos=new int[vertex_n]; //initial H_beginPos long long ->int for(int v=par_vertex_id[par_id];v<par_vertex_id[par_id+1];v++) { H_beginPos[v-par_vertex_id[par_id]]=G->beginPos[v]-G->beginPos[par_vertex_id[par_id]]; } HRR(cudaMemcpy(D_beginPos,H_beginPos,sizeof(int)*vertex_n,cudaMemcpyHostToDevice)); HRR(cudaMemcpy(D_edgeList,G->edgeList+G->beginPos[par_vertex_id[par_id]],sizeof(int)*edge_n,cudaMemcpyHostToDevice)); Intra_Partition_Counting<<<blocknumber,hash_blocksize>>>(D_beginPos,D_edgeList,G->uCount,G->vCount,D_hashTable,par_vertex_id[par_id],vertex_n,edge_n); HRR(cudaDeviceSynchronize()); HRR(cudaFree(D_beginPos)); HRR(cudaFree(D_edgeList)); for(int i=0;i<vertex_n*vertex_n;i++) { int ht=hashTable[i]; butterfly_num+=(ht*(ht-1)/2); } memset(hashTable, 0, largest_par*largest_par*sizeof(int)); } cout<<"intrra butterfly num="<<butterfly_num<<endl; //inter_partition for(int par_i=0;par_i<par_vertex_id.size()-1;par_i++) { for(int par_j=par_i+1;par_j<par_vertex_id.size()-1;par_j++) { printf("iiiii"); int *beginPos_i,*beginPos_j,*H_beginPos_i,*H_beginPos_j; int *edgeList_i,*edgeList_j; int vertex_n_i=par_vertex_id[par_i+1]-par_vertex_id[par_i]; int vertex_n_j=par_vertex_id[par_j+1]-par_vertex_id[par_j]; int edge_n_i=G->beginPos[par_vertex_id[par_i+1]]-G->beginPos[par_vertex_id[par_i]]; int edge_n_j=G->beginPos[par_vertex_id[par_j+1]]-G->beginPos[par_vertex_id[par_j]]; HRR(cudaMalloc(&beginPos_i,sizeof(int)*vertex_n_i)); HRR(cudaMalloc(&beginPos_j,sizeof(int)*vertex_n_j)); HRR(cudaMalloc(&edgeList_i,sizeof(int)*edge_n_i)); HRR(cudaMalloc(&edgeList_j,sizeof(int)*edge_n_j)); 
H_beginPos_i=new int[vertex_n_i]; H_beginPos_j=new int[vertex_n_j]; for(int v=par_vertex_id[par_i];v<par_vertex_id[par_i+1];v++) { H_beginPos_i[v-par_vertex_id[par_i]]=G->beginPos[v]-G->beginPos[par_vertex_id[par_i]]; } for(int v=par_vertex_id[par_j];v<par_vertex_id[par_j+1];v++) { H_beginPos_j[v-par_vertex_id[par_i]]=G->beginPos[v]-G->beginPos[par_vertex_id[par_j]]; } HRR(cudaMemcpy(beginPos_i,H_beginPos_i,sizeof(int)*vertex_n_i,cudaMemcpyHostToDevice)); HRR(cudaMemcpy(edgeList_i,G->edgeList+G->beginPos[par_vertex_id[par_i]],sizeof(int)*edge_n_i,cudaMemcpyHostToDevice)); HRR(cudaMemcpy(beginPos_j,H_beginPos_j,sizeof(int)*vertex_n_j,cudaMemcpyHostToDevice)); HRR(cudaMemcpy(edgeList_j,G->edgeList+G->beginPos[par_vertex_id[par_j]],sizeof(int)*edge_n_j,cudaMemcpyHostToDevice)); printf("222"); Inter_Partition_Counting<<<blocknumber,hash_blocksize>>>(beginPos_i,beginPos_j,edgeList_i,edgeList_j,G->uCount,G->vCount,D_hashTable,par_vertex_id[par_i],par_vertex_id[par_j],vertex_n_i,vertex_n_j,edge_n_i,edge_n_j); HRR(cudaDeviceSynchronize()); HRR(cudaFree(beginPos_i)); HRR(cudaFree(edgeList_i)); HRR(cudaFree(beginPos_j)); HRR(cudaFree(edgeList_j)); for(int i=0;i<vertex_n_i*vertex_n_j;i++) { int ht=hashTable[i]; butterfly_num+=(ht*(ht-1)/2); } memset(hashTable, 0, largest_par*largest_par*sizeof(int)); } } // for(long long i=0;i<vertexCount*vertexCount;i++) // { // int ht=hashTable[i]; // butterfly_num+=(ht*(ht-1)/2); // } cout<<"total butterfly num="<<butterfly_num<<endl; cudaFreeHost(hashTable); return 0; }
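This pair is built around zero-copy: the hash table lives in pinned, mapped host memory and both kernels write to it through a device alias, so no explicit copy-back is needed. (The Chinese comments in the .cu row read roughly "enable zero-copy" and "whether to consider deg > bound".) The HIP side of that pattern, reduced to its essentials with placeholder sizes, is:

#include "hip/hip_runtime.h"
#include <cstring>

int main() {
  // Allow kernels to dereference pointers into mapped host memory.
  hipSetDeviceFlags(hipDeviceMapHost);

  int *hashTable = nullptr;
  size_t bytes = 1024 * sizeof(int);
  // Pinned, write-combined, mapped host allocation (cudaHostAlloc in the .cu row).
  hipHostMalloc((void**)&hashTable, bytes,
                hipHostMallocWriteCombined | hipHostMallocMapped);
  std::memset(hashTable, 0, bytes);

  // Device-side alias of the same allocation, usable as a kernel argument.
  int *d_hashTable = nullptr;
  hipHostGetDevicePointer((void**)&d_hashTable, hashTable, 0);

  hipHostFree(hashTable);
  return 0;
}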
c39e2ec9597b267ee70a626896980eb67b97c4ff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/operators/channel_shuffle_op.h" #include <array> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" namespace caffe2 { template <typename T, bool kNFirst> __global__ void ChannelShuffleNCHWKernel( const int G, const int K, const int HxW, const T* X, T* Y) { const int C = G * K; const int n = kNFirst ? blockIdx.x : blockIdx.y; const int s = kNFirst ? blockIdx.y : blockIdx.x; const int g = blockIdx.z % G; const int k = blockIdx.z / G; const int offset = s * CAFFE_CUDA_NUM_THREADS + threadIdx.x; if (offset < HxW) { #if __CUDA_ARCH__ >= 350 Y[(n * C + blockIdx.z) * HxW + offset] = __ldg(X + (n * C + g * K + k) * HxW + offset); #else Y[(n * C + blockIdx.z) * HxW + offset] = X[(n * C + g * K + k) * HxW + offset]; #endif } } template <typename T, int kSharedSize> __global__ void ChannelShuffleNHWCKernel(const int G, const int K, const float* X, float* Y) { __shared__ T sdata[kSharedSize]; const int C = G * K; const int offset = blockIdx.x * C; for (int i = threadIdx.x; i < C; i += blockDim.x) { #if __CUDA_ARCH__ >= 350 sdata[i] = __ldg(X + offset + i); #else sdata[i] = X[offset + i]; #endif } __syncthreads(); for (int i = threadIdx.x; i < C; i += blockDim.x) { const int g = i % G; const int k = i / G; Y[offset + i] = sdata[g * K + k]; } } template <> bool ChannelShuffleOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() { const auto& X = Input(0); auto* Y = Output(0, X.sizes(), at::dtype<float>()); const int N = X.dim32(0); const int C = X.dim32(1); const int G = this->group_; CAFFE_ENFORCE_EQ(C % G, 0); if (X.numel() == 0) { return true; } const int K = C / G; const int HxW = X.numel() / (N * C); const int S = (HxW + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS; const float* X_data = X.data<float>(); float* Y_data = Y->mutable_data<float>(); if (N <= kCUDAGridDimMaxY) { const dim3 dim_grid(S, N, C); hipLaunchKernelGGL(( ChannelShuffleNCHWKernel<float, false>) , dim3(dim_grid), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), G, K, HxW, X_data, Y_data); } else { const dim3 dim_grid(N, S, C); hipLaunchKernelGGL(( ChannelShuffleNCHWKernel<float, true>) , dim3(dim_grid), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), G, K, HxW, X_data, Y_data); } return true; } template <> bool ChannelShuffleOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() { const auto& X = Input(0); auto* Y = Output(0, X.sizes(), at::dtype<float>()); const int ndim = X.dim(); const int N = X.dim32(0); const int C = X.dim32(ndim - 1); const int G = this->group_; CAFFE_ENFORCE_EQ(C % G, 0); if (X.numel() == 0) { return true; } const int K = C / G; const int HxW = X.numel() / (N * C); const int outer_size = N * HxW; const float* X_data = X.data<float>(); float* Y_data = Y->mutable_data<float>(); if (C <= 32) { hipLaunchKernelGGL(( ChannelShuffleNHWCKernel<float, 32>) , dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), G, K, X_data, Y_data); } else if (C <= 128) { hipLaunchKernelGGL(( ChannelShuffleNHWCKernel<float, 128>) , dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), G, K, X_data, Y_data); } else if (C <= 512) { hipLaunchKernelGGL(( ChannelShuffleNHWCKernel<float, 512>) , dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), G, K, X_data, Y_data); } else { const std::array<std::int64_t, 3> dims = {N * HxW, G, K}; const std::array<std::int32_t, 3> axes = {0, 2, 1}; math::Transpose<std::int64_t, float, CUDAContext>( 
3, dims.data(), axes.data(), X_data, Y_data, &context_); } return true; } template <> bool ChannelShuffleGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() { const auto& dY = Input(0); auto* dX = Output(0, dY.sizes(), at::dtype<float>()); const int N = dY.dim32(0); const int C = dY.dim32(1); const int G = this->group_; CAFFE_ENFORCE_EQ(C % G, 0); if (dY.numel() == 0) { return true; } const int K = C / G; const int HxW = dY.numel() / (N * C); const int S = (HxW + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS; const float* dY_data = dY.data<float>(); float* dX_data = dX->mutable_data<float>(); if (N <= kCUDAGridDimMaxY) { const dim3 dim_grid(S, N, C); hipLaunchKernelGGL(( ChannelShuffleNCHWKernel<float, false>) , dim3(dim_grid), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), K, G, HxW, dY_data, dX_data); } else { const dim3 dim_grid(N, S, C); hipLaunchKernelGGL(( ChannelShuffleNCHWKernel<float, true>) , dim3(dim_grid), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), K, G, HxW, dY_data, dX_data); } return true; } template <> bool ChannelShuffleGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() { const auto& dY = Input(0); auto* dX = Output(0, dY.sizes(), at::dtype<float>()); const int ndim = dY.dim(); const int N = dY.dim32(0); const int C = dY.dim32(ndim - 1); const int G = this->group_; CAFFE_ENFORCE_EQ(C % G, 0); if (dY.numel() == 0) { return true; } const int K = C / G; const int HxW = dY.numel() / (N * C); const int outer_size = N * HxW; const float* dY_data = dY.data<float>(); float* dX_data = dX->mutable_data<float>(); if (C <= 32) { hipLaunchKernelGGL(( ChannelShuffleNHWCKernel<float, 32>) , dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), K, G, dY_data, dX_data); } else if (C <= 128) { hipLaunchKernelGGL(( ChannelShuffleNHWCKernel<float, 128>) , dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), K, G, dY_data, dX_data); } else if (C <= 512) { hipLaunchKernelGGL(( ChannelShuffleNHWCKernel<float, 512>) , dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), K, G, dY_data, dX_data); } else { const std::array<std::int64_t, 3> dims = {N * HxW, K, G}; const std::array<std::int32_t, 3> axes = {0, 2, 1}; math::Transpose<std::int64_t, float, CUDAContext>( 3, dims.data(), axes.data(), dY_data, dX_data, &context_); } return true; } REGISTER_CUDA_OPERATOR(ChannelShuffle, ChannelShuffleOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( ChannelShuffleGradient, ChannelShuffleGradientOp<float, CUDAContext>); } // namespace caffe2
c39e2ec9597b267ee70a626896980eb67b97c4ff.cu
#include "caffe2/operators/channel_shuffle_op.h" #include <array> #include "caffe2/core/context_gpu.h" #include "caffe2/utils/math.h" namespace caffe2 { template <typename T, bool kNFirst> __global__ void ChannelShuffleNCHWKernel( const int G, const int K, const int HxW, const T* X, T* Y) { const int C = G * K; const int n = kNFirst ? blockIdx.x : blockIdx.y; const int s = kNFirst ? blockIdx.y : blockIdx.x; const int g = blockIdx.z % G; const int k = blockIdx.z / G; const int offset = s * CAFFE_CUDA_NUM_THREADS + threadIdx.x; if (offset < HxW) { #if __CUDA_ARCH__ >= 350 Y[(n * C + blockIdx.z) * HxW + offset] = __ldg(X + (n * C + g * K + k) * HxW + offset); #else Y[(n * C + blockIdx.z) * HxW + offset] = X[(n * C + g * K + k) * HxW + offset]; #endif } } template <typename T, int kSharedSize> __global__ void ChannelShuffleNHWCKernel(const int G, const int K, const float* X, float* Y) { __shared__ T sdata[kSharedSize]; const int C = G * K; const int offset = blockIdx.x * C; for (int i = threadIdx.x; i < C; i += blockDim.x) { #if __CUDA_ARCH__ >= 350 sdata[i] = __ldg(X + offset + i); #else sdata[i] = X[offset + i]; #endif } __syncthreads(); for (int i = threadIdx.x; i < C; i += blockDim.x) { const int g = i % G; const int k = i / G; Y[offset + i] = sdata[g * K + k]; } } template <> bool ChannelShuffleOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() { const auto& X = Input(0); auto* Y = Output(0, X.sizes(), at::dtype<float>()); const int N = X.dim32(0); const int C = X.dim32(1); const int G = this->group_; CAFFE_ENFORCE_EQ(C % G, 0); if (X.numel() == 0) { return true; } const int K = C / G; const int HxW = X.numel() / (N * C); const int S = (HxW + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS; const float* X_data = X.data<float>(); float* Y_data = Y->mutable_data<float>(); if (N <= kCUDAGridDimMaxY) { const dim3 dim_grid(S, N, C); ChannelShuffleNCHWKernel<float, false> <<<dim_grid, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( G, K, HxW, X_data, Y_data); } else { const dim3 dim_grid(N, S, C); ChannelShuffleNCHWKernel<float, true> <<<dim_grid, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( G, K, HxW, X_data, Y_data); } return true; } template <> bool ChannelShuffleOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() { const auto& X = Input(0); auto* Y = Output(0, X.sizes(), at::dtype<float>()); const int ndim = X.dim(); const int N = X.dim32(0); const int C = X.dim32(ndim - 1); const int G = this->group_; CAFFE_ENFORCE_EQ(C % G, 0); if (X.numel() == 0) { return true; } const int K = C / G; const int HxW = X.numel() / (N * C); const int outer_size = N * HxW; const float* X_data = X.data<float>(); float* Y_data = Y->mutable_data<float>(); if (C <= 32) { ChannelShuffleNHWCKernel<float, 32> <<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( G, K, X_data, Y_data); } else if (C <= 128) { ChannelShuffleNHWCKernel<float, 128> <<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( G, K, X_data, Y_data); } else if (C <= 512) { ChannelShuffleNHWCKernel<float, 512> <<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( G, K, X_data, Y_data); } else { const std::array<std::int64_t, 3> dims = {N * HxW, G, K}; const std::array<std::int32_t, 3> axes = {0, 2, 1}; math::Transpose<std::int64_t, float, CUDAContext>( 3, dims.data(), axes.data(), X_data, Y_data, &context_); } return true; } template <> bool ChannelShuffleGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNCHW() { const auto& dY = Input(0); auto* dX = Output(0, dY.sizes(), 
at::dtype<float>()); const int N = dY.dim32(0); const int C = dY.dim32(1); const int G = this->group_; CAFFE_ENFORCE_EQ(C % G, 0); if (dY.numel() == 0) { return true; } const int K = C / G; const int HxW = dY.numel() / (N * C); const int S = (HxW + CAFFE_CUDA_NUM_THREADS - 1) / CAFFE_CUDA_NUM_THREADS; const float* dY_data = dY.data<float>(); float* dX_data = dX->mutable_data<float>(); if (N <= kCUDAGridDimMaxY) { const dim3 dim_grid(S, N, C); ChannelShuffleNCHWKernel<float, false> <<<dim_grid, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( K, G, HxW, dY_data, dX_data); } else { const dim3 dim_grid(N, S, C); ChannelShuffleNCHWKernel<float, true> <<<dim_grid, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( K, G, HxW, dY_data, dX_data); } return true; } template <> bool ChannelShuffleGradientOp<float, CUDAContext>::RunOnDeviceWithOrderNHWC() { const auto& dY = Input(0); auto* dX = Output(0, dY.sizes(), at::dtype<float>()); const int ndim = dY.dim(); const int N = dY.dim32(0); const int C = dY.dim32(ndim - 1); const int G = this->group_; CAFFE_ENFORCE_EQ(C % G, 0); if (dY.numel() == 0) { return true; } const int K = C / G; const int HxW = dY.numel() / (N * C); const int outer_size = N * HxW; const float* dY_data = dY.data<float>(); float* dX_data = dX->mutable_data<float>(); if (C <= 32) { ChannelShuffleNHWCKernel<float, 32> <<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( K, G, dY_data, dX_data); } else if (C <= 128) { ChannelShuffleNHWCKernel<float, 128> <<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( K, G, dY_data, dX_data); } else if (C <= 512) { ChannelShuffleNHWCKernel<float, 512> <<<outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( K, G, dY_data, dX_data); } else { const std::array<std::int64_t, 3> dims = {N * HxW, K, G}; const std::array<std::int32_t, 3> axes = {0, 2, 1}; math::Transpose<std::int64_t, float, CUDAContext>( 3, dims.data(), axes.data(), dY_data, dX_data, &context_); } return true; } REGISTER_CUDA_OPERATOR(ChannelShuffle, ChannelShuffleOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( ChannelShuffleGradient, ChannelShuffleGradientOp<float, CUDAContext>); } // namespace caffe2
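The Caffe2 pair adds two wrinkles to the launch rewrite: the kernels run on an explicit stream (context_.cuda_stream()), and they are template instantiations, so hipify wraps the kernel name in an extra pair of parentheses to keep the comma in <float, 32> from splitting the macro arguments. A stripped-down sketch of the same shape, with a hypothetical `fill` kernel on the default stream:

#include "hip/hip_runtime.h"

template <typename T, int kBlock>
__global__ void fill(T* out, T value) {
  out[blockIdx.x * kBlock + threadIdx.x] = value;
}

int main() {
  float* out;
  hipMalloc((void**)&out, 4 * 128 * sizeof(float));
  // CUDA:  fill<float, 128><<<4, 128, 0, stream>>>(out, 1.0f);
  // HIP:   the parenthesized template instantiation keeps the macro happy.
  hipLaunchKernelGGL((fill<float, 128>), dim3(4), dim3(128), 0, 0, out, 1.0f);
  hipDeviceSynchronize();
  hipFree(out);
  return 0;
}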
18bfe2a8bf32e6a83382739bf12a0ccbf4d066c5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" GLuint positionsVBO; struct cudaGraphicsResource* positionsVBO_CUDA; int main() { // Initialize OpenGL and GLUT for device 0 // and make the OpenGL context current ... glutDisplayFunc(display); // Explicitly set device 0 hipGLSetGLDevice(0); // Create buffer object and register it with CUDA glGenBuffers(1, positionsVBO); glBindBuffer(GL_ARRAY_BUFFER, &positionsVBO); unsigned int size = width * height * 4 * sizeof(float); glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); hipGraphicsGLRegisterBuffer(&positionsVBO_CUDA, positionsVBO, hipGraphicsMapFlagsWriteDiscard); // Launch rendering loop glutMainLoop(); ... } void display() { // Map buffer object for writing from CUDA float4* positions; hipGraphicsMapResources(1, &positionsVBO_CUDA, 0); size_t num_bytes; hipGraphicsResourceGetMappedPointer((void**)&positions, &num_bytes, positionsVBO_CUDA)); // Execute kernel dim3 dimBlock(16, 16, 1); dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1); hipLaunchKernelGGL(( createVertices), dim3(dimGrid), dim3(dimBlock), 0, 0, positions, time, width, height); // Unmap buffer object hipGraphicsUnmapResources(1, &positionsVBO_CUDA, 0); // Render from buffer object glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glBindBuffer(GL_ARRAY_BUFFER, positionsVBO); glVertexPointer(4, GL_FLOAT, 0, 0); glEnableClientState(GL_VERTEX_ARRAY); glDrawArrays(GL_POINTS, 0, width * height); glDisableClientState(GL_VERTEX_ARRAY); // Swap buffers glutSwapBuffers(); glutPostRedisplay(); } void deleteVBO() { hipGraphicsUnregisterResource(positionsVBO_CUDA); glDeleteBuffers(1, &positionsVBO); } __global__ void createVertices(float4* positions, float time, unsigned int width, unsigned int height) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; // Calculate uv coordinates float u = x / (float)width; float v = y / (float)height; u = u * 2.0f - 1.0f; v = v * 2.0f - 1.0f; // calculate simple sine wave pattern float freq = 4.0f; float w = sinf(u * freq + time) * cosf(v * freq + time) * 0.5f; // Write positions positions[y * width + x] = make_float4(u, w, v, 1.0f); }
18bfe2a8bf32e6a83382739bf12a0ccbf4d066c5.cu
GLuint positionsVBO; struct cudaGraphicsResource* positionsVBO_CUDA; int main() { // Initialize OpenGL and GLUT for device 0 // and make the OpenGL context current ... glutDisplayFunc(display); // Explicitly set device 0 cudaGLSetGLDevice(0); // Create buffer object and register it with CUDA glGenBuffers(1, positionsVBO); glBindBuffer(GL_ARRAY_BUFFER, &positionsVBO); unsigned int size = width * height * 4 * sizeof(float); glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW); glBindBuffer(GL_ARRAY_BUFFER, 0); cudaGraphicsGLRegisterBuffer(&positionsVBO_CUDA, positionsVBO, cudaGraphicsMapFlagsWriteDiscard); // Launch rendering loop glutMainLoop(); ... } void display() { // Map buffer object for writing from CUDA float4* positions; cudaGraphicsMapResources(1, &positionsVBO_CUDA, 0); size_t num_bytes; cudaGraphicsResourceGetMappedPointer((void**)&positions, &num_bytes, positionsVBO_CUDA)); // Execute kernel dim3 dimBlock(16, 16, 1); dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1); createVertices<<<dimGrid, dimBlock>>>(positions, time, width, height); // Unmap buffer object cudaGraphicsUnmapResources(1, &positionsVBO_CUDA, 0); // Render from buffer object glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); glBindBuffer(GL_ARRAY_BUFFER, positionsVBO); glVertexPointer(4, GL_FLOAT, 0, 0); glEnableClientState(GL_VERTEX_ARRAY); glDrawArrays(GL_POINTS, 0, width * height); glDisableClientState(GL_VERTEX_ARRAY); // Swap buffers glutSwapBuffers(); glutPostRedisplay(); } void deleteVBO() { cudaGraphicsUnregisterResource(positionsVBO_CUDA); glDeleteBuffers(1, &positionsVBO); } __global__ void createVertices(float4* positions, float time, unsigned int width, unsigned int height) { unsigned int x = blockIdx.x * blockDim.x + threadIdx.x; unsigned int y = blockIdx.y * blockDim.y + threadIdx.y; // Calculate uv coordinates float u = x / (float)width; float v = y / (float)height; u = u * 2.0f - 1.0f; v = v * 2.0f - 1.0f; // calculate simple sine wave pattern float freq = 4.0f; float w = sinf(u * freq + time) * cosf(v * freq + time) * 0.5f; // Write positions positions[y * width + x] = make_float4(u, w, v, 1.0f); }
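This pair reproduces the OpenGL-interop example from the CUDA Programming Guide, including its long-standing typos: glGenBuffers needs the address of the handle, glBindBuffer takes the handle by value, and the cudaGraphicsResourceGetMappedPointer line carries a stray closing parenthesis. A corrected sketch of just the create-and-register step, written against the .cu row and assuming GLEW for the buffer-object entry points:

#include <GL/glew.h>
#include <cuda_gl_interop.h>

GLuint positionsVBO;
struct cudaGraphicsResource* positionsVBO_CUDA;

// Corrected create-and-register step (compare with the rows above):
// glGenBuffers writes into the handle, glBindBuffer takes the handle by
// value, and the register call keeps the same flags as the original sample.
void createVBO(unsigned int width, unsigned int height) {
  glGenBuffers(1, &positionsVBO);
  glBindBuffer(GL_ARRAY_BUFFER, positionsVBO);
  unsigned int size = width * height * 4 * sizeof(float);
  glBufferData(GL_ARRAY_BUFFER, size, 0, GL_DYNAMIC_DRAW);
  glBindBuffer(GL_ARRAY_BUFFER, 0);
  cudaGraphicsGLRegisterBuffer(&positionsVBO_CUDA, positionsVBO,
                               cudaGraphicsMapFlagsWriteDiscard);
}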
62d7ad4007bd5393b364973a5e75ad4bf7162051.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>

__global__ void add(int *v1, int *v2, int *sol) { *sol = *v1 + *v2; }

int main(void) {
  int v1, v2, sol;           //Host copies of the values
  int *d_v1, *d_v2, *d_sol;  //int vector for v1,v2,v3 in the device
  int size = sizeof(int);

  //Allocating space in the device
  hipMalloc((void **)&d_v1, size);
  hipMalloc((void **)&d_v2, size);
  hipMalloc((void **)&d_sol, size);

  //setup input
  v1 = 11;
  v2 = 13;

  //Input values to device
  hipMemcpy(d_v1, &v1, size, hipMemcpyHostToDevice);
  hipMemcpy(d_v2, &v2, size, hipMemcpyHostToDevice);
  hipMemcpy(d_sol, &sol, size, hipMemcpyHostToDevice);

  //Lauch add() in device
  add << <1, 1 >> >(d_v1, d_v2, d_sol);

  //Copy result from device to host
  hipMemcpy(&sol, d_sol, size, hipMemcpyDeviceToHost);

  //Cleanup
  printf(" Hello %d \n", sol);
  hipFree(d_v1);
  hipFree(d_v2);
  hipFree(d_sol);
  return 0;
}
62d7ad4007bd5393b364973a5e75ad4bf7162051.cu
#include <stdio.h>

__global__ void add(int *v1, int *v2, int *sol) { *sol = *v1 + *v2; }

int main(void) {
  int v1, v2, sol;           //Host copies of the values
  int *d_v1, *d_v2, *d_sol;  //int vector for v1,v2,v3 in the device
  int size = sizeof(int);

  //Allocating space in the device
  cudaMalloc((void **)&d_v1, size);
  cudaMalloc((void **)&d_v2, size);
  cudaMalloc((void **)&d_sol, size);

  //setup input
  v1 = 11;
  v2 = 13;

  //Input values to device
  cudaMemcpy(d_v1, &v1, size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_v2, &v2, size, cudaMemcpyHostToDevice);
  cudaMemcpy(d_sol, &sol, size, cudaMemcpyHostToDevice);

  //Lauch add() in device
  add << <1, 1 >> >(d_v1, d_v2, d_sol);

  //Copy result from device to host
  cudaMemcpy(&sol, d_sol, size, cudaMemcpyDeviceToHost);

  //Cleanup
  printf(" Hello %d \n", sol);
  cudaFree(d_v1);
  cudaFree(d_v2);
  cudaFree(d_sol);
  return 0;
}
9b5dc0a9c5236c88f10bb07c6a5b943664dfa1f8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/detail/utilities/device_operators.cuh> #include <cudf/strings/udf/case.cuh> #include <cudf/strings/udf/char_types.cuh> #include <cudf/strings/udf/replace.cuh> #include <cudf/strings/udf/search.cuh> #include <cudf/strings/udf/starts_with.cuh> #include <cudf/strings/udf/strip.cuh> #include <cudf/strings/udf/udf_string.cuh> #include <cuda/atomic> #include <hip/hip_cooperative_groups.h> #include <limits> #include <type_traits> using namespace cudf::strings::udf; extern "C" __device__ int len(int* nb_retval, void const* str) { auto sv = reinterpret_cast<cudf::string_view const*>(str); *nb_retval = sv->length(); return 0; } extern "C" __device__ int startswith(bool* nb_retval, void const* str, void const* substr) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto substr_view = reinterpret_cast<cudf::string_view const*>(substr); *nb_retval = starts_with(*str_view, *substr_view); return 0; } extern "C" __device__ int endswith(bool* nb_retval, void const* str, void const* substr) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto substr_view = reinterpret_cast<cudf::string_view const*>(substr); *nb_retval = ends_with(*str_view, *substr_view); return 0; } extern "C" __device__ int contains(bool* nb_retval, void const* str, void const* substr) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto substr_view = reinterpret_cast<cudf::string_view const*>(substr); *nb_retval = (str_view->find(*substr_view) != cudf::string_view::npos); return 0; } extern "C" __device__ int find(int* nb_retval, void const* str, void const* substr) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto substr_view = reinterpret_cast<cudf::string_view const*>(substr); *nb_retval = str_view->find(*substr_view); return 0; } extern "C" __device__ int rfind(int* nb_retval, void const* str, void const* substr) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto substr_view = reinterpret_cast<cudf::string_view const*>(substr); *nb_retval = str_view->rfind(*substr_view); return 0; } extern "C" __device__ int eq(bool* nb_retval, void const* str, void const* rhs) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto rhs_view = reinterpret_cast<cudf::string_view const*>(rhs); *nb_retval = (*str_view == *rhs_view); return 0; } extern "C" __device__ int ne(bool* nb_retval, void const* str, void const* rhs) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto rhs_view = reinterpret_cast<cudf::string_view const*>(rhs); *nb_retval = (*str_view != *rhs_view); return 0; } extern "C" __device__ int ge(bool* nb_retval, void const* str, void const* rhs) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto rhs_view = reinterpret_cast<cudf::string_view const*>(rhs); *nb_retval = (*str_view >= *rhs_view); return 
0; } extern "C" __device__ int le(bool* nb_retval, void const* str, void const* rhs) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto rhs_view = reinterpret_cast<cudf::string_view const*>(rhs); *nb_retval = (*str_view <= *rhs_view); return 0; } extern "C" __device__ int gt(bool* nb_retval, void const* str, void const* rhs) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto rhs_view = reinterpret_cast<cudf::string_view const*>(rhs); *nb_retval = (*str_view > *rhs_view); return 0; } extern "C" __device__ int lt(bool* nb_retval, void const* str, void const* rhs) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto rhs_view = reinterpret_cast<cudf::string_view const*>(rhs); *nb_retval = (*str_view < *rhs_view); return 0; } extern "C" __device__ int pyislower(bool* nb_retval, void const* str, std::uintptr_t chars_table) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); *nb_retval = is_lower( reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(chars_table), *str_view); return 0; } extern "C" __device__ int pyisupper(bool* nb_retval, void const* str, std::uintptr_t chars_table) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); *nb_retval = is_upper( reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(chars_table), *str_view); return 0; } extern "C" __device__ int pyisspace(bool* nb_retval, void const* str, std::uintptr_t chars_table) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); *nb_retval = is_space( reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(chars_table), *str_view); return 0; } extern "C" __device__ int pyisdecimal(bool* nb_retval, void const* str, std::uintptr_t chars_table) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); *nb_retval = is_decimal( reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(chars_table), *str_view); return 0; } extern "C" __device__ int pyisnumeric(bool* nb_retval, void const* str, std::uintptr_t chars_table) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); *nb_retval = is_numeric( reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(chars_table), *str_view); return 0; } extern "C" __device__ int pyisdigit(bool* nb_retval, void const* str, std::uintptr_t chars_table) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); *nb_retval = is_digit( reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(chars_table), *str_view); return 0; } extern "C" __device__ int pyisalnum(bool* nb_retval, void const* str, std::uintptr_t chars_table) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); *nb_retval = is_alpha_numeric( reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(chars_table), *str_view); return 0; } extern "C" __device__ int pyisalpha(bool* nb_retval, void const* str, std::uintptr_t chars_table) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); *nb_retval = is_alpha( reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(chars_table), *str_view); return 0; } extern "C" __device__ int pyistitle(bool* nb_retval, void const* str, std::uintptr_t chars_table) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); *nb_retval = is_title( reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(chars_table), *str_view); return 0; } extern "C" __device__ int pycount(int* nb_retval, void const* str, void const* substr) { 
auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto substr_view = reinterpret_cast<cudf::string_view const*>(substr); *nb_retval = count(*str_view, *substr_view); return 0; } extern "C" __device__ int udf_string_from_string_view(int* nb_retbal, void const* str, void* udf_str) { auto str_view_ptr = reinterpret_cast<cudf::string_view const*>(str); auto udf_str_ptr = new (udf_str) udf_string; *udf_str_ptr = udf_string(*str_view_ptr); return 0; } extern "C" __device__ int string_view_from_udf_string(int* nb_retval, void const* udf_str, void* str) { auto udf_str_ptr = reinterpret_cast<udf_string const*>(udf_str); auto sv_ptr = new (str) cudf::string_view; *sv_ptr = cudf::string_view(*udf_str_ptr); return 0; } extern "C" __device__ int strip(int* nb_retval, void* udf_str, void* const* to_strip, void* const* strip_str) { auto to_strip_ptr = reinterpret_cast<cudf::string_view const*>(to_strip); auto strip_str_ptr = reinterpret_cast<cudf::string_view const*>(strip_str); auto udf_str_ptr = new (udf_str) udf_string; *udf_str_ptr = strip(*to_strip_ptr, *strip_str_ptr); return 0; } extern "C" __device__ int lstrip(int* nb_retval, void* udf_str, void* const* to_strip, void* const* strip_str) { auto to_strip_ptr = reinterpret_cast<cudf::string_view const*>(to_strip); auto strip_str_ptr = reinterpret_cast<cudf::string_view const*>(strip_str); auto udf_str_ptr = new (udf_str) udf_string; *udf_str_ptr = strip(*to_strip_ptr, *strip_str_ptr, cudf::strings::side_type::LEFT); return 0; } extern "C" __device__ int rstrip(int* nb_retval, void* udf_str, void* const* to_strip, void* const* strip_str) { auto to_strip_ptr = reinterpret_cast<cudf::string_view const*>(to_strip); auto strip_str_ptr = reinterpret_cast<cudf::string_view const*>(strip_str); auto udf_str_ptr = new (udf_str) udf_string; *udf_str_ptr = strip(*to_strip_ptr, *strip_str_ptr, cudf::strings::side_type::RIGHT); return 0; } extern "C" __device__ int upper(int* nb_retval, void* udf_str, void const* st, std::uintptr_t flags_table, std::uintptr_t cases_table, std::uintptr_t special_table) { auto udf_str_ptr = new (udf_str) udf_string; auto st_ptr = reinterpret_cast<cudf::string_view const*>(st); auto flags_table_ptr = reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(flags_table); auto cases_table_ptr = reinterpret_cast<cudf::strings::detail::character_cases_table_type*>(cases_table); auto special_table_ptr = reinterpret_cast<cudf::strings::detail::special_case_mapping*>(special_table); cudf::strings::udf::chars_tables tables{flags_table_ptr, cases_table_ptr, special_table_ptr}; *udf_str_ptr = to_upper(tables, *st_ptr); return 0; } extern "C" __device__ int lower(int* nb_retval, void* udf_str, void const* st, std::uintptr_t flags_table, std::uintptr_t cases_table, std::uintptr_t special_table) { auto udf_str_ptr = new (udf_str) udf_string; auto st_ptr = reinterpret_cast<cudf::string_view const*>(st); auto flags_table_ptr = reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(flags_table); auto cases_table_ptr = reinterpret_cast<cudf::strings::detail::character_cases_table_type*>(cases_table); auto special_table_ptr = reinterpret_cast<cudf::strings::detail::special_case_mapping*>(special_table); cudf::strings::udf::chars_tables tables{flags_table_ptr, cases_table_ptr, special_table_ptr}; *udf_str_ptr = to_lower(tables, *st_ptr); return 0; } extern "C" __device__ int concat(int* nb_retval, void* udf_str, void* const* lhs, void* const* rhs) { auto lhs_ptr = reinterpret_cast<cudf::string_view 
const*>(lhs); auto rhs_ptr = reinterpret_cast<cudf::string_view const*>(rhs); auto udf_str_ptr = new (udf_str) udf_string; udf_string result; result.append(*lhs_ptr).append(*rhs_ptr); *udf_str_ptr = result; return 0; } extern "C" __device__ int replace( int* nb_retval, void* udf_str, void* const src, void* const to_replace, void* const replacement) { auto src_ptr = reinterpret_cast<cudf::string_view const*>(src); auto to_replace_ptr = reinterpret_cast<cudf::string_view const*>(to_replace); auto replacement_ptr = reinterpret_cast<cudf::string_view const*>(replacement); auto udf_str_ptr = new (udf_str) udf_string; *udf_str_ptr = replace(*src_ptr, *to_replace_ptr, *replacement_ptr); return 0; } // Groupby Shim Functions template <typename T> __device__ bool are_all_nans(cooperative_groups::thread_block const& block, T const* data, int64_t size) { // TODO: to be refactored with CG vote functions once // block size is known at build time __shared__ int64_t count; if (block.thread_rank() == 0) { count = 0; } block.sync(); for (int64_t idx = block.thread_rank(); idx < size; idx += block.size()) { if (not std::isnan(data[idx])) { cuda::atomic_ref<int64_t, cuda::thread_scope_block> ref{count}; ref.fetch_add(1, cuda::std::memory_order_relaxed); break; } } block.sync(); return count == 0; } template <typename T, typename AccumT = std::conditional_t<std::is_integral_v<T>, int64_t, T>> __device__ AccumT device_sum(cooperative_groups::thread_block const& block, T const* data, int64_t size) { __shared__ AccumT block_sum; if (block.thread_rank() == 0) { block_sum = 0; } block.sync(); AccumT local_sum = 0; for (int64_t idx = block.thread_rank(); idx < size; idx += block.size()) { local_sum += static_cast<AccumT>(data[idx]); } cuda::atomic_ref<AccumT, cuda::thread_scope_block> ref{block_sum}; ref.fetch_add(local_sum, cuda::std::memory_order_relaxed); block.sync(); return block_sum; } template <typename T, typename AccumT = std::conditional_t<std::is_integral_v<T>, int64_t, T>> __device__ AccumT BlockSum(T const* data, int64_t size) { auto block = cooperative_groups::this_thread_block(); if constexpr (std::is_floating_point_v<T>) { if (are_all_nans(block, data, size)) { return 0; } } auto block_sum = device_sum<T>(block, data, size); return block_sum; } template <typename T> __device__ double BlockMean(T const* data, int64_t size) { auto block = cooperative_groups::this_thread_block(); auto block_sum = device_sum<T>(block, data, size); return static_cast<double>(block_sum) / static_cast<double>(size); } template <typename T> __device__ double BlockCoVar(T const* lhs, T const* rhs, int64_t size) { auto block = cooperative_groups::this_thread_block(); __shared__ double block_covar; if (block.thread_rank() == 0) { block_covar = 0; } block.sync(); auto block_sum_lhs = device_sum<T>(block, lhs, size); auto const mu_l = static_cast<double>(block_sum_lhs) / static_cast<double>(size); auto const mu_r = [=]() { if (lhs == rhs) { // If the lhs and rhs are the same, this is calculating variance. // Thus we can assume mu_r = mu_l. 
return mu_l; } else { auto block_sum_rhs = device_sum<T>(block, rhs, size); return static_cast<double>(block_sum_rhs) / static_cast<double>(size); } }(); double local_covar = 0; for (int64_t idx = block.thread_rank(); idx < size; idx += block.size()) { local_covar += (static_cast<double>(lhs[idx]) - mu_l) * (static_cast<double>(rhs[idx]) - mu_r); } cuda::atomic_ref<double, cuda::thread_scope_block> ref{block_covar}; ref.fetch_add(local_covar, cuda::std::memory_order_relaxed); block.sync(); if (block.thread_rank() == 0) { block_covar /= static_cast<double>(size - 1); } block.sync(); return block_covar; } template <typename T> __device__ double BlockVar(T const* data, int64_t size) { return BlockCoVar<T>(data, data, size); } template <typename T> __device__ double BlockStd(T const* data, int64_t size) { auto const var = BlockVar(data, size); return sqrt(var); } template <typename T> __device__ T BlockMax(T const* data, int64_t size) { auto block = cooperative_groups::this_thread_block(); if constexpr (std::is_floating_point_v<T>) { if (are_all_nans(block, data, size)) { return std::numeric_limits<T>::quiet_NaN(); } } auto local_max = cudf::DeviceMax::identity<T>(); __shared__ T block_max; if (block.thread_rank() == 0) { block_max = local_max; } block.sync(); for (int64_t idx = block.thread_rank(); idx < size; idx += block.size()) { local_max = max(local_max, data[idx]); } cuda::atomic_ref<T, cuda::thread_scope_block> ref{block_max}; ref.fetch_max(local_max, cuda::std::memory_order_relaxed); block.sync(); return block_max; } template <typename T> __device__ T BlockMin(T const* data, int64_t size) { auto block = cooperative_groups::this_thread_block(); if constexpr (std::is_floating_point_v<T>) { if (are_all_nans(block, data, size)) { return std::numeric_limits<T>::quiet_NaN(); } } auto local_min = cudf::DeviceMin::identity<T>(); __shared__ T block_min; if (block.thread_rank() == 0) { block_min = local_min; } block.sync(); for (int64_t idx = block.thread_rank(); idx < size; idx += block.size()) { local_min = min(local_min, data[idx]); } cuda::atomic_ref<T, cuda::thread_scope_block> ref{block_min}; ref.fetch_min(local_min, cuda::std::memory_order_relaxed); block.sync(); return block_min; } template <typename T> __device__ int64_t BlockIdxMax(T const* data, int64_t* index, int64_t size) { auto block = cooperative_groups::this_thread_block(); __shared__ T block_max; __shared__ int64_t block_idx_max; __shared__ bool found_max; auto local_max = cudf::DeviceMax::identity<T>(); auto local_idx_max = cudf::DeviceMin::identity<int64_t>(); if (block.thread_rank() == 0) { block_max = local_max; block_idx_max = local_idx_max; found_max = false; } block.sync(); for (int64_t idx = block.thread_rank(); idx < size; idx += block.size()) { auto const current_data = data[idx]; if (current_data > local_max) { local_max = current_data; local_idx_max = index[idx]; found_max = true; } } cuda::atomic_ref<T, cuda::thread_scope_block> ref{block_max}; ref.fetch_max(local_max, cuda::std::memory_order_relaxed); block.sync(); if (found_max) { if (local_max == block_max) { cuda::atomic_ref<int64_t, cuda::thread_scope_block> ref_idx{block_idx_max}; ref_idx.fetch_min(local_idx_max, cuda::std::memory_order_relaxed); } } else { if (block.thread_rank() == 0) { block_idx_max = index[0]; } } block.sync(); return block_idx_max; } template <typename T> __device__ int64_t BlockIdxMin(T const* data, int64_t* index, int64_t size) { auto block = cooperative_groups::this_thread_block(); __shared__ T block_min; __shared__ int64_t 
block_idx_min; __shared__ bool found_min; auto local_min = cudf::DeviceMin::identity<T>(); auto local_idx_min = cudf::DeviceMin::identity<int64_t>(); if (block.thread_rank() == 0) { block_min = local_min; block_idx_min = local_idx_min; found_min = false; } block.sync(); for (int64_t idx = block.thread_rank(); idx < size; idx += block.size()) { auto const current_data = data[idx]; if (current_data < local_min) { local_min = current_data; local_idx_min = index[idx]; found_min = true; } } cuda::atomic_ref<T, cuda::thread_scope_block> ref{block_min}; ref.fetch_min(local_min, cuda::std::memory_order_relaxed); block.sync(); if (found_min) { if (local_min == block_min) { cuda::atomic_ref<int64_t, cuda::thread_scope_block> ref_idx{block_idx_min}; ref_idx.fetch_min(local_idx_min, cuda::std::memory_order_relaxed); } } else { if (block.thread_rank() == 0) { block_idx_min = index[0]; } } block.sync(); return block_idx_min; } template <typename T> __device__ double BlockCorr(T* const lhs_ptr, T* const rhs_ptr, int64_t size) { auto numerator = BlockCoVar(lhs_ptr, rhs_ptr, size); auto denominator = BlockStd(lhs_ptr, size) * BlockStd<T>(rhs_ptr, size); if (denominator == 0.0) { return std::numeric_limits<double>::quiet_NaN(); } else { return numerator / denominator; } } extern "C" { #define make_definition(name, cname, type, return_type) \ __device__ int name##_##cname(return_type* numba_return_value, type* const data, int64_t size) \ { \ return_type const res = name<type>(data, size); \ *numba_return_value = res; \ __syncthreads(); \ return 0; \ } make_definition(BlockSum, int32, int32_t, int64_t); make_definition(BlockSum, int64, int64_t, int64_t); make_definition(BlockSum, float32, float, float); make_definition(BlockSum, float64, double, double); make_definition(BlockMean, int32, int32_t, double); make_definition(BlockMean, int64, int64_t, double); make_definition(BlockMean, float32, float, float); make_definition(BlockMean, float64, double, double); make_definition(BlockStd, int32, int32_t, double); make_definition(BlockStd, int64, int64_t, double); make_definition(BlockStd, float32, float, float); make_definition(BlockStd, float64, double, double); make_definition(BlockVar, int64, int64_t, double); make_definition(BlockVar, int32, int32_t, double); make_definition(BlockVar, float32, float, float); make_definition(BlockVar, float64, double, double); make_definition(BlockMin, int32, int32_t, int32_t); make_definition(BlockMin, int64, int64_t, int64_t); make_definition(BlockMin, float32, float, float); make_definition(BlockMin, float64, double, double); make_definition(BlockMax, int32, int32_t, int32_t); make_definition(BlockMax, int64, int64_t, int64_t); make_definition(BlockMax, float32, float, float); make_definition(BlockMax, float64, double, double); #undef make_definition } extern "C" { #define make_definition_idx(name, cname, type) \ __device__ int name##_##cname( \ int64_t* numba_return_value, type* const data, int64_t* index, int64_t size) \ { \ auto const res = name<type>(data, index, size); \ *numba_return_value = res; \ __syncthreads(); \ return 0; \ } make_definition_idx(BlockIdxMin, int32, int32_t); make_definition_idx(BlockIdxMin, int64, int64_t); make_definition_idx(BlockIdxMin, float32, float); make_definition_idx(BlockIdxMin, float64, double); make_definition_idx(BlockIdxMax, int32, int32_t); make_definition_idx(BlockIdxMax, int64, int64_t); make_definition_idx(BlockIdxMax, float32, float); make_definition_idx(BlockIdxMax, float64, double); #undef make_definition_idx } extern "C" { 
#define make_definition_corr(name, cname, type) \ __device__ int name##_##cname##_##cname( \ double* numba_return_value, type* const lhs, type* const rhs, int64_t size) \ { \ double const res = name<type>(lhs, rhs, size); \ *numba_return_value = res; \ __syncthreads(); \ return 0; \ } make_definition_corr(BlockCorr, int32, int32_t); make_definition_corr(BlockCorr, int64, int64_t); #undef make_definition_corr }
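The string shims above all follow one calling convention: an extern "C" __device__ entry point that writes its result through a Numba-provided out-parameter, reinterprets the opaque void const* arguments as cudf::string_view, and returns 0 as a status code. As an illustration only, here is a hypothetical extra shim written in the same style; the function name and its empty-string special case are my own and are not part of the library, and it assumes the same cudf headers already included in the file above.

extern "C" __device__ int hypothetical_eq_or_both_empty(bool* nb_retval,
                                                        void const* str,
                                                        void const* rhs)
{
  // Same convention as the shims above: cast the opaque pointers back to
  // cudf::string_view, write the boolean result through nb_retval, return 0.
  auto str_view = reinterpret_cast<cudf::string_view const*>(str);
  auto rhs_view = reinterpret_cast<cudf::string_view const*>(rhs);
  // Hypothetical extra rule, for illustration only: two empty strings compare
  // equal without touching their (non-existent) character data.
  *nb_retval = (str_view->empty() && rhs_view->empty()) || (*str_view == *rhs_view);
  return 0;
}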
9b5dc0a9c5236c88f10bb07c6a5b943664dfa1f8.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/detail/utilities/device_operators.cuh> #include <cudf/strings/udf/case.cuh> #include <cudf/strings/udf/char_types.cuh> #include <cudf/strings/udf/replace.cuh> #include <cudf/strings/udf/search.cuh> #include <cudf/strings/udf/starts_with.cuh> #include <cudf/strings/udf/strip.cuh> #include <cudf/strings/udf/udf_string.cuh> #include <cuda/atomic> #include <cooperative_groups.h> #include <limits> #include <type_traits> using namespace cudf::strings::udf; extern "C" __device__ int len(int* nb_retval, void const* str) { auto sv = reinterpret_cast<cudf::string_view const*>(str); *nb_retval = sv->length(); return 0; } extern "C" __device__ int startswith(bool* nb_retval, void const* str, void const* substr) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto substr_view = reinterpret_cast<cudf::string_view const*>(substr); *nb_retval = starts_with(*str_view, *substr_view); return 0; } extern "C" __device__ int endswith(bool* nb_retval, void const* str, void const* substr) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto substr_view = reinterpret_cast<cudf::string_view const*>(substr); *nb_retval = ends_with(*str_view, *substr_view); return 0; } extern "C" __device__ int contains(bool* nb_retval, void const* str, void const* substr) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto substr_view = reinterpret_cast<cudf::string_view const*>(substr); *nb_retval = (str_view->find(*substr_view) != cudf::string_view::npos); return 0; } extern "C" __device__ int find(int* nb_retval, void const* str, void const* substr) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto substr_view = reinterpret_cast<cudf::string_view const*>(substr); *nb_retval = str_view->find(*substr_view); return 0; } extern "C" __device__ int rfind(int* nb_retval, void const* str, void const* substr) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto substr_view = reinterpret_cast<cudf::string_view const*>(substr); *nb_retval = str_view->rfind(*substr_view); return 0; } extern "C" __device__ int eq(bool* nb_retval, void const* str, void const* rhs) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto rhs_view = reinterpret_cast<cudf::string_view const*>(rhs); *nb_retval = (*str_view == *rhs_view); return 0; } extern "C" __device__ int ne(bool* nb_retval, void const* str, void const* rhs) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto rhs_view = reinterpret_cast<cudf::string_view const*>(rhs); *nb_retval = (*str_view != *rhs_view); return 0; } extern "C" __device__ int ge(bool* nb_retval, void const* str, void const* rhs) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto rhs_view = reinterpret_cast<cudf::string_view const*>(rhs); *nb_retval = (*str_view >= *rhs_view); return 0; } extern "C" __device__ int le(bool* nb_retval, void const* str, void const* rhs) { auto 
str_view = reinterpret_cast<cudf::string_view const*>(str); auto rhs_view = reinterpret_cast<cudf::string_view const*>(rhs); *nb_retval = (*str_view <= *rhs_view); return 0; } extern "C" __device__ int gt(bool* nb_retval, void const* str, void const* rhs) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto rhs_view = reinterpret_cast<cudf::string_view const*>(rhs); *nb_retval = (*str_view > *rhs_view); return 0; } extern "C" __device__ int lt(bool* nb_retval, void const* str, void const* rhs) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto rhs_view = reinterpret_cast<cudf::string_view const*>(rhs); *nb_retval = (*str_view < *rhs_view); return 0; } extern "C" __device__ int pyislower(bool* nb_retval, void const* str, std::uintptr_t chars_table) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); *nb_retval = is_lower( reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(chars_table), *str_view); return 0; } extern "C" __device__ int pyisupper(bool* nb_retval, void const* str, std::uintptr_t chars_table) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); *nb_retval = is_upper( reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(chars_table), *str_view); return 0; } extern "C" __device__ int pyisspace(bool* nb_retval, void const* str, std::uintptr_t chars_table) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); *nb_retval = is_space( reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(chars_table), *str_view); return 0; } extern "C" __device__ int pyisdecimal(bool* nb_retval, void const* str, std::uintptr_t chars_table) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); *nb_retval = is_decimal( reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(chars_table), *str_view); return 0; } extern "C" __device__ int pyisnumeric(bool* nb_retval, void const* str, std::uintptr_t chars_table) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); *nb_retval = is_numeric( reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(chars_table), *str_view); return 0; } extern "C" __device__ int pyisdigit(bool* nb_retval, void const* str, std::uintptr_t chars_table) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); *nb_retval = is_digit( reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(chars_table), *str_view); return 0; } extern "C" __device__ int pyisalnum(bool* nb_retval, void const* str, std::uintptr_t chars_table) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); *nb_retval = is_alpha_numeric( reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(chars_table), *str_view); return 0; } extern "C" __device__ int pyisalpha(bool* nb_retval, void const* str, std::uintptr_t chars_table) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); *nb_retval = is_alpha( reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(chars_table), *str_view); return 0; } extern "C" __device__ int pyistitle(bool* nb_retval, void const* str, std::uintptr_t chars_table) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); *nb_retval = is_title( reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(chars_table), *str_view); return 0; } extern "C" __device__ int pycount(int* nb_retval, void const* str, void const* substr) { auto str_view = reinterpret_cast<cudf::string_view const*>(str); auto substr_view = 
reinterpret_cast<cudf::string_view const*>(substr); *nb_retval = count(*str_view, *substr_view); return 0; } extern "C" __device__ int udf_string_from_string_view(int* nb_retbal, void const* str, void* udf_str) { auto str_view_ptr = reinterpret_cast<cudf::string_view const*>(str); auto udf_str_ptr = new (udf_str) udf_string; *udf_str_ptr = udf_string(*str_view_ptr); return 0; } extern "C" __device__ int string_view_from_udf_string(int* nb_retval, void const* udf_str, void* str) { auto udf_str_ptr = reinterpret_cast<udf_string const*>(udf_str); auto sv_ptr = new (str) cudf::string_view; *sv_ptr = cudf::string_view(*udf_str_ptr); return 0; } extern "C" __device__ int strip(int* nb_retval, void* udf_str, void* const* to_strip, void* const* strip_str) { auto to_strip_ptr = reinterpret_cast<cudf::string_view const*>(to_strip); auto strip_str_ptr = reinterpret_cast<cudf::string_view const*>(strip_str); auto udf_str_ptr = new (udf_str) udf_string; *udf_str_ptr = strip(*to_strip_ptr, *strip_str_ptr); return 0; } extern "C" __device__ int lstrip(int* nb_retval, void* udf_str, void* const* to_strip, void* const* strip_str) { auto to_strip_ptr = reinterpret_cast<cudf::string_view const*>(to_strip); auto strip_str_ptr = reinterpret_cast<cudf::string_view const*>(strip_str); auto udf_str_ptr = new (udf_str) udf_string; *udf_str_ptr = strip(*to_strip_ptr, *strip_str_ptr, cudf::strings::side_type::LEFT); return 0; } extern "C" __device__ int rstrip(int* nb_retval, void* udf_str, void* const* to_strip, void* const* strip_str) { auto to_strip_ptr = reinterpret_cast<cudf::string_view const*>(to_strip); auto strip_str_ptr = reinterpret_cast<cudf::string_view const*>(strip_str); auto udf_str_ptr = new (udf_str) udf_string; *udf_str_ptr = strip(*to_strip_ptr, *strip_str_ptr, cudf::strings::side_type::RIGHT); return 0; } extern "C" __device__ int upper(int* nb_retval, void* udf_str, void const* st, std::uintptr_t flags_table, std::uintptr_t cases_table, std::uintptr_t special_table) { auto udf_str_ptr = new (udf_str) udf_string; auto st_ptr = reinterpret_cast<cudf::string_view const*>(st); auto flags_table_ptr = reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(flags_table); auto cases_table_ptr = reinterpret_cast<cudf::strings::detail::character_cases_table_type*>(cases_table); auto special_table_ptr = reinterpret_cast<cudf::strings::detail::special_case_mapping*>(special_table); cudf::strings::udf::chars_tables tables{flags_table_ptr, cases_table_ptr, special_table_ptr}; *udf_str_ptr = to_upper(tables, *st_ptr); return 0; } extern "C" __device__ int lower(int* nb_retval, void* udf_str, void const* st, std::uintptr_t flags_table, std::uintptr_t cases_table, std::uintptr_t special_table) { auto udf_str_ptr = new (udf_str) udf_string; auto st_ptr = reinterpret_cast<cudf::string_view const*>(st); auto flags_table_ptr = reinterpret_cast<cudf::strings::detail::character_flags_table_type*>(flags_table); auto cases_table_ptr = reinterpret_cast<cudf::strings::detail::character_cases_table_type*>(cases_table); auto special_table_ptr = reinterpret_cast<cudf::strings::detail::special_case_mapping*>(special_table); cudf::strings::udf::chars_tables tables{flags_table_ptr, cases_table_ptr, special_table_ptr}; *udf_str_ptr = to_lower(tables, *st_ptr); return 0; } extern "C" __device__ int concat(int* nb_retval, void* udf_str, void* const* lhs, void* const* rhs) { auto lhs_ptr = reinterpret_cast<cudf::string_view const*>(lhs); auto rhs_ptr = reinterpret_cast<cudf::string_view const*>(rhs); auto udf_str_ptr 
= new (udf_str) udf_string; udf_string result; result.append(*lhs_ptr).append(*rhs_ptr); *udf_str_ptr = result; return 0; } extern "C" __device__ int replace( int* nb_retval, void* udf_str, void* const src, void* const to_replace, void* const replacement) { auto src_ptr = reinterpret_cast<cudf::string_view const*>(src); auto to_replace_ptr = reinterpret_cast<cudf::string_view const*>(to_replace); auto replacement_ptr = reinterpret_cast<cudf::string_view const*>(replacement); auto udf_str_ptr = new (udf_str) udf_string; *udf_str_ptr = replace(*src_ptr, *to_replace_ptr, *replacement_ptr); return 0; } // Groupby Shim Functions template <typename T> __device__ bool are_all_nans(cooperative_groups::thread_block const& block, T const* data, int64_t size) { // TODO: to be refactored with CG vote functions once // block size is known at build time __shared__ int64_t count; if (block.thread_rank() == 0) { count = 0; } block.sync(); for (int64_t idx = block.thread_rank(); idx < size; idx += block.size()) { if (not std::isnan(data[idx])) { cuda::atomic_ref<int64_t, cuda::thread_scope_block> ref{count}; ref.fetch_add(1, cuda::std::memory_order_relaxed); break; } } block.sync(); return count == 0; } template <typename T, typename AccumT = std::conditional_t<std::is_integral_v<T>, int64_t, T>> __device__ AccumT device_sum(cooperative_groups::thread_block const& block, T const* data, int64_t size) { __shared__ AccumT block_sum; if (block.thread_rank() == 0) { block_sum = 0; } block.sync(); AccumT local_sum = 0; for (int64_t idx = block.thread_rank(); idx < size; idx += block.size()) { local_sum += static_cast<AccumT>(data[idx]); } cuda::atomic_ref<AccumT, cuda::thread_scope_block> ref{block_sum}; ref.fetch_add(local_sum, cuda::std::memory_order_relaxed); block.sync(); return block_sum; } template <typename T, typename AccumT = std::conditional_t<std::is_integral_v<T>, int64_t, T>> __device__ AccumT BlockSum(T const* data, int64_t size) { auto block = cooperative_groups::this_thread_block(); if constexpr (std::is_floating_point_v<T>) { if (are_all_nans(block, data, size)) { return 0; } } auto block_sum = device_sum<T>(block, data, size); return block_sum; } template <typename T> __device__ double BlockMean(T const* data, int64_t size) { auto block = cooperative_groups::this_thread_block(); auto block_sum = device_sum<T>(block, data, size); return static_cast<double>(block_sum) / static_cast<double>(size); } template <typename T> __device__ double BlockCoVar(T const* lhs, T const* rhs, int64_t size) { auto block = cooperative_groups::this_thread_block(); __shared__ double block_covar; if (block.thread_rank() == 0) { block_covar = 0; } block.sync(); auto block_sum_lhs = device_sum<T>(block, lhs, size); auto const mu_l = static_cast<double>(block_sum_lhs) / static_cast<double>(size); auto const mu_r = [=]() { if (lhs == rhs) { // If the lhs and rhs are the same, this is calculating variance. // Thus we can assume mu_r = mu_l. 
return mu_l; } else { auto block_sum_rhs = device_sum<T>(block, rhs, size); return static_cast<double>(block_sum_rhs) / static_cast<double>(size); } }(); double local_covar = 0; for (int64_t idx = block.thread_rank(); idx < size; idx += block.size()) { local_covar += (static_cast<double>(lhs[idx]) - mu_l) * (static_cast<double>(rhs[idx]) - mu_r); } cuda::atomic_ref<double, cuda::thread_scope_block> ref{block_covar}; ref.fetch_add(local_covar, cuda::std::memory_order_relaxed); block.sync(); if (block.thread_rank() == 0) { block_covar /= static_cast<double>(size - 1); } block.sync(); return block_covar; } template <typename T> __device__ double BlockVar(T const* data, int64_t size) { return BlockCoVar<T>(data, data, size); } template <typename T> __device__ double BlockStd(T const* data, int64_t size) { auto const var = BlockVar(data, size); return sqrt(var); } template <typename T> __device__ T BlockMax(T const* data, int64_t size) { auto block = cooperative_groups::this_thread_block(); if constexpr (std::is_floating_point_v<T>) { if (are_all_nans(block, data, size)) { return std::numeric_limits<T>::quiet_NaN(); } } auto local_max = cudf::DeviceMax::identity<T>(); __shared__ T block_max; if (block.thread_rank() == 0) { block_max = local_max; } block.sync(); for (int64_t idx = block.thread_rank(); idx < size; idx += block.size()) { local_max = max(local_max, data[idx]); } cuda::atomic_ref<T, cuda::thread_scope_block> ref{block_max}; ref.fetch_max(local_max, cuda::std::memory_order_relaxed); block.sync(); return block_max; } template <typename T> __device__ T BlockMin(T const* data, int64_t size) { auto block = cooperative_groups::this_thread_block(); if constexpr (std::is_floating_point_v<T>) { if (are_all_nans(block, data, size)) { return std::numeric_limits<T>::quiet_NaN(); } } auto local_min = cudf::DeviceMin::identity<T>(); __shared__ T block_min; if (block.thread_rank() == 0) { block_min = local_min; } block.sync(); for (int64_t idx = block.thread_rank(); idx < size; idx += block.size()) { local_min = min(local_min, data[idx]); } cuda::atomic_ref<T, cuda::thread_scope_block> ref{block_min}; ref.fetch_min(local_min, cuda::std::memory_order_relaxed); block.sync(); return block_min; } template <typename T> __device__ int64_t BlockIdxMax(T const* data, int64_t* index, int64_t size) { auto block = cooperative_groups::this_thread_block(); __shared__ T block_max; __shared__ int64_t block_idx_max; __shared__ bool found_max; auto local_max = cudf::DeviceMax::identity<T>(); auto local_idx_max = cudf::DeviceMin::identity<int64_t>(); if (block.thread_rank() == 0) { block_max = local_max; block_idx_max = local_idx_max; found_max = false; } block.sync(); for (int64_t idx = block.thread_rank(); idx < size; idx += block.size()) { auto const current_data = data[idx]; if (current_data > local_max) { local_max = current_data; local_idx_max = index[idx]; found_max = true; } } cuda::atomic_ref<T, cuda::thread_scope_block> ref{block_max}; ref.fetch_max(local_max, cuda::std::memory_order_relaxed); block.sync(); if (found_max) { if (local_max == block_max) { cuda::atomic_ref<int64_t, cuda::thread_scope_block> ref_idx{block_idx_max}; ref_idx.fetch_min(local_idx_max, cuda::std::memory_order_relaxed); } } else { if (block.thread_rank() == 0) { block_idx_max = index[0]; } } block.sync(); return block_idx_max; } template <typename T> __device__ int64_t BlockIdxMin(T const* data, int64_t* index, int64_t size) { auto block = cooperative_groups::this_thread_block(); __shared__ T block_min; __shared__ int64_t 
block_idx_min; __shared__ bool found_min; auto local_min = cudf::DeviceMin::identity<T>(); auto local_idx_min = cudf::DeviceMin::identity<int64_t>(); if (block.thread_rank() == 0) { block_min = local_min; block_idx_min = local_idx_min; found_min = false; } block.sync(); for (int64_t idx = block.thread_rank(); idx < size; idx += block.size()) { auto const current_data = data[idx]; if (current_data < local_min) { local_min = current_data; local_idx_min = index[idx]; found_min = true; } } cuda::atomic_ref<T, cuda::thread_scope_block> ref{block_min}; ref.fetch_min(local_min, cuda::std::memory_order_relaxed); block.sync(); if (found_min) { if (local_min == block_min) { cuda::atomic_ref<int64_t, cuda::thread_scope_block> ref_idx{block_idx_min}; ref_idx.fetch_min(local_idx_min, cuda::std::memory_order_relaxed); } } else { if (block.thread_rank() == 0) { block_idx_min = index[0]; } } block.sync(); return block_idx_min; } template <typename T> __device__ double BlockCorr(T* const lhs_ptr, T* const rhs_ptr, int64_t size) { auto numerator = BlockCoVar(lhs_ptr, rhs_ptr, size); auto denominator = BlockStd(lhs_ptr, size) * BlockStd<T>(rhs_ptr, size); if (denominator == 0.0) { return std::numeric_limits<double>::quiet_NaN(); } else { return numerator / denominator; } } extern "C" { #define make_definition(name, cname, type, return_type) \ __device__ int name##_##cname(return_type* numba_return_value, type* const data, int64_t size) \ { \ return_type const res = name<type>(data, size); \ *numba_return_value = res; \ __syncthreads(); \ return 0; \ } make_definition(BlockSum, int32, int32_t, int64_t); make_definition(BlockSum, int64, int64_t, int64_t); make_definition(BlockSum, float32, float, float); make_definition(BlockSum, float64, double, double); make_definition(BlockMean, int32, int32_t, double); make_definition(BlockMean, int64, int64_t, double); make_definition(BlockMean, float32, float, float); make_definition(BlockMean, float64, double, double); make_definition(BlockStd, int32, int32_t, double); make_definition(BlockStd, int64, int64_t, double); make_definition(BlockStd, float32, float, float); make_definition(BlockStd, float64, double, double); make_definition(BlockVar, int64, int64_t, double); make_definition(BlockVar, int32, int32_t, double); make_definition(BlockVar, float32, float, float); make_definition(BlockVar, float64, double, double); make_definition(BlockMin, int32, int32_t, int32_t); make_definition(BlockMin, int64, int64_t, int64_t); make_definition(BlockMin, float32, float, float); make_definition(BlockMin, float64, double, double); make_definition(BlockMax, int32, int32_t, int32_t); make_definition(BlockMax, int64, int64_t, int64_t); make_definition(BlockMax, float32, float, float); make_definition(BlockMax, float64, double, double); #undef make_definition } extern "C" { #define make_definition_idx(name, cname, type) \ __device__ int name##_##cname( \ int64_t* numba_return_value, type* const data, int64_t* index, int64_t size) \ { \ auto const res = name<type>(data, index, size); \ *numba_return_value = res; \ __syncthreads(); \ return 0; \ } make_definition_idx(BlockIdxMin, int32, int32_t); make_definition_idx(BlockIdxMin, int64, int64_t); make_definition_idx(BlockIdxMin, float32, float); make_definition_idx(BlockIdxMin, float64, double); make_definition_idx(BlockIdxMax, int32, int32_t); make_definition_idx(BlockIdxMax, int64, int64_t); make_definition_idx(BlockIdxMax, float32, float); make_definition_idx(BlockIdxMax, float64, double); #undef make_definition_idx } extern "C" { 
#define make_definition_corr(name, cname, type) \ __device__ int name##_##cname##_##cname( \ double* numba_return_value, type* const lhs, type* const rhs, int64_t size) \ { \ double const res = name<type>(lhs, rhs, size); \ *numba_return_value = res; \ __syncthreads(); \ return 0; \ } make_definition_corr(BlockCorr, int32, int32_t); make_definition_corr(BlockCorr, int64, int64_t); #undef make_definition_corr }
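The groupby shims in this pair are built on one block-wide reduction idiom: every thread strides over the group's rows, accumulates a private partial value, and folds it into a __shared__ total through a block-scoped cuda::atomic_ref, with block.sync() separating the phases. A minimal standalone sketch of that idiom follows; it is my addition, not part of the shim source, the kernel name is made up, and it assumes a single block per group, as the shims do.

#include <cooperative_groups.h>
#include <cuda/atomic>

// Illustrative kernel (name and signature are mine): one thread block sums
// `size` doubles the same way device_sum/BlockSum do above.
__global__ void block_sum_demo(double const* data, long long size, double* out)
{
  auto block = cooperative_groups::this_thread_block();

  __shared__ double block_sum;
  if (block.thread_rank() == 0) { block_sum = 0.0; }
  block.sync();

  // Phase 1: each thread accumulates a private partial sum over a strided range.
  double local_sum = 0.0;
  for (long long idx = block.thread_rank(); idx < size; idx += block.size()) {
    local_sum += data[idx];
  }

  // Phase 2: fold the partial sums into shared memory with a block-scoped atomic.
  cuda::atomic_ref<double, cuda::thread_scope_block> ref{block_sum};
  ref.fetch_add(local_sum, cuda::std::memory_order_relaxed);
  block.sync();

  if (block.thread_rank() == 0) { *out = block_sum; }  // single block assumed
}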
f04f5156e1a4279a7ea6e78e7000bf22ffc5f56e.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2017-2020 XGBoost contributors */ #include <gtest/gtest.h> #include <dmlc/filesystem.h> #include <xgboost/c_api.h> #include <xgboost/predictor.h> #include <xgboost/logging.h> #include <xgboost/learner.h> #include <string> #include "../helpers.h" #include "../../../src/gbm/gbtree_model.h" #include "../../../src/data/device_adapter.cuh" #include "test_predictor.h" namespace xgboost { namespace predictor { TEST(GPUPredictor, Basic) { auto cpu_lparam = CreateEmptyGenericParam(-1); auto gpu_lparam = CreateEmptyGenericParam(0); std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &gpu_lparam)); std::unique_ptr<Predictor> cpu_predictor = std::unique_ptr<Predictor>(Predictor::Create("cpu_predictor", &cpu_lparam)); gpu_predictor->Configure({}); cpu_predictor->Configure({}); for (size_t i = 1; i < 33; i *= 2) { int n_row = i, n_col = i; auto dmat = RandomDataGenerator(n_row, n_col, 0).GenerateDMatrix(); LearnerModelParam param; param.num_feature = n_col; param.num_output_group = 1; param.base_score = 0.5; gbm::GBTreeModel model = CreateTestModel(&param); // Test predict batch PredictionCacheEntry gpu_out_predictions; PredictionCacheEntry cpu_out_predictions; gpu_predictor->PredictBatch(dmat.get(), &gpu_out_predictions, model, 0); ASSERT_EQ(model.trees.size(), gpu_out_predictions.version); cpu_predictor->PredictBatch(dmat.get(), &cpu_out_predictions, model, 0); std::vector<float>& gpu_out_predictions_h = gpu_out_predictions.predictions.HostVector(); std::vector<float>& cpu_out_predictions_h = cpu_out_predictions.predictions.HostVector(); float abs_tolerance = 0.001; for (int j = 0; j < gpu_out_predictions.predictions.Size(); j++) { ASSERT_NEAR(gpu_out_predictions_h[j], cpu_out_predictions_h[j], abs_tolerance); } } } TEST(GPUPredictor, EllpackBasic) { size_t constexpr kCols {8}; for (size_t bins = 2; bins < 258; bins += 16) { size_t rows = bins * 16; auto p_m = RandomDataGenerator{rows, kCols, 0.0} .Bins(bins) .Device(0) .GenerateDeviceDMatrix(true); TestPredictionFromGradientIndex<EllpackPage>("gpu_predictor", rows, kCols, p_m); TestPredictionFromGradientIndex<EllpackPage>("gpu_predictor", bins, kCols, p_m); } } TEST(GPUPredictor, EllpackTraining) { size_t constexpr kRows { 128 }, kCols { 16 }, kBins { 64 }; auto p_ellpack = RandomDataGenerator{kRows, kCols, 0.0} .Bins(kBins) .Device(0) .GenerateDeviceDMatrix(true); HostDeviceVector<float> storage(kRows * kCols); auto columnar = RandomDataGenerator{kRows, kCols, 0.0} .Device(0) .GenerateArrayInterface(&storage); auto adapter = data::CupyAdapter(columnar); std::shared_ptr<DMatrix> p_full { DMatrix::Create(&adapter, std::numeric_limits<float>::quiet_NaN(), 1) }; TestTrainingPrediction(kRows, kBins, "gpu_hist", p_full, p_ellpack); } TEST(GPUPredictor, ExternalMemoryTest) { auto lparam = CreateEmptyGenericParam(0); std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &lparam)); gpu_predictor->Configure({}); LearnerModelParam param; param.num_feature = 5; const int n_classes = 3; param.num_output_group = n_classes; param.base_score = 0.5; gbm::GBTreeModel model = CreateTestModel(&param, n_classes); std::vector<std::unique_ptr<DMatrix>> dmats; dmlc::TemporaryDirectory tmpdir; std::string file0 = tmpdir.path + "/big_0.libsvm"; std::string file1 = tmpdir.path + "/big_1.libsvm"; std::string file2 = tmpdir.path + "/big_2.libsvm"; dmats.push_back(CreateSparsePageDMatrix(9, 64UL, 
file0)); dmats.push_back(CreateSparsePageDMatrix(128, 128UL, file1)); dmats.push_back(CreateSparsePageDMatrix(1024, 1024UL, file2)); for (const auto& dmat: dmats) { dmat->Info().base_margin_.Resize(dmat->Info().num_row_ * n_classes, 0.5); PredictionCacheEntry out_predictions; gpu_predictor->PredictBatch(dmat.get(), &out_predictions, model, 0); EXPECT_EQ(out_predictions.predictions.Size(), dmat->Info().num_row_ * n_classes); const std::vector<float> &host_vector = out_predictions.predictions.ConstHostVector(); for (int i = 0; i < host_vector.size() / n_classes; i++) { ASSERT_EQ(host_vector[i * n_classes], 2.0); ASSERT_EQ(host_vector[i * n_classes + 1], 0.5); ASSERT_EQ(host_vector[i * n_classes + 2], 0.5); } } } TEST(GPUPredictor, InplacePredictCupy) { size_t constexpr kRows{128}, kCols{64}; RandomDataGenerator gen(kRows, kCols, 0.5); gen.Device(0); HostDeviceVector<float> data; std::string interface_str = gen.GenerateArrayInterface(&data); auto x = std::make_shared<data::CupyAdapter>(interface_str); TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 0); } TEST(GPUPredictor, InplacePredictCuDF) { size_t constexpr kRows{128}, kCols{64}; RandomDataGenerator gen(kRows, kCols, 0.5); gen.Device(0); std::vector<HostDeviceVector<float>> storage(kCols); auto interface_str = gen.GenerateColumnarArrayInterface(&storage); auto x = std::make_shared<data::CudfAdapter>(interface_str); TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 0); } TEST(GPUPredictor, MGPU_InplacePredict) { // NOLINT int32_t n_gpus = xgboost::common::AllVisibleGPUs(); if (n_gpus <= 1) { LOG(WARNING) << "GPUPredictor.MGPU_InplacePredict is skipped."; return; } size_t constexpr kRows{128}, kCols{64}; RandomDataGenerator gen(kRows, kCols, 0.5); gen.Device(1); HostDeviceVector<float> data; std::string interface_str = gen.GenerateArrayInterface(&data); auto x = std::make_shared<data::CupyAdapter>(interface_str); TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 1); EXPECT_THROW(TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 0), dmlc::Error); } TEST(GpuPredictor, LesserFeatures) { TestPredictionWithLesserFeatures("gpu_predictor"); } // Very basic test of empty model TEST(GPUPredictor, ShapStump) { hipSetDevice(0); LearnerModelParam param; param.num_feature = 1; param.num_output_group = 1; param.base_score = 0.5; gbm::GBTreeModel model(&param); std::vector<std::unique_ptr<RegTree>> trees; trees.push_back(std::unique_ptr<RegTree>(new RegTree)); model.CommitModel(std::move(trees), 0); auto gpu_lparam = CreateEmptyGenericParam(0); std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &gpu_lparam)); gpu_predictor->Configure({}); std::vector<float > phis; auto dmat = RandomDataGenerator(3, 1, 0).GenerateDMatrix(); gpu_predictor->PredictContribution(dmat.get(), &phis, model); EXPECT_EQ(phis[0], 0.0); EXPECT_EQ(phis[1], param.base_score); EXPECT_EQ(phis[2], 0.0); EXPECT_EQ(phis[3], param.base_score); EXPECT_EQ(phis[4], 0.0); EXPECT_EQ(phis[5], param.base_score); } TEST(GPUPredictor, Shap) { LearnerModelParam param; param.num_feature = 1; param.num_output_group = 1; param.base_score = 0.5; gbm::GBTreeModel model(&param); std::vector<std::unique_ptr<RegTree>> trees; trees.push_back(std::unique_ptr<RegTree>(new RegTree)); trees[0]->ExpandNode(0, 0, 0.5, true, 1.0, -1.0, 1.0, 0.0, 5.0, 2.0, 3.0); model.CommitModel(std::move(trees), 0); auto gpu_lparam = CreateEmptyGenericParam(0); auto cpu_lparam = CreateEmptyGenericParam(-1); std::unique_ptr<Predictor> gpu_predictor = 
std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &gpu_lparam)); std::unique_ptr<Predictor> cpu_predictor = std::unique_ptr<Predictor>(Predictor::Create("cpu_predictor", &cpu_lparam)); gpu_predictor->Configure({}); cpu_predictor->Configure({}); std::vector<float > phis; std::vector<float > cpu_phis; auto dmat = RandomDataGenerator(3, 1, 0).GenerateDMatrix(); gpu_predictor->PredictContribution(dmat.get(), &phis, model); cpu_predictor->PredictContribution(dmat.get(), &cpu_phis, model); for(auto i = 0ull; i < phis.size(); i++) { EXPECT_NEAR(cpu_phis[i], phis[i], 1e-3); } } } // namespace predictor } // namespace xgboost
f04f5156e1a4279a7ea6e78e7000bf22ffc5f56e.cu
/*! * Copyright 2017-2020 XGBoost contributors */ #include <gtest/gtest.h> #include <dmlc/filesystem.h> #include <xgboost/c_api.h> #include <xgboost/predictor.h> #include <xgboost/logging.h> #include <xgboost/learner.h> #include <string> #include "../helpers.h" #include "../../../src/gbm/gbtree_model.h" #include "../../../src/data/device_adapter.cuh" #include "test_predictor.h" namespace xgboost { namespace predictor { TEST(GPUPredictor, Basic) { auto cpu_lparam = CreateEmptyGenericParam(-1); auto gpu_lparam = CreateEmptyGenericParam(0); std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &gpu_lparam)); std::unique_ptr<Predictor> cpu_predictor = std::unique_ptr<Predictor>(Predictor::Create("cpu_predictor", &cpu_lparam)); gpu_predictor->Configure({}); cpu_predictor->Configure({}); for (size_t i = 1; i < 33; i *= 2) { int n_row = i, n_col = i; auto dmat = RandomDataGenerator(n_row, n_col, 0).GenerateDMatrix(); LearnerModelParam param; param.num_feature = n_col; param.num_output_group = 1; param.base_score = 0.5; gbm::GBTreeModel model = CreateTestModel(&param); // Test predict batch PredictionCacheEntry gpu_out_predictions; PredictionCacheEntry cpu_out_predictions; gpu_predictor->PredictBatch(dmat.get(), &gpu_out_predictions, model, 0); ASSERT_EQ(model.trees.size(), gpu_out_predictions.version); cpu_predictor->PredictBatch(dmat.get(), &cpu_out_predictions, model, 0); std::vector<float>& gpu_out_predictions_h = gpu_out_predictions.predictions.HostVector(); std::vector<float>& cpu_out_predictions_h = cpu_out_predictions.predictions.HostVector(); float abs_tolerance = 0.001; for (int j = 0; j < gpu_out_predictions.predictions.Size(); j++) { ASSERT_NEAR(gpu_out_predictions_h[j], cpu_out_predictions_h[j], abs_tolerance); } } } TEST(GPUPredictor, EllpackBasic) { size_t constexpr kCols {8}; for (size_t bins = 2; bins < 258; bins += 16) { size_t rows = bins * 16; auto p_m = RandomDataGenerator{rows, kCols, 0.0} .Bins(bins) .Device(0) .GenerateDeviceDMatrix(true); TestPredictionFromGradientIndex<EllpackPage>("gpu_predictor", rows, kCols, p_m); TestPredictionFromGradientIndex<EllpackPage>("gpu_predictor", bins, kCols, p_m); } } TEST(GPUPredictor, EllpackTraining) { size_t constexpr kRows { 128 }, kCols { 16 }, kBins { 64 }; auto p_ellpack = RandomDataGenerator{kRows, kCols, 0.0} .Bins(kBins) .Device(0) .GenerateDeviceDMatrix(true); HostDeviceVector<float> storage(kRows * kCols); auto columnar = RandomDataGenerator{kRows, kCols, 0.0} .Device(0) .GenerateArrayInterface(&storage); auto adapter = data::CupyAdapter(columnar); std::shared_ptr<DMatrix> p_full { DMatrix::Create(&adapter, std::numeric_limits<float>::quiet_NaN(), 1) }; TestTrainingPrediction(kRows, kBins, "gpu_hist", p_full, p_ellpack); } TEST(GPUPredictor, ExternalMemoryTest) { auto lparam = CreateEmptyGenericParam(0); std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &lparam)); gpu_predictor->Configure({}); LearnerModelParam param; param.num_feature = 5; const int n_classes = 3; param.num_output_group = n_classes; param.base_score = 0.5; gbm::GBTreeModel model = CreateTestModel(&param, n_classes); std::vector<std::unique_ptr<DMatrix>> dmats; dmlc::TemporaryDirectory tmpdir; std::string file0 = tmpdir.path + "/big_0.libsvm"; std::string file1 = tmpdir.path + "/big_1.libsvm"; std::string file2 = tmpdir.path + "/big_2.libsvm"; dmats.push_back(CreateSparsePageDMatrix(9, 64UL, file0)); dmats.push_back(CreateSparsePageDMatrix(128, 128UL, 
file1)); dmats.push_back(CreateSparsePageDMatrix(1024, 1024UL, file2)); for (const auto& dmat: dmats) { dmat->Info().base_margin_.Resize(dmat->Info().num_row_ * n_classes, 0.5); PredictionCacheEntry out_predictions; gpu_predictor->PredictBatch(dmat.get(), &out_predictions, model, 0); EXPECT_EQ(out_predictions.predictions.Size(), dmat->Info().num_row_ * n_classes); const std::vector<float> &host_vector = out_predictions.predictions.ConstHostVector(); for (int i = 0; i < host_vector.size() / n_classes; i++) { ASSERT_EQ(host_vector[i * n_classes], 2.0); ASSERT_EQ(host_vector[i * n_classes + 1], 0.5); ASSERT_EQ(host_vector[i * n_classes + 2], 0.5); } } } TEST(GPUPredictor, InplacePredictCupy) { size_t constexpr kRows{128}, kCols{64}; RandomDataGenerator gen(kRows, kCols, 0.5); gen.Device(0); HostDeviceVector<float> data; std::string interface_str = gen.GenerateArrayInterface(&data); auto x = std::make_shared<data::CupyAdapter>(interface_str); TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 0); } TEST(GPUPredictor, InplacePredictCuDF) { size_t constexpr kRows{128}, kCols{64}; RandomDataGenerator gen(kRows, kCols, 0.5); gen.Device(0); std::vector<HostDeviceVector<float>> storage(kCols); auto interface_str = gen.GenerateColumnarArrayInterface(&storage); auto x = std::make_shared<data::CudfAdapter>(interface_str); TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 0); } TEST(GPUPredictor, MGPU_InplacePredict) { // NOLINT int32_t n_gpus = xgboost::common::AllVisibleGPUs(); if (n_gpus <= 1) { LOG(WARNING) << "GPUPredictor.MGPU_InplacePredict is skipped."; return; } size_t constexpr kRows{128}, kCols{64}; RandomDataGenerator gen(kRows, kCols, 0.5); gen.Device(1); HostDeviceVector<float> data; std::string interface_str = gen.GenerateArrayInterface(&data); auto x = std::make_shared<data::CupyAdapter>(interface_str); TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 1); EXPECT_THROW(TestInplacePrediction(x, "gpu_predictor", kRows, kCols, 0), dmlc::Error); } TEST(GpuPredictor, LesserFeatures) { TestPredictionWithLesserFeatures("gpu_predictor"); } // Very basic test of empty model TEST(GPUPredictor, ShapStump) { cudaSetDevice(0); LearnerModelParam param; param.num_feature = 1; param.num_output_group = 1; param.base_score = 0.5; gbm::GBTreeModel model(&param); std::vector<std::unique_ptr<RegTree>> trees; trees.push_back(std::unique_ptr<RegTree>(new RegTree)); model.CommitModel(std::move(trees), 0); auto gpu_lparam = CreateEmptyGenericParam(0); std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", &gpu_lparam)); gpu_predictor->Configure({}); std::vector<float > phis; auto dmat = RandomDataGenerator(3, 1, 0).GenerateDMatrix(); gpu_predictor->PredictContribution(dmat.get(), &phis, model); EXPECT_EQ(phis[0], 0.0); EXPECT_EQ(phis[1], param.base_score); EXPECT_EQ(phis[2], 0.0); EXPECT_EQ(phis[3], param.base_score); EXPECT_EQ(phis[4], 0.0); EXPECT_EQ(phis[5], param.base_score); } TEST(GPUPredictor, Shap) { LearnerModelParam param; param.num_feature = 1; param.num_output_group = 1; param.base_score = 0.5; gbm::GBTreeModel model(&param); std::vector<std::unique_ptr<RegTree>> trees; trees.push_back(std::unique_ptr<RegTree>(new RegTree)); trees[0]->ExpandNode(0, 0, 0.5, true, 1.0, -1.0, 1.0, 0.0, 5.0, 2.0, 3.0); model.CommitModel(std::move(trees), 0); auto gpu_lparam = CreateEmptyGenericParam(0); auto cpu_lparam = CreateEmptyGenericParam(-1); std::unique_ptr<Predictor> gpu_predictor = std::unique_ptr<Predictor>(Predictor::Create("gpu_predictor", 
&gpu_lparam)); std::unique_ptr<Predictor> cpu_predictor = std::unique_ptr<Predictor>(Predictor::Create("cpu_predictor", &cpu_lparam)); gpu_predictor->Configure({}); cpu_predictor->Configure({}); std::vector<float > phis; std::vector<float > cpu_phis; auto dmat = RandomDataGenerator(3, 1, 0).GenerateDMatrix(); gpu_predictor->PredictContribution(dmat.get(), &phis, model); cpu_predictor->PredictContribution(dmat.get(), &cpu_phis, model); for(auto i = 0ull; i < phis.size(); i++) { EXPECT_NEAR(cpu_phis[i], phis[i], 1e-3); } } } // namespace predictor } // namespace xgboost
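The only functional difference hipify introduces in this test pair is the device-selection call: cudaSetDevice(0) in the .cu file becomes hipSetDevice(0) in the .hip file. A hedged sketch of how such a call is sometimes kept portable in a single source is shown below; the GPU_SET_DEVICE macro is my own invention and is not something the XGBoost tests use.

#if defined(__HIP_PLATFORM_AMD__) || defined(__HIP_PLATFORM_HCC__)
#include <hip/hip_runtime.h>
#define GPU_SET_DEVICE(id) hipSetDevice(id)   // .hip side of the pair
#else
#include <cuda_runtime.h>
#define GPU_SET_DEVICE(id) cudaSetDevice(id)  // .cu side of the pair
#endif

// Mirrors the call at the top of the ShapStump test; the wrapper itself is
// hypothetical and only serves to show the one line hipify rewrites here.
void select_test_device() { (void)GPU_SET_DEVICE(0); }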
3aa2de52a77fcef9806699b908eb3316127d17bb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2018,2019,2020,2021 Sony Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/matrix_diag_part.hpp> #include <nbla/cuda/math.hpp> #include <nbla/variable.hpp> namespace nbla { template <typename T> __global__ void kernel_matrix_diag_part_forward(const int num, const int last_ndim, T *y, const T *x) { NBLA_CUDA_KERNEL_LOOP(idx, num) { y[idx] = x[idx * last_ndim + idx % last_ndim]; } } template <typename T> __global__ void kernel_matrix_diag_part_backward_accum(const int num, const int last_ndim, T *dx, const T *dy) { NBLA_CUDA_KERNEL_LOOP(idx, num) { dx[idx * last_ndim + idx % last_ndim] += dy[idx]; } } template <typename T> __global__ void kernel_matrix_diag_part_backward_nonaccum(const int num, const int last_ndim, T *dx, const T *dy) { NBLA_CUDA_KERNEL_LOOP(idx, num) { int c_idx = idx % last_ndim; // column index in a matrix int dy_idx = idx / last_ndim; // index of an input array int r_idx = dy_idx % last_ndim; // row index in a matrix dx[idx] = (r_idx == c_idx) ? dy[dy_idx] : (T)0.; } } template <typename T> void MatrixDiagPartCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(std::stoi(this->ctx_.device_id)); const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_); Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true); size_t size = outputs[0]->size(); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_matrix_diag_part_forward, size, this->last_ndim_, y, x) } template <typename T> void MatrixDiagPartCuda<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { cuda_set_device(std::stoi(this->ctx_.device_id)); if (!propagate_down[0]) { return; } Tc *dx = inputs[0]->cast_grad_and_get_pointer<Tc>(this->ctx_, !accum[0]); const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_); Size_t size = outputs[0]->size(); if (accum[0]) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_matrix_diag_part_backward_accum<Tc>), size, this->last_ndim_, dx, dy); } else { Size_t size_by_last_ndim = size * this->last_ndim_; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_matrix_diag_part_backward_nonaccum<Tc>), size_by_last_ndim, this->last_ndim_, dx, dy); } } } // namespace nbla
3aa2de52a77fcef9806699b908eb3316127d17bb.cu
// Copyright 2018,2019,2020,2021 Sony Corporation. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <nbla/array.hpp> #include <nbla/cuda/common.hpp> #include <nbla/cuda/function/matrix_diag_part.hpp> #include <nbla/cuda/math.hpp> #include <nbla/variable.hpp> namespace nbla { template <typename T> __global__ void kernel_matrix_diag_part_forward(const int num, const int last_ndim, T *y, const T *x) { NBLA_CUDA_KERNEL_LOOP(idx, num) { y[idx] = x[idx * last_ndim + idx % last_ndim]; } } template <typename T> __global__ void kernel_matrix_diag_part_backward_accum(const int num, const int last_ndim, T *dx, const T *dy) { NBLA_CUDA_KERNEL_LOOP(idx, num) { dx[idx * last_ndim + idx % last_ndim] += dy[idx]; } } template <typename T> __global__ void kernel_matrix_diag_part_backward_nonaccum(const int num, const int last_ndim, T *dx, const T *dy) { NBLA_CUDA_KERNEL_LOOP(idx, num) { int c_idx = idx % last_ndim; // column index in a matrix int dy_idx = idx / last_ndim; // index of an input array int r_idx = dy_idx % last_ndim; // row index in a matrix dx[idx] = (r_idx == c_idx) ? dy[dy_idx] : (T)0.; } } template <typename T> void MatrixDiagPartCuda<T>::forward_impl(const Variables &inputs, const Variables &outputs) { cuda_set_device(std::stoi(this->ctx_.device_id)); const Tc *x = inputs[0]->get_data_pointer<Tc>(this->ctx_); Tc *y = outputs[0]->cast_data_and_get_pointer<Tc>(this->ctx_, true); size_t size = outputs[0]->size(); NBLA_CUDA_LAUNCH_KERNEL_SIMPLE(kernel_matrix_diag_part_forward, size, this->last_ndim_, y, x) } template <typename T> void MatrixDiagPartCuda<T>::backward_impl(const Variables &inputs, const Variables &outputs, const vector<bool> &propagate_down, const vector<bool> &accum) { cuda_set_device(std::stoi(this->ctx_.device_id)); if (!propagate_down[0]) { return; } Tc *dx = inputs[0]->cast_grad_and_get_pointer<Tc>(this->ctx_, !accum[0]); const Tc *dy = outputs[0]->get_grad_pointer<Tc>(this->ctx_); Size_t size = outputs[0]->size(); if (accum[0]) { NBLA_CUDA_LAUNCH_KERNEL_SIMPLE((kernel_matrix_diag_part_backward_accum<Tc>), size, this->last_ndim_, dx, dy); } else { Size_t size_by_last_ndim = size * this->last_ndim_; NBLA_CUDA_LAUNCH_KERNEL_SIMPLE( (kernel_matrix_diag_part_backward_nonaccum<Tc>), size_by_last_ndim, this->last_ndim_, dx, dy); } } } // namespace nbla
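Both versions of the kernel extract the main diagonal with the same indexing: output element idx reads input element idx * last_ndim + idx % last_ndim, so for one row-major 3 x 3 matrix the picks are x[0], x[4] and x[8]. A small self-contained CUDA check of that indexing follows; it is my own sketch, independent of the nbla wrappers and their launch macros, and error handling is omitted for brevity.

#include <cstdio>
#include <cuda_runtime.h>

// Same element selection as kernel_matrix_diag_part_forward, without the
// NBLA_CUDA_KERNEL_LOOP machinery.
__global__ void diag_part(const float* x, float* y, int last_ndim, int num)
{
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx < num) { y[idx] = x[idx * last_ndim + idx % last_ndim]; }
}

int main()
{
  const int n = 3;  // one 3 x 3 matrix
  float h_x[n * n] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
  float h_y[n];
  float *d_x, *d_y;
  cudaMalloc(&d_x, sizeof(h_x));
  cudaMalloc(&d_y, sizeof(h_y));
  cudaMemcpy(d_x, h_x, sizeof(h_x), cudaMemcpyHostToDevice);
  diag_part<<<1, n>>>(d_x, d_y, n, n);   // same indexing as the forward kernel
  cudaMemcpy(h_y, d_y, sizeof(h_y), cudaMemcpyDeviceToHost);
  printf("%g %g %g\n", h_y[0], h_y[1], h_y[2]);  // expected: 0 4 8
  cudaFree(d_x);
  cudaFree(d_y);
  return 0;
}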
169696f62855bb8326701732bf3311eaa89562c5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" # include <stdio.h> # include <stdlib.h> # include <hip/hip_runtime.h> # include "rocblas.h" # include "batchCUBLAS.h" # define IDX2C(i,j,ld) ((( j )*( ld ))+( i )) float **gpuA_arr = NULL; float **gpuA0; float **gpuB_arr = NULL; float **gpuB0; float **gpuC_arr = NULL; float **gpuC0; int batchsize; int atomsize; int atomnum; int sparsenum; int blocksize; int *gammaindtab_arr; float *b_arr; float *dict; float *sigs; float *prkdx; float *gamma_arr; #define BATCHSIZE 2048 __global__ void _findmax(const float * inputmatrix, const int matrow, const int matcol, int * outputtable, const int _elnum, const int _sparsenum) { int row = blockIdx.x; int ty = threadIdx.x; int remainlen = matcol; int searchlen; int oddflag = 0; __shared__ float row_vec[BATCHSIZE]; __shared__ int max_index[BATCHSIZE]; __syncthreads(); float maxval = 1e-20; int maxindx = 10086; int startind = 0; int strid; while(remainlen > 0) { __syncthreads(); //compute the start pt from which to fetch data //compute the stride //if not at the end stride = BATCHSIZE/2 //else stride = rest data len /2 //if rest data len is odd set the odd flag if(remainlen >= BATCHSIZE) searchlen = BATCHSIZE; else { searchlen = remainlen; if((int)fmod((float)searchlen,(float)2) == 1) oddflag = 1; } remainlen -= searchlen; strid = searchlen / 2; //load data if( ty < strid) { row_vec[2 * ty] = fabsf(inputmatrix[row * matcol + 2*ty + startind]); max_index[2 * ty] = 2*ty + startind; row_vec[2*ty+1] = fabsf(inputmatrix[row * matcol + 2*ty + 1 + startind]); max_index[2*ty+1] = 2*ty + 1 + startind; } //if odd flag == 1 //add a extreme val at the end of the data array if(oddflag == 1 &&ty == 0) { row_vec[searchlen - 1] = fabsf(inputmatrix[row * matcol + (searchlen - 1) + startind]); max_index[searchlen - 1] = searchlen - 1 + startind; row_vec[searchlen] = 1e-10; max_index[searchlen] = 10086; } if(oddflag == 1) { strid += 1; } //do comparison while(strid >= 1) { __syncthreads(); if(ty < strid) { if(row_vec[ty] > row_vec[ty+strid]) { row_vec[ty] = row_vec[ty]; max_index[ty] = max_index[ty]; } else { row_vec[ty] = row_vec[ty+strid]; max_index[ty] = max_index[ty+strid]; } } if(strid == 1) strid = 0; else strid = (strid - 1)/2 + 1 ; } __syncthreads(); if(ty == 0) { //compare with the max val //renew the max val and max ind if(row_vec[0] > maxval ) { maxval = row_vec[0]; maxindx = max_index[0]; } } startind += searchlen; } if(ty == 0) { outputtable[_elnum + row * _sparsenum] = maxindx; } } //copy atoms in the dictionray to compute array //each thread process one batch __global__ void _buildc( float **dense, //the pointer to the dictionary float *sparse, //the pointer to each batch matrix int * _gammaindtab, //table indicates the atom indice to be copied const int _sparsenum, const int _wrkonsize, const int _atomnum //number of atoms to be copied for each batch ) { //determine the batch and stop some overdue thread int batchind = blockDim.x * blockIdx.x + threadIdx.x; float *srcpt = NULL; float *dstpt = NULL; int atomind; if(batchind < _wrkonsize) { //read the wrkontab and choose the batch matrix pointer srcpt = dense[batchind]; for(int i = 0; i<_atomnum; i++) { *(sparse + batchind * _atomnum + i) = 0; } //enter in the loop for(int i = 0; i < _sparsenum; i++) { //choose the atom pointer atomind = _gammaindtab[batchind * _sparsenum + i]; dstpt = sparse + atomind + batchind * _atomnum; //dst[batchind]; //read *dstpt = srcpt[i]; __syncthreads(); } } } //copy atoms in the dictionray to 
compute array //each thread process one batch __global__ void _memcpy_atom( float *src, //the pointer to the dictionary float **dst, //the pointer to each batch matrix int * _gammaindtab, //table indicates the atom indice to be copied const int _sparsenum, const int _wrkonsize, const int _atomnum, //number of atoms to be copied for each batch const int _atomsize //atom size ) { //determine the batch and stop some overdue thread int batchind = blockDim.x * blockIdx.x + threadIdx.x; float *srcpt = NULL; float *dstpt = NULL; int atomind; if(batchind < _wrkonsize) { //read the wrkontab and choose the batch matrix pointer dstpt = dst[batchind]; //enter in the loop for(int i = 0; i < _atomnum; i++) { //choose the atom pointer atomind = _gammaindtab[batchind * _sparsenum + i]; srcpt = src + atomind *_atomsize; //read for(int j = 0; j < _atomsize; j++) { dstpt[j] = srcpt[j]; } __syncthreads(); dstpt += _atomsize; } } } __global__ void _memcpy_sig( float **src, //the pointer to the dictionary float *dst, //the pointer to each batch matrix const int _wrkonsize, const int _atomsize //atom size ) { //determine the batch and stop some overdue thread int batchind = blockDim.x * blockIdx.x + threadIdx.x; float *srcpt = NULL; float *dstpt = NULL; if(batchind < _wrkonsize) { //read the wrkontab and choose the batch matrix pointer dstpt = dst + batchind*_atomsize;//dst[batchind]; srcpt = src[batchind]; //read for(int j = 0; j < _atomsize; j++) { dstpt[j] = srcpt[j]; } __syncthreads(); } } //each thread process one batch __global__ void _memcpy_b( float *src, //the pointer to the dictionary float **dst, //the pointer to each batch matrix const int _wrkonsize, const int _atomsize //atom size ) { //determine the batch and stop some overdue thread int batchind = blockDim.x * blockIdx.x + threadIdx.x; float *srcpt = NULL; float *dstpt = NULL; if(batchind < _wrkonsize) { //read the wrkontab and choose the batch matrix pointer dstpt = dst[batchind]; srcpt = src + batchind *_atomsize; //read for(int j = 0; j < _atomsize; j++) { dstpt[j] = srcpt[j]; } __syncthreads(); } } //function calls _memcpy_atom void memcpy_atom(int _blocksize,int elnum) { //compute grid size int blockperGrid = (batchsize - 1)/_blocksize + 1; //do loading hipLaunchKernelGGL(( _memcpy_atom), dim3(blockperGrid),dim3(_blocksize), 0, 0, dict, //the pointer to the dictionary gpuA_arr, //the pointer to each batch matrix gammaindtab_arr, //table indicates the atom indice to be copied sparsenum, batchsize, elnum, //number of atoms to be copied for each batch atomsize //atom size ); } void CLEANUP() { for(int i = 0; i < batchsize; ++i) { if(gpuA0[i]) hipFree(gpuA0[i]); if(gpuB0[i]) hipFree(gpuB0[i]); if(gpuC0[i]) hipFree(gpuC0[i]); } if (gpuA0) free(gpuA0); if (gpuB0) free(gpuB0); if (gpuC0) free(gpuC0); if (gpuA_arr) hipFree(gpuA_arr); if (gpuB_arr) hipFree(gpuB_arr); if (gpuC_arr) hipFree(gpuC_arr); hipFree(dict); hipFree(sigs); hipFree(prkdx); hipFree(gammaindtab_arr); hipFree(b_arr); hipFree(gamma_arr); } hipblasHandle_t handle ; // CUBLAS context int __init(int _atomsize,int _atomnum, int _batchsize, int _sparsenum, int _blocksize) { hipError_t err1, err2, err3; atomsize = _atomsize; atomnum = _atomnum; batchsize = _batchsize; sparsenum = _sparsenum; blocksize = _blocksize; gpuA0 = (float **)malloc(sizeof(*gpuA0)*batchsize); gpuB0 = (float **)malloc(sizeof(*gpuB0)*batchsize); gpuC0 = (float **)malloc(sizeof(*gpuB0)*batchsize); for(int i = 0; i < batchsize; i++) { err1 = hipMalloc((void **)&gpuA0[i], sizeof(gpuA0[0][0]) * atomsize * atomnum); err2 
= hipMalloc((void **)&gpuB0[i], sizeof(gpuB0[0][0]) * atomsize); err3 = hipMalloc((void **)&gpuC0[i], sizeof(gpuC0[0][0]) * atomsize); if ((err1 != hipSuccess) || (err2 != hipSuccess) || (err3 != hipSuccess) ) { fprintf(stderr, "step1 : !!!! GPU memory allocation error\n"); return CUBLASTEST_FAILED; } } err1 = hipMalloc((void **)&gpuA_arr, sizeof(*gpuA0) * batchsize); err2 = hipMalloc((void **)&gpuB_arr, sizeof(*gpuB0) * batchsize); err3 = hipMalloc((void **)&gpuC_arr, sizeof(*gpuC0) * batchsize); if ((err1 != hipSuccess) || (err2 != hipSuccess) || (err3 != hipSuccess) ) { fprintf(stderr, "step2 : !!!! GPU memory allocation error\n"); return CUBLASTEST_FAILED; } batchsize = batchsize; err1 = hipMalloc((void **)&dict, sizeof(*dict)*atomsize*atomnum); err2 = hipMalloc((void **)&sigs, sizeof(*sigs)*atomsize*batchsize); err3 = hipMalloc((void **)&prkdx, sizeof(*prkdx)*atomnum*batchsize); if ((err1 != hipSuccess) || (err2 != hipSuccess) || (err3 != hipSuccess)) { fprintf(stderr, "step3 !!!! GPU memory allocation error\n"); return CUBLASTEST_FAILED; } hipMalloc((void**)&gammaindtab_arr,sizeof(*gammaindtab_arr)*batchsize*sparsenum); hipMalloc((void **)&b_arr, sizeof(*b_arr)*atomsize*batchsize); hipblasCreate (& handle ); // initialize CUBLAS context hipMalloc((void **)&gamma_arr,atomnum*batchsize*sizeof(*gamma_arr) ); err1 = hipMemcpy(gpuA_arr, gpuA0,batchsize * sizeof(*gpuA_arr), hipMemcpyHostToDevice); err2 = hipMemcpy(gpuB_arr, gpuB0,batchsize * sizeof(*gpuB_arr), hipMemcpyHostToDevice); err3 = hipMemcpy(gpuC_arr, gpuC0,batchsize * sizeof(*gpuC_arr), hipMemcpyHostToDevice); if ((err1 != hipSuccess) ||(err2 != hipSuccess) || (err3 != hipSuccess)) { fprintf(stderr, "step3 : !!!! GPU memory allocation error\n"); return HIPBLAS_STATUS_ALLOC_FAILED; } return EXIT_SUCCESS ; } int __omp(float *A, float *b, float *c,float tol,int _batchsize) { hipblasStatus_t stat ; // CUBLAS functions status hipError_t err1; int info; batchsize = _batchsize; int elnum; // the number of non-zero element hipblasStatus_t status1, status2, status3; float alpha; float beta; /***************load dict and signals for compute prkdx: the inner products between signal sigs an dictionary atoms*************************************/ status1 = hipblasSetMatrix(atomsize,atomnum,sizeof(*dict),A,atomsize,dict,atomsize); //a -> d_a status2 = hipblasSetMatrix(atomsize,batchsize,sizeof(*sigs),b,atomsize,sigs,atomsize); //b -> d_b status3 = hipblasSetMatrix(atomsize,batchsize,sizeof(*sigs),b,atomsize,b_arr,atomsize); //b -> d_b if ((status1 != HIPBLAS_STATUS_SUCCESS)||(status2 != HIPBLAS_STATUS_SUCCESS)||(status3 != HIPBLAS_STATUS_SUCCESS)) { fprintf(stderr, "!!!! in omp:loc1 GPU access error (write)\n"); return CUBLASTEST_FAILED; } /************ main loop ****************************/ for(elnum = 0; elnum < sparsenum; elnum ++) { alpha = 1; beta = 0; /***********************compute prkdx*****************************/ stat = hipblasSgemm(handle, HIPBLAS_OP_T,HIPBLAS_OP_N, atomnum,batchsize,atomsize, &alpha, dict, atomsize, sigs, atomsize, &beta, prkdx, atomnum ); if(stat != HIPBLAS_STATUS_SUCCESS) { hipError_t cudaStatus = hipGetLastError(); fprintf(stderr, "!!!! 
GPU program execution error:compute prkdx \n"); return CUBLASTEST_FAILED; } /*********** find the atom corresponding to the maximum products with the sigs *********/ /*************** and load the atom pointer to the pointer table ***********************/ hipLaunchKernelGGL(( _findmax), dim3(batchsize),dim3(BATCHSIZE/2), 0, 0, prkdx, 4, atomnum, gammaindtab_arr, elnum, sparsenum); /***************compute gamma*************************************/ //load the dictionary atoms and signals vector for each batch // ****************** this can be improved by dedicted kernel function***** // memory read from gpu to gpu memcpy_atom(blocksize,elnum + 1); hipLaunchKernelGGL(( _memcpy_b), dim3((batchsize - 1)/blocksize + 1),dim3(blocksize), 0, 0, b_arr, //the pointer to the patchsignals gpuB_arr, //the pointer to each batch matrix batchsize, atomsize //atom size ); // do the computation by cuBlas API stat = hipblasSgelsBatched(handle,HIPBLAS_OP_N,atomsize,elnum + 1,1,gpuA_arr,atomsize,gpuB_arr,atomsize, &info,NULL,batchsize); hipDeviceSynchronize(); if(stat != HIPBLAS_STATUS_SUCCESS) { hipError_t cudaStatus = hipGetLastError(); fprintf(stderr, "!!!! GPU program execution error : cublas Error=%d, cuda Error=%d,(%s)\n", status1, cudaStatus,hipGetErrorString(cudaStatus)); return CUBLASTEST_FAILED; } if(elnum == sparsenum - 1) break; // /*************compute redisual**********************/ //load data //load dictionary from device to device, this can be done by dedicated kernel //load b from device to devie memcpy_atom(blocksize,elnum + 1); hipLaunchKernelGGL(( _memcpy_b), dim3((batchsize - 1)/blocksize + 1),dim3(blocksize), 0, 0, b_arr, //the pointer to the patch signals gpuC_arr, //the pointer to each batch matrix batchsize, atomsize //atom size ); alpha = -1; beta = 1; // *************do computation*********************** status1 = hipblasSgemmBatched(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, atomsize, 1, elnum + 1, &alpha, (const float**)gpuA_arr, atomsize, (const float**)gpuB_arr, atomnum, &beta, gpuC_arr, atomsize, batchsize); if (status1 != HIPBLAS_STATUS_SUCCESS) { hipError_t cudaStatus = hipGetLastError(); fprintf(stderr, "!!!! GPU program execution error : cublas Error=%d, cuda Error=%d,(%s)\n", status1, cudaStatus,hipGetErrorString(cudaStatus)); return CUBLASTEST_FAILED; } // get output residual hipLaunchKernelGGL(( _memcpy_sig), dim3((batchsize - 1)/blocksize + 1),dim3(blocksize), 0, 0, gpuC_arr, //the pointer to the residual sigs, batchsize, atomsize //atom size ); } int blockperGrid = (batchsize - 1)/blocksize + 1; hipLaunchKernelGGL(( _buildc), dim3(blockperGrid),dim3(blocksize), 0, 0, gpuB_arr, //the pointer to the dictionary gamma_arr, //the pointer to each batch matrix gammaindtab_arr, //table indicates the atom indice to be copied sparsenum, batchsize, atomnum //number of atoms to be copied for each batch ); err1 = hipMemcpy(c, gamma_arr, sizeof(float)*batchsize*atomnum, hipMemcpyDeviceToHost); if((err1 != hipSuccess)) { fprintf(stderr, "!!!! GPU access error in indx read(read) in A readback\n"); return CUBLASTEST_FAILED; } printf("end of prg\n"); return EXIT_SUCCESS ; } void __release() { CLEANUP(); hipblasDestroy( handle ); // destroy CUBLAS context } extern "C" { void omp(float *A, float *B, float *C, float tol, int _batchsize) { __omp(A, B, C, tol,_batchsize); } int init(int _atomsize,int _atomnum, int _batchsize, int _sparsenum,int _blocksize) { return __init(_atomsize,_atomnum,_batchsize,_sparsenum,_blocksize); } void release() { __release(); } }
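A minimal illustrative sketch (not part of either listed file) of the launch-syntax translation this .hip/.cu pair demonstrates: hipify rewrites every CUDA triple-chevron launch into HIP's hipLaunchKernelGGL macro with explicit shared-memory and stream arguments. The kernel name scale and the helper launch_scale below are hypothetical.

#include "hip/hip_runtime.h"

__global__ void scale(float *v, float a, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // the built-in indices are identical in CUDA and HIP
    if (i < n) v[i] *= a;
}

void launch_scale(float *d_v, float a, int n) {
    dim3 block(256);
    dim3 grid((n + block.x - 1) / block.x);
    // CUDA source form:  scale<<<grid, block>>>(d_v, a, n);
    // hipify's macro form spells out the shared-memory bytes and stream (0, 0) explicitly:
    hipLaunchKernelGGL(scale, grid, block, 0, 0, d_v, a, n);
}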
169696f62855bb8326701732bf3311eaa89562c5.cu
# include <stdio.h> # include <stdlib.h> # include <cuda_runtime.h> # include "cublas_v2.h" # include "batchCUBLAS.h" # define IDX2C(i,j,ld) ((( j )*( ld ))+( i )) float **gpuA_arr = NULL; float **gpuA0; float **gpuB_arr = NULL; float **gpuB0; float **gpuC_arr = NULL; float **gpuC0; int batchsize; int atomsize; int atomnum; int sparsenum; int blocksize; int *gammaindtab_arr; float *b_arr; float *dict; float *sigs; float *prkdx; float *gamma_arr; #define BATCHSIZE 2048 __global__ void _findmax(const float * inputmatrix, const int matrow, const int matcol, int * outputtable, const int _elnum, const int _sparsenum) { int row = blockIdx.x; int ty = threadIdx.x; int remainlen = matcol; int searchlen; int oddflag = 0; __shared__ float row_vec[BATCHSIZE]; __shared__ int max_index[BATCHSIZE]; __syncthreads(); float maxval = 1e-20; int maxindx = 10086; int startind = 0; int strid; while(remainlen > 0) { __syncthreads(); //compute the start pt from which to fetch data //compute the stride //if not at the end stride = BATCHSIZE/2 //else stride = rest data len /2 //if rest data len is odd set the odd flag if(remainlen >= BATCHSIZE) searchlen = BATCHSIZE; else { searchlen = remainlen; if((int)fmod((float)searchlen,(float)2) == 1) oddflag = 1; } remainlen -= searchlen; strid = searchlen / 2; //load data if( ty < strid) { row_vec[2 * ty] = fabsf(inputmatrix[row * matcol + 2*ty + startind]); max_index[2 * ty] = 2*ty + startind; row_vec[2*ty+1] = fabsf(inputmatrix[row * matcol + 2*ty + 1 + startind]); max_index[2*ty+1] = 2*ty + 1 + startind; } //if odd flag == 1 //add a extreme val at the end of the data array if(oddflag == 1 &&ty == 0) { row_vec[searchlen - 1] = fabsf(inputmatrix[row * matcol + (searchlen - 1) + startind]); max_index[searchlen - 1] = searchlen - 1 + startind; row_vec[searchlen] = 1e-10; max_index[searchlen] = 10086; } if(oddflag == 1) { strid += 1; } //do comparison while(strid >= 1) { __syncthreads(); if(ty < strid) { if(row_vec[ty] > row_vec[ty+strid]) { row_vec[ty] = row_vec[ty]; max_index[ty] = max_index[ty]; } else { row_vec[ty] = row_vec[ty+strid]; max_index[ty] = max_index[ty+strid]; } } if(strid == 1) strid = 0; else strid = (strid - 1)/2 + 1 ; } __syncthreads(); if(ty == 0) { //compare with the max val //renew the max val and max ind if(row_vec[0] > maxval ) { maxval = row_vec[0]; maxindx = max_index[0]; } } startind += searchlen; } if(ty == 0) { outputtable[_elnum + row * _sparsenum] = maxindx; } } //copy atoms in the dictionray to compute array //each thread process one batch __global__ void _buildc( float **dense, //the pointer to the dictionary float *sparse, //the pointer to each batch matrix int * _gammaindtab, //table indicates the atom indice to be copied const int _sparsenum, const int _wrkonsize, const int _atomnum //number of atoms to be copied for each batch ) { //determine the batch and stop some overdue thread int batchind = blockDim.x * blockIdx.x + threadIdx.x; float *srcpt = NULL; float *dstpt = NULL; int atomind; if(batchind < _wrkonsize) { //read the wrkontab and choose the batch matrix pointer srcpt = dense[batchind]; for(int i = 0; i<_atomnum; i++) { *(sparse + batchind * _atomnum + i) = 0; } //enter in the loop for(int i = 0; i < _sparsenum; i++) { //choose the atom pointer atomind = _gammaindtab[batchind * _sparsenum + i]; dstpt = sparse + atomind + batchind * _atomnum; //dst[batchind]; //read *dstpt = srcpt[i]; __syncthreads(); } } } //copy atoms in the dictionray to compute array //each thread process one batch __global__ void _memcpy_atom( float *src, 
//the pointer to the dictionary float **dst, //the pointer to each batch matrix int * _gammaindtab, //table indicates the atom indice to be copied const int _sparsenum, const int _wrkonsize, const int _atomnum, //number of atoms to be copied for each batch const int _atomsize //atom size ) { //determine the batch and stop some overdue thread int batchind = blockDim.x * blockIdx.x + threadIdx.x; float *srcpt = NULL; float *dstpt = NULL; int atomind; if(batchind < _wrkonsize) { //read the wrkontab and choose the batch matrix pointer dstpt = dst[batchind]; //enter in the loop for(int i = 0; i < _atomnum; i++) { //choose the atom pointer atomind = _gammaindtab[batchind * _sparsenum + i]; srcpt = src + atomind *_atomsize; //read for(int j = 0; j < _atomsize; j++) { dstpt[j] = srcpt[j]; } __syncthreads(); dstpt += _atomsize; } } } __global__ void _memcpy_sig( float **src, //the pointer to the dictionary float *dst, //the pointer to each batch matrix const int _wrkonsize, const int _atomsize //atom size ) { //determine the batch and stop some overdue thread int batchind = blockDim.x * blockIdx.x + threadIdx.x; float *srcpt = NULL; float *dstpt = NULL; if(batchind < _wrkonsize) { //read the wrkontab and choose the batch matrix pointer dstpt = dst + batchind*_atomsize;//dst[batchind]; srcpt = src[batchind]; //read for(int j = 0; j < _atomsize; j++) { dstpt[j] = srcpt[j]; } __syncthreads(); } } //each thread process one batch __global__ void _memcpy_b( float *src, //the pointer to the dictionary float **dst, //the pointer to each batch matrix const int _wrkonsize, const int _atomsize //atom size ) { //determine the batch and stop some overdue thread int batchind = blockDim.x * blockIdx.x + threadIdx.x; float *srcpt = NULL; float *dstpt = NULL; if(batchind < _wrkonsize) { //read the wrkontab and choose the batch matrix pointer dstpt = dst[batchind]; srcpt = src + batchind *_atomsize; //read for(int j = 0; j < _atomsize; j++) { dstpt[j] = srcpt[j]; } __syncthreads(); } } //function calls _memcpy_atom void memcpy_atom(int _blocksize,int elnum) { //compute grid size int blockperGrid = (batchsize - 1)/_blocksize + 1; //do loading _memcpy_atom<<<blockperGrid,_blocksize>>> ( dict, //the pointer to the dictionary gpuA_arr, //the pointer to each batch matrix gammaindtab_arr, //table indicates the atom indice to be copied sparsenum, batchsize, elnum, //number of atoms to be copied for each batch atomsize //atom size ); } void CLEANUP() { for(int i = 0; i < batchsize; ++i) { if(gpuA0[i]) cudaFree(gpuA0[i]); if(gpuB0[i]) cudaFree(gpuB0[i]); if(gpuC0[i]) cudaFree(gpuC0[i]); } if (gpuA0) free(gpuA0); if (gpuB0) free(gpuB0); if (gpuC0) free(gpuC0); if (gpuA_arr) cudaFree(gpuA_arr); if (gpuB_arr) cudaFree(gpuB_arr); if (gpuC_arr) cudaFree(gpuC_arr); cudaFree(dict); cudaFree(sigs); cudaFree(prkdx); cudaFree(gammaindtab_arr); cudaFree(b_arr); cudaFree(gamma_arr); } cublasHandle_t handle ; // CUBLAS context int __init(int _atomsize,int _atomnum, int _batchsize, int _sparsenum, int _blocksize) { cudaError_t err1, err2, err3; atomsize = _atomsize; atomnum = _atomnum; batchsize = _batchsize; sparsenum = _sparsenum; blocksize = _blocksize; gpuA0 = (float **)malloc(sizeof(*gpuA0)*batchsize); gpuB0 = (float **)malloc(sizeof(*gpuB0)*batchsize); gpuC0 = (float **)malloc(sizeof(*gpuB0)*batchsize); for(int i = 0; i < batchsize; i++) { err1 = cudaMalloc((void **)&gpuA0[i], sizeof(gpuA0[0][0]) * atomsize * atomnum); err2 = cudaMalloc((void **)&gpuB0[i], sizeof(gpuB0[0][0]) * atomsize); err3 = cudaMalloc((void **)&gpuC0[i], 
sizeof(gpuC0[0][0]) * atomsize); if ((err1 != cudaSuccess) || (err2 != cudaSuccess) || (err3 != cudaSuccess) ) { fprintf(stderr, "step1 : !!!! GPU memory allocation error\n"); return CUBLASTEST_FAILED; } } err1 = cudaMalloc((void **)&gpuA_arr, sizeof(*gpuA0) * batchsize); err2 = cudaMalloc((void **)&gpuB_arr, sizeof(*gpuB0) * batchsize); err3 = cudaMalloc((void **)&gpuC_arr, sizeof(*gpuC0) * batchsize); if ((err1 != cudaSuccess) || (err2 != cudaSuccess) || (err3 != cudaSuccess) ) { fprintf(stderr, "step2 : !!!! GPU memory allocation error\n"); return CUBLASTEST_FAILED; } batchsize = batchsize; err1 = cudaMalloc((void **)&dict, sizeof(*dict)*atomsize*atomnum); err2 = cudaMalloc((void **)&sigs, sizeof(*sigs)*atomsize*batchsize); err3 = cudaMalloc((void **)&prkdx, sizeof(*prkdx)*atomnum*batchsize); if ((err1 != cudaSuccess) || (err2 != cudaSuccess) || (err3 != cudaSuccess)) { fprintf(stderr, "step3 !!!! GPU memory allocation error\n"); return CUBLASTEST_FAILED; } cudaMalloc((void**)&gammaindtab_arr,sizeof(*gammaindtab_arr)*batchsize*sparsenum); cudaMalloc((void **)&b_arr, sizeof(*b_arr)*atomsize*batchsize); cublasCreate (& handle ); // initialize CUBLAS context cudaMalloc((void **)&gamma_arr,atomnum*batchsize*sizeof(*gamma_arr) ); err1 = cudaMemcpy(gpuA_arr, gpuA0,batchsize * sizeof(*gpuA_arr), cudaMemcpyHostToDevice); err2 = cudaMemcpy(gpuB_arr, gpuB0,batchsize * sizeof(*gpuB_arr), cudaMemcpyHostToDevice); err3 = cudaMemcpy(gpuC_arr, gpuC0,batchsize * sizeof(*gpuC_arr), cudaMemcpyHostToDevice); if ((err1 != cudaSuccess) ||(err2 != cudaSuccess) || (err3 != cudaSuccess)) { fprintf(stderr, "step3 : !!!! GPU memory allocation error\n"); return CUBLAS_STATUS_ALLOC_FAILED; } return EXIT_SUCCESS ; } int __omp(float *A, float *b, float *c,float tol,int _batchsize) { cublasStatus_t stat ; // CUBLAS functions status cudaError_t err1; int info; batchsize = _batchsize; int elnum; // the number of non-zero element cublasStatus_t status1, status2, status3; float alpha; float beta; /***************load dict and signals for compute prkdx: the inner products between signal sigs an dictionary atoms*************************************/ status1 = cublasSetMatrix(atomsize,atomnum,sizeof(*dict),A,atomsize,dict,atomsize); //a -> d_a status2 = cublasSetMatrix(atomsize,batchsize,sizeof(*sigs),b,atomsize,sigs,atomsize); //b -> d_b status3 = cublasSetMatrix(atomsize,batchsize,sizeof(*sigs),b,atomsize,b_arr,atomsize); //b -> d_b if ((status1 != CUBLAS_STATUS_SUCCESS)||(status2 != CUBLAS_STATUS_SUCCESS)||(status3 != CUBLAS_STATUS_SUCCESS)) { fprintf(stderr, "!!!! in omp:loc1 GPU access error (write)\n"); return CUBLASTEST_FAILED; } /************ main loop ****************************/ for(elnum = 0; elnum < sparsenum; elnum ++) { alpha = 1; beta = 0; /***********************compute prkdx*****************************/ stat = cublasSgemm(handle, CUBLAS_OP_T,CUBLAS_OP_N, atomnum,batchsize,atomsize, &alpha, dict, atomsize, sigs, atomsize, &beta, prkdx, atomnum ); if(stat != CUBLAS_STATUS_SUCCESS) { cudaError_t cudaStatus = cudaGetLastError(); fprintf(stderr, "!!!! 
GPU program execution error:compute prkdx \n"); return CUBLASTEST_FAILED; } /*********** find the atom corresponding to the maximum products with the sigs *********/ /*************** and load the atom pointer to the pointer table ***********************/ _findmax<<<batchsize,BATCHSIZE/2>>>(prkdx, 4, atomnum, gammaindtab_arr, elnum, sparsenum); /***************compute gamma*************************************/ //load the dictionary atoms and signals vector for each batch // ****************** this can be improved by dedicted kernel function***** // memory read from gpu to gpu memcpy_atom(blocksize,elnum + 1); _memcpy_b<<<(batchsize - 1)/blocksize + 1,blocksize>>>( b_arr, //the pointer to the patchsignals gpuB_arr, //the pointer to each batch matrix batchsize, atomsize //atom size ); // do the computation by cuBlas API stat = cublasSgelsBatched(handle,CUBLAS_OP_N,atomsize,elnum + 1,1,gpuA_arr,atomsize,gpuB_arr,atomsize, &info,NULL,batchsize); cudaDeviceSynchronize(); if(stat != CUBLAS_STATUS_SUCCESS) { cudaError_t cudaStatus = cudaGetLastError(); fprintf(stderr, "!!!! GPU program execution error : cublas Error=%d, cuda Error=%d,(%s)\n", status1, cudaStatus,cudaGetErrorString(cudaStatus)); return CUBLASTEST_FAILED; } if(elnum == sparsenum - 1) break; // /*************compute redisual**********************/ //load data //load dictionary from device to device, this can be done by dedicated kernel //load b from device to devie memcpy_atom(blocksize,elnum + 1); _memcpy_b<<<(batchsize - 1)/blocksize + 1,blocksize>>>( b_arr, //the pointer to the patch signals gpuC_arr, //the pointer to each batch matrix batchsize, atomsize //atom size ); alpha = -1; beta = 1; // *************do computation*********************** status1 = cublasSgemmBatched(handle, CUBLAS_OP_N, CUBLAS_OP_N, atomsize, 1, elnum + 1, &alpha, (const float**)gpuA_arr, atomsize, (const float**)gpuB_arr, atomnum, &beta, gpuC_arr, atomsize, batchsize); if (status1 != CUBLAS_STATUS_SUCCESS) { cudaError_t cudaStatus = cudaGetLastError(); fprintf(stderr, "!!!! GPU program execution error : cublas Error=%d, cuda Error=%d,(%s)\n", status1, cudaStatus,cudaGetErrorString(cudaStatus)); return CUBLASTEST_FAILED; } // get output residual _memcpy_sig<<<(batchsize - 1)/blocksize + 1,blocksize>>>( gpuC_arr, //the pointer to the residual sigs, batchsize, atomsize //atom size ); } int blockperGrid = (batchsize - 1)/blocksize + 1; _buildc<<<blockperGrid,blocksize>>>( gpuB_arr, //the pointer to the dictionary gamma_arr, //the pointer to each batch matrix gammaindtab_arr, //table indicates the atom indice to be copied sparsenum, batchsize, atomnum //number of atoms to be copied for each batch ); err1 = cudaMemcpy(c, gamma_arr, sizeof(float)*batchsize*atomnum, cudaMemcpyDeviceToHost); if((err1 != cudaSuccess)) { fprintf(stderr, "!!!! GPU access error in indx read(read) in A readback\n"); return CUBLASTEST_FAILED; } printf("end of prg\n"); return EXIT_SUCCESS ; } void __release() { CLEANUP(); cublasDestroy( handle ); // destroy CUBLAS context } extern "C" { void omp(float *A, float *B, float *C, float tol, int _batchsize) { __omp(A, B, C, tol,_batchsize); } int init(int _atomsize,int _atomnum, int _batchsize, int _sparsenum,int _blocksize) { return __init(_atomsize,_atomnum,_batchsize,_sparsenum,_blocksize); } void release() { __release(); } }
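A minimal host-side sketch (not part of the files above) of how the extern "C" entry points init/omp/release exported at the end of this pair might be driven. The buffer shapes follow the cublasSetMatrix calls in __omp: the dictionary A is a column-major atomsize x atomnum matrix, the signal batch B is atomsize x batchsize, and C receives atomnum coefficients per signal. The concrete sizes and the random fill are assumptions for illustration only.

#include <stdio.h>
#include <stdlib.h>

// Prototypes for the extern "C" entry points defined in the listing above.
extern "C" {
int  init(int atomsize, int atomnum, int batchsize, int sparsenum, int blocksize);
void omp(float *A, float *B, float *C, float tol, int batchsize);
void release(void);
}

int main(void) {
    const int atomsize = 64, atomnum = 256, batchsize = 2048, sparsenum = 8, blocksize = 256;  // hypothetical sizes

    float *A = (float *)malloc(sizeof(float) * atomsize * atomnum);    // dictionary
    float *B = (float *)malloc(sizeof(float) * atomsize * batchsize);  // batch of signals
    float *C = (float *)malloc(sizeof(float) * atomnum  * batchsize);  // sparse codes (output)
    for (int i = 0; i < atomsize * atomnum;   i++) A[i] = (float)rand() / RAND_MAX;
    for (int i = 0; i < atomsize * batchsize; i++) B[i] = (float)rand() / RAND_MAX;

    if (init(atomsize, atomnum, batchsize, sparsenum, blocksize) != EXIT_SUCCESS) return 1;
    omp(A, B, C, 1e-6f, batchsize);   // tol is accepted but never read inside __omp
    release();

    printf("first coefficient of the first signal: %f\n", C[0]);
    free(A); free(B); free(C);
    return 0;
}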
b5a8008b4a54ef499d7de7e806f892562544d08b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Simple optimized box blur //by: Bradley Fusting //Date: 5/1/2021 //This program reads an image and performs a simple averaging of pixels within a supplied radius. For optimization, //it does this by computing a running sum for each column within the radius, then averaging that sum. Then the same for //each row. This should allow it to be easily parallelized by column then by row, since each call is independent. #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <time.h> #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image_write.h" //Computes a single row of the destination image by summing radius pixels //Parameters: src: Teh src image as width*height*bpp 1d array // dest: pre-allocated array of size width*height*bpp to receive summed row // row: The current row number // pWidth: The width of the image * the bpp (i.e. number of bytes in a row) // rad: the width of the blur // bpp: The bits per pixel in the src image //Returns: None __global__ void computeRow(float* src,float* dest,int height,int pWidth,int radius,int bpp){ int i; int row = (blockIdx.x*blockDim.x)+threadIdx.x; if(row >= height-1 ){ return; } int bradius=radius*bpp; //initialize the first bpp elements so that nothing fails for (i=0;i<bpp;i++) dest[row*pWidth+i]=src[row*pWidth+i]; //start the sum up to radius*2 by only adding (nothing to subtract yet) for (i=bpp;i<bradius*2*bpp;i++) dest[row*pWidth+i]=src[row*pWidth+i]+dest[row*pWidth+i-bpp]; for (i=bradius*2+bpp;i<pWidth;i++) dest[row*pWidth+i]=src[row*pWidth+i]+dest[row*pWidth+i-bpp]-src[row*pWidth+i-2*bradius-bpp]; //now shift everything over by radius spaces and blank out the last radius items to account for sums at the end of the kernel, instead of the middle for (i=bradius;i<pWidth;i++){ dest[row*pWidth+i-bradius]=dest[row*pWidth+i]/(radius*2+1); } //now the first and last radius values make no sense, so blank them out for (i=0;i<bradius;i++){ dest[row*pWidth+i]=0; dest[(row+1)*pWidth-1-i]=0; } } //Computes a single column of the destination image by summing radius pixels //Parameters: src: Teh src image as width*height*bpp 1d array // dest: pre-allocated array of size width*height*bpp to receive summed row // col: The current column number // pWidth: The width of the image * the bpp (i.e. 
number of bytes in a row) // height: The height of the source image // radius: the width of the blur // bpp: The bits per pixel in the src image //Returns: None __global__ void computeColumn(uint8_t* src,float* dest,int co,int pWidth,int height,int radius,int bpp){ int col = (blockIdx.x*blockDim.x)+threadIdx.x; int i; if(col >= pWidth-1){ return; } //initialize the first element of each column dest[col]=src[col]; //start tue sum up to radius*2 by only adding for (i=1;i<=radius*2;i++) dest[i*pWidth+col]=src[i*pWidth+col]+dest[(i-1)*pWidth+col]; for (i=radius*2+1;i<height;i++) dest[i*pWidth+col]=src[i*pWidth+col]+dest[(i-1)*pWidth+col]-src[(i-2*radius-1)*pWidth+col]; //now shift everything up by radius spaces and blank out the last radius items to account for sums at the end of the kernel, instead of the middle for (i=radius;i<height;i++){ dest[(i-radius)*pWidth+col]=dest[i*pWidth+col]/(radius*2+1); } //now the first and last radius values make no sense, so blank them out for (i=0;i<radius;i++){ dest[i*pWidth+col]=0; dest[(height-1)*pWidth-i*pWidth+col]=0; } } //Usage: Prints the usage for this program //Parameters: name: The name of the program //Returns: Always returns -1 int Usage(char* name){ printf("%s: <filename> <blur radius>\n\tblur radius=pixels to average on any side of the current pixel\n",name); return -1; } int main(int argc,char** argv){ long t1,t2; int radius=0; int i; int width,height,bpp,pWidth; char* filename; uint8_t *img; float* dest,*mid; uint8_t *img2; int blockSize = 256; if (argc!=3) return Usage(argv[0]); filename=argv[1]; sscanf(argv[2],"%d",&radius); img=stbi_load(filename,&width,&height,&bpp,0); pWidth=width*bpp; //actual width in bytes of an image row int blockCount = (width+255)/256; hipMallocManaged(&img2, sizeof(uint8_t)*pWidth*height); hipMallocManaged(&dest, sizeof(float)*pWidth*height); hipMallocManaged(&mid, sizeof(float)*pWidth*height); hipMemcpy(img2, img, sizeof(uint8_t)*pWidth*height, hipMemcpyHostToDevice); t1=time(NULL); hipLaunchKernelGGL(( computeColumn), dim3(blockCount), dim3(blockSize), 0, 0, img2,mid,i,pWidth,height,radius,bpp); hipDeviceSynchronize(); stbi_image_free(img); //done with image hipFree(img2); hipLaunchKernelGGL(( computeRow), dim3((height+255)/256), dim3(blockSize), 0, 0, mid,dest,height,pWidth,radius,bpp); hipDeviceSynchronize(); t2=time(NULL); //done with mid hipFree(mid); //now back to int8 so we can save it hipMallocManaged(&img, sizeof(uint8_t)*pWidth*height); for (i=0;i<pWidth*height;i++){ img[i]=(uint8_t)dest[i]; } hipFree(dest); stbi_write_png("output.png",width,height,bpp,img,bpp*width); hipFree(img); printf("Blur with radius %d complete in %ld seconds\n",radius,t2-t1); }
b5a8008b4a54ef499d7de7e806f892562544d08b.cu
//Simple optimized box blur //by: Bradley Fusting //Date: 5/1/2021 //This program reads an image and performs a simple averaging of pixels within a supplied radius. For optimization, //it does this by computing a running sum for each column within the radius, then averaging that sum. Then the same for //each row. This should allow it to be easily parallelized by column then by row, since each call is independent. #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <time.h> #define STB_IMAGE_IMPLEMENTATION #include "stb_image.h" #define STB_IMAGE_WRITE_IMPLEMENTATION #include "stb_image_write.h" //Computes a single row of the destination image by summing radius pixels //Parameters: src: Teh src image as width*height*bpp 1d array // dest: pre-allocated array of size width*height*bpp to receive summed row // row: The current row number // pWidth: The width of the image * the bpp (i.e. number of bytes in a row) // rad: the width of the blur // bpp: The bits per pixel in the src image //Returns: None __global__ void computeRow(float* src,float* dest,int height,int pWidth,int radius,int bpp){ int i; int row = (blockIdx.x*blockDim.x)+threadIdx.x; if(row >= height-1 ){ return; } int bradius=radius*bpp; //initialize the first bpp elements so that nothing fails for (i=0;i<bpp;i++) dest[row*pWidth+i]=src[row*pWidth+i]; //start the sum up to radius*2 by only adding (nothing to subtract yet) for (i=bpp;i<bradius*2*bpp;i++) dest[row*pWidth+i]=src[row*pWidth+i]+dest[row*pWidth+i-bpp]; for (i=bradius*2+bpp;i<pWidth;i++) dest[row*pWidth+i]=src[row*pWidth+i]+dest[row*pWidth+i-bpp]-src[row*pWidth+i-2*bradius-bpp]; //now shift everything over by radius spaces and blank out the last radius items to account for sums at the end of the kernel, instead of the middle for (i=bradius;i<pWidth;i++){ dest[row*pWidth+i-bradius]=dest[row*pWidth+i]/(radius*2+1); } //now the first and last radius values make no sense, so blank them out for (i=0;i<bradius;i++){ dest[row*pWidth+i]=0; dest[(row+1)*pWidth-1-i]=0; } } //Computes a single column of the destination image by summing radius pixels //Parameters: src: Teh src image as width*height*bpp 1d array // dest: pre-allocated array of size width*height*bpp to receive summed row // col: The current column number // pWidth: The width of the image * the bpp (i.e. 
number of bytes in a row) // height: The height of the source image // radius: the width of the blur // bpp: The bits per pixel in the src image //Returns: None __global__ void computeColumn(uint8_t* src,float* dest,int co,int pWidth,int height,int radius,int bpp){ int col = (blockIdx.x*blockDim.x)+threadIdx.x; int i; if(col >= pWidth-1){ return; } //initialize the first element of each column dest[col]=src[col]; //start tue sum up to radius*2 by only adding for (i=1;i<=radius*2;i++) dest[i*pWidth+col]=src[i*pWidth+col]+dest[(i-1)*pWidth+col]; for (i=radius*2+1;i<height;i++) dest[i*pWidth+col]=src[i*pWidth+col]+dest[(i-1)*pWidth+col]-src[(i-2*radius-1)*pWidth+col]; //now shift everything up by radius spaces and blank out the last radius items to account for sums at the end of the kernel, instead of the middle for (i=radius;i<height;i++){ dest[(i-radius)*pWidth+col]=dest[i*pWidth+col]/(radius*2+1); } //now the first and last radius values make no sense, so blank them out for (i=0;i<radius;i++){ dest[i*pWidth+col]=0; dest[(height-1)*pWidth-i*pWidth+col]=0; } } //Usage: Prints the usage for this program //Parameters: name: The name of the program //Returns: Always returns -1 int Usage(char* name){ printf("%s: <filename> <blur radius>\n\tblur radius=pixels to average on any side of the current pixel\n",name); return -1; } int main(int argc,char** argv){ long t1,t2; int radius=0; int i; int width,height,bpp,pWidth; char* filename; uint8_t *img; float* dest,*mid; uint8_t *img2; int blockSize = 256; if (argc!=3) return Usage(argv[0]); filename=argv[1]; sscanf(argv[2],"%d",&radius); img=stbi_load(filename,&width,&height,&bpp,0); pWidth=width*bpp; //actual width in bytes of an image row int blockCount = (width+255)/256; cudaMallocManaged(&img2, sizeof(uint8_t)*pWidth*height); cudaMallocManaged(&dest, sizeof(float)*pWidth*height); cudaMallocManaged(&mid, sizeof(float)*pWidth*height); cudaMemcpy(img2, img, sizeof(uint8_t)*pWidth*height, cudaMemcpyHostToDevice); t1=time(NULL); computeColumn<<<blockCount, blockSize>>>(img2,mid,i,pWidth,height,radius,bpp); cudaDeviceSynchronize(); stbi_image_free(img); //done with image cudaFree(img2); computeRow<<<(height+255)/256, blockSize>>>(mid,dest,height,pWidth,radius,bpp); cudaDeviceSynchronize(); t2=time(NULL); //done with mid cudaFree(mid); //now back to int8 so we can save it cudaMallocManaged(&img, sizeof(uint8_t)*pWidth*height); for (i=0;i<pWidth*height;i++){ img[i]=(uint8_t)dest[i]; } cudaFree(dest); stbi_write_png("output.png",width,height,bpp,img,bpp*width); cudaFree(img); printf("Blur with radius %d complete in %ld seconds\n",radius,t2-t1); }
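The blur in this pair keeps the per-pixel cost constant by maintaining a running window sum instead of re-averaging 2*radius+1 samples at every pixel. A single-channel CPU sketch of that idea (a hypothetical helper, not part of the file) may make the computeRow/computeColumn logic easier to follow: each step adds the sample entering the window, subtracts the one leaving it, and divides by the window width; the first and last radius outputs are simply zeroed, as the kernels above do.

// Illustrative 1-D running-sum box blur (hypothetical helper).
void boxBlur1D(const float *src, float *dst, int n, int radius) {
    int win = 2 * radius + 1;
    float sum = 0.0f;
    for (int i = 0; i < win && i < n; i++) sum += src[i];              // prime the first window
    for (int i = radius; i + radius < n; i++) {
        dst[i] = sum / win;                                            // centred average, O(1) per output
        if (i + radius + 1 < n) sum += src[i + radius + 1] - src[i - radius];
    }
    for (int i = 0; i < radius && i < n; i++) {                        // borders left blank, as in the kernels
        dst[i] = 0.0f;
        dst[n - 1 - i] = 0.0f;
    }
}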
eb10280d8d06338d53c547c1dd9c3f25d35a31c3.hip
// !!! This is a file automatically generated by hipify!!! #include "linalg/linalg_internal_gpu/cuSvd_internal.hpp" #include "cytnx_error.hpp" #include "Type.hpp" #include "utils/lapack_wrapper.h" namespace cytnx{ namespace linalg_internal{ /// cuSvd void cuSvd_internal_cd(const boost::intrusive_ptr<Storage_base> &in, boost::intrusive_ptr<Storage_base> &U, boost::intrusive_ptr<Storage_base> &vT, boost::intrusive_ptr<Storage_base> &S, const cytnx_int32 &M, const cytnx_int32 &N){ signed char jobu, jobv; // if U and vT are NULL ptr, then it will not be computed. jobu = ( U->dtype == Type.Void ) ? 'N' : 'S'; jobv = ( vT->dtype == Type.Void ) ? 'N' : 'S'; // create handles: hipsolverDnHandle_t cusolverH = NULL; checkCudaErrors(hipsolverDnCreate(&cusolverH)); hipDoubleComplex* Mij; checkCudaErrors(hipMalloc((void**)&Mij,M * N * sizeof(hipDoubleComplex))); checkCudaErrors(hipMemcpy(Mij,in->Mem,sizeof(cytnx_complex128)*M*N,hipMemcpyDeviceToDevice)); cytnx_int32 min = ::min(M, N); cytnx_int32 ldA = N, ldu = N, ldvT = min; cytnx_int32 lwork = 0; // query working space : checkCudaErrors(hipsolverDnZgesvd_bufferSize(cusolverH, N, M, &lwork)); // allocate working space: hipDoubleComplex *work; cytnx_double *rwork=NULL; checkCudaErrors(hipMalloc((void**)&work,lwork*sizeof(hipDoubleComplex))); //checkCudaErrors(hipMalloc((void**)&rwork,(min-1)*sizeof(cytnx_double64))); cytnx_int32 *devinfo ; checkCudaErrors(hipMalloc((void**)&devinfo,sizeof(cytnx_int32))); checkCudaErrors(hipMemset(devinfo,0,sizeof(cytnx_int32))); cytnx_int32 info; /// compute: checkCudaErrors(hipsolverDnZgesvd(cusolverH,jobv,jobu,N,M,Mij,ldA,(cytnx_double*)S->Mem,(hipDoubleComplex*)vT->Mem,ldu,(hipDoubleComplex*)U->Mem,ldvT,work,lwork,rwork,devinfo)); // get info checkCudaErrors(hipMemcpy(&info,devinfo,sizeof(cytnx_int32),hipMemcpyDeviceToHost)); cytnx_error_msg(info != 0, "%s %d", "Error in cuBlas function 'hipsolverDnZgesvd': cuBlas INFO = ", info); hipFree(work); hipFree(Mij); hipFree(devinfo); hipsolverDnDestroy(cusolverH); } void cuSvd_internal_cf(const boost::intrusive_ptr<Storage_base> &in, boost::intrusive_ptr<Storage_base> &U, boost::intrusive_ptr<Storage_base> &vT, boost::intrusive_ptr<Storage_base> &S, const cytnx_int32 &M, const cytnx_int32 &N){ signed char jobu, jobv; // if U and vT are NULL ptr, then it will not be computed. jobu = ( U->dtype == Type.Void ) ? 'N' : 'S'; jobv = ( vT->dtype == Type.Void ) ? 
'N' : 'S'; // create handles: hipsolverDnHandle_t cusolverH = NULL; checkCudaErrors(hipsolverDnCreate(&cusolverH)); cuFloatComplex* Mij; checkCudaErrors(hipMalloc((void**)&Mij,M * N * sizeof(cuFloatComplex))); checkCudaErrors(hipMemcpy(Mij,in->Mem,sizeof(cytnx_complex128)*M*N,hipMemcpyDeviceToDevice)); cytnx_int32 min = ::min(M, N); cytnx_int32 ldA = N, ldu = N, ldvT = min; cytnx_int32 lwork = 0; // query working space : checkCudaErrors(hipsolverDnCgesvd_bufferSize(cusolverH, N, M, &lwork)); // allocate working space: cuFloatComplex *work; cytnx_float *rwork=NULL; checkCudaErrors(hipMalloc((void**)&work,lwork*sizeof(cuFloatComplex))); //checkCudaErrors(hipMalloc((void**)&rwork,(min-1)*sizeof(cytnx_float64))); cytnx_int32 *devinfo ; checkCudaErrors(hipMalloc((void**)&devinfo,sizeof(cytnx_int32))); checkCudaErrors(hipMemset(devinfo,0,sizeof(cytnx_int32))); cytnx_int32 info; /// compute: checkCudaErrors(hipsolverDnCgesvd(cusolverH,jobv,jobu,N,M,Mij,ldA,(cytnx_float*)S->Mem,(cuFloatComplex*)vT->Mem,ldu,(cuFloatComplex*)U->Mem,ldvT,work,lwork,rwork,devinfo)); // get info checkCudaErrors(hipMemcpy(&info,devinfo,sizeof(cytnx_int32),hipMemcpyDeviceToHost)); cytnx_error_msg(info != 0, "%s %d", "Error in cuBlas function 'hipsolverDnCgesvd': cuBlas INFO = ", info); hipFree(work); hipFree(Mij); hipFree(devinfo); hipsolverDnDestroy(cusolverH); } void cuSvd_internal_d( const boost::intrusive_ptr<Storage_base> &in, boost::intrusive_ptr<Storage_base> &U, boost::intrusive_ptr<Storage_base> &vT, boost::intrusive_ptr<Storage_base> &S, const cytnx_int32 &M, const cytnx_int32 &N){ signed char jobu, jobv; // if U and vT are NULL ptr, then it will not be computed. jobu = ( U->dtype == Type.Void ) ? 'N' : 'S'; jobv = ( vT->dtype == Type.Void ) ? 'N' : 'S'; // create handles: hipsolverDnHandle_t cusolverH = NULL; checkCudaErrors(hipsolverDnCreate(&cusolverH)); cytnx_double* Mij; checkCudaErrors(hipMalloc((void**)&Mij,M * N * sizeof(cytnx_double))); checkCudaErrors(hipMemcpy(Mij,in->Mem,sizeof(cytnx_double)*M*N,hipMemcpyDeviceToDevice)); cytnx_int32 min = ::min(M, N); cytnx_int32 ldA = N, ldu = N, ldvT = min; cytnx_int32 lwork = 0; // query working space : checkCudaErrors(hipsolverDnDgesvd_bufferSize(cusolverH, N, M, &lwork)); // allocate working space: cytnx_double *work; cytnx_double *rwork=NULL; checkCudaErrors(hipMalloc((void**)&work,lwork*sizeof(cytnx_double))); //checkCudaErrors(hipMalloc((void**)&rwork,(min-1)*sizeof(cytnx_double64))); cytnx_int32 *devinfo ; checkCudaErrors(hipMalloc((void**)&devinfo,sizeof(cytnx_int32))); checkCudaErrors(hipMemset(devinfo,0,sizeof(cytnx_int32))); cytnx_int32 info; /// compute: checkCudaErrors(hipsolverDnDgesvd(cusolverH,jobv,jobu,N,M,Mij,ldA,(cytnx_double*)S->Mem,(cytnx_double*)vT->Mem,ldu,(cytnx_double*)U->Mem,ldvT,work,lwork,rwork,devinfo)); // get info checkCudaErrors(hipMemcpy(&info,devinfo,sizeof(cytnx_int32),hipMemcpyDeviceToHost)); cytnx_error_msg(info != 0, "%s %d", "Error in cuBlas function 'hipsolverDnDgesvd': cuBlas INFO = ", info); hipFree(work); hipFree(Mij); hipFree(devinfo); hipsolverDnDestroy(cusolverH); } void cuSvd_internal_f( const boost::intrusive_ptr<Storage_base> &in, boost::intrusive_ptr<Storage_base> &U, boost::intrusive_ptr<Storage_base> &vT, boost::intrusive_ptr<Storage_base> &S, const cytnx_int32 &M, const cytnx_int32 &N){ signed char jobu, jobv; // if U and vT are NULL ptr, then it will not be computed. jobu = ( U->dtype == Type.Void ) ? 'N' : 'S'; jobv = ( vT->dtype == Type.Void ) ? 
'N' : 'S'; // create handles: hipsolverDnHandle_t cusolverH = NULL; checkCudaErrors(hipsolverDnCreate(&cusolverH)); cytnx_float* Mij; checkCudaErrors(hipMalloc((void**)&Mij,M * N * sizeof(cytnx_float))); checkCudaErrors(hipMemcpy(Mij,in->Mem,sizeof(cytnx_float)*M*N,hipMemcpyDeviceToDevice)); cytnx_int32 min = ::min(M, N); cytnx_int32 ldA = N, ldu = N, ldvT = min; cytnx_int32 lwork = 0; // query working space : checkCudaErrors(hipsolverDnSgesvd_bufferSize(cusolverH, N, M, &lwork)); // allocate working space: cytnx_float *work; cytnx_float *rwork=NULL; checkCudaErrors(hipMalloc((void**)&work,lwork*sizeof(cytnx_float))); //checkCudaErrors(hipMalloc((void**)&rwork,(min-1)*sizeof(cytnx_float64))); cytnx_int32 *devinfo ; checkCudaErrors(hipMalloc((void**)&devinfo,sizeof(cytnx_int32))); checkCudaErrors(hipMemset(devinfo,0,sizeof(cytnx_int32))); cytnx_int32 info; /// compute: checkCudaErrors(hipsolverDnSgesvd(cusolverH,jobv,jobu,N,M,Mij,ldA,(cytnx_float*)S->Mem,(cytnx_float*)vT->Mem,ldu,(cytnx_float*)U->Mem,ldvT,work,lwork,rwork,devinfo)); // get info checkCudaErrors(hipMemcpy(&info,devinfo,sizeof(cytnx_int32),hipMemcpyDeviceToHost)); cytnx_error_msg(info != 0, "%s %d", "Error in cuBlas function 'hipsolverDnSgesvd': cuBlas INFO = ", info); hipFree(work); hipFree(Mij); hipFree(devinfo); hipsolverDnDestroy(cusolverH); } }//linalg_internal }//cytnx
eb10280d8d06338d53c547c1dd9c3f25d35a31c3.cu
#include "linalg/linalg_internal_gpu/cuSvd_internal.hpp" #include "cytnx_error.hpp" #include "Type.hpp" #include "utils/lapack_wrapper.h" namespace cytnx{ namespace linalg_internal{ /// cuSvd void cuSvd_internal_cd(const boost::intrusive_ptr<Storage_base> &in, boost::intrusive_ptr<Storage_base> &U, boost::intrusive_ptr<Storage_base> &vT, boost::intrusive_ptr<Storage_base> &S, const cytnx_int32 &M, const cytnx_int32 &N){ signed char jobu, jobv; // if U and vT are NULL ptr, then it will not be computed. jobu = ( U->dtype == Type.Void ) ? 'N' : 'S'; jobv = ( vT->dtype == Type.Void ) ? 'N' : 'S'; // create handles: cusolverDnHandle_t cusolverH = NULL; checkCudaErrors(cusolverDnCreate(&cusolverH)); cuDoubleComplex* Mij; checkCudaErrors(cudaMalloc((void**)&Mij,M * N * sizeof(cuDoubleComplex))); checkCudaErrors(cudaMemcpy(Mij,in->Mem,sizeof(cytnx_complex128)*M*N,cudaMemcpyDeviceToDevice)); cytnx_int32 min = std::min(M, N); cytnx_int32 ldA = N, ldu = N, ldvT = min; cytnx_int32 lwork = 0; // query working space : checkCudaErrors(cusolverDnZgesvd_bufferSize(cusolverH, N, M, &lwork)); // allocate working space: cuDoubleComplex *work; cytnx_double *rwork=NULL; checkCudaErrors(cudaMalloc((void**)&work,lwork*sizeof(cuDoubleComplex))); //checkCudaErrors(cudaMalloc((void**)&rwork,(min-1)*sizeof(cytnx_double64))); cytnx_int32 *devinfo ; checkCudaErrors(cudaMalloc((void**)&devinfo,sizeof(cytnx_int32))); checkCudaErrors(cudaMemset(devinfo,0,sizeof(cytnx_int32))); cytnx_int32 info; /// compute: checkCudaErrors(cusolverDnZgesvd(cusolverH,jobv,jobu,N,M,Mij,ldA,(cytnx_double*)S->Mem,(cuDoubleComplex*)vT->Mem,ldu,(cuDoubleComplex*)U->Mem,ldvT,work,lwork,rwork,devinfo)); // get info checkCudaErrors(cudaMemcpy(&info,devinfo,sizeof(cytnx_int32),cudaMemcpyDeviceToHost)); cytnx_error_msg(info != 0, "%s %d", "Error in cuBlas function 'cusolverDnZgesvd': cuBlas INFO = ", info); cudaFree(work); cudaFree(Mij); cudaFree(devinfo); cusolverDnDestroy(cusolverH); } void cuSvd_internal_cf(const boost::intrusive_ptr<Storage_base> &in, boost::intrusive_ptr<Storage_base> &U, boost::intrusive_ptr<Storage_base> &vT, boost::intrusive_ptr<Storage_base> &S, const cytnx_int32 &M, const cytnx_int32 &N){ signed char jobu, jobv; // if U and vT are NULL ptr, then it will not be computed. jobu = ( U->dtype == Type.Void ) ? 'N' : 'S'; jobv = ( vT->dtype == Type.Void ) ? 
'N' : 'S'; // create handles: cusolverDnHandle_t cusolverH = NULL; checkCudaErrors(cusolverDnCreate(&cusolverH)); cuFloatComplex* Mij; checkCudaErrors(cudaMalloc((void**)&Mij,M * N * sizeof(cuFloatComplex))); checkCudaErrors(cudaMemcpy(Mij,in->Mem,sizeof(cytnx_complex128)*M*N,cudaMemcpyDeviceToDevice)); cytnx_int32 min = std::min(M, N); cytnx_int32 ldA = N, ldu = N, ldvT = min; cytnx_int32 lwork = 0; // query working space : checkCudaErrors(cusolverDnCgesvd_bufferSize(cusolverH, N, M, &lwork)); // allocate working space: cuFloatComplex *work; cytnx_float *rwork=NULL; checkCudaErrors(cudaMalloc((void**)&work,lwork*sizeof(cuFloatComplex))); //checkCudaErrors(cudaMalloc((void**)&rwork,(min-1)*sizeof(cytnx_float64))); cytnx_int32 *devinfo ; checkCudaErrors(cudaMalloc((void**)&devinfo,sizeof(cytnx_int32))); checkCudaErrors(cudaMemset(devinfo,0,sizeof(cytnx_int32))); cytnx_int32 info; /// compute: checkCudaErrors(cusolverDnCgesvd(cusolverH,jobv,jobu,N,M,Mij,ldA,(cytnx_float*)S->Mem,(cuFloatComplex*)vT->Mem,ldu,(cuFloatComplex*)U->Mem,ldvT,work,lwork,rwork,devinfo)); // get info checkCudaErrors(cudaMemcpy(&info,devinfo,sizeof(cytnx_int32),cudaMemcpyDeviceToHost)); cytnx_error_msg(info != 0, "%s %d", "Error in cuBlas function 'cusolverDnCgesvd': cuBlas INFO = ", info); cudaFree(work); cudaFree(Mij); cudaFree(devinfo); cusolverDnDestroy(cusolverH); } void cuSvd_internal_d( const boost::intrusive_ptr<Storage_base> &in, boost::intrusive_ptr<Storage_base> &U, boost::intrusive_ptr<Storage_base> &vT, boost::intrusive_ptr<Storage_base> &S, const cytnx_int32 &M, const cytnx_int32 &N){ signed char jobu, jobv; // if U and vT are NULL ptr, then it will not be computed. jobu = ( U->dtype == Type.Void ) ? 'N' : 'S'; jobv = ( vT->dtype == Type.Void ) ? 'N' : 'S'; // create handles: cusolverDnHandle_t cusolverH = NULL; checkCudaErrors(cusolverDnCreate(&cusolverH)); cytnx_double* Mij; checkCudaErrors(cudaMalloc((void**)&Mij,M * N * sizeof(cytnx_double))); checkCudaErrors(cudaMemcpy(Mij,in->Mem,sizeof(cytnx_double)*M*N,cudaMemcpyDeviceToDevice)); cytnx_int32 min = std::min(M, N); cytnx_int32 ldA = N, ldu = N, ldvT = min; cytnx_int32 lwork = 0; // query working space : checkCudaErrors(cusolverDnDgesvd_bufferSize(cusolverH, N, M, &lwork)); // allocate working space: cytnx_double *work; cytnx_double *rwork=NULL; checkCudaErrors(cudaMalloc((void**)&work,lwork*sizeof(cytnx_double))); //checkCudaErrors(cudaMalloc((void**)&rwork,(min-1)*sizeof(cytnx_double64))); cytnx_int32 *devinfo ; checkCudaErrors(cudaMalloc((void**)&devinfo,sizeof(cytnx_int32))); checkCudaErrors(cudaMemset(devinfo,0,sizeof(cytnx_int32))); cytnx_int32 info; /// compute: checkCudaErrors(cusolverDnDgesvd(cusolverH,jobv,jobu,N,M,Mij,ldA,(cytnx_double*)S->Mem,(cytnx_double*)vT->Mem,ldu,(cytnx_double*)U->Mem,ldvT,work,lwork,rwork,devinfo)); // get info checkCudaErrors(cudaMemcpy(&info,devinfo,sizeof(cytnx_int32),cudaMemcpyDeviceToHost)); cytnx_error_msg(info != 0, "%s %d", "Error in cuBlas function 'cusolverDnDgesvd': cuBlas INFO = ", info); cudaFree(work); cudaFree(Mij); cudaFree(devinfo); cusolverDnDestroy(cusolverH); } void cuSvd_internal_f( const boost::intrusive_ptr<Storage_base> &in, boost::intrusive_ptr<Storage_base> &U, boost::intrusive_ptr<Storage_base> &vT, boost::intrusive_ptr<Storage_base> &S, const cytnx_int32 &M, const cytnx_int32 &N){ signed char jobu, jobv; // if U and vT are NULL ptr, then it will not be computed. jobu = ( U->dtype == Type.Void ) ? 'N' : 'S'; jobv = ( vT->dtype == Type.Void ) ? 
'N' : 'S'; // create handles: cusolverDnHandle_t cusolverH = NULL; checkCudaErrors(cusolverDnCreate(&cusolverH)); cytnx_float* Mij; checkCudaErrors(cudaMalloc((void**)&Mij,M * N * sizeof(cytnx_float))); checkCudaErrors(cudaMemcpy(Mij,in->Mem,sizeof(cytnx_float)*M*N,cudaMemcpyDeviceToDevice)); cytnx_int32 min = std::min(M, N); cytnx_int32 ldA = N, ldu = N, ldvT = min; cytnx_int32 lwork = 0; // query working space : checkCudaErrors(cusolverDnSgesvd_bufferSize(cusolverH, N, M, &lwork)); // allocate working space: cytnx_float *work; cytnx_float *rwork=NULL; checkCudaErrors(cudaMalloc((void**)&work,lwork*sizeof(cytnx_float))); //checkCudaErrors(cudaMalloc((void**)&rwork,(min-1)*sizeof(cytnx_float64))); cytnx_int32 *devinfo ; checkCudaErrors(cudaMalloc((void**)&devinfo,sizeof(cytnx_int32))); checkCudaErrors(cudaMemset(devinfo,0,sizeof(cytnx_int32))); cytnx_int32 info; /// compute: checkCudaErrors(cusolverDnSgesvd(cusolverH,jobv,jobu,N,M,Mij,ldA,(cytnx_float*)S->Mem,(cytnx_float*)vT->Mem,ldu,(cytnx_float*)U->Mem,ldvT,work,lwork,rwork,devinfo)); // get info checkCudaErrors(cudaMemcpy(&info,devinfo,sizeof(cytnx_int32),cudaMemcpyDeviceToHost)); cytnx_error_msg(info != 0, "%s %d", "Error in cuBlas function 'cusolverDnSgesvd': cuBlas INFO = ", info); cudaFree(work); cudaFree(Mij); cudaFree(devinfo); cusolverDnDestroy(cusolverH); } }//linalg_internal }//cytnx
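The wrappers in this pair hand cuSOLVER the row-major M x N input as a column-major N x M matrix, which amounts to factoring the transpose; that is why the M/N arguments, the jobu/jobv flags, and the U/vT output buffers are swapped in every cusolverDn*gesvd call, and the factors then land row-major in U and vT without an explicit transpose. Below is a stripped-down sketch of the underlying call pattern (workspace query, factorization, devInfo read-back) for the double-precision case; the function name and sizes are assumptions, and error checking is omitted.

// Minimal cusolverDnDgesvd skeleton (illustrative; names and sizes are assumptions).
// Factors a column-major m x n matrix with m >= n, the constraint of the legacy gesvd path.
#include <cuda_runtime.h>
#include <cusolverDn.h>

int gesvd_skeleton(double *d_A, double *d_S, double *d_U, double *d_VT, int m, int n) {
    cusolverDnHandle_t h = NULL;
    cusolverDnCreate(&h);

    int lwork = 0;
    cusolverDnDgesvd_bufferSize(h, m, n, &lwork);               // 1. workspace query

    double *d_work = NULL;
    int *d_info = NULL;
    cudaMalloc((void **)&d_work, sizeof(double) * lwork);
    cudaMalloc((void **)&d_info, sizeof(int));

    // 2. factorization; 'S' requests the economy-size factors, matching the wrappers above
    cusolverDnDgesvd(h, 'S', 'S', m, n, d_A, m, d_S, d_U, m, d_VT, n,
                     d_work, lwork, NULL, d_info);

    // 3. read back the convergence flag (0 means success)
    int info = 0;
    cudaMemcpy(&info, d_info, sizeof(int), cudaMemcpyDeviceToHost);

    cudaFree(d_work);
    cudaFree(d_info);
    cusolverDnDestroy(h);
    return info;
}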
adb7326ab378d40155dc9f49454d2a182ed6f8fd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> /* * Initialize array values on the host. */ void init(int *a, int N) { int i; for (i = 0; i < N; ++i) { a[i] = i; } } /* * Double elements in parallel on the GPU. */ __global__ void doubleElements(int *a, int N) { int i; i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { a[i] *= 2; } } /* * Check all elements have been doubled on the host. */ bool checkElementsAreDoubled(int *a, int N) { int i; for (i = 0; i < N; ++i) { if (a[i] != i*2) return false; } return true; } int main() { int N = 100; int *a; size_t size = N * sizeof(int); /* * Refactor this memory allocation to provide a pointer * `a` that can be used on both the host and the device. */ //a = (int *)malloc(size); hipMallocManaged(&a, size); init(a, N); size_t threads_per_block = 10; size_t number_of_blocks = 10; hipLaunchKernelGGL(( doubleElements), dim3(number_of_blocks), dim3(threads_per_block), 0, 0, a, N); hipDeviceSynchronize(); bool areDoubled = checkElementsAreDoubled(a, N); printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE"); /* * Refactor to free memory that has been allocated to be * accessed by both the host and the device. */ hipFree(a); }
adb7326ab378d40155dc9f49454d2a182ed6f8fd.cu
#include <stdio.h> /* * Initialize array values on the host. */ void init(int *a, int N) { int i; for (i = 0; i < N; ++i) { a[i] = i; } } /* * Double elements in parallel on the GPU. */ __global__ void doubleElements(int *a, int N) { int i; i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) { a[i] *= 2; } } /* * Check all elements have been doubled on the host. */ bool checkElementsAreDoubled(int *a, int N) { int i; for (i = 0; i < N; ++i) { if (a[i] != i*2) return false; } return true; } int main() { int N = 100; int *a; size_t size = N * sizeof(int); /* * Refactor this memory allocation to provide a pointer * `a` that can be used on both the host and the device. */ //a = (int *)malloc(size); cudaMallocManaged(&a, size); init(a, N); size_t threads_per_block = 10; size_t number_of_blocks = 10; doubleElements<<<number_of_blocks, threads_per_block>>>(a, N); cudaDeviceSynchronize(); bool areDoubled = checkElementsAreDoubled(a, N); printf("All elements were doubled? %s\n", areDoubled ? "TRUE" : "FALSE"); /* * Refactor to free memory that has been allocated to be * accessed by both the host and the device. */ cudaFree(a); }
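The exercise in this pair launches exactly N threads (10 blocks of 10 for N = 100). A common hardening of such kernels, shown below as an illustrative variant rather than part of the file, is the grid-stride loop, which keeps the kernel correct for any N and any launch configuration; the name doubleElementsStride is hypothetical.

// Illustrative grid-stride variant of the doubleElements kernel above.
__global__ void doubleElementsStride(int *a, int N)
{
    int stride = gridDim.x * blockDim.x;
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += stride)
        a[i] *= 2;                          // each thread handles i, i+stride, i+2*stride, ...
}

// Usage mirrors the original launch:
//   doubleElementsStride<<<number_of_blocks, threads_per_block>>>(a, N);
// and stays correct even when number_of_blocks * threads_per_block < N.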
fc09d4888518604252579513adffa25e337b1e4d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel3_minus_2_a; int xdim0_update_halo_kernel3_minus_2_a_h = -1; __constant__ int ydim0_update_halo_kernel3_minus_2_a; int ydim0_update_halo_kernel3_minus_2_a_h = -1; __constant__ int xdim1_update_halo_kernel3_minus_2_a; int xdim1_update_halo_kernel3_minus_2_a_h = -1; __constant__ int ydim1_update_halo_kernel3_minus_2_a; int ydim1_update_halo_kernel3_minus_2_a_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel3_minus_2_a * (y) + \ xdim0_update_halo_kernel3_minus_2_a * ydim0_update_halo_kernel3_minus_2_a * \ (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel3_minus_2_a * (y) + \ xdim1_update_halo_kernel3_minus_2_a * ydim1_update_halo_kernel3_minus_2_a * \ (z)) // user function __device__ inline void update_halo_kernel3_minus_2_a_gpu(double *vol_flux_x, double *mass_flux_x, const int *fields) { if (fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x[OPS_ACC0(0, 0, 0)] = -(vol_flux_x[OPS_ACC0(2, 0, 0)]); if (fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x[OPS_ACC1(0, 0, 0)] = -(mass_flux_x[OPS_ACC1(2, 0, 0)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel3_minus_2_a(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel3_minus_2_a + idx_z * 1 * 1 * xdim0_update_halo_kernel3_minus_2_a * ydim0_update_halo_kernel3_minus_2_a; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel3_minus_2_a + idx_z * 1 * 1 * xdim1_update_halo_kernel3_minus_2_a * ydim1_update_halo_kernel3_minus_2_a; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel3_minus_2_a_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel3_minus_2_a(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel3_minus_2_a_execute( ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 3, range, 64)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(64, "update_halo_kernel3_minus_2_a"); OPS_kernels[64].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { 
start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel3_minus_2_a_h || ydim0 != ydim0_update_halo_kernel3_minus_2_a_h || xdim1 != xdim1_update_halo_kernel3_minus_2_a_h || ydim1 != ydim1_update_halo_kernel3_minus_2_a_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel3_minus_2_a, &xdim0, sizeof(int)); xdim0_update_halo_kernel3_minus_2_a_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel3_minus_2_a, &ydim0, sizeof(int)); ydim0_update_halo_kernel3_minus_2_a_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel3_minus_2_a, &xdim1, sizeof(int)); xdim1_update_halo_kernel3_minus_2_a_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel3_minus_2_a, &ydim1, sizeof(int)); ydim1_update_halo_kernel3_minus_2_a_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[64].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel3_minus_2_a), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[64].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[64].mpi_time += t2 - t1; OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel3_minus_2_a(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { 
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 64; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 64; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int)); memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel3_minus_2_a_execute; if (OPS_diags > 1) { ops_timing_realloc(64, "update_halo_kernel3_minus_2_a"); } ops_enqueue_kernel(desc); } #endif
fc09d4888518604252579513adffa25e337b1e4d.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel3_minus_2_a; int xdim0_update_halo_kernel3_minus_2_a_h = -1; __constant__ int ydim0_update_halo_kernel3_minus_2_a; int ydim0_update_halo_kernel3_minus_2_a_h = -1; __constant__ int xdim1_update_halo_kernel3_minus_2_a; int xdim1_update_halo_kernel3_minus_2_a_h = -1; __constant__ int ydim1_update_halo_kernel3_minus_2_a; int ydim1_update_halo_kernel3_minus_2_a_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel3_minus_2_a * (y) + \ xdim0_update_halo_kernel3_minus_2_a * ydim0_update_halo_kernel3_minus_2_a * \ (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel3_minus_2_a * (y) + \ xdim1_update_halo_kernel3_minus_2_a * ydim1_update_halo_kernel3_minus_2_a * \ (z)) // user function __device__ inline void update_halo_kernel3_minus_2_a_gpu(double *vol_flux_x, double *mass_flux_x, const int *fields) { if (fields[FIELD_VOL_FLUX_X] == 1) vol_flux_x[OPS_ACC0(0, 0, 0)] = -(vol_flux_x[OPS_ACC0(2, 0, 0)]); if (fields[FIELD_MASS_FLUX_X] == 1) mass_flux_x[OPS_ACC1(0, 0, 0)] = -(mass_flux_x[OPS_ACC1(2, 0, 0)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel3_minus_2_a(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel3_minus_2_a + idx_z * 1 * 1 * xdim0_update_halo_kernel3_minus_2_a * ydim0_update_halo_kernel3_minus_2_a; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel3_minus_2_a + idx_z * 1 * 1 * xdim1_update_halo_kernel3_minus_2_a * ydim1_update_halo_kernel3_minus_2_a; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel3_minus_2_a_gpu(arg0, arg1, arg2); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_update_halo_kernel3_minus_2_a(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { #else void ops_par_loop_update_halo_kernel3_minus_2_a_execute( ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; #endif // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 3, range, 64)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(64, "update_halo_kernel3_minus_2_a"); OPS_kernels[64].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - 
start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel3_minus_2_a_h || ydim0 != ydim0_update_halo_kernel3_minus_2_a_h || xdim1 != xdim1_update_halo_kernel3_minus_2_a_h || ydim1 != ydim1_update_halo_kernel3_minus_2_a_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel3_minus_2_a, &xdim0, sizeof(int)); xdim0_update_halo_kernel3_minus_2_a_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel3_minus_2_a, &ydim0, sizeof(int)); ydim0_update_halo_kernel3_minus_2_a_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel3_minus_2_a, &xdim1, sizeof(int)); xdim1_update_halo_kernel3_minus_2_a_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel3_minus_2_a, &ydim1, sizeof(int)); ydim1_update_halo_kernel3_minus_2_a_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); char *p_a[3]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[64].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel3_minus_2_a<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[64].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[64].mpi_time += t2 - t1; OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[64].transfer += ops_compute_transfer(dim, start, end, &arg1); } } #ifdef OPS_LAZY void ops_par_loop_update_halo_kernel3_minus_2_a(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = 
block; desc->dim = dim; desc->device = 1; desc->index = 64; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 64; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 3; desc->args = (ops_arg *)malloc(3 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; char *tmp = (char *)malloc(NUM_FIELDS * sizeof(int)); memcpy(tmp, arg2.data, NUM_FIELDS * sizeof(int)); desc->args[2].data = tmp; desc->function = ops_par_loop_update_halo_kernel3_minus_2_a_execute; if (OPS_diags > 1) { ops_timing_realloc(64, "update_halo_kernel3_minus_2_a"); } ops_enqueue_kernel(desc); } #endif
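The generated loop above relies on OPS_ACC macros that flatten a 3-D index into a pitched 1-D offset (x + xdim*y + xdim*ydim*z) with the pitches held in constant memory. Below is a minimal standalone sketch of that same indexing and of the reflective halo update f(0,y,z) = -f(2,y,z), written without the OPS runtime; the extents, field name and launch configuration are illustrative assumptions, not values taken from the original loop.

#include <cstdio>
#include <cuda_runtime.h>

// flat offset into a pitched 3-D array, same shape as the OPS_ACC macros above
#define IDX(x, y, z, xdim, ydim) ((x) + (xdim) * (y) + (xdim) * (ydim) * (z))

__global__ void halo_minus_2(double *field, int xdim, int ydim, int size_y, int size_z) {
  int y = blockIdx.x * blockDim.x + threadIdx.x;
  int z = blockIdx.y * blockDim.y + threadIdx.y;
  if (y < size_y && z < size_z) {
    // same stencil as update_halo_kernel3_minus_2_a_gpu: f(0,y,z) = -f(2,y,z)
    field[IDX(0, y, z, xdim, ydim)] = -field[IDX(2, y, z, xdim, ydim)];
  }
}

int main() {
  const int xdim = 8, ydim = 8, zdim = 8;        // assumed toy extents
  const size_t n = (size_t)xdim * ydim * zdim;
  double *h = new double[n];
  for (size_t i = 0; i < n; ++i) h[i] = (double)i;  // recognisable pattern
  double *d = nullptr;
  cudaMalloc(&d, n * sizeof(double));
  cudaMemcpy(d, h, n * sizeof(double), cudaMemcpyHostToDevice);
  dim3 block(16, 16);
  dim3 grid((ydim + block.x - 1) / block.x, (zdim + block.y - 1) / block.y);
  halo_minus_2<<<grid, block>>>(d, xdim, ydim, ydim, zdim);
  cudaMemcpy(h, d, n * sizeof(double), cudaMemcpyDeviceToHost);
  printf("f(0,0,0) = %g, -f(2,0,0) = %g\n",
         h[IDX(0, 0, 0, xdim, ydim)], -h[IDX(2, 0, 0, xdim, ydim)]);
  delete[] h;
  cudaFree(d);
  return 0;
}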
1d988eb6c9d474920cf808afbf5e821e576ed2cd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include <algorithm> #include <cmath> #include "../util/math_functions.hpp" #include "./lstm_layer_Junhyuk.hpp" namespace caffe { __device__ real_t sigmoid(const real_t x) { return real_t(1) / (real_t(1) + exp(-x)); } __device__ real_t tanh(const real_t x) { return real_t(2) * sigmoid(real_t(2) * x) - real_t(1); } __global__ void ClipAdd(const int nthreads, const int dim, int t, const real_t* clip, const real_t* add_vec, real_t* data) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / dim; const real_t clip_t = clip ? clip[n] : real_t(t > 0); data[index] += clip_t * add_vec[index]; } } __global__ void ActivationForward(const int nthreads, const int H, const real_t* pre_gate, real_t* gate) { CUDA_KERNEL_LOOP(index, nthreads) { const int d = index % (4*H); gate[index] = d < 3*H ? sigmoid(pre_gate[index]) : tanh(pre_gate[index]); } } __global__ void LSTMForward(const int nthreads, const int H, const int t, const real_t* c_prev, const real_t* gate, const real_t* clip, real_t* c_t, real_t* h_t) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / H; const int d = index % H; const real_t* offset = gate + 4*H*n; const real_t i_t = offset[d]; const real_t f_t = offset[H + d]; const real_t o_t = offset[2*H + d]; const real_t g_t = offset[3*H + d]; const real_t c_t_1 = c_prev[index]; const real_t clip_t = clip ? clip[n] : real_t(t > 0); c_t[index] = clip_t * f_t * c_t_1 + i_t * g_t; h_t[index] = o_t * tanh(c_t[index]); } } __global__ void LSTMBackward(const int nthreads, const int H, const int t, const real_t* c_prev, const real_t* gate, const real_t* c_t, const real_t* clip, real_t* dc_t, const real_t* dh_t, real_t* dc_prev, real_t* gate_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / H; const int d = index % H; const real_t* gate_t = gate + 4*H*n; const real_t i_t = gate_t[d]; const real_t f_t = gate_t[H + d]; const real_t o_t = gate_t[2*H + d]; const real_t g_t = gate_t[3*H + d]; const real_t c_t_1 = c_prev[index]; const real_t c = c_t[index]; const real_t tanh_c = tanh(c); const real_t clip_t = clip ? 
clip[n] : real_t(t > 0); real_t* dc_t_1 = dc_prev + index; real_t* gate_diff_t = gate_diff + 4*H*n; real_t* di_t = gate_diff_t + d; real_t* df_t = gate_diff_t + H + d; real_t* do_t = gate_diff_t + 2*H + d; real_t* dg_t = gate_diff_t + 3*H + d; // Output gate : tanh(c(t)) * h_diff(t) *do_t = dh_t[index] * tanh_c; // Cell state : o(t) * tanh'(c(t)) * h_diff(t) + f(t+1) * c_diff(t+1) dc_t[index] += dh_t[index] * o_t * (real_t(1) - tanh_c * tanh_c); // c_diff(t-1) += f(t) * c_diff(t) *dc_t_1 = clip_t * dc_t[index] * f_t; // Forget gate : c(t-1) * c_diff(t) *df_t = clip_t * dc_t[index] * c_t_1; // Input gate : g(t) * c_diff(t) *di_t = dc_t[index] * g_t; // Input modulation gate : i(t) * c_diff(t) *dg_t = dc_t[index] * i_t; } } __global__ void ActivationBackward(const int nthreads, const int H, const real_t clip_threshold, const real_t* gate, const real_t* gate_diff, real_t* pre_gate_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int d = index % (4 * H); const real_t gate_val = gate[index]; if (d < 3 * H) { pre_gate_diff[index] = gate_diff[index] * gate_val * (real_t(1) - gate_val); } else { pre_gate_diff[index] = gate_diff[index] * (real_t(1) - gate_val * gate_val); } if (clip_threshold > real_t(0)) { if (pre_gate_diff[index] < -clip_threshold) { pre_gate_diff[index] = -clip_threshold; } else if (pre_gate_diff[index] > clip_threshold) { pre_gate_diff[index] = clip_threshold; } } } } void LstmLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { CHECK_EQ(top[0]->gpu_data(), top_.gpu_data()); real_t* top_data = top_.mutable_gpu_data(); const real_t* bottom_data = bottom[0]->gpu_data(); const real_t* clip = NULL; if (bottom.size() > 1) { clip = bottom[1]->gpu_data(); CHECK_EQ(bottom[1]->num(), bottom[1]->count()); } const real_t* weight_i = this->blobs_[0]->gpu_data(); const real_t* weight_h = this->blobs_[1]->gpu_data(); const real_t* bias = this->blobs_[2]->gpu_data(); real_t* pre_gate_data = pre_gate_.mutable_gpu_data(); real_t* gate_data = gate_.mutable_gpu_data(); real_t* cell_data = cell_.mutable_gpu_data(); // Initialize previous state if (clip) { caffe_copy(c_0_.count(), c_T_.gpu_data(), c_0_.mutable_gpu_data()); caffe_copy(h_0_.count(), h_T_.gpu_data(), h_0_.mutable_gpu_data()); } else { caffe_gpu_set(c_0_.count(), real_t(0.), c_0_.mutable_gpu_data()); caffe_gpu_set(h_0_.count(), real_t(0.), h_0_.mutable_gpu_data()); } // Compute input to hidden forward propagation caffe_gpu_gemm(CblasNoTrans, CblasTrans, T_*N_, 4*H_, I_, real_t(1.), bottom_data, weight_i, real_t(0.), pre_gate_data); caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, T_*N_, 4*H_, 1, real_t(1.), bias_multiplier_.gpu_data(), bias, real_t(1.), pre_gate_data); // Compute recurrent forward propagation for (int t = 0; t < T_; ++t) { real_t* h_t = top_data + top_.offset(t); real_t* c_t = cell_data + cell_.offset(t); real_t* pre_gate_t = pre_gate_data + pre_gate_.offset(t); real_t* gate_t = gate_data + gate_.offset(t); const real_t* clip_t = clip ? clip + bottom[1]->offset(t) : NULL; const real_t* h_t_1 = t > 0 ? (h_t - top_.offset(1)) : h_0_.gpu_data(); const real_t* c_t_1 = t > 0 ? 
(c_t - cell_.offset(1)) : c_0_.gpu_data(); caffe_gpu_gemm(CblasNoTrans, CblasTrans, N_, 4*H_, H_, real_t(1.), h_t_1, weight_h, real_t(0.), h_to_gate_.mutable_gpu_data()); hipLaunchKernelGGL(( ClipAdd), dim3(CAFFE_GET_BLOCKS(4*N_*H_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, 4*N_*H_, 4*H_, t, clip_t, h_to_gate_.gpu_data(), pre_gate_t); CUDA_POST_KERNEL_CHECK; hipLaunchKernelGGL(( ActivationForward), dim3(CAFFE_GET_BLOCKS(4*N_*H_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, 4*N_*H_, H_, pre_gate_t, gate_t); CUDA_POST_KERNEL_CHECK; hipLaunchKernelGGL(( LSTMForward), dim3(CAFFE_GET_BLOCKS(N_*H_)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N_*H_, H_, t, c_t_1, gate_t, clip_t, c_t, h_t); CUDA_POST_KERNEL_CHECK; } // Preserve cell state and output value for truncated BPTT caffe_copy(N_*H_, cell_data + cell_.offset(T_-1), c_T_.mutable_gpu_data()); caffe_copy(N_*H_, top_data + top_.offset(T_-1), h_T_.mutable_gpu_data()); } } // namespace caffe
1d988eb6c9d474920cf808afbf5e821e576ed2cd.cu
#include <vector> #include <algorithm> #include <cmath> #include "../util/math_functions.hpp" #include "./lstm_layer_Junhyuk.hpp" namespace caffe { __device__ real_t sigmoid(const real_t x) { return real_t(1) / (real_t(1) + exp(-x)); } __device__ real_t tanh(const real_t x) { return real_t(2) * sigmoid(real_t(2) * x) - real_t(1); } __global__ void ClipAdd(const int nthreads, const int dim, int t, const real_t* clip, const real_t* add_vec, real_t* data) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / dim; const real_t clip_t = clip ? clip[n] : real_t(t > 0); data[index] += clip_t * add_vec[index]; } } __global__ void ActivationForward(const int nthreads, const int H, const real_t* pre_gate, real_t* gate) { CUDA_KERNEL_LOOP(index, nthreads) { const int d = index % (4*H); gate[index] = d < 3*H ? sigmoid(pre_gate[index]) : tanh(pre_gate[index]); } } __global__ void LSTMForward(const int nthreads, const int H, const int t, const real_t* c_prev, const real_t* gate, const real_t* clip, real_t* c_t, real_t* h_t) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / H; const int d = index % H; const real_t* offset = gate + 4*H*n; const real_t i_t = offset[d]; const real_t f_t = offset[H + d]; const real_t o_t = offset[2*H + d]; const real_t g_t = offset[3*H + d]; const real_t c_t_1 = c_prev[index]; const real_t clip_t = clip ? clip[n] : real_t(t > 0); c_t[index] = clip_t * f_t * c_t_1 + i_t * g_t; h_t[index] = o_t * tanh(c_t[index]); } } __global__ void LSTMBackward(const int nthreads, const int H, const int t, const real_t* c_prev, const real_t* gate, const real_t* c_t, const real_t* clip, real_t* dc_t, const real_t* dh_t, real_t* dc_prev, real_t* gate_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / H; const int d = index % H; const real_t* gate_t = gate + 4*H*n; const real_t i_t = gate_t[d]; const real_t f_t = gate_t[H + d]; const real_t o_t = gate_t[2*H + d]; const real_t g_t = gate_t[3*H + d]; const real_t c_t_1 = c_prev[index]; const real_t c = c_t[index]; const real_t tanh_c = tanh(c); const real_t clip_t = clip ? 
clip[n] : real_t(t > 0); real_t* dc_t_1 = dc_prev + index; real_t* gate_diff_t = gate_diff + 4*H*n; real_t* di_t = gate_diff_t + d; real_t* df_t = gate_diff_t + H + d; real_t* do_t = gate_diff_t + 2*H + d; real_t* dg_t = gate_diff_t + 3*H + d; // Output gate : tanh(c(t)) * h_diff(t) *do_t = dh_t[index] * tanh_c; // Cell state : o(t) * tanh'(c(t)) * h_diff(t) + f(t+1) * c_diff(t+1) dc_t[index] += dh_t[index] * o_t * (real_t(1) - tanh_c * tanh_c); // c_diff(t-1) += f(t) * c_diff(t) *dc_t_1 = clip_t * dc_t[index] * f_t; // Forget gate : c(t-1) * c_diff(t) *df_t = clip_t * dc_t[index] * c_t_1; // Input gate : g(t) * c_diff(t) *di_t = dc_t[index] * g_t; // Input modulation gate : i(t) * c_diff(t) *dg_t = dc_t[index] * i_t; } } __global__ void ActivationBackward(const int nthreads, const int H, const real_t clip_threshold, const real_t* gate, const real_t* gate_diff, real_t* pre_gate_diff) { CUDA_KERNEL_LOOP(index, nthreads) { const int d = index % (4 * H); const real_t gate_val = gate[index]; if (d < 3 * H) { pre_gate_diff[index] = gate_diff[index] * gate_val * (real_t(1) - gate_val); } else { pre_gate_diff[index] = gate_diff[index] * (real_t(1) - gate_val * gate_val); } if (clip_threshold > real_t(0)) { if (pre_gate_diff[index] < -clip_threshold) { pre_gate_diff[index] = -clip_threshold; } else if (pre_gate_diff[index] > clip_threshold) { pre_gate_diff[index] = clip_threshold; } } } } void LstmLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { CHECK_EQ(top[0]->gpu_data(), top_.gpu_data()); real_t* top_data = top_.mutable_gpu_data(); const real_t* bottom_data = bottom[0]->gpu_data(); const real_t* clip = NULL; if (bottom.size() > 1) { clip = bottom[1]->gpu_data(); CHECK_EQ(bottom[1]->num(), bottom[1]->count()); } const real_t* weight_i = this->blobs_[0]->gpu_data(); const real_t* weight_h = this->blobs_[1]->gpu_data(); const real_t* bias = this->blobs_[2]->gpu_data(); real_t* pre_gate_data = pre_gate_.mutable_gpu_data(); real_t* gate_data = gate_.mutable_gpu_data(); real_t* cell_data = cell_.mutable_gpu_data(); // Initialize previous state if (clip) { caffe_copy(c_0_.count(), c_T_.gpu_data(), c_0_.mutable_gpu_data()); caffe_copy(h_0_.count(), h_T_.gpu_data(), h_0_.mutable_gpu_data()); } else { caffe_gpu_set(c_0_.count(), real_t(0.), c_0_.mutable_gpu_data()); caffe_gpu_set(h_0_.count(), real_t(0.), h_0_.mutable_gpu_data()); } // Compute input to hidden forward propagation caffe_gpu_gemm(CblasNoTrans, CblasTrans, T_*N_, 4*H_, I_, real_t(1.), bottom_data, weight_i, real_t(0.), pre_gate_data); caffe_gpu_gemm(CblasNoTrans, CblasNoTrans, T_*N_, 4*H_, 1, real_t(1.), bias_multiplier_.gpu_data(), bias, real_t(1.), pre_gate_data); // Compute recurrent forward propagation for (int t = 0; t < T_; ++t) { real_t* h_t = top_data + top_.offset(t); real_t* c_t = cell_data + cell_.offset(t); real_t* pre_gate_t = pre_gate_data + pre_gate_.offset(t); real_t* gate_t = gate_data + gate_.offset(t); const real_t* clip_t = clip ? clip + bottom[1]->offset(t) : NULL; const real_t* h_t_1 = t > 0 ? (h_t - top_.offset(1)) : h_0_.gpu_data(); const real_t* c_t_1 = t > 0 ? 
(c_t - cell_.offset(1)) : c_0_.gpu_data(); caffe_gpu_gemm(CblasNoTrans, CblasTrans, N_, 4*H_, H_, real_t(1.), h_t_1, weight_h, real_t(0.), h_to_gate_.mutable_gpu_data()); ClipAdd<<<CAFFE_GET_BLOCKS(4*N_*H_), CAFFE_CUDA_NUM_THREADS>>>( 4*N_*H_, 4*H_, t, clip_t, h_to_gate_.gpu_data(), pre_gate_t); CUDA_POST_KERNEL_CHECK; ActivationForward<<<CAFFE_GET_BLOCKS(4*N_*H_), CAFFE_CUDA_NUM_THREADS>>>( 4*N_*H_, H_, pre_gate_t, gate_t); CUDA_POST_KERNEL_CHECK; LSTMForward<<<CAFFE_GET_BLOCKS(N_*H_), CAFFE_CUDA_NUM_THREADS>>>( N_*H_, H_, t, c_t_1, gate_t, clip_t, c_t, h_t); CUDA_POST_KERNEL_CHECK; } // Preserve cell state and output value for truncated BPTT caffe_copy(N_*H_, cell_data + cell_.offset(T_-1), c_T_.mutable_gpu_data()); caffe_copy(N_*H_, top_data + top_.offset(T_-1), h_T_.mutable_gpu_data()); } } // namespace caffe
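Both copies of the LSTM forward pass above index the gate buffer as four contiguous blocks of width H per sample (input, forget, output and modulation gates at offsets 0, H, 2H and 3H of gate + 4*H*n). The sketch below isolates just that per-element cell update in a self-contained CUDA program; the float type, the toy sizes and the omission of the sequence-clip input are assumptions made for illustration only.

#include <cmath>
#include <cstdio>
#include <cuda_runtime.h>

__global__ void lstm_cell(int N, int H, const float *gate, const float *c_prev,
                          float *c, float *h) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  if (idx >= N * H) return;
  int n = idx / H, d = idx % H;
  const float *g4 = gate + 4 * H * n;            // this sample's packed gates [i|f|o|g]
  float i_t = g4[d], f_t = g4[H + d], o_t = g4[2 * H + d], g_t = g4[3 * H + d];
  c[idx] = f_t * c_prev[idx] + i_t * g_t;        // new cell state
  h[idx] = o_t * tanhf(c[idx]);                  // new hidden state
}

int main() {
  const int N = 2, H = 3;                        // assumed toy batch and hidden size
  float gate[N * 4 * H], c_prev[N * H], h[N * H];
  for (int i = 0; i < N * 4 * H; ++i) gate[i] = 0.5f;  // pretend post-sigmoid/tanh values
  for (int i = 0; i < N * H; ++i) c_prev[i] = 1.0f;
  float *dg, *dcp, *dc, *dh;
  cudaMalloc(&dg, sizeof(gate));  cudaMalloc(&dcp, sizeof(c_prev));
  cudaMalloc(&dc, sizeof(c_prev)); cudaMalloc(&dh, sizeof(h));
  cudaMemcpy(dg, gate, sizeof(gate), cudaMemcpyHostToDevice);
  cudaMemcpy(dcp, c_prev, sizeof(c_prev), cudaMemcpyHostToDevice);
  lstm_cell<<<1, 64>>>(N, H, dg, dcp, dc, dh);
  cudaMemcpy(h, dh, sizeof(h), cudaMemcpyDeviceToHost);
  printf("h[0] = %f (expected 0.5*tanh(0.5*1 + 0.5*0.5) = %f)\n", h[0], 0.5f * tanhf(0.75f));
  cudaFree(dg); cudaFree(dcp); cudaFree(dc); cudaFree(dh);
  return 0;
}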
506b947e67cf9fbe514ad4f7489d738ba93ca9c5.hip
// !!! This is a file automatically generated by hipify!!! // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define EIGEN_TEST_NO_LONGDOUBLE #define EIGEN_TEST_NO_COMPLEX #define EIGEN_TEST_FUNC cxx11_tensor_scan_cuda #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500 #include <hip/hip_fp16.h> #endif #include "main.h" #include <unsupported/Eigen/CXX11/Tensor> using Eigen::Tensor; typedef Tensor<float, 1>::DimensionPair DimPair; template<int DataLayout> void test_cuda_cumsum(int m_size, int k_size, int n_size) { std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size << ")" << std::endl; Tensor<float, 3, DataLayout> t_input(m_size, k_size, n_size); Tensor<float, 3, DataLayout> t_result(m_size, k_size, n_size); Tensor<float, 3, DataLayout> t_result_gpu(m_size, k_size, n_size); t_input.setRandom(); std::size_t t_input_bytes = t_input.size() * sizeof(float); std::size_t t_result_bytes = t_result.size() * sizeof(float); float* d_t_input; float* d_t_result; hipMalloc((void**)(&d_t_input), t_input_bytes); hipMalloc((void**)(&d_t_result), t_result_bytes); hipMemcpy(d_t_input, t_input.data(), t_input_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> > gpu_t_input(d_t_input, Eigen::array<int, 3>(m_size, k_size, n_size)); Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> > gpu_t_result(d_t_result, Eigen::array<int, 3>(m_size, k_size, n_size)); gpu_t_result.device(gpu_device) = gpu_t_input.cumsum(1); t_result = t_input.cumsum(1); hipMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, hipMemcpyDeviceToHost); for (size_t i = 0; i < t_result.size(); i++) { if (fabs(t_result(i) - t_result_gpu(i)) < 1e-4f) { continue; } if (Eigen::internal::isApprox(t_result(i), t_result_gpu(i), 1e-4f)) { continue; } std::cout << "mismatch detected at index " << i << ": " << t_result(i) << " vs " << t_result_gpu(i) << std::endl; assert(false); } hipFree((void*)d_t_input); hipFree((void*)d_t_result); } void test_cxx11_tensor_scan_cuda() { CALL_SUBTEST_1(test_cuda_cumsum<ColMajor>(128, 128, 128)); CALL_SUBTEST_2(test_cuda_cumsum<RowMajor>(128, 128, 128)); }
506b947e67cf9fbe514ad4f7489d738ba93ca9c5.cu
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define EIGEN_TEST_NO_LONGDOUBLE #define EIGEN_TEST_NO_COMPLEX #define EIGEN_TEST_FUNC cxx11_tensor_scan_cuda #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int #define EIGEN_USE_GPU #if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500 #include <cuda_fp16.h> #endif #include "main.h" #include <unsupported/Eigen/CXX11/Tensor> using Eigen::Tensor; typedef Tensor<float, 1>::DimensionPair DimPair; template<int DataLayout> void test_cuda_cumsum(int m_size, int k_size, int n_size) { std::cout << "Testing for (" << m_size << "," << k_size << "," << n_size << ")" << std::endl; Tensor<float, 3, DataLayout> t_input(m_size, k_size, n_size); Tensor<float, 3, DataLayout> t_result(m_size, k_size, n_size); Tensor<float, 3, DataLayout> t_result_gpu(m_size, k_size, n_size); t_input.setRandom(); std::size_t t_input_bytes = t_input.size() * sizeof(float); std::size_t t_result_bytes = t_result.size() * sizeof(float); float* d_t_input; float* d_t_result; cudaMalloc((void**)(&d_t_input), t_input_bytes); cudaMalloc((void**)(&d_t_result), t_result_bytes); cudaMemcpy(d_t_input, t_input.data(), t_input_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> > gpu_t_input(d_t_input, Eigen::array<int, 3>(m_size, k_size, n_size)); Eigen::TensorMap<Eigen::Tensor<float, 3, DataLayout> > gpu_t_result(d_t_result, Eigen::array<int, 3>(m_size, k_size, n_size)); gpu_t_result.device(gpu_device) = gpu_t_input.cumsum(1); t_result = t_input.cumsum(1); cudaMemcpy(t_result_gpu.data(), d_t_result, t_result_bytes, cudaMemcpyDeviceToHost); for (size_t i = 0; i < t_result.size(); i++) { if (fabs(t_result(i) - t_result_gpu(i)) < 1e-4f) { continue; } if (Eigen::internal::isApprox(t_result(i), t_result_gpu(i), 1e-4f)) { continue; } std::cout << "mismatch detected at index " << i << ": " << t_result(i) << " vs " << t_result_gpu(i) << std::endl; assert(false); } cudaFree((void*)d_t_input); cudaFree((void*)d_t_result); } void test_cxx11_tensor_scan_cuda() { CALL_SUBTEST_1(test_cuda_cumsum<ColMajor>(128, 128, 128)); CALL_SUBTEST_2(test_cuda_cumsum<RowMajor>(128, 128, 128)); }
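The test above checks Eigen's GPU cumsum(1) against the CPU result. As a reference for what that operation computes, here is a naive standalone kernel that performs an inclusive prefix sum along the middle dimension of an m x k x n column-major tensor, one thread per (i, j) line; it is a semantic sketch only and bears no relation to the tree-based scan Eigen actually dispatches on the device.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void cumsum_dim1(const float *in, float *out, int m, int k, int n) {
  int line = blockIdx.x * blockDim.x + threadIdx.x;   // one line = fixed (i, j)
  if (line >= m * n) return;
  int i = line % m, j = line / m;
  float acc = 0.f;
  for (int s = 0; s < k; ++s) {
    size_t idx = (size_t)i + (size_t)m * s + (size_t)m * k * j;  // ColMajor offset
    acc += in[idx];
    out[idx] = acc;        // inclusive running sum along dimension 1
  }
}

int main() {
  const int m = 2, k = 4, n = 2;                       // assumed toy sizes
  const int total = m * k * n;
  float h_in[total], h_out[total];
  for (int i = 0; i < total; ++i) h_in[i] = 1.f;       // all ones -> cumsum is 1,2,3,4 along dim 1
  float *d_in, *d_out;
  cudaMalloc(&d_in, sizeof(h_in));  cudaMalloc(&d_out, sizeof(h_out));
  cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
  cumsum_dim1<<<1, 32>>>(d_in, d_out, m, k, n);
  cudaMemcpy(h_out, d_out, sizeof(h_out), cudaMemcpyDeviceToHost);
  for (int s = 0; s < k; ++s) printf("out(0,%d,0) = %g\n", s, h_out[0 + m * s]);
  cudaFree(d_in); cudaFree(d_out);
  return 0;
}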
46117fb9f4300ebf2fcfd53c4cd463345f31e90a.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include "stdio.h" #include <vector> #include <hip/hip_runtime.h> #include "TROOT.h" #include "TApplication.h" #include "TSystem.h" #include "TMinuit.h" #include "TRandom3.h" #include "TVectorT.h" #include "TCanvas.h" #include "TH1D.h" #include "TF1.h" #include "math.h" //#include "cuPrintf.hip" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/functional.h> #include <thrust/transform_reduce.h> /* * ### Description for myself ### * * ### ATTENTION: Following description is for an old version of this file. It incorpoorated a general unary_function thingy. Since it didn't work (workaround involved static_cast andor templates!) I skipped this part. ### * * I want to generalize the function, which is used for fitting. At least a bit. * Since Minuit->SetFCN is called with &FitFcn I have to change FitFcn: * # Instead of my old GaussianFunctor() I use a general, dereferenced function pointer. This one is set in the main method to my new GaussionFunctor substitute SumFunctor() (fStruct = new SumFunctor()). * # SumFunctor() ... * + inherits from thrust::unary_function and has to, in order to declare it generally in the top of this file, use it as an abstract function in FitFcn() and define it specifically in the main method. * + is the central thingy of this fit. * + it does compiler workaround stuff to retrieve the functions I'd like to use for fitting (which acutally is the old gaussion, just wrapped in a confusing present box) * + it retrieves the important parameter values from the global dev_params[] array * + it does the calculation invoking the workaround-retrieved gauss_functions * + it returns the value and * + has two public member void pointers which are needed for compiler-workaround stuff * # Compiler workaround stuff ... * + the old dev_gaussian can't be used in this new method, because IDONTKNOW - anyway a pointer to the old dev_gaussian has to be used (a function pointer to be precise) * + workaround is, * - to make a function pointer to it (declare this in head, fill it in main to skip compiler errors) * - pass dev_gaussion as a void pointer to the device (since function pointer won't work, yay another workaround) by setting SumFunctor()s func1 and func2 to the pointers to dev_gaussian * - reinterpret the void pointers to dev_gaussians as function pointers using reinterpret_cast template * - dereference them and invoke them with the three, good old variables x, mean and sigma; this is so easy because our function pointer typedef is just declared in the manner that the dev_gaussian method call (double, double, double) will be of the same structur * + sadly, a simple SumFunctor()->func1 = &dev_gauss, as I would think of, wouldn't work, so there's another strange workaround involving cudaMemcpyFROMSymbol (!) 
* * * ### End of Description ### */ // helper function TVectorD stdVectorToRootVector (std::vector<double> vector) { TVectorD tempVector(vector.size()); for (unsigned int i = 0; i < vector.size(); i++) tempVector[i] = vector[i]; return tempVector; } __constant__ __device__ double dev_params[5]; thrust::device_vector<double>* d_theEvents; __device__ double dev_gaussian (double x, double mean, double sigma) { return exp(-0.5*pow((x - mean)/sigma, 2)) / (sigma * sqrt(2 * M_PI)); } typedef double(*dev_function_pointer)(double, double, double); __device__ dev_function_pointer pointer_to_gaussian = dev_gaussian; struct SumFunctor : public thrust::unary_function<double, double> { __device__ double operator() (double x) { dev_function_pointer f1 = reinterpret_cast<dev_function_pointer>(func1); dev_function_pointer f2 = reinterpret_cast<dev_function_pointer>(func2); double weight1 = dev_params[0]; double mean1 = dev_params[1]; double sigma1 = dev_params[2]; double mean2 = dev_params[3]; double sigma2 = dev_params[4]; double first_gauss = weight1 * (*f1)(x, mean1, sigma1); double second_gauss = (1-weight1) * (*f2)(x, mean2, sigma2); return -2 * log(first_gauss + second_gauss); } void* func1; void* func2; }; SumFunctor* fStruct = 0; void dev_FitFcn (int& npar, double* deriv, double& fun, double* param, int flg) { hipMemcpyToSymbol("dev_params", param, npar*sizeof(double), 0, hipMemcpyHostToDevice); fun = thrust::transform_reduce(d_theEvents->begin(), d_theEvents->end(), (*fStruct), 0.0, thrust::plus<double>()); // std::cout << fun << std::endl; // DEBUG } int main(int argc, char** argv) { // gSystem->Load("libMinuit"); std::cout << "############################" << std::endl << "## You're lucky! Because of the default TMinuit output into the shell, I implemented a bunch of line separators!" 
<< std::endl << "############################" << std::endl << std::endl; int sizeOfVector = 10000; if (argc > 1) sizeOfVector = atoi(argv[1]); TRandom3 myRandom(23); double myMean1 = 1.7; double mySigma1 = 0.2; double myMean2 = 3; double mySigma2 = 0.6; double myG1DrawProbability = 0.58; // (0,1] std::cout << "Mean1 = " << myMean1 << ", mySigma1 = " << mySigma1 << ", myMean2 = " << myMean2 << ", myMean2 = " << myMean2 << ", weight1 = " << myG1DrawProbability << std::endl; std::vector<double> theEvents; for (int i = 0; i < sizeOfVector; i++) { if (myRandom.Uniform() <= myG1DrawProbability) { theEvents.push_back(myRandom.Gaus(myMean1, mySigma1)); } else { theEvents.push_back(myRandom.Gaus(myMean2, mySigma2)); } } thrust::device_vector<double> d_localEvents(theEvents); d_theEvents = &d_localEvents; TMinuit minuit(5); std::cout << "## TMINUIT:: Defining parameters ##" << std::endl; // DefineParameter syntax is: // int paramter number, // char parmeter name, // double initial value, // double initial error, // double lower limit, // double upper limit minuit.DefineParameter(0, "weight1", 0.5, 0.01, 0., 1.); minuit.DefineParameter(1, "mean1", myMean1, 0.1, myMean1-1, myMean1+1); // add +-1 for uncertainties minuit.DefineParameter(2, "sigma1", mySigma1, 0.1, mySigma1-1, mySigma1+1); minuit.DefineParameter(3, "mean2", myMean2, 0.1, myMean2-1, myMean2+1); minuit.DefineParameter(4, "sigma3", mySigma2, 0.1, mySigma2-1, mySigma2+1); fStruct = new SumFunctor(); // trick hipMemcpyFromSymbol((void**) &(fStruct->func1), "pointer_to_gaussian", sizeof(void*), 0, hipMemcpyDeviceToHost); hipMemcpyFromSymbol((void**) &(fStruct->func2), "pointer_to_gaussian", sizeof(void*), 0, hipMemcpyDeviceToHost); std::cout << "## TMINUIT:: Setting Function ##" << std::endl; minuit.SetFCN(&dev_FitFcn); std::cout << "## TMINUIT:: Calling Migrad() ##" << std::endl; minuit.Migrad(); /* ############ * ### VISUALIZATION and OUTPUT ### */ TH1D * secondHist = new TH1D("secondHist", "Titel", 100, 1, 4); for (int i = 0; i < theEvents.size(); i++) secondHist->Fill(theEvents[i]); TF1 * g1 = new TF1("g1", "gaus(0)+gaus(3)", 1, 4); std::vector<double> theReturnedParameters; // Errors are not saved for (int i = 0; i < 5; i++) { double tempVal1, tempVal2; minuit.GetParameter(i, tempVal1, tempVal2); theReturnedParameters.push_back(tempVal1); // std::cout << "### Pushed back parameter " << i << " = " << tempVal1 << std::endl; // DEBUG } for (int i = 0; i < 5; i++) { int k = i; if (i > 2) k = i + 1; // std::cout << "### k = " << k << " - param[k] = " << theReturnedParameters[i] << std::endl; // DEBUG g1->SetParameter(k, theReturnedParameters[i]); } g1->SetParameter(3, 1 - theReturnedParameters[0]); // second weight std::cout << "### Deviation of fitted parameters to original values:" << std::endl; std::cout << "### Delta_Weight1 = " << theReturnedParameters[0] << " - " << myG1DrawProbability << " = " << theReturnedParameters[0] - myG1DrawProbability << std::endl; std::cout << "### Delta_Mean1 = " << theReturnedParameters[1] << " - " << myMean1 << " = " << theReturnedParameters[1] - myMean1 << std::endl; std::cout << "### Delta Sigma1 = " << theReturnedParameters[2] << " - " << mySigma1 << " = " << theReturnedParameters[2] - mySigma1 << std::endl; std::cout << "### Delta_Mean2 = " << theReturnedParameters[3] << " - " << myMean2 << " = " << theReturnedParameters[3] - myMean2 << std::endl; std::cout << "### Delta Sigma2 = " << theReturnedParameters[4] << " - " << mySigma2 << " = " << theReturnedParameters[4] - mySigma2 << std::endl; 
g1->SetParameter(0, 0.6); g1->SetLineColor(kBlue); TApplication *theApp = new TApplication("app", &argc, argv, 0, -1); TCanvas * c1 = new TCanvas("c1", "default", 100, 10, 800, 600); secondHist->Scale(1/secondHist->Integral()*4*M_PI*M_PI); secondHist->Draw("hist"); g1->Draw("SAME"); // g1->Print(); // DEBUG c1->Update(); theApp->Run(); }
46117fb9f4300ebf2fcfd53c4cd463345f31e90a.cu
#include <iostream> #include "stdio.h" #include <vector> #include <cuda.h> #include "TROOT.h" #include "TApplication.h" #include "TSystem.h" #include "TMinuit.h" #include "TRandom3.h" #include "TVectorT.h" #include "TCanvas.h" #include "TH1D.h" #include "TF1.h" #include "math.h" //#include "cuPrintf.cu" #include <thrust/device_vector.h> #include <thrust/host_vector.h> #include <thrust/functional.h> #include <thrust/transform_reduce.h> /* * ### Description for myself ### * * ### ATTENTION: Following description is for an old version of this file. It incorpoorated a general unary_function thingy. Since it didn't work (workaround involved static_cast andor templates!) I skipped this part. ### * * I want to generalize the function, which is used for fitting. At least a bit. * Since Minuit->SetFCN is called with &FitFcn I have to change FitFcn: * # Instead of my old GaussianFunctor() I use a general, dereferenced function pointer. This one is set in the main method to my new GaussionFunctor substitute SumFunctor() (fStruct = new SumFunctor()). * # SumFunctor() ... * + inherits from thrust::unary_function and has to, in order to declare it generally in the top of this file, use it as an abstract function in FitFcn() and define it specifically in the main method. * + is the central thingy of this fit. * + it does compiler workaround stuff to retrieve the functions I'd like to use for fitting (which acutally is the old gaussion, just wrapped in a confusing present box) * + it retrieves the important parameter values from the global dev_params[] array * + it does the calculation invoking the workaround-retrieved gauss_functions * + it returns the value and * + has two public member void pointers which are needed for compiler-workaround stuff * # Compiler workaround stuff ... * + the old dev_gaussian can't be used in this new method, because IDONTKNOW - anyway a pointer to the old dev_gaussian has to be used (a function pointer to be precise) * + workaround is, * - to make a function pointer to it (declare this in head, fill it in main to skip compiler errors) * - pass dev_gaussion as a void pointer to the device (since function pointer won't work, yay another workaround) by setting SumFunctor()s func1 and func2 to the pointers to dev_gaussian * - reinterpret the void pointers to dev_gaussians as function pointers using reinterpret_cast template * - dereference them and invoke them with the three, good old variables x, mean and sigma; this is so easy because our function pointer typedef is just declared in the manner that the dev_gaussian method call (double, double, double) will be of the same structur * + sadly, a simple SumFunctor()->func1 = &dev_gauss, as I would think of, wouldn't work, so there's another strange workaround involving cudaMemcpyFROMSymbol (!) 
* * * ### End of Description ### */ // helper function TVectorD stdVectorToRootVector (std::vector<double> vector) { TVectorD tempVector(vector.size()); for (unsigned int i = 0; i < vector.size(); i++) tempVector[i] = vector[i]; return tempVector; } __constant__ __device__ double dev_params[5]; thrust::device_vector<double>* d_theEvents; __device__ double dev_gaussian (double x, double mean, double sigma) { return exp(-0.5*pow((x - mean)/sigma, 2)) / (sigma * sqrt(2 * M_PI)); } typedef double(*dev_function_pointer)(double, double, double); __device__ dev_function_pointer pointer_to_gaussian = dev_gaussian; struct SumFunctor : public thrust::unary_function<double, double> { __device__ double operator() (double x) { dev_function_pointer f1 = reinterpret_cast<dev_function_pointer>(func1); dev_function_pointer f2 = reinterpret_cast<dev_function_pointer>(func2); double weight1 = dev_params[0]; double mean1 = dev_params[1]; double sigma1 = dev_params[2]; double mean2 = dev_params[3]; double sigma2 = dev_params[4]; double first_gauss = weight1 * (*f1)(x, mean1, sigma1); double second_gauss = (1-weight1) * (*f2)(x, mean2, sigma2); return -2 * log(first_gauss + second_gauss); } void* func1; void* func2; }; SumFunctor* fStruct = 0; void dev_FitFcn (int& npar, double* deriv, double& fun, double* param, int flg) { cudaMemcpyToSymbol("dev_params", param, npar*sizeof(double), 0, cudaMemcpyHostToDevice); fun = thrust::transform_reduce(d_theEvents->begin(), d_theEvents->end(), (*fStruct), 0.0, thrust::plus<double>()); // std::cout << fun << std::endl; // DEBUG } int main(int argc, char** argv) { // gSystem->Load("libMinuit"); std::cout << "############################" << std::endl << "## You're lucky! Because of the default TMinuit output into the shell, I implemented a bunch of line separators!" 
<< std::endl << "############################" << std::endl << std::endl; int sizeOfVector = 10000; if (argc > 1) sizeOfVector = atoi(argv[1]); TRandom3 myRandom(23); double myMean1 = 1.7; double mySigma1 = 0.2; double myMean2 = 3; double mySigma2 = 0.6; double myG1DrawProbability = 0.58; // (0,1] std::cout << "Mean1 = " << myMean1 << ", mySigma1 = " << mySigma1 << ", myMean2 = " << myMean2 << ", myMean2 = " << myMean2 << ", weight1 = " << myG1DrawProbability << std::endl; std::vector<double> theEvents; for (int i = 0; i < sizeOfVector; i++) { if (myRandom.Uniform() <= myG1DrawProbability) { theEvents.push_back(myRandom.Gaus(myMean1, mySigma1)); } else { theEvents.push_back(myRandom.Gaus(myMean2, mySigma2)); } } thrust::device_vector<double> d_localEvents(theEvents); d_theEvents = &d_localEvents; TMinuit minuit(5); std::cout << "## TMINUIT:: Defining parameters ##" << std::endl; // DefineParameter syntax is: // int paramter number, // char parmeter name, // double initial value, // double initial error, // double lower limit, // double upper limit minuit.DefineParameter(0, "weight1", 0.5, 0.01, 0., 1.); minuit.DefineParameter(1, "mean1", myMean1, 0.1, myMean1-1, myMean1+1); // add +-1 for uncertainties minuit.DefineParameter(2, "sigma1", mySigma1, 0.1, mySigma1-1, mySigma1+1); minuit.DefineParameter(3, "mean2", myMean2, 0.1, myMean2-1, myMean2+1); minuit.DefineParameter(4, "sigma3", mySigma2, 0.1, mySigma2-1, mySigma2+1); fStruct = new SumFunctor(); // trick cudaMemcpyFromSymbol((void**) &(fStruct->func1), "pointer_to_gaussian", sizeof(void*), 0, cudaMemcpyDeviceToHost); cudaMemcpyFromSymbol((void**) &(fStruct->func2), "pointer_to_gaussian", sizeof(void*), 0, cudaMemcpyDeviceToHost); std::cout << "## TMINUIT:: Setting Function ##" << std::endl; minuit.SetFCN(&dev_FitFcn); std::cout << "## TMINUIT:: Calling Migrad() ##" << std::endl; minuit.Migrad(); /* ############ * ### VISUALIZATION and OUTPUT ### */ TH1D * secondHist = new TH1D("secondHist", "Titel", 100, 1, 4); for (int i = 0; i < theEvents.size(); i++) secondHist->Fill(theEvents[i]); TF1 * g1 = new TF1("g1", "gaus(0)+gaus(3)", 1, 4); std::vector<double> theReturnedParameters; // Errors are not saved for (int i = 0; i < 5; i++) { double tempVal1, tempVal2; minuit.GetParameter(i, tempVal1, tempVal2); theReturnedParameters.push_back(tempVal1); // std::cout << "### Pushed back parameter " << i << " = " << tempVal1 << std::endl; // DEBUG } for (int i = 0; i < 5; i++) { int k = i; if (i > 2) k = i + 1; // std::cout << "### k = " << k << " - param[k] = " << theReturnedParameters[i] << std::endl; // DEBUG g1->SetParameter(k, theReturnedParameters[i]); } g1->SetParameter(3, 1 - theReturnedParameters[0]); // second weight std::cout << "### Deviation of fitted parameters to original values:" << std::endl; std::cout << "### Delta_Weight1 = " << theReturnedParameters[0] << " - " << myG1DrawProbability << " = " << theReturnedParameters[0] - myG1DrawProbability << std::endl; std::cout << "### Delta_Mean1 = " << theReturnedParameters[1] << " - " << myMean1 << " = " << theReturnedParameters[1] - myMean1 << std::endl; std::cout << "### Delta Sigma1 = " << theReturnedParameters[2] << " - " << mySigma1 << " = " << theReturnedParameters[2] - mySigma1 << std::endl; std::cout << "### Delta_Mean2 = " << theReturnedParameters[3] << " - " << myMean2 << " = " << theReturnedParameters[3] - myMean2 << std::endl; std::cout << "### Delta Sigma2 = " << theReturnedParameters[4] << " - " << mySigma2 << " = " << theReturnedParameters[4] - mySigma2 << std::endl; 
g1->SetParameter(0, 0.6); g1->SetLineColor(kBlue); TApplication *theApp = new TApplication("app", &argc, argv, 0, -1); TCanvas * c1 = new TCanvas("c1", "default", 100, 10, 800, 600); secondHist->Scale(1/secondHist->Integral()*4*M_PI*M_PI); secondHist->Draw("hist"); g1->Draw("SAME"); // g1->Print(); // DEBUG c1->Update(); theApp->Run(); }
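The fit above works around the restriction that __device__ functions cannot be referenced directly from host code: it stores a pointer to dev_gaussian in a __device__ variable, copies that pointer to the host with cudaMemcpyFromSymbol, and hands it back to the device inside the functor as a void*. The fragment below shows the same trick in isolation, using the symbol itself rather than its string name (the string overload used above was removed in later CUDA releases); the kernel, values and names are illustrative assumptions only.

#include <cmath>
#include <cstdio>
#include <cuda_runtime.h>

typedef double (*func_ptr_t)(double, double, double);

__device__ double dev_gauss(double x, double mean, double sigma) {
  return exp(-0.5 * pow((x - mean) / sigma, 2)) / (sigma * sqrt(2.0 * M_PI));
}
// device-side variable holding the address of dev_gauss
__device__ func_ptr_t dev_gauss_ptr = dev_gauss;

__global__ void eval(func_ptr_t f, double x, double mean, double sigma, double *out) {
  *out = (*f)(x, mean, sigma);   // call through the pointer on the device
}

int main() {
  func_ptr_t h_ptr = nullptr;
  // fetch the device function pointer to the host; it is only meaningful when
  // passed back into device code, never callable from the host itself
  cudaMemcpyFromSymbol(&h_ptr, dev_gauss_ptr, sizeof(h_ptr));
  double *d_out, h_out = 0.0;
  cudaMalloc(&d_out, sizeof(double));
  eval<<<1, 1>>>(h_ptr, 1.7, 1.7, 0.2, d_out);
  cudaMemcpy(&h_out, d_out, sizeof(double), cudaMemcpyDeviceToHost);
  printf("gauss(mean) = %f (expected 1/(sigma*sqrt(2*pi)) = %f)\n",
         h_out, 1.0 / (0.2 * sqrt(2.0 * M_PI)));
  cudaFree(d_out);
  return 0;
}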
1789ff2294ac669137b9ca06e06b2cc6dccb1bd2.hip
// !!! This is a file automatically generated by hipify!!! /* * PRIME NUMBERS GENERATOR * * Last name: RIVERA RUIZ * First name: DANIEL * Net ID: drr342 * */ #include <hip/hip_runtime.h> #include <stdlib.h> #include <stdio.h> #define min(X, Y) ((X) < (Y) ? (X) : (Y)) __global__ void calculate (int *, int); void toFile(int *, int); int main(int argc, char * argv[]) { int N = atoi(argv[1]); size_t size = sizeof(int) * (N - 1); int * primes = (int*) malloc(size); int * dPrimes; hipMalloc(&dPrimes, size); hipMemset(dPrimes, 0, size); dim3 dimGrid(ceil(N / 1000.0)); dim3 dimBlock(min(N, 1000)); hipLaunchKernelGGL(( calculate), dim3(dimGrid), dim3(dimBlock), 0, 0, dPrimes, N); hipMemcpy(primes, dPrimes, size, hipMemcpyDeviceToHost); toFile(primes, N); free(primes); hipFree(dPrimes); return 0; } __global__ void calculate(int * dPrimes, int N) { int threadId = blockIdx.x * blockDim.x + threadIdx.x + 2; int mult = 2 * threadId; while (mult <= N) { dPrimes[mult - 2] = 1; mult += threadId; } } void toFile(int * primes, int N) { char * fileName = (char*) malloc(13 * sizeof(char)); sprintf(fileName, "%d.txt", N); FILE * fp; fp = fopen(fileName,"w"); for (int i = 0; i < N - 1; i++) { if (primes[i] == 0) fprintf(fp, "%d ", i + 2); } fprintf(fp, "\n"); fclose(fp); free(fileName); }
1789ff2294ac669137b9ca06e06b2cc6dccb1bd2.cu
/* * PRIME NUMBERS GENERATOR * * Last name: RIVERA RUIZ * First name: DANIEL * Net ID: drr342 * */ #include <cuda.h> #include <stdlib.h> #include <stdio.h> #define min(X, Y) ((X) < (Y) ? (X) : (Y)) __global__ void calculate (int *, int); void toFile(int *, int); int main(int argc, char * argv[]) { int N = atoi(argv[1]); size_t size = sizeof(int) * (N - 1); int * primes = (int*) malloc(size); int * dPrimes; cudaMalloc(&dPrimes, size); cudaMemset(dPrimes, 0, size); dim3 dimGrid(ceil(N / 1000.0)); dim3 dimBlock(min(N, 1000)); calculate<<<dimGrid, dimBlock>>>(dPrimes, N); cudaMemcpy(primes, dPrimes, size, cudaMemcpyDeviceToHost); toFile(primes, N); free(primes); cudaFree(dPrimes); return 0; } __global__ void calculate(int * dPrimes, int N) { int threadId = blockIdx.x * blockDim.x + threadIdx.x + 2; int mult = 2 * threadId; while (mult <= N) { dPrimes[mult - 2] = 1; mult += threadId; } } void toFile(int * primes, int N) { char * fileName = (char*) malloc(13 * sizeof(char)); sprintf(fileName, "%d.txt", N); FILE * fp; fp = fopen(fileName,"w"); for (int i = 0; i < N - 1; i++) { if (primes[i] == 0) fprintf(fp, "%d ", i + 2); } fprintf(fp, "\n"); fclose(fp); free(fileName); }
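The kernel above is a parallel sieve in which thread t marks 2t, 3t, ... up to N as composite. Since every composite up to N has a divisor no larger than sqrt(N), the grid only needs to cover divisors up to that bound; the sketch below applies that observation. It is an optional variation under that assumption, not part of the original assignment code.

#include <cmath>
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

__global__ void sieve(int *composite, int N) {
  int t = blockIdx.x * blockDim.x + threadIdx.x + 2;   // candidate divisor
  if ((long long)t * t > N) return;                    // divisors > sqrt(N) add nothing new
  for (int mult = 2 * t; mult <= N; mult += t) composite[mult - 2] = 1;
}

int main(int argc, char **argv) {
  int N = (argc > 1) ? atoi(argv[1]) : 100;
  size_t size = sizeof(int) * (N - 1);                 // one flag per value in 2..N
  int *h_flags = (int *)malloc(size);
  int *d_flags;
  cudaMalloc(&d_flags, size);
  cudaMemset(d_flags, 0, size);
  int limit = (int)sqrt((double)N) + 1;                // only need divisors <= sqrt(N)
  int threads = 256, blocks = (limit + threads - 1) / threads;
  sieve<<<blocks, threads>>>(d_flags, N);
  cudaMemcpy(h_flags, d_flags, size, cudaMemcpyDeviceToHost);
  for (int i = 0; i < N - 1; ++i)
    if (h_flags[i] == 0) printf("%d ", i + 2);         // unmarked values are prime
  printf("\n");
  free(h_flags);
  cudaFree(d_flags);
  return 0;
}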
4c88ce12bfd665730b5ee3495a0b7f53f3328d55.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuml/fil/fil.h> #include "benchmark.cuh" #include <cuml/common/logger.hpp> #include <cuml/ensemble/randomforest.hpp> #include <cuml/tree/algo_helper.h> #include <treelite/c_api.h> #include <treelite/tree.h> #include <utility> namespace ML { namespace Bench { namespace fil { struct Params { DatasetParams data; RegressionParams blobs; ModelHandle model; ML::fil::storage_type_t storage; ML::fil::algo_t algo; RF_params rf; int predict_repetitions; }; class FIL : public RegressionFixture<float> { typedef RegressionFixture<float> Base; public: FIL(const std::string& name, const Params& p) /* fitting to linear combinations in "y" normally yields trees that check values of all significant columns, as well as their linear combinations in "X". During inference, the exact threshold values do not affect speed. The distribution of column popularity does not affect speed barring lots of uninformative columns in succession. Hence, this method represents real datasets well enough for both classification and regression. */ : RegressionFixture<float>(name, p.data, p.blobs), model(p.model), p_rest(p) { } static void regression_to_classification(float* y, int nrows, int nclasses, hipStream_t stream) { raft::linalg::unaryOp( y, y, nrows, [=] __device__(float a) { return float(lroundf(fabsf(a) * 1000. 
* nclasses) % nclasses); }, stream); } protected: void runBenchmark(::benchmark::State& state) override { if (!params.rowMajor) { state.SkipWithError("FIL only supports row-major inputs"); } if (params.nclasses > 1) { // convert regression ranges into [0..nclasses-1] regression_to_classification(data.y.data(), params.nrows, params.nclasses, stream); } // create model ML::RandomForestRegressorF rf_model; auto* mPtr = &rf_model; size_t train_nrows = ::min(params.nrows, 1000); fit(*handle, mPtr, data.X.data(), train_nrows, params.ncols, data.y.data(), p_rest.rf); handle->sync_stream(stream); ML::build_treelite_forest(&model, &rf_model, params.ncols); ML::fil::treelite_params_t tl_params = { .algo = p_rest.algo, .output_class = params.nclasses > 1, // cuML RF forest .threshold = 1.f / params.nclasses, // Fixture::DatasetParams .storage_type = p_rest.storage, .blocks_per_sm = 8, .threads_per_tree = 1, .n_items = 0, .pforest_shape_str = nullptr}; ML::fil::forest_variant forest_variant; ML::fil::from_treelite(*handle, &forest_variant, model, &tl_params); forest = std::get<ML::fil::forest_t<float>>(forest_variant); // only time prediction this->loopOnState(state, [this]() { // Dataset<D, L> allocates y assuming one output value per input row, // so not supporting predict_proba yet for (int i = 0; i < p_rest.predict_repetitions; i++) { ML::fil::predict(*this->handle, this->forest, this->data.y.data(), this->data.X.data(), this->params.nrows, false); } }); } void allocateBuffers(const ::benchmark::State& state) override { Base::allocateBuffers(state); } void deallocateBuffers(const ::benchmark::State& state) override { ML::fil::free(*handle, forest); Base::deallocateBuffers(state); } private: ML::fil::forest_t<float> forest; ModelHandle model; Params p_rest; }; struct FilBenchParams { int nrows; int ncols; int nclasses; int max_depth; int ntrees; ML::fil::storage_type_t storage; ML::fil::algo_t algo; }; std::vector<Params> getInputs() { std::vector<Params> out; Params p; p.data.rowMajor = true; p.blobs = {.n_informative = -1, // Just a placeholder value, anyway changed below .effective_rank = -1, // Just a placeholder value, anyway changed below .bias = 0.f, .tail_strength = 0.1, .noise = 0.01, .shuffle = false, .seed = 12345ULL}; p.rf = set_rf_params(10, /*max_depth */ (1 << 20), /* max_leaves */ 1.f, /* max_features */ 32, /* max_n_bins */ 3, /* min_samples_leaf */ 3, /* min_samples_split */ 0.0f, /* min_impurity_decrease */ true, /* bootstrap */ 1, /* n_trees */ 1.f, /* max_samples */ 1234ULL, /* seed */ ML::CRITERION::MSE, /* split_criterion */ 8, /* n_streams */ 128 /* max_batch_size */ ); using ML::fil::algo_t; using ML::fil::storage_type_t; std::vector<FilBenchParams> var_params = { {(int)1e6, 20, 1, 5, 1000, storage_type_t::DENSE, algo_t::BATCH_TREE_REORG}, {(int)1e6, 20, 2, 5, 1000, storage_type_t::DENSE, algo_t::BATCH_TREE_REORG}}; for (auto& i : var_params) { p.data.nrows = i.nrows; p.data.ncols = i.ncols; p.blobs.n_informative = i.ncols / 3; p.blobs.effective_rank = i.ncols / 3; p.data.nclasses = i.nclasses; p.rf.tree_params.max_depth = i.max_depth; p.rf.n_trees = i.ntrees; p.storage = i.storage; p.algo = i.algo; p.predict_repetitions = 10; out.push_back(p); } return out; } ML_BENCH_REGISTER(Params, FIL, "", getInputs()); } // end namespace fil } // end namespace Bench } // end namespace ML
4c88ce12bfd665730b5ee3495a0b7f53f3328d55.cu
/* * Copyright (c) 2019-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cuml/fil/fil.h> #include "benchmark.cuh" #include <cuml/common/logger.hpp> #include <cuml/ensemble/randomforest.hpp> #include <cuml/tree/algo_helper.h> #include <treelite/c_api.h> #include <treelite/tree.h> #include <utility> namespace ML { namespace Bench { namespace fil { struct Params { DatasetParams data; RegressionParams blobs; ModelHandle model; ML::fil::storage_type_t storage; ML::fil::algo_t algo; RF_params rf; int predict_repetitions; }; class FIL : public RegressionFixture<float> { typedef RegressionFixture<float> Base; public: FIL(const std::string& name, const Params& p) /* fitting to linear combinations in "y" normally yields trees that check values of all significant columns, as well as their linear combinations in "X". During inference, the exact threshold values do not affect speed. The distribution of column popularity does not affect speed barring lots of uninformative columns in succession. Hence, this method represents real datasets well enough for both classification and regression. */ : RegressionFixture<float>(name, p.data, p.blobs), model(p.model), p_rest(p) { } static void regression_to_classification(float* y, int nrows, int nclasses, cudaStream_t stream) { raft::linalg::unaryOp( y, y, nrows, [=] __device__(float a) { return float(lroundf(fabsf(a) * 1000. 
* nclasses) % nclasses); }, stream); } protected: void runBenchmark(::benchmark::State& state) override { if (!params.rowMajor) { state.SkipWithError("FIL only supports row-major inputs"); } if (params.nclasses > 1) { // convert regression ranges into [0..nclasses-1] regression_to_classification(data.y.data(), params.nrows, params.nclasses, stream); } // create model ML::RandomForestRegressorF rf_model; auto* mPtr = &rf_model; size_t train_nrows = std::min(params.nrows, 1000); fit(*handle, mPtr, data.X.data(), train_nrows, params.ncols, data.y.data(), p_rest.rf); handle->sync_stream(stream); ML::build_treelite_forest(&model, &rf_model, params.ncols); ML::fil::treelite_params_t tl_params = { .algo = p_rest.algo, .output_class = params.nclasses > 1, // cuML RF forest .threshold = 1.f / params.nclasses, // Fixture::DatasetParams .storage_type = p_rest.storage, .blocks_per_sm = 8, .threads_per_tree = 1, .n_items = 0, .pforest_shape_str = nullptr}; ML::fil::forest_variant forest_variant; ML::fil::from_treelite(*handle, &forest_variant, model, &tl_params); forest = std::get<ML::fil::forest_t<float>>(forest_variant); // only time prediction this->loopOnState(state, [this]() { // Dataset<D, L> allocates y assuming one output value per input row, // so not supporting predict_proba yet for (int i = 0; i < p_rest.predict_repetitions; i++) { ML::fil::predict(*this->handle, this->forest, this->data.y.data(), this->data.X.data(), this->params.nrows, false); } }); } void allocateBuffers(const ::benchmark::State& state) override { Base::allocateBuffers(state); } void deallocateBuffers(const ::benchmark::State& state) override { ML::fil::free(*handle, forest); Base::deallocateBuffers(state); } private: ML::fil::forest_t<float> forest; ModelHandle model; Params p_rest; }; struct FilBenchParams { int nrows; int ncols; int nclasses; int max_depth; int ntrees; ML::fil::storage_type_t storage; ML::fil::algo_t algo; }; std::vector<Params> getInputs() { std::vector<Params> out; Params p; p.data.rowMajor = true; p.blobs = {.n_informative = -1, // Just a placeholder value, anyway changed below .effective_rank = -1, // Just a placeholder value, anyway changed below .bias = 0.f, .tail_strength = 0.1, .noise = 0.01, .shuffle = false, .seed = 12345ULL}; p.rf = set_rf_params(10, /*max_depth */ (1 << 20), /* max_leaves */ 1.f, /* max_features */ 32, /* max_n_bins */ 3, /* min_samples_leaf */ 3, /* min_samples_split */ 0.0f, /* min_impurity_decrease */ true, /* bootstrap */ 1, /* n_trees */ 1.f, /* max_samples */ 1234ULL, /* seed */ ML::CRITERION::MSE, /* split_criterion */ 8, /* n_streams */ 128 /* max_batch_size */ ); using ML::fil::algo_t; using ML::fil::storage_type_t; std::vector<FilBenchParams> var_params = { {(int)1e6, 20, 1, 5, 1000, storage_type_t::DENSE, algo_t::BATCH_TREE_REORG}, {(int)1e6, 20, 2, 5, 1000, storage_type_t::DENSE, algo_t::BATCH_TREE_REORG}}; for (auto& i : var_params) { p.data.nrows = i.nrows; p.data.ncols = i.ncols; p.blobs.n_informative = i.ncols / 3; p.blobs.effective_rank = i.ncols / 3; p.data.nclasses = i.nclasses; p.rf.tree_params.max_depth = i.max_depth; p.rf.n_trees = i.ntrees; p.storage = i.storage; p.algo = i.algo; p.predict_repetitions = 10; out.push_back(p); } return out; } ML_BENCH_REGISTER(Params, FIL, "", getInputs()); } // end namespace fil } // end namespace Bench } // end namespace ML
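regression_to_classification in the benchmark above turns continuous regression targets into labels in [0, nclasses-1] by scaling, rounding and taking a modulus inside a raft::linalg::unaryOp lambda. The snippet below reproduces just that mapping with Thrust so it can be run without cuML or RAFT; the sample values and the use of thrust::transform are assumptions made for illustration.

#include <cmath>
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/transform.h>

struct to_class {
  int nclasses;
  __host__ __device__ float operator()(float a) const {
    // same expression as the lambda inside regression_to_classification
    return float(lroundf(fabsf(a) * 1000. * nclasses) % nclasses);
  }
};

int main() {
  const int nclasses = 2;                              // assumed class count
  float init[8] = {0.01f, -0.37f, 1.2f, 0.5f, -2.4f, 0.08f, 3.3f, -0.9f};
  thrust::device_vector<float> y(init, init + 8);      // copy sample targets to the device
  thrust::transform(y.begin(), y.end(), y.begin(), to_class{nclasses});
  for (int i = 0; i < 8; ++i) printf("%g ", (float)y[i]);  // labels in [0, nclasses-1]
  printf("\n");
  return 0;
}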
9082b917d900e3ecb34f7e0e08f17fd28bdcf28f.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <hip/hip_runtime.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include <thrust/device_vector.h> #include <thrust/partition.h> #include <thrust/device_vector.h> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #define ERRORCHECK 1 #define COMPACTION 1 #define SORTBYMATERIAL 0 #define CACHE 0 #define ANTIALIASING 0 //#define TOGGLEKD #define DOFTOGGLE 0 #define FULLLIGHT 1 #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { #if ERRORCHECK hipDeviceSynchronize(); hipError_t err = hipGetLastError(); if (hipSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); # ifdef _WIN32 getchar(); # endif exit(EXIT_FAILURE); #endif } struct _test_bounce_ { __host__ __device__ bool operator()(const PathSegment tmp) { bool have_bounce = false; if (tmp.remainingBounces > 0) have_bounce = true; return (have_bounce); } }; struct _test_material_ { __host__ __device__ bool operator()(const ShadeableIntersection _first, const ShadeableIntersection _second) { bool returnval = _first.materialId > _second.materialId ? false : true; return returnval; } }; __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene * hst_scene = NULL; static glm::vec3 * dev_image = NULL; static Geom * dev_geoms = NULL; static Material * dev_materials = NULL; static PathSegment * dev_paths = NULL; static mesh * dev_meshs = NULL; static Triangle * dev_triangles = NULL; static ShadeableIntersection * dev_intersections = NULL; static ShadeableIntersection * dev_intersections_cache = NULL; static GPUKDtreeNode *dev_KDtreenode = NULL; static int *dev_gputriidxlst; static int *dev_idxchecker; static const int MAX_NODE_SIZE = 70000; //for dispersion wavelength static float *dev_wavelen; // TODO: static variables for device memory, any extra info you need, etc // ... 
void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; hipMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); hipMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); hipMalloc(&dev_paths, pixelcount * sizeof(PathSegment)); hipMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); hipMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), hipMemcpyHostToDevice); hipMalloc(&dev_meshs, scene->meshs.size() * sizeof(mesh)); hipMemcpy(dev_meshs, scene->meshs.data(), scene->meshs.size() * sizeof(mesh), hipMemcpyHostToDevice); hipMalloc(&dev_triangles, scene->triangles.size() * sizeof(Triangle)); hipMemcpy(dev_triangles, scene->triangles.data(), scene->triangles.size() * sizeof(Triangle), hipMemcpyHostToDevice); hipMalloc(&dev_materials, scene->materials.size() * sizeof(Material)); hipMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), hipMemcpyHostToDevice); hipMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection)); hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); hipMalloc(&dev_intersections_cache, pixelcount * sizeof(ShadeableIntersection)); hipMemset(dev_intersections_cache, 0, pixelcount * sizeof(ShadeableIntersection)); hipMalloc(&dev_idxchecker, scene->KDtreeforGPU.size() * sizeof(int)); hipMalloc(&dev_wavelen, pixelcount * sizeof(float)); hipMemcpy(dev_wavelen, scene->wavelen.data(), scene->wavelen.size() * sizeof(float), hipMemcpyHostToDevice); #ifdef TOGGLEKD hipMalloc(&dev_KDtreenode, scene->KDtreeforGPU.size() * sizeof(GPUKDtreeNode)); hipMemcpy(dev_KDtreenode, scene->KDtreeforGPU.data(), scene->KDtreeforGPU.size() * sizeof(GPUKDtreeNode), hipMemcpyHostToDevice); hipMalloc(&dev_gputriidxlst, scene->triangleidxforGPU.size() * sizeof(int)); hipMemcpy(dev_gputriidxlst, scene->triangleidxforGPU.data(), scene->triangleidxforGPU.size() * sizeof(int), hipMemcpyHostToDevice); #endif // TOGGLEKD // TODO: initialize any extra device memeory you need checkCUDAError("pathtraceInit"); } void pathtraceFree() { hipFree(dev_image); // no-op if dev_image is null hipFree(dev_paths); hipFree(dev_geoms); hipFree(dev_meshs); hipFree(dev_triangles); hipFree(dev_materials); hipFree(dev_intersections); hipFree(dev_wavelen); #ifdef TOGGLEKD hipFree(dev_KDtreenode); hipFree(dev_gputriidxlst); #endif hipFree(dev_idxchecker); // TODO: clean up any extra device memory you created checkCUDAError("pathtraceFree"); } //disksample code from https://pub.dartlang.org/documentation/dartray/0.0.1/core/ConcentricSampleDisk.html __device__ __host__ glm::vec2 ConcentricSampleDisk(float rand_x, float rand_y) { float r, theta; float sx = 2 * rand_x - 1; float sy = 2 * rand_y - 1; if (sx == 0.0 && sy == 0.0) { return glm::vec2(0.f); } if (sx >= -sy) { if (sx > sy) { r = sx; if (sy > 0.0) theta = sy / r; else theta = 8.0f + sy / r; } else { r = sy; theta = 2.0f - sx / r; } } else { if (sx <= sy) { r = -sx; theta = 4.0f - sy / r; } else { r = -sy; theta = 6.0f + sx / r; } } theta *= PI / 4.f; return glm::vec2(r * cosf(theta), r * sinf(theta)); } /** * Generate PathSegments with rays from the camera through the screen into the * scene, which is the first bounce of rays. 
* * Antialiasing - add rays for sub-pixel sampling * motion blur - jitter rays "in time" * lens effect - jitter ray origin positions based on a lens */ __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments, float* wavelen) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam.resolution.x && y < cam.resolution.y) { int index = x + (y * cam.resolution.x); PathSegment & segment = pathSegments[index]; segment.ray.origin = cam.position; segment.color = glm::vec3(1.0f, 1.0f, 1.0f); thrust::default_random_engine rng1 = makeSeededRandomEngine(iter, x, y); thrust::default_random_engine rng = makeSeededRandomEngine(iter, x+(cam.resolution.x)*y, 0); thrust::uniform_real_distribution<float> u01(-0.5f, 0.5f); thrust::uniform_real_distribution<float> u02(0, 1.f); #if ANTIALIASING == 1 // TODO: implement antialiasing by jittering the ray segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)x+u01(rng)- (float)cam.resolution.x * 0.5f ) - cam.up * cam.pixelLength.y * ((float)y+ u01(rng) - (float)cam.resolution.y * 0.5f ) ); #else segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f) - cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f) ); #endif #if DOFTOGGLE == 1 float rand_x = u01(rng); float rand_y = u01(rng); float camlenrad = 0.6; float focallen = 10; glm::vec2 raysampled = camlenrad*ConcentricSampleDisk(rand_x, rand_y); glm::vec3 physicallength = segment.ray.origin + glm::abs(focallen / segment.ray.direction.z)*segment.ray.direction; segment.ray.origin = segment.ray.origin + raysampled.x*cam.right + raysampled.y*cam.up; segment.ray.direction = glm::normalize(physicallength - segment.ray.origin); #endif segment.ray.wavelength = u01(rng1)+0.5f; segment.pixelIndex = index; segment.remainingBounces = traceDepth; } } __global__ void ComputeBSDF( int num_paths , PathSegment *pathSegments , ShadeableIntersection *intersections ) { int path_idx = blockIdx.x * blockDim.x + threadIdx.x; if (path_idx < num_paths) { if (intersections[path_idx].materialId == 0)//diffuse? { } } } // TODO: // computeIntersections handles generating ray intersections ONLY. // Generating new rays is handled in your shader(s). // Feel free to modify the code below. 
__global__ void computeIntersections( int depth , int num_paths , PathSegment * pathSegments , Geom * geoms , int geoms_size , ShadeableIntersection * intersections , mesh* meshs , Triangle* triangle1 #ifdef TOGGLEKD , GPUKDtreeNode* node , int node_size , int* gputrilst , int trisize , int* idxchecker #endif ) { int path_index = blockIdx.x * blockDim.x + threadIdx.x; if (path_index < num_paths) { PathSegment pathSegment = pathSegments[path_index]; float t; glm::vec3 intersect_point; glm::vec3 normal; float t_min = FLT_MAX; int hit_geom_index = -1; bool outside = true; bool have_mesh = false; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; // naive parse through global geoms for (int i = 0; i < geoms_size; i++) { Geom & geom = geoms[i]; have_mesh = false; if (geom.type == CUBE) { t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == MESH) { have_mesh = true; if (geom.meshid != -1); { #ifdef TOGGLEKD bool isTraversed[MAX_NODE_SIZE] = { false }; mesh & Tempmesh = meshs[geom.meshid]; glm::vec3 maxbound = Tempmesh.maxbound; glm::vec3 minbound = Tempmesh.minbound; if (!aabbBoxIntersectlocal(geom, pathSegment.ray, minbound, maxbound)) { t = -1; continue; } bool hitgeom = false; float near = -1; GPUKDtreeNode* curnode = &node[0]; int curid = 0; float dis = FLT_MAX; //idxchecker[0] = 1; int count = 0; while (curid!=-1) { curnode = &node[curid]; bool lefthit = false; bool righthit = false; if(curnode->leftidx!=-1) lefthit= KDtreeintersectBB(pathSegment.ray, node[curnode->leftidx].minB, node[curnode->leftidx].maxB, near); if(curnode->rightidx!=-1) righthit = KDtreeintersectBB(pathSegment.ray, node[curnode->rightidx].minB, node[curnode->rightidx].maxB, near); if (!lefthit&&curnode->leftidx != -1) { isTraversed[curnode->leftidx] = true; } if (!righthit&&curnode->rightidx != -1) { isTraversed[curnode->rightidx] = true; } while (curnode->leftidx != -1 && isTraversed[curnode->leftidx] == false) { curid = curnode->leftidx; curnode = &node[curid]; } if (!isTraversed[curid]) { isTraversed[curnode->curidx] = true; if (curnode->isleafnode) { int size = curnode->trsize; if (size > 0) { int start = curnode->GPUtriangleidxinLst; int end = start + size; for (int j = start; j < end; ++j) { int triidxnow = gputrilst[j]; t = triangleIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside, triangle1[triidxnow]); dis = t; if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } } } } if (curnode->rightidx != -1 && isTraversed[curnode->rightidx] == false) { curid = curnode->rightidx; curnode = &node[curid]; } else { curid = curnode->parentidx; curnode = &node[curid]; } } /*int startidx, endidx; int size = 0; bool ishit = KDhit(geom, node, pathSegment.ray, startidx, endidx, gputrilst, size); if (ishit) { for (int j = startidx; j < endidx; ++j) { Triangle curTri = triangle1[gputrilst[j]]; t = triangleIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside, curTri); if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } }*/ #else mesh & Tempmesh = meshs[geom.meshid]; glm::vec3 maxbound = Tempmesh.maxbound; glm::vec3 minbound = Tempmesh.minbound; int startidx = meshs[geom.meshid].TriStartIndex; int trisize = meshs[geom.meshid].TriSize; if (!aabbBoxIntersectlocal(geom,pathSegment.ray, minbound, 
maxbound)) { t = -1; continue; } for (int j = startidx; j < trisize + startidx; ++j) { Triangle & triii = triangle1[j]; t = triangleIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside,triii); if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } #endif } } // TODO: add more intersection tests here... triangle? metaball? CSG? // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. if (!have_mesh) { if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } } if (hit_geom_index == -1) { intersections[path_index].t = -1.0f; } else { //The ray hits something pathSegments[path_index].it = outside ? 0.f : t_min; intersections[path_index].t = t_min; intersections[path_index].materialId = geoms[hit_geom_index].materialid; intersections[path_index].surfaceNormal = normal; } } } // LOOK: "fake" shader demonstrating what you might do with the info in // a ShadeableIntersection, as well as how to use thrust's random number // generator. Observe that since the thrust random number generator basically // adds "noise" to the iteration, the image should start off noisy and get // cleaner as more iterations are computed. // // Note that this shader does NOT do a BSDF evaluation! // Your shaders should handle that - this can allow techniques such as // bump mapping. __global__ void shadeFakeMaterial ( int iter , int num_paths , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments , Material * materials ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); } // Otherwise, do some pseudo-lighting computation. This is actually more // like what you would expect from shading in a rasterizer like OpenGL. // TODO: replace this! you should be able to start with basically a one-liner else { float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f)); pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f; pathSegments[idx].color *= u01(rng); // apply some noise because why not } // If there was no intersection, color the ray black. // Lots of renderers use 4 channel color, RGBA, where A = alpha, often // used for opacity, in which case they can indicate "no opacity". // This can be useful for post-processing and image compositing. } else { pathSegments[idx].color = glm::vec3(0.0f); } } } __global__ void shadeMaterial( int iter , int num_paths , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments , Material * materials ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... 
// Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; if (pathSegments[idx].remainingBounces > 0) { scatterRay(pathSegments[idx], intersection.t*pathSegments[idx].ray.direction+pathSegments[idx].ray.origin, intersection.surfaceNormal, materials[intersection.materialId], rng); } } else { #if FULLLIGHT == 0 pathSegments[idx].color = glm::vec3(0.0f); #else pathSegments[idx].color *= glm::vec3(0.1f,0.1f,0.1f); #endif pathSegments[idx].remainingBounces = 0; } } } // Add the current iteration's output to the overall image __global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < nPaths) { PathSegment iterationPath = iterationPaths[index]; image[iterationPath.pixelIndex] += iterationPath.color; } } __global__ void initializechecker(int checknums, int* checker) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < checknums) { checker[index] = -1; } } /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; // 2D block for generating ray from camera const dim3 blockSize2d(8, 8); const dim3 blocksPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); // 1D block for path tracing const int blockSize1d = 128; dim3 nums = (hst_scene->KDtreeforGPU.size() + blockSize1d - 1) / blockSize1d; initializechecker << <nums, blockSize1d >> >(hst_scene->KDtreeforGPU.size(),dev_idxchecker); /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray must carry at minimum a (ray, color) pair, // * where color starts as the multiplicative identity, white = (1, 1, 1). // * This has already been done for you. // * For each depth: // * Compute an intersection in the scene for each path ray. // A very naive version of this has been implemented for you, but feel // free to add more primitives and/or a better algorithm. // Currently, intersection distance is recorded as a parametric distance, // t, or a "distance along the ray." t = -1.0 indicates no intersection. // * Color is attenuated (multiplied) by reflections off of any object // * TODO: Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Note that you can't really use a 2D kernel launch any more - switch // to 1D. // * TODO: Shade the rays that intersected something or didn't bottom out. // That is, color the ray by performing a color computation according // to the shader, then generate a new ray to continue the ray path. // We recommend just updating the ray's PathSegment in place. // Note that this step may come before or after stream compaction, // since some shaders you write may also cause a path to terminate. // * Finally, add this iteration's results to the image. This has been done // for you. 
// TODO: perform one iteration of path tracing hipLaunchKernelGGL(( generateRayFromCamera) , dim3(blocksPerGrid2d), dim3(blockSize2d) , 0, 0, cam, iter, traceDepth, dev_paths,dev_wavelen); checkCUDAError("generate camera ray"); int depth = 0; PathSegment* dev_path_end = dev_paths + pixelcount; int num_paths = dev_path_end - dev_paths; int output_num_paths = num_paths; // --- PathSegment Tracing Stage --- // Shoot ray into scene, bounce between objects, push shading chunks int count = 0; bool iterationComplete = false; while (!iterationComplete) { // clean shading chunks hipMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // tracing dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; #if (CACHE ==1&&ANTIALIASING == 0&&DOFTOGGLE == 0) if(iter==1&&depth==0) { computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections , dev_meshs , dev_triangles #ifdef TOGGLEKD , dev_KDtreenode , hst_scene->KDtreeforGPU.size() , dev_gputriidxlst , hst_scene->triangles.size() , dev_idxchecker #endif ); hipMemcpy(dev_intersections_cache, dev_intersections, pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice); } if (iter > 1 && depth == 0) { hipMemcpy(dev_intersections, dev_intersections_cache, pixelcount * sizeof(ShadeableIntersection), hipMemcpyDeviceToDevice); } else { if (depth > 0) { computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections , dev_meshs , dev_triangles #ifdef TOGGLEKD , dev_KDtreenode , hst_scene->KDtreeforGPU.size() , dev_gputriidxlst , hst_scene->triangles.size() , dev_idxchecker #endif ); } } #else computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections , dev_meshs , dev_triangles #ifdef TOGGLEKD , dev_KDtreenode , hst_scene->KDtreeforGPU.size() , dev_gputriidxlst , hst_scene->triangles.size() , dev_idxchecker #endif ); #endif checkCUDAError("trace one bounce"); hipDeviceSynchronize(); depth++; //SORT dev_paths and dev_intersections by material id, don't toggle on unless number of materials is large, or it would slow the program down. #if SORTBYMATERIAL == 1 thrust::sort_by_key(thrust::device,dev_intersections, dev_intersections + num_paths, dev_paths, _test_material_()); #endif // SORTBYMATERIAL // TODO: // --- Shading Stage --- // Shade path segments based on intersections and generate new rays by // evaluating the BSDF. // Start off with just a big kernel that handles all the different // materials you have in the scenefile. // TODO: compare between directly shading the path segments and shading // path segments that have been reshuffled to be contiguous in memory. hipLaunchKernelGGL(( shadeMaterial), dim3(numblocksPathSegmentTracing), dim3(blockSize1d), 0, 0, iter, num_paths, dev_intersections, dev_paths, dev_materials ); //compaction pathsegments using thrust's partition #if COMPACTION==1 PathSegment *_iter_second_begin_ = thrust::partition(thrust::device, dev_paths, dev_paths + num_paths, _test_bounce_()); num_paths = _iter_second_begin_ - dev_paths; if (num_paths > 0) continue; else iterationComplete = true; #endif #if COMPACTION==1 #elif COMPACTION==0 count ++ ; if(count>8) iterationComplete = true; #endif // TODO: should be based off stream compaction results. 
} // Assemble this iteration and apply it to the image dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d; hipLaunchKernelGGL(( finalGather), dim3(numBlocksPixels), dim3(blockSize1d), 0, 0, output_num_paths, dev_image, dev_paths); /////////////////////////////////////////////////////////////////////////// // Send results to OpenGL buffer for rendering hipLaunchKernelGGL(( sendImageToPBO), dim3(blocksPerGrid2d), dim3(blockSize2d), 0, 0, pbo, cam.resolution, iter, dev_image); // Retrieve image from GPU hipMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), hipMemcpyDeviceToHost); checkCUDAError("pathtrace"); }
9082b917d900e3ecb34f7e0e08f17fd28bdcf28f.cu
#include <cstdio> #include <cuda.h> #include <cmath> #include <thrust/execution_policy.h> #include <thrust/random.h> #include <thrust/remove.h> #include <thrust/device_vector.h> #include <thrust/partition.h> #include <thrust/device_vector.h> #include "sceneStructs.h" #include "scene.h" #include "glm/glm.hpp" #include "glm/gtx/norm.hpp" #include "utilities.h" #include "pathtrace.h" #include "intersections.h" #include "interactions.h" #define ERRORCHECK 1 #define COMPACTION 1 #define SORTBYMATERIAL 0 #define CACHE 0 #define ANTIALIASING 0 //#define TOGGLEKD #define DOFTOGGLE 0 #define FULLLIGHT 1 #define FILENAME (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__) #define checkCUDAError(msg) checkCUDAErrorFn(msg, FILENAME, __LINE__) void checkCUDAErrorFn(const char *msg, const char *file, int line) { #if ERRORCHECK cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); # ifdef _WIN32 getchar(); # endif exit(EXIT_FAILURE); #endif } struct _test_bounce_ { __host__ __device__ bool operator()(const PathSegment tmp) { bool have_bounce = false; if (tmp.remainingBounces > 0) have_bounce = true; return (have_bounce); } }; struct _test_material_ { __host__ __device__ bool operator()(const ShadeableIntersection _first, const ShadeableIntersection _second) { bool returnval = _first.materialId > _second.materialId ? false : true; return returnval; } }; __host__ __device__ thrust::default_random_engine makeSeededRandomEngine(int iter, int index, int depth) { int h = utilhash((1 << 31) | (depth << 22) | iter) ^ utilhash(index); return thrust::default_random_engine(h); } //Kernel that writes the image to the OpenGL PBO directly. __global__ void sendImageToPBO(uchar4* pbo, glm::ivec2 resolution, int iter, glm::vec3* image) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < resolution.x && y < resolution.y) { int index = x + (y * resolution.x); glm::vec3 pix = image[index]; glm::ivec3 color; color.x = glm::clamp((int) (pix.x / iter * 255.0), 0, 255); color.y = glm::clamp((int) (pix.y / iter * 255.0), 0, 255); color.z = glm::clamp((int) (pix.z / iter * 255.0), 0, 255); // Each thread writes one pixel location in the texture (textel) pbo[index].w = 0; pbo[index].x = color.x; pbo[index].y = color.y; pbo[index].z = color.z; } } static Scene * hst_scene = NULL; static glm::vec3 * dev_image = NULL; static Geom * dev_geoms = NULL; static Material * dev_materials = NULL; static PathSegment * dev_paths = NULL; static mesh * dev_meshs = NULL; static Triangle * dev_triangles = NULL; static ShadeableIntersection * dev_intersections = NULL; static ShadeableIntersection * dev_intersections_cache = NULL; static GPUKDtreeNode *dev_KDtreenode = NULL; static int *dev_gputriidxlst; static int *dev_idxchecker; static const int MAX_NODE_SIZE = 70000; //for dispersion wavelength static float *dev_wavelen; // TODO: static variables for device memory, any extra info you need, etc // ... 
void pathtraceInit(Scene *scene) { hst_scene = scene; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; cudaMalloc(&dev_image, pixelcount * sizeof(glm::vec3)); cudaMemset(dev_image, 0, pixelcount * sizeof(glm::vec3)); cudaMalloc(&dev_paths, pixelcount * sizeof(PathSegment)); cudaMalloc(&dev_geoms, scene->geoms.size() * sizeof(Geom)); cudaMemcpy(dev_geoms, scene->geoms.data(), scene->geoms.size() * sizeof(Geom), cudaMemcpyHostToDevice); cudaMalloc(&dev_meshs, scene->meshs.size() * sizeof(mesh)); cudaMemcpy(dev_meshs, scene->meshs.data(), scene->meshs.size() * sizeof(mesh), cudaMemcpyHostToDevice); cudaMalloc(&dev_triangles, scene->triangles.size() * sizeof(Triangle)); cudaMemcpy(dev_triangles, scene->triangles.data(), scene->triangles.size() * sizeof(Triangle), cudaMemcpyHostToDevice); cudaMalloc(&dev_materials, scene->materials.size() * sizeof(Material)); cudaMemcpy(dev_materials, scene->materials.data(), scene->materials.size() * sizeof(Material), cudaMemcpyHostToDevice); cudaMalloc(&dev_intersections, pixelcount * sizeof(ShadeableIntersection)); cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); cudaMalloc(&dev_intersections_cache, pixelcount * sizeof(ShadeableIntersection)); cudaMemset(dev_intersections_cache, 0, pixelcount * sizeof(ShadeableIntersection)); cudaMalloc(&dev_idxchecker, scene->KDtreeforGPU.size() * sizeof(int)); cudaMalloc(&dev_wavelen, pixelcount * sizeof(float)); cudaMemcpy(dev_wavelen, scene->wavelen.data(), scene->wavelen.size() * sizeof(float), cudaMemcpyHostToDevice); #ifdef TOGGLEKD cudaMalloc(&dev_KDtreenode, scene->KDtreeforGPU.size() * sizeof(GPUKDtreeNode)); cudaMemcpy(dev_KDtreenode, scene->KDtreeforGPU.data(), scene->KDtreeforGPU.size() * sizeof(GPUKDtreeNode), cudaMemcpyHostToDevice); cudaMalloc(&dev_gputriidxlst, scene->triangleidxforGPU.size() * sizeof(int)); cudaMemcpy(dev_gputriidxlst, scene->triangleidxforGPU.data(), scene->triangleidxforGPU.size() * sizeof(int), cudaMemcpyHostToDevice); #endif // TOGGLEKD // TODO: initialize any extra device memeory you need checkCUDAError("pathtraceInit"); } void pathtraceFree() { cudaFree(dev_image); // no-op if dev_image is null cudaFree(dev_paths); cudaFree(dev_geoms); cudaFree(dev_meshs); cudaFree(dev_triangles); cudaFree(dev_materials); cudaFree(dev_intersections); cudaFree(dev_wavelen); #ifdef TOGGLEKD cudaFree(dev_KDtreenode); cudaFree(dev_gputriidxlst); #endif cudaFree(dev_idxchecker); // TODO: clean up any extra device memory you created checkCUDAError("pathtraceFree"); } //disksample code from https://pub.dartlang.org/documentation/dartray/0.0.1/core/ConcentricSampleDisk.html __device__ __host__ glm::vec2 ConcentricSampleDisk(float rand_x, float rand_y) { float r, theta; float sx = 2 * rand_x - 1; float sy = 2 * rand_y - 1; if (sx == 0.0 && sy == 0.0) { return glm::vec2(0.f); } if (sx >= -sy) { if (sx > sy) { r = sx; if (sy > 0.0) theta = sy / r; else theta = 8.0f + sy / r; } else { r = sy; theta = 2.0f - sx / r; } } else { if (sx <= sy) { r = -sx; theta = 4.0f - sy / r; } else { r = -sy; theta = 6.0f + sx / r; } } theta *= PI / 4.f; return glm::vec2(r * cosf(theta), r * sinf(theta)); } /** * Generate PathSegments with rays from the camera through the screen into the * scene, which is the first bounce of rays. 
* * Antialiasing - add rays for sub-pixel sampling * motion blur - jitter rays "in time" * lens effect - jitter ray origin positions based on a lens */ __global__ void generateRayFromCamera(Camera cam, int iter, int traceDepth, PathSegment* pathSegments, float* wavelen) { int x = (blockIdx.x * blockDim.x) + threadIdx.x; int y = (blockIdx.y * blockDim.y) + threadIdx.y; if (x < cam.resolution.x && y < cam.resolution.y) { int index = x + (y * cam.resolution.x); PathSegment & segment = pathSegments[index]; segment.ray.origin = cam.position; segment.color = glm::vec3(1.0f, 1.0f, 1.0f); thrust::default_random_engine rng1 = makeSeededRandomEngine(iter, x, y); thrust::default_random_engine rng = makeSeededRandomEngine(iter, x+(cam.resolution.x)*y, 0); thrust::uniform_real_distribution<float> u01(-0.5f, 0.5f); thrust::uniform_real_distribution<float> u02(0, 1.f); #if ANTIALIASING == 1 // TODO: implement antialiasing by jittering the ray segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)x+u01(rng)- (float)cam.resolution.x * 0.5f ) - cam.up * cam.pixelLength.y * ((float)y+ u01(rng) - (float)cam.resolution.y * 0.5f ) ); #else segment.ray.direction = glm::normalize(cam.view - cam.right * cam.pixelLength.x * ((float)x - (float)cam.resolution.x * 0.5f) - cam.up * cam.pixelLength.y * ((float)y - (float)cam.resolution.y * 0.5f) ); #endif #if DOFTOGGLE == 1 float rand_x = u01(rng); float rand_y = u01(rng); float camlenrad = 0.6; float focallen = 10; glm::vec2 raysampled = camlenrad*ConcentricSampleDisk(rand_x, rand_y); glm::vec3 physicallength = segment.ray.origin + glm::abs(focallen / segment.ray.direction.z)*segment.ray.direction; segment.ray.origin = segment.ray.origin + raysampled.x*cam.right + raysampled.y*cam.up; segment.ray.direction = glm::normalize(physicallength - segment.ray.origin); #endif segment.ray.wavelength = u01(rng1)+0.5f; segment.pixelIndex = index; segment.remainingBounces = traceDepth; } } __global__ void ComputeBSDF( int num_paths , PathSegment *pathSegments , ShadeableIntersection *intersections ) { int path_idx = blockIdx.x * blockDim.x + threadIdx.x; if (path_idx < num_paths) { if (intersections[path_idx].materialId == 0)//diffuse? { } } } // TODO: // computeIntersections handles generating ray intersections ONLY. // Generating new rays is handled in your shader(s). // Feel free to modify the code below. 
__global__ void computeIntersections( int depth , int num_paths , PathSegment * pathSegments , Geom * geoms , int geoms_size , ShadeableIntersection * intersections , mesh* meshs , Triangle* triangle1 #ifdef TOGGLEKD , GPUKDtreeNode* node , int node_size , int* gputrilst , int trisize , int* idxchecker #endif ) { int path_index = blockIdx.x * blockDim.x + threadIdx.x; if (path_index < num_paths) { PathSegment pathSegment = pathSegments[path_index]; float t; glm::vec3 intersect_point; glm::vec3 normal; float t_min = FLT_MAX; int hit_geom_index = -1; bool outside = true; bool have_mesh = false; glm::vec3 tmp_intersect; glm::vec3 tmp_normal; // naive parse through global geoms for (int i = 0; i < geoms_size; i++) { Geom & geom = geoms[i]; have_mesh = false; if (geom.type == CUBE) { t = boxIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == SPHERE) { t = sphereIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside); } else if (geom.type == MESH) { have_mesh = true; if (geom.meshid != -1); { #ifdef TOGGLEKD bool isTraversed[MAX_NODE_SIZE] = { false }; mesh & Tempmesh = meshs[geom.meshid]; glm::vec3 maxbound = Tempmesh.maxbound; glm::vec3 minbound = Tempmesh.minbound; if (!aabbBoxIntersectlocal(geom, pathSegment.ray, minbound, maxbound)) { t = -1; continue; } bool hitgeom = false; float near = -1; GPUKDtreeNode* curnode = &node[0]; int curid = 0; float dis = FLT_MAX; //idxchecker[0] = 1; int count = 0; while (curid!=-1) { curnode = &node[curid]; bool lefthit = false; bool righthit = false; if(curnode->leftidx!=-1) lefthit= KDtreeintersectBB(pathSegment.ray, node[curnode->leftidx].minB, node[curnode->leftidx].maxB, near); if(curnode->rightidx!=-1) righthit = KDtreeintersectBB(pathSegment.ray, node[curnode->rightidx].minB, node[curnode->rightidx].maxB, near); if (!lefthit&&curnode->leftidx != -1) { isTraversed[curnode->leftidx] = true; } if (!righthit&&curnode->rightidx != -1) { isTraversed[curnode->rightidx] = true; } while (curnode->leftidx != -1 && isTraversed[curnode->leftidx] == false) { curid = curnode->leftidx; curnode = &node[curid]; } if (!isTraversed[curid]) { isTraversed[curnode->curidx] = true; if (curnode->isleafnode) { int size = curnode->trsize; if (size > 0) { int start = curnode->GPUtriangleidxinLst; int end = start + size; for (int j = start; j < end; ++j) { int triidxnow = gputrilst[j]; t = triangleIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside, triangle1[triidxnow]); dis = t; if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } } } } if (curnode->rightidx != -1 && isTraversed[curnode->rightidx] == false) { curid = curnode->rightidx; curnode = &node[curid]; } else { curid = curnode->parentidx; curnode = &node[curid]; } } /*int startidx, endidx; int size = 0; bool ishit = KDhit(geom, node, pathSegment.ray, startidx, endidx, gputrilst, size); if (ishit) { for (int j = startidx; j < endidx; ++j) { Triangle curTri = triangle1[gputrilst[j]]; t = triangleIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside, curTri); if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } }*/ #else mesh & Tempmesh = meshs[geom.meshid]; glm::vec3 maxbound = Tempmesh.maxbound; glm::vec3 minbound = Tempmesh.minbound; int startidx = meshs[geom.meshid].TriStartIndex; int trisize = meshs[geom.meshid].TriSize; if (!aabbBoxIntersectlocal(geom,pathSegment.ray, minbound, 
maxbound)) { t = -1; continue; } for (int j = startidx; j < trisize + startidx; ++j) { Triangle & triii = triangle1[j]; t = triangleIntersectionTest(geom, pathSegment.ray, tmp_intersect, tmp_normal, outside,triii); if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } #endif } } // TODO: add more intersection tests here... triangle? metaball? CSG? // Compute the minimum t from the intersection tests to determine what // scene geometry object was hit first. if (!have_mesh) { if (t > 0.0f && t_min > t) { t_min = t; hit_geom_index = i; intersect_point = tmp_intersect; normal = tmp_normal; } } } if (hit_geom_index == -1) { intersections[path_index].t = -1.0f; } else { //The ray hits something pathSegments[path_index].it = outside ? 0.f : t_min; intersections[path_index].t = t_min; intersections[path_index].materialId = geoms[hit_geom_index].materialid; intersections[path_index].surfaceNormal = normal; } } } // LOOK: "fake" shader demonstrating what you might do with the info in // a ShadeableIntersection, as well as how to use thrust's random number // generator. Observe that since the thrust random number generator basically // adds "noise" to the iteration, the image should start off noisy and get // cleaner as more iterations are computed. // // Note that this shader does NOT do a BSDF evaluation! // Your shaders should handle that - this can allow techniques such as // bump mapping. __global__ void shadeFakeMaterial ( int iter , int num_paths , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments , Material * materials ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... // Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; // If the material indicates that the object was a light, "light" the ray if (material.emittance > 0.0f) { pathSegments[idx].color *= (materialColor * material.emittance); } // Otherwise, do some pseudo-lighting computation. This is actually more // like what you would expect from shading in a rasterizer like OpenGL. // TODO: replace this! you should be able to start with basically a one-liner else { float lightTerm = glm::dot(intersection.surfaceNormal, glm::vec3(0.0f, 1.0f, 0.0f)); pathSegments[idx].color *= (materialColor * lightTerm) * 0.3f + ((1.0f - intersection.t * 0.02f) * materialColor) * 0.7f; pathSegments[idx].color *= u01(rng); // apply some noise because why not } // If there was no intersection, color the ray black. // Lots of renderers use 4 channel color, RGBA, where A = alpha, often // used for opacity, in which case they can indicate "no opacity". // This can be useful for post-processing and image compositing. } else { pathSegments[idx].color = glm::vec3(0.0f); } } } __global__ void shadeMaterial( int iter , int num_paths , ShadeableIntersection * shadeableIntersections , PathSegment * pathSegments , Material * materials ) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < num_paths) { ShadeableIntersection intersection = shadeableIntersections[idx]; if (intersection.t > 0.0f) { // if the intersection exists... 
// Set up the RNG // LOOK: this is how you use thrust's RNG! Please look at // makeSeededRandomEngine as well. thrust::default_random_engine rng = makeSeededRandomEngine(iter, idx, 0); thrust::uniform_real_distribution<float> u01(0, 1); Material material = materials[intersection.materialId]; glm::vec3 materialColor = material.color; if (pathSegments[idx].remainingBounces > 0) { scatterRay(pathSegments[idx], intersection.t*pathSegments[idx].ray.direction+pathSegments[idx].ray.origin, intersection.surfaceNormal, materials[intersection.materialId], rng); } } else { #if FULLLIGHT == 0 pathSegments[idx].color = glm::vec3(0.0f); #else pathSegments[idx].color *= glm::vec3(0.1f,0.1f,0.1f); #endif pathSegments[idx].remainingBounces = 0; } } } // Add the current iteration's output to the overall image __global__ void finalGather(int nPaths, glm::vec3 * image, PathSegment * iterationPaths) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < nPaths) { PathSegment iterationPath = iterationPaths[index]; image[iterationPath.pixelIndex] += iterationPath.color; } } __global__ void initializechecker(int checknums, int* checker) { int index = (blockIdx.x * blockDim.x) + threadIdx.x; if (index < checknums) { checker[index] = -1; } } /** * Wrapper for the __global__ call that sets up the kernel calls and does a ton * of memory management */ void pathtrace(uchar4 *pbo, int frame, int iter) { const int traceDepth = hst_scene->state.traceDepth; const Camera &cam = hst_scene->state.camera; const int pixelcount = cam.resolution.x * cam.resolution.y; // 2D block for generating ray from camera const dim3 blockSize2d(8, 8); const dim3 blocksPerGrid2d( (cam.resolution.x + blockSize2d.x - 1) / blockSize2d.x, (cam.resolution.y + blockSize2d.y - 1) / blockSize2d.y); // 1D block for path tracing const int blockSize1d = 128; dim3 nums = (hst_scene->KDtreeforGPU.size() + blockSize1d - 1) / blockSize1d; initializechecker << <nums, blockSize1d >> >(hst_scene->KDtreeforGPU.size(),dev_idxchecker); /////////////////////////////////////////////////////////////////////////// // Recap: // * Initialize array of path rays (using rays that come out of the camera) // * You can pass the Camera object to that kernel. // * Each path ray must carry at minimum a (ray, color) pair, // * where color starts as the multiplicative identity, white = (1, 1, 1). // * This has already been done for you. // * For each depth: // * Compute an intersection in the scene for each path ray. // A very naive version of this has been implemented for you, but feel // free to add more primitives and/or a better algorithm. // Currently, intersection distance is recorded as a parametric distance, // t, or a "distance along the ray." t = -1.0 indicates no intersection. // * Color is attenuated (multiplied) by reflections off of any object // * TODO: Stream compact away all of the terminated paths. // You may use either your implementation or `thrust::remove_if` or its // cousins. // * Note that you can't really use a 2D kernel launch any more - switch // to 1D. // * TODO: Shade the rays that intersected something or didn't bottom out. // That is, color the ray by performing a color computation according // to the shader, then generate a new ray to continue the ray path. // We recommend just updating the ray's PathSegment in place. // Note that this step may come before or after stream compaction, // since some shaders you write may also cause a path to terminate. // * Finally, add this iteration's results to the image. This has been done // for you. 
// TODO: perform one iteration of path tracing generateRayFromCamera <<<blocksPerGrid2d, blockSize2d >>>(cam, iter, traceDepth, dev_paths,dev_wavelen); checkCUDAError("generate camera ray"); int depth = 0; PathSegment* dev_path_end = dev_paths + pixelcount; int num_paths = dev_path_end - dev_paths; int output_num_paths = num_paths; // --- PathSegment Tracing Stage --- // Shoot ray into scene, bounce between objects, push shading chunks int count = 0; bool iterationComplete = false; while (!iterationComplete) { // clean shading chunks cudaMemset(dev_intersections, 0, pixelcount * sizeof(ShadeableIntersection)); // tracing dim3 numblocksPathSegmentTracing = (num_paths + blockSize1d - 1) / blockSize1d; #if (CACHE ==1&&ANTIALIASING == 0&&DOFTOGGLE == 0) if(iter==1&&depth==0) { computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections , dev_meshs , dev_triangles #ifdef TOGGLEKD , dev_KDtreenode , hst_scene->KDtreeforGPU.size() , dev_gputriidxlst , hst_scene->triangles.size() , dev_idxchecker #endif ); cudaMemcpy(dev_intersections_cache, dev_intersections, pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice); } if (iter > 1 && depth == 0) { cudaMemcpy(dev_intersections, dev_intersections_cache, pixelcount * sizeof(ShadeableIntersection), cudaMemcpyDeviceToDevice); } else { if (depth > 0) { computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections , dev_meshs , dev_triangles #ifdef TOGGLEKD , dev_KDtreenode , hst_scene->KDtreeforGPU.size() , dev_gputriidxlst , hst_scene->triangles.size() , dev_idxchecker #endif ); } } #else computeIntersections << <numblocksPathSegmentTracing, blockSize1d >> > ( depth , num_paths , dev_paths , dev_geoms , hst_scene->geoms.size() , dev_intersections , dev_meshs , dev_triangles #ifdef TOGGLEKD , dev_KDtreenode , hst_scene->KDtreeforGPU.size() , dev_gputriidxlst , hst_scene->triangles.size() , dev_idxchecker #endif ); #endif checkCUDAError("trace one bounce"); cudaDeviceSynchronize(); depth++; //SORT dev_paths and dev_intersections by material id, don't toggle on unless number of materials is large, or it would slow the program down. #if SORTBYMATERIAL == 1 thrust::sort_by_key(thrust::device,dev_intersections, dev_intersections + num_paths, dev_paths, _test_material_()); #endif // SORTBYMATERIAL // TODO: // --- Shading Stage --- // Shade path segments based on intersections and generate new rays by // evaluating the BSDF. // Start off with just a big kernel that handles all the different // materials you have in the scenefile. // TODO: compare between directly shading the path segments and shading // path segments that have been reshuffled to be contiguous in memory. shadeMaterial<<<numblocksPathSegmentTracing, blockSize1d>>> ( iter, num_paths, dev_intersections, dev_paths, dev_materials ); //compaction pathsegments using thrust's partition #if COMPACTION==1 PathSegment *_iter_second_begin_ = thrust::partition(thrust::device, dev_paths, dev_paths + num_paths, _test_bounce_()); num_paths = _iter_second_begin_ - dev_paths; if (num_paths > 0) continue; else iterationComplete = true; #endif #if COMPACTION==1 #elif COMPACTION==0 count ++ ; if(count>8) iterationComplete = true; #endif // TODO: should be based off stream compaction results. 
} // Assemble this iteration and apply it to the image dim3 numBlocksPixels = (pixelcount + blockSize1d - 1) / blockSize1d; finalGather<<<numBlocksPixels, blockSize1d>>>(output_num_paths, dev_image, dev_paths); /////////////////////////////////////////////////////////////////////////// // Send results to OpenGL buffer for rendering sendImageToPBO<<<blocksPerGrid2d, blockSize2d>>>(pbo, cam.resolution, iter, dev_image); // Retrieve image from GPU cudaMemcpy(hst_scene->state.image.data(), dev_image, pixelcount * sizeof(glm::vec3), cudaMemcpyDeviceToHost); checkCUDAError("pathtrace"); }
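The pair above illustrates what every row of this corpus records: the hipified file is a mechanical one-for-one rename of the CUDA original (hipMalloc/hipMemcpy/hipFree for cudaMalloc/cudaMemcpy/cudaFree, hipDeviceSynchronize and hipGetLastError for their cuda* counterparts, hipMemcpyHostToDevice for cudaMemcpyHostToDevice), with triple-chevron kernel launches rewritten as hipLaunchKernelGGL calls. The short saxpy program below is a minimal sketch of that mapping; it is a hypothetical toy example, not one of the corpus rows, and the trailing comments note the CUDA statement each HIP call replaces.

#include <hip/hip_runtime.h>
#include <cstdio>
#include <vector>

// Same kernel body in both dialects; only the host-side API names change.
__global__ void saxpy(int n, float a, const float* x, float* y) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = a * x[i] + y[i];
}

int main() {
    const int n = 1 << 20;
    std::vector<float> hx(n, 1.0f), hy(n, 2.0f);
    float *dx = nullptr, *dy = nullptr;
    hipMalloc(&dx, n * sizeof(float));                                   // was cudaMalloc
    hipMalloc(&dy, n * sizeof(float));
    hipMemcpy(dx, hx.data(), n * sizeof(float), hipMemcpyHostToDevice);  // was cudaMemcpy(..., cudaMemcpyHostToDevice)
    hipMemcpy(dy, hy.data(), n * sizeof(float), hipMemcpyHostToDevice);
    dim3 block(256), grid((n + block.x - 1) / block.x);
    // was: saxpy<<<grid, block>>>(n, 2.0f, dx, dy);
    hipLaunchKernelGGL(saxpy, grid, block, 0, 0, n, 2.0f, dx, dy);
    hipDeviceSynchronize();                                              // was cudaDeviceSynchronize
    hipMemcpy(hy.data(), dy, n * sizeof(float), hipMemcpyDeviceToHost);  // was cudaMemcpy(..., cudaMemcpyDeviceToHost)
    printf("y[0] = %f\n", hy[0]);                                        // expect 4.0 = 2*1 + 2
    hipFree(dx);                                                         // was cudaFree
    hipFree(dy);
    return 0;
}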
5da120bf64871065b614ace3403ece5deeee6cb3.hip
// !!! This is a file automatically generated by hipify!!! #include "BufferCompaction.h" #include "GpuMemUtils.h" #include "GpuRtConstants.h" #include "ResultSetBufferAccessors.h" #include "ResultSetSortImpl.h" #include "SortUtils.cuh" #include <thrust/copy.h> #include <thrust/execution_policy.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #define FORCE_CPU_VERSION #include "BufferEntryUtils.h" #undef FORCE_CPU_VERSION namespace { template <class K, class V, class I> std::vector<uint32_t> do_radix_sort(const ExecutorDeviceType device_type, ThrustAllocator& thrust_allocator, const int8_t* groupby_buffer, V dev_oe_col_buffer_begin, V dev_oe_col_buffer_end, I dev_idx_buff_begin, const size_t dev_idx_buff_size, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n) { if (dev_idx_buff_size == 0) { return {}; } if (oe.is_desc) { if (device_type == ExecutorDeviceType::GPU) { thrust::sort_by_key(thrust::device(thrust_allocator), dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin, thrust::greater<int64_t>()); } else { thrust::sort_by_key(dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin, thrust::greater<int64_t>()); } } else { if (device_type == ExecutorDeviceType::GPU) { thrust::sort_by_key(thrust::device(thrust_allocator), dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin); } else { thrust::sort_by_key( dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin); } } // Speculatively transfer only the top_n first, most of the time it'll be enough. thrust::host_vector<uint32_t> host_vector_result( dev_idx_buff_begin, dev_idx_buff_begin + ::min(top_n, dev_idx_buff_size)); // Sometimes, radix sort can bring to the front entries which are empty. // For example, ascending sort on COUNT(*) will bring non-existent groups // to the front of dev_idx_buff since they're 0 in our system. Re-do the // transfer in that case to bring the entire dev_idx_buff; existing logic // in row iteration will take care of skipping the empty rows. for (size_t i = 0; i < host_vector_result.size(); ++i) { const auto entry_idx = host_vector_result[i]; if (is_empty_entry<K>(entry_idx, groupby_buffer, layout.row_bytes)) { host_vector_result = thrust::host_vector<uint32_t>( dev_idx_buff_begin, dev_idx_buff_begin + dev_idx_buff_size); break; } } std::vector<uint32_t> result; result.reserve(::min(top_n, host_vector_result.size())); for (size_t i = 0; i < host_vector_result.size(); ++i) { const auto entry_idx = host_vector_result[i]; if (!is_empty_entry<K>(entry_idx, groupby_buffer, layout.row_bytes)) { result.push_back(entry_idx); if (result.size() >= top_n) { break; } } } return result; } void add_nulls(std::vector<uint32_t>& idx_buff, const std::vector<uint32_t>& null_idx_buff, const PodOrderEntry& oe) { if (null_idx_buff.empty()) { return; } const auto insertion_point = oe.nulls_first ? 
idx_buff.begin() : idx_buff.end(); idx_buff.insert(insertion_point, null_idx_buff.begin(), null_idx_buff.end()); } template <typename T> thrust::device_ptr<T> get_device_copy_ptr(const thrust::host_vector<T>& host_vec, ThrustAllocator& thrust_allocator) { if (host_vec.empty()) { return thrust::device_ptr<T>(static_cast<T*>(nullptr)); } const auto host_vec_bytes = host_vec.size() * sizeof(T); T* dev_ptr = reinterpret_cast<T*>( thrust_allocator.allocateScopedBuffer(align_to_int64(host_vec_bytes))); copy_to_nvidia_gpu(thrust_allocator.getDataMgr(), reinterpret_cast<hipDeviceptr_t>(dev_ptr), &host_vec[0], host_vec_bytes, thrust_allocator.getDeviceId()); return thrust::device_ptr<T>(dev_ptr); } template <class K> std::vector<uint32_t> baseline_sort_fp(const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const thrust::host_vector<int64_t>& oe_col_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step) { thrust::host_vector<uint32_t> neg_idx_buff; thrust::host_vector<uint32_t> pos_idx_buff; std::vector<uint32_t> null_idx_buff; thrust::host_vector<int64_t> neg_oe_col_buffer; thrust::host_vector<int64_t> pos_oe_col_buffer; const auto slice_entry_count = layout.entry_count / step + (layout.entry_count % step ? 1 : 0); neg_idx_buff.reserve(slice_entry_count); pos_idx_buff.reserve(slice_entry_count); null_idx_buff.reserve(slice_entry_count); neg_oe_col_buffer.reserve(slice_entry_count); pos_oe_col_buffer.reserve(slice_entry_count); size_t oe_col_buffer_idx = 0; const auto& oe_info = layout.oe_target_info; const auto col_ti = oe_info.agg_kind == kAVG ? SQLTypeInfo(kDOUBLE, false) : oe_info.sql_type; // Execlude AVG b/c collect_order_entry_column already makes its pair collapse into a // double const bool float_argument_input = takes_float_argument(oe_info) && oe_info.agg_kind != kAVG; auto is_negative = float_argument_input ? 
[](const int64_t v) -> bool { return (v & (1 << 31)) != 0; } : [](const int64_t v) -> bool { return v < 0; }; for (size_t i = start; i < layout.entry_count; i += step, ++oe_col_buffer_idx) { if (!is_empty_entry<K>(i, groupby_buffer, layout.row_bytes) && oe_col_buffer[oe_col_buffer_idx] == null_val_bit_pattern(col_ti, float_argument_input)) { null_idx_buff.push_back(i); continue; } if (is_negative(oe_col_buffer[oe_col_buffer_idx])) { // sign bit works the same for // integer and floating point neg_idx_buff.push_back(i); neg_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]); } else { pos_idx_buff.push_back(i); pos_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]); } } std::vector<uint32_t> pos_result; ThrustAllocator thrust_allocator(data_mgr, device_id); if (device_type == ExecutorDeviceType::GPU) { const auto dev_pos_idx_buff = get_device_copy_ptr(pos_idx_buff, thrust_allocator); const auto dev_pos_oe_col_buffer = get_device_copy_ptr(pos_oe_col_buffer, thrust_allocator); pos_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, dev_pos_oe_col_buffer, dev_pos_oe_col_buffer + pos_oe_col_buffer.size(), dev_pos_idx_buff, pos_idx_buff.size(), oe, layout, top_n); } else { CHECK(device_type == ExecutorDeviceType::CPU); pos_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, pos_oe_col_buffer.begin(), pos_oe_col_buffer.end(), pos_idx_buff.begin(), pos_idx_buff.size(), oe, layout, top_n); } std::vector<uint32_t> neg_result; PodOrderEntry reverse_oe{oe.tle_no, !oe.is_desc, oe.nulls_first}; if (device_type == ExecutorDeviceType::GPU) { const auto dev_neg_idx_buff = get_device_copy_ptr(neg_idx_buff, thrust_allocator); const auto dev_neg_oe_col_buffer = get_device_copy_ptr(neg_oe_col_buffer, thrust_allocator); neg_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, dev_neg_oe_col_buffer, dev_neg_oe_col_buffer + neg_oe_col_buffer.size(), dev_neg_idx_buff, neg_idx_buff.size(), reverse_oe, layout, top_n); } else { CHECK(device_type == ExecutorDeviceType::CPU); neg_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, neg_oe_col_buffer.begin(), neg_oe_col_buffer.end(), neg_idx_buff.begin(), neg_idx_buff.size(), reverse_oe, layout, top_n); } if (oe.is_desc) { pos_result.insert(pos_result.end(), neg_result.begin(), neg_result.end()); add_nulls(pos_result, null_idx_buff, oe); return pos_result; } neg_result.insert(neg_result.end(), pos_result.begin(), pos_result.end()); add_nulls(neg_result, null_idx_buff, oe); return neg_result; } template <class K> std::vector<uint32_t> baseline_sort_int(const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const thrust::host_vector<int64_t>& oe_col_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step) { const auto& entry_ti = get_compact_type(layout.oe_target_info); std::vector<uint32_t> null_idx_buff; thrust::host_vector<uint32_t> notnull_idx_buff; const auto slice_entry_count = layout.entry_count / step + (layout.entry_count % step ? 
1 : 0); null_idx_buff.reserve(slice_entry_count); notnull_idx_buff.reserve(slice_entry_count); thrust::host_vector<int64_t> notnull_oe_col_buffer; notnull_oe_col_buffer.reserve(slice_entry_count); size_t oe_col_buffer_idx = 0; for (size_t i = start; i < layout.entry_count; i += step, ++oe_col_buffer_idx) { if (!is_empty_entry<K>(i, groupby_buffer, layout.row_bytes) && oe_col_buffer[oe_col_buffer_idx] == null_val_bit_pattern(entry_ti, false)) { null_idx_buff.push_back(i); } else { notnull_idx_buff.push_back(i); notnull_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]); } } std::vector<uint32_t> notnull_result; ThrustAllocator thrust_allocator(data_mgr, device_id); if (device_type == ExecutorDeviceType::GPU) { const auto dev_notnull_idx_buff = get_device_copy_ptr(notnull_idx_buff, thrust_allocator); const auto dev_notnull_oe_col_buffer = get_device_copy_ptr(notnull_oe_col_buffer, thrust_allocator); notnull_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, dev_notnull_oe_col_buffer, dev_notnull_oe_col_buffer + notnull_oe_col_buffer.size(), dev_notnull_idx_buff, notnull_idx_buff.size(), oe, layout, top_n); } else { CHECK(device_type == ExecutorDeviceType::CPU); notnull_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, notnull_oe_col_buffer.begin(), notnull_oe_col_buffer.end(), notnull_idx_buff.begin(), notnull_idx_buff.size(), oe, layout, top_n); } add_nulls(notnull_result, null_idx_buff, oe); return notnull_result; } template <class K> thrust::host_vector<int64_t> collect_order_entry_column( const int8_t* groupby_buffer, const GroupByBufferLayoutInfo& layout, const size_t start, const size_t step) { thrust::host_vector<int64_t> oe_col_buffer; const auto row_ptr = groupby_buffer + start * layout.row_bytes; auto crt_group_ptr1 = layout.target_groupby_index >= 0 ? row_ptr + layout.target_groupby_index * sizeof(K) : row_ptr + layout.col_off; const int8_t* crt_group_ptr2{nullptr}; if (layout.oe_target_info.agg_kind == kAVG) { crt_group_ptr2 = crt_group_ptr1 + layout.col_bytes; } const auto entry_ti = get_compact_type(layout.oe_target_info); const bool float_argument_input = takes_float_argument(layout.oe_target_info); const auto step_bytes = layout.row_bytes * step; const auto col_bytes = float_argument_input ? entry_ti.get_size() : layout.col_bytes; for (size_t i = start; i < layout.entry_count; i += step) { auto val1 = read_int_from_buff(crt_group_ptr1, col_bytes > 0 ? 
col_bytes : sizeof(K)); if (crt_group_ptr2) { const auto val2 = read_int_from_buff(crt_group_ptr2, 8); const auto avg_val = pair_to_double({val1, val2}, entry_ti, float_argument_input); val1 = *reinterpret_cast<const int64_t*>(&avg_val); } oe_col_buffer.push_back(val1); crt_group_ptr1 += step_bytes; if (crt_group_ptr2) { crt_group_ptr2 += step_bytes; } } return oe_col_buffer; } } // namespace template <class K> std::vector<uint32_t> baseline_sort(const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step) { auto oe_col_buffer = collect_order_entry_column<K>(groupby_buffer, layout, start, step); const auto& entry_ti = get_compact_type(layout.oe_target_info); CHECK(entry_ti.is_number()); if (entry_ti.is_fp() || layout.oe_target_info.agg_kind == kAVG) { return baseline_sort_fp<K>(device_type, device_id, data_mgr, groupby_buffer, oe_col_buffer, oe, layout, top_n, start, step); } // Because of how we represent nulls for integral types, they'd be at the // wrong position in these two cases. Separate them into a different buffer. if ((oe.is_desc && oe.nulls_first) || (!oe.is_desc && !oe.nulls_first)) { return baseline_sort_int<K>(device_type, device_id, data_mgr, groupby_buffer, oe_col_buffer, oe, layout, top_n, start, step); } ThrustAllocator thrust_allocator(data_mgr, device_id); // Fastest path, no need to separate nulls away since they'll end up at the // right place as a side effect of how we're representing nulls. if (device_type == ExecutorDeviceType::GPU) { if (oe_col_buffer.empty()) { return {}; } const auto dev_idx_buff = get_device_ptr<uint32_t>(oe_col_buffer.size(), thrust_allocator); thrust::sequence(dev_idx_buff, dev_idx_buff + oe_col_buffer.size(), start, step); const auto dev_oe_col_buffer = get_device_copy_ptr(oe_col_buffer, thrust_allocator); return do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, dev_oe_col_buffer, dev_oe_col_buffer + oe_col_buffer.size(), dev_idx_buff, oe_col_buffer.size(), oe, layout, top_n); } CHECK(device_type == ExecutorDeviceType::CPU); thrust::host_vector<uint32_t> host_idx_buff(oe_col_buffer.size()); thrust::sequence(host_idx_buff.begin(), host_idx_buff.end(), start, step); return do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, oe_col_buffer.begin(), oe_col_buffer.end(), host_idx_buff.begin(), host_idx_buff.size(), oe, layout, top_n); } template std::vector<uint32_t> baseline_sort<int32_t>( const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step); template std::vector<uint32_t> baseline_sort<int64_t>( const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step);
5da120bf64871065b614ace3403ece5deeee6cb3.cu
#include "BufferCompaction.h" #include "GpuMemUtils.h" #include "GpuRtConstants.h" #include "ResultSetBufferAccessors.h" #include "ResultSetSortImpl.h" #include "SortUtils.cuh" #include <thrust/copy.h> #include <thrust/execution_policy.h> #include <thrust/host_vector.h> #include <thrust/sort.h> #define FORCE_CPU_VERSION #include "BufferEntryUtils.h" #undef FORCE_CPU_VERSION namespace { template <class K, class V, class I> std::vector<uint32_t> do_radix_sort(const ExecutorDeviceType device_type, ThrustAllocator& thrust_allocator, const int8_t* groupby_buffer, V dev_oe_col_buffer_begin, V dev_oe_col_buffer_end, I dev_idx_buff_begin, const size_t dev_idx_buff_size, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n) { if (dev_idx_buff_size == 0) { return {}; } if (oe.is_desc) { if (device_type == ExecutorDeviceType::GPU) { thrust::sort_by_key(thrust::device(thrust_allocator), dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin, thrust::greater<int64_t>()); } else { thrust::sort_by_key(dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin, thrust::greater<int64_t>()); } } else { if (device_type == ExecutorDeviceType::GPU) { thrust::sort_by_key(thrust::device(thrust_allocator), dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin); } else { thrust::sort_by_key( dev_oe_col_buffer_begin, dev_oe_col_buffer_end, dev_idx_buff_begin); } } // Speculatively transfer only the top_n first, most of the time it'll be enough. thrust::host_vector<uint32_t> host_vector_result( dev_idx_buff_begin, dev_idx_buff_begin + std::min(top_n, dev_idx_buff_size)); // Sometimes, radix sort can bring to the front entries which are empty. // For example, ascending sort on COUNT(*) will bring non-existent groups // to the front of dev_idx_buff since they're 0 in our system. Re-do the // transfer in that case to bring the entire dev_idx_buff; existing logic // in row iteration will take care of skipping the empty rows. for (size_t i = 0; i < host_vector_result.size(); ++i) { const auto entry_idx = host_vector_result[i]; if (is_empty_entry<K>(entry_idx, groupby_buffer, layout.row_bytes)) { host_vector_result = thrust::host_vector<uint32_t>( dev_idx_buff_begin, dev_idx_buff_begin + dev_idx_buff_size); break; } } std::vector<uint32_t> result; result.reserve(std::min(top_n, host_vector_result.size())); for (size_t i = 0; i < host_vector_result.size(); ++i) { const auto entry_idx = host_vector_result[i]; if (!is_empty_entry<K>(entry_idx, groupby_buffer, layout.row_bytes)) { result.push_back(entry_idx); if (result.size() >= top_n) { break; } } } return result; } void add_nulls(std::vector<uint32_t>& idx_buff, const std::vector<uint32_t>& null_idx_buff, const PodOrderEntry& oe) { if (null_idx_buff.empty()) { return; } const auto insertion_point = oe.nulls_first ? 
idx_buff.begin() : idx_buff.end(); idx_buff.insert(insertion_point, null_idx_buff.begin(), null_idx_buff.end()); } template <typename T> thrust::device_ptr<T> get_device_copy_ptr(const thrust::host_vector<T>& host_vec, ThrustAllocator& thrust_allocator) { if (host_vec.empty()) { return thrust::device_ptr<T>(static_cast<T*>(nullptr)); } const auto host_vec_bytes = host_vec.size() * sizeof(T); T* dev_ptr = reinterpret_cast<T*>( thrust_allocator.allocateScopedBuffer(align_to_int64(host_vec_bytes))); copy_to_nvidia_gpu(thrust_allocator.getDataMgr(), reinterpret_cast<CUdeviceptr>(dev_ptr), &host_vec[0], host_vec_bytes, thrust_allocator.getDeviceId()); return thrust::device_ptr<T>(dev_ptr); } template <class K> std::vector<uint32_t> baseline_sort_fp(const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const thrust::host_vector<int64_t>& oe_col_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step) { thrust::host_vector<uint32_t> neg_idx_buff; thrust::host_vector<uint32_t> pos_idx_buff; std::vector<uint32_t> null_idx_buff; thrust::host_vector<int64_t> neg_oe_col_buffer; thrust::host_vector<int64_t> pos_oe_col_buffer; const auto slice_entry_count = layout.entry_count / step + (layout.entry_count % step ? 1 : 0); neg_idx_buff.reserve(slice_entry_count); pos_idx_buff.reserve(slice_entry_count); null_idx_buff.reserve(slice_entry_count); neg_oe_col_buffer.reserve(slice_entry_count); pos_oe_col_buffer.reserve(slice_entry_count); size_t oe_col_buffer_idx = 0; const auto& oe_info = layout.oe_target_info; const auto col_ti = oe_info.agg_kind == kAVG ? SQLTypeInfo(kDOUBLE, false) : oe_info.sql_type; // Execlude AVG b/c collect_order_entry_column already makes its pair collapse into a // double const bool float_argument_input = takes_float_argument(oe_info) && oe_info.agg_kind != kAVG; auto is_negative = float_argument_input ? 
[](const int64_t v) -> bool { return (v & (1 << 31)) != 0; } : [](const int64_t v) -> bool { return v < 0; }; for (size_t i = start; i < layout.entry_count; i += step, ++oe_col_buffer_idx) { if (!is_empty_entry<K>(i, groupby_buffer, layout.row_bytes) && oe_col_buffer[oe_col_buffer_idx] == null_val_bit_pattern(col_ti, float_argument_input)) { null_idx_buff.push_back(i); continue; } if (is_negative(oe_col_buffer[oe_col_buffer_idx])) { // sign bit works the same for // integer and floating point neg_idx_buff.push_back(i); neg_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]); } else { pos_idx_buff.push_back(i); pos_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]); } } std::vector<uint32_t> pos_result; ThrustAllocator thrust_allocator(data_mgr, device_id); if (device_type == ExecutorDeviceType::GPU) { const auto dev_pos_idx_buff = get_device_copy_ptr(pos_idx_buff, thrust_allocator); const auto dev_pos_oe_col_buffer = get_device_copy_ptr(pos_oe_col_buffer, thrust_allocator); pos_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, dev_pos_oe_col_buffer, dev_pos_oe_col_buffer + pos_oe_col_buffer.size(), dev_pos_idx_buff, pos_idx_buff.size(), oe, layout, top_n); } else { CHECK(device_type == ExecutorDeviceType::CPU); pos_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, pos_oe_col_buffer.begin(), pos_oe_col_buffer.end(), pos_idx_buff.begin(), pos_idx_buff.size(), oe, layout, top_n); } std::vector<uint32_t> neg_result; PodOrderEntry reverse_oe{oe.tle_no, !oe.is_desc, oe.nulls_first}; if (device_type == ExecutorDeviceType::GPU) { const auto dev_neg_idx_buff = get_device_copy_ptr(neg_idx_buff, thrust_allocator); const auto dev_neg_oe_col_buffer = get_device_copy_ptr(neg_oe_col_buffer, thrust_allocator); neg_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, dev_neg_oe_col_buffer, dev_neg_oe_col_buffer + neg_oe_col_buffer.size(), dev_neg_idx_buff, neg_idx_buff.size(), reverse_oe, layout, top_n); } else { CHECK(device_type == ExecutorDeviceType::CPU); neg_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, neg_oe_col_buffer.begin(), neg_oe_col_buffer.end(), neg_idx_buff.begin(), neg_idx_buff.size(), reverse_oe, layout, top_n); } if (oe.is_desc) { pos_result.insert(pos_result.end(), neg_result.begin(), neg_result.end()); add_nulls(pos_result, null_idx_buff, oe); return pos_result; } neg_result.insert(neg_result.end(), pos_result.begin(), pos_result.end()); add_nulls(neg_result, null_idx_buff, oe); return neg_result; } template <class K> std::vector<uint32_t> baseline_sort_int(const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const thrust::host_vector<int64_t>& oe_col_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step) { const auto& entry_ti = get_compact_type(layout.oe_target_info); std::vector<uint32_t> null_idx_buff; thrust::host_vector<uint32_t> notnull_idx_buff; const auto slice_entry_count = layout.entry_count / step + (layout.entry_count % step ? 
1 : 0); null_idx_buff.reserve(slice_entry_count); notnull_idx_buff.reserve(slice_entry_count); thrust::host_vector<int64_t> notnull_oe_col_buffer; notnull_oe_col_buffer.reserve(slice_entry_count); size_t oe_col_buffer_idx = 0; for (size_t i = start; i < layout.entry_count; i += step, ++oe_col_buffer_idx) { if (!is_empty_entry<K>(i, groupby_buffer, layout.row_bytes) && oe_col_buffer[oe_col_buffer_idx] == null_val_bit_pattern(entry_ti, false)) { null_idx_buff.push_back(i); } else { notnull_idx_buff.push_back(i); notnull_oe_col_buffer.push_back(oe_col_buffer[oe_col_buffer_idx]); } } std::vector<uint32_t> notnull_result; ThrustAllocator thrust_allocator(data_mgr, device_id); if (device_type == ExecutorDeviceType::GPU) { const auto dev_notnull_idx_buff = get_device_copy_ptr(notnull_idx_buff, thrust_allocator); const auto dev_notnull_oe_col_buffer = get_device_copy_ptr(notnull_oe_col_buffer, thrust_allocator); notnull_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, dev_notnull_oe_col_buffer, dev_notnull_oe_col_buffer + notnull_oe_col_buffer.size(), dev_notnull_idx_buff, notnull_idx_buff.size(), oe, layout, top_n); } else { CHECK(device_type == ExecutorDeviceType::CPU); notnull_result = do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, notnull_oe_col_buffer.begin(), notnull_oe_col_buffer.end(), notnull_idx_buff.begin(), notnull_idx_buff.size(), oe, layout, top_n); } add_nulls(notnull_result, null_idx_buff, oe); return notnull_result; } template <class K> thrust::host_vector<int64_t> collect_order_entry_column( const int8_t* groupby_buffer, const GroupByBufferLayoutInfo& layout, const size_t start, const size_t step) { thrust::host_vector<int64_t> oe_col_buffer; const auto row_ptr = groupby_buffer + start * layout.row_bytes; auto crt_group_ptr1 = layout.target_groupby_index >= 0 ? row_ptr + layout.target_groupby_index * sizeof(K) : row_ptr + layout.col_off; const int8_t* crt_group_ptr2{nullptr}; if (layout.oe_target_info.agg_kind == kAVG) { crt_group_ptr2 = crt_group_ptr1 + layout.col_bytes; } const auto entry_ti = get_compact_type(layout.oe_target_info); const bool float_argument_input = takes_float_argument(layout.oe_target_info); const auto step_bytes = layout.row_bytes * step; const auto col_bytes = float_argument_input ? entry_ti.get_size() : layout.col_bytes; for (size_t i = start; i < layout.entry_count; i += step) { auto val1 = read_int_from_buff(crt_group_ptr1, col_bytes > 0 ? 
col_bytes : sizeof(K)); if (crt_group_ptr2) { const auto val2 = read_int_from_buff(crt_group_ptr2, 8); const auto avg_val = pair_to_double({val1, val2}, entry_ti, float_argument_input); val1 = *reinterpret_cast<const int64_t*>(&avg_val); } oe_col_buffer.push_back(val1); crt_group_ptr1 += step_bytes; if (crt_group_ptr2) { crt_group_ptr2 += step_bytes; } } return oe_col_buffer; } } // namespace template <class K> std::vector<uint32_t> baseline_sort(const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step) { auto oe_col_buffer = collect_order_entry_column<K>(groupby_buffer, layout, start, step); const auto& entry_ti = get_compact_type(layout.oe_target_info); CHECK(entry_ti.is_number()); if (entry_ti.is_fp() || layout.oe_target_info.agg_kind == kAVG) { return baseline_sort_fp<K>(device_type, device_id, data_mgr, groupby_buffer, oe_col_buffer, oe, layout, top_n, start, step); } // Because of how we represent nulls for integral types, they'd be at the // wrong position in these two cases. Separate them into a different buffer. if ((oe.is_desc && oe.nulls_first) || (!oe.is_desc && !oe.nulls_first)) { return baseline_sort_int<K>(device_type, device_id, data_mgr, groupby_buffer, oe_col_buffer, oe, layout, top_n, start, step); } ThrustAllocator thrust_allocator(data_mgr, device_id); // Fastest path, no need to separate nulls away since they'll end up at the // right place as a side effect of how we're representing nulls. if (device_type == ExecutorDeviceType::GPU) { if (oe_col_buffer.empty()) { return {}; } const auto dev_idx_buff = get_device_ptr<uint32_t>(oe_col_buffer.size(), thrust_allocator); thrust::sequence(dev_idx_buff, dev_idx_buff + oe_col_buffer.size(), start, step); const auto dev_oe_col_buffer = get_device_copy_ptr(oe_col_buffer, thrust_allocator); return do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, dev_oe_col_buffer, dev_oe_col_buffer + oe_col_buffer.size(), dev_idx_buff, oe_col_buffer.size(), oe, layout, top_n); } CHECK(device_type == ExecutorDeviceType::CPU); thrust::host_vector<uint32_t> host_idx_buff(oe_col_buffer.size()); thrust::sequence(host_idx_buff.begin(), host_idx_buff.end(), start, step); return do_radix_sort<K>(device_type, thrust_allocator, groupby_buffer, oe_col_buffer.begin(), oe_col_buffer.end(), host_idx_buff.begin(), host_idx_buff.size(), oe, layout, top_n); } template std::vector<uint32_t> baseline_sort<int32_t>( const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step); template std::vector<uint32_t> baseline_sort<int64_t>( const ExecutorDeviceType device_type, const int device_id, Data_Namespace::DataMgr* data_mgr, const int8_t* groupby_buffer, const PodOrderEntry& oe, const GroupByBufferLayoutInfo& layout, const size_t top_n, const size_t start, const size_t step);
ad8bc2c4bf759c739a1d937e4b6500b69c617103.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Cuckoo Cycle, a memory-hard proof-of-work // Copyright (c) 2013-2016 John Tromp // The edge-trimming memory optimization is due to Dave Andersen // http://da-data.blogspot.com/2014/03/a-public-review-of-cuckoo-cycle.html #include <stdint.h> #include <string.h> #include "cuckoo.h" #if SIZESHIFT <= 32 typedef u32 nonce_t; typedef u32 node_t; #else typedef u64 nonce_t; typedef u64 node_t; #endif #include <openssl/sha.h> // d(evice s)ipnode #if (__CUDA_ARCH__ >= 320) // redefine ROTL to use funnel shifter, 3% speed gain static __device__ __forceinline__ uint2 operator^ (uint2 a, uint2 b) { return make_uint2(a.x ^ b.x, a.y ^ b.y); } static __device__ __forceinline__ void operator^= (uint2 &a, uint2 b) { a.x ^= b.x, a.y ^= b.y; } static __device__ __forceinline__ void operator+= (uint2 &a, uint2 b) { asm("{\n\tadd.cc.u32 %0,%2,%4;\n\taddc.u32 %1,%3,%5;\n\t}\n\t" : "=r"(a.x), "=r"(a.y) : "r"(a.x), "r"(a.y), "r"(b.x), "r"(b.y)); } #undef ROTL __inline__ __device__ uint2 ROTL(const uint2 a, const int offset) { uint2 result; if (offset >= 32) { asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.x), "r"(a.y), "r"(offset)); asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.y), "r"(a.x), "r"(offset)); } else { asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.y), "r"(a.x), "r"(offset)); asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.x), "r"(a.y), "r"(offset)); } return result; } __device__ __forceinline__ uint2 vectorize(const uint64_t x) { uint2 result; asm("mov.b64 {%0,%1},%2; \n\t" : "=r"(result.x), "=r"(result.y) : "l"(x)); return result; } __device__ __forceinline__ uint64_t devectorize(uint2 x) { uint64_t result; asm("mov.b64 %0,{%1,%2}; \n\t" : "=l"(result) : "r"(x.x), "r"(x.y)); return result; } __device__ node_t dipnode(siphash_keys &keys, nonce_t nce, u32 uorv) { uint2 nonce = vectorize(2*nce + uorv); uint2 v0 = vectorize(keys.k0 ^ 0x736f6d6570736575ULL), v1 = vectorize(keys.k1 ^ 0x646f72616e646f6dULL), v2 = vectorize(keys.k0 ^ 0x6c7967656e657261ULL), v3 = vectorize(keys.k1 ^ 0x7465646279746573ULL) ^ nonce; SIPROUND; SIPROUND; v0 ^= nonce; v2 ^= vectorize(0xff); SIPROUND; SIPROUND; SIPROUND; SIPROUND; return devectorize(v0 ^ v1 ^ v2 ^ v3) & NODEMASK; } #else __device__ node_t dipnode(siphash_keys &keys, nonce_t nce, u32 uorv) { u64 nonce = 2*nce + uorv; u64 v0 = keys.k0 ^ 0x736f6d6570736575ULL, v1 = keys.k0 ^ 0x646f72616e646f6dULL, v2 = keys.k0 ^ 0x6c7967656e657261ULL, v3 = keys.k0 ^ 0x7465646279746573ULL ^ nonce; SIPROUND; SIPROUND; v0 ^= nonce; v2 ^= 0xff; SIPROUND; SIPROUND; SIPROUND; SIPROUND; return (v0 ^ v1 ^ v2 ^ v3) & NODEMASK; } #endif #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <set> // algorithm parameters #ifndef PART_BITS // #bits used to partition edge set processing to save memory // a value of 0 does no partitioning and is fastest // a value of 1 partitions in two, making twice_set the // same size as shrinkingset at about 33% slowdown // higher values are not that interesting #define PART_BITS 0 #endif #ifndef IDXSHIFT // we want sizeof(cuckoo_hash) == sizeof(twice_set), so // CUCKOO_SIZE * sizeof(u64) == TWICE_WORDS * sizeof(u32) // CUCKOO_SIZE * 2 == TWICE_WORDS // (SIZE >> IDXSHIFT) * 2 == 2 * ONCE_BITS / 32 // SIZE >> IDXSHIFT == HALFSIZE >> PART_BITS >> 5 // IDXSHIFT == 1 + PART_BITS + 5 #define IDXSHIFT (PART_BITS + 6) #endif // grow with cube root of size, hardly affected by trimming #define MAXPATHLEN (8 
<< (SIZESHIFT/3)) #define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } // set that starts out full and gets reset by threads on disjoint words class shrinkingset { public: u32 *bits; __device__ void reset(nonce_t n) { bits[n/32] |= 1 << (n%32); } __device__ bool test(node_t n) const { return !((bits[n/32] >> (n%32)) & 1); } __device__ u32 block(node_t n) const { return ~bits[n/32]; } }; #define PART_MASK ((1 << PART_BITS) - 1) #define ONCE_BITS (HALFSIZE >> PART_BITS) #define TWICE_WORDS ((2 * ONCE_BITS) / 32) class twice_set { public: u32 *bits; __device__ void reset() { memset(bits, 0, TWICE_WORDS * sizeof(u32)); } __device__ void set(node_t u) { node_t idx = u/16; u32 bit = 1 << (2 * (u%16)); u32 old = atomicOr(&bits[idx], bit); u32 bit2 = bit<<1; if ((old & (bit2|bit)) == bit) atomicOr(&bits[idx], bit2); } __device__ u32 test(node_t u) const { return (bits[u/16] >> (2 * (u%16))) & 2; } }; #define CUCKOO_SIZE (SIZE >> IDXSHIFT) #define CUCKOO_MASK (CUCKOO_SIZE - 1) // number of (least significant) key bits that survives leftshift by SIZESHIFT #define KEYBITS (64-SIZESHIFT) #define KEYMASK ((1L << KEYBITS) - 1) #define MAXDRIFT (1L << (KEYBITS - IDXSHIFT)) class cuckoo_hash { public: u64 *cuckoo; cuckoo_hash() { cuckoo = (u64 *)calloc(CUCKOO_SIZE, sizeof(u64)); assert(cuckoo != 0); } ~cuckoo_hash() { free(cuckoo); } void set(node_t u, node_t v) { u64 niew = (u64)u << SIZESHIFT | v; for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) { #ifdef ATOMIC u64 old = 0; if (cuckoo[ui].compare_exchange_strong(old, niew, std::memory_order_relaxed)) return; if ((old >> SIZESHIFT) == (u & KEYMASK)) { cuckoo[ui].store(niew, std::memory_order_relaxed); #else u64 old = cuckoo[ui]; if (old == 0 || (old >> SIZESHIFT) == (u & KEYMASK)) { cuckoo[ui] = niew; #endif return; } } } node_t operator[](node_t u) const { for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) { #ifdef ATOMIC u64 cu = cuckoo[ui].load(std::memory_order_relaxed); #else u64 cu = cuckoo[ui]; #endif if (!cu) return 0; if ((cu >> SIZESHIFT) == (u & KEYMASK)) { assert(((ui - (u >> IDXSHIFT)) & CUCKOO_MASK) < MAXDRIFT); return (node_t)(cu & (SIZE-1)); } } } }; class cuckoo_ctx { public: siphash_keys sip_keys; shrinkingset alive; twice_set nonleaf; int nthreads; cuckoo_ctx(const u32 n_threads) { nthreads = n_threads; } void setheadernonce(char* headernonce, const u32 nonce) { ((u32 *)headernonce)[HEADERLEN/sizeof(u32)-1] = htole32(nonce); // place nonce at end setheader(&sip_keys, headernonce); } }; __global__ void count_node_deg(cuckoo_ctx *ctx, u32 uorv, u32 part) { shrinkingset &alive = ctx->alive; twice_set &nonleaf = ctx->nonleaf; siphash_keys sip_keys = ctx->sip_keys; // local copy sip context; 2.5% speed gain int id = blockIdx.x * blockDim.x + threadIdx.x; for (nonce_t block = id*32; block < HALFSIZE; block += ctx->nthreads*32) { u32 alive32 = alive.block(block); for (nonce_t nonce = block-1; alive32; ) { // -1 compensates for 1-based ffs u32 ffs = __ffs(alive32); nonce += ffs; alive32 >>= ffs; node_t u = dipnode(sip_keys, nonce, uorv); if ((u & PART_MASK) == part) { nonleaf.set(u >> PART_BITS); } } } } __global__ void kill_leaf_edges(cuckoo_ctx *ctx, u32 uorv, u32 part) { shrinkingset &alive = ctx->alive; twice_set &nonleaf = ctx->nonleaf; siphash_keys sip_keys = ctx->sip_keys; int id = 
blockIdx.x * blockDim.x + threadIdx.x; for (nonce_t block = id*32; block < HALFSIZE; block += ctx->nthreads*32) { u32 alive32 = alive.block(block); for (nonce_t nonce = block-1; alive32; ) { // -1 compensates for 1-based ffs u32 ffs = __ffs(alive32); nonce += ffs; alive32 >>= ffs; node_t u = dipnode(sip_keys, nonce, uorv); if ((u & PART_MASK) == part) { if (!nonleaf.test(u >> PART_BITS)) { alive.reset(nonce); } } } } } u32 path(cuckoo_hash &cuckoo, node_t u, node_t *us) { u32 nu; for (nu = 0; u; u = cuckoo[u]) { if (nu >= MAXPATHLEN) { while (nu-- && us[nu] != u) ; if (nu == ~0) printf("maximum path length exceeded\n"); else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu); exit(0); } us[nu++] = u; } return nu-1; } typedef std::pair<node_t,node_t> edge; #include <unistd.h> int main(int argc, char **argv) { int nthreads = 16384; int trims = 32; int tpb = 0; int nonce = 0; int range = 1; const char *header = ""; int c; while ((c = getopt (argc, argv, "h:n:m:r:t:p:")) != -1) { switch (c) { case 'h': header = optarg; break; case 'n': nonce = atoi(optarg); break; case 'm': trims = atoi(optarg); break; case 't': nthreads = atoi(optarg); break; case 'p': tpb = atoi(optarg); break; case 'r': range = atoi(optarg); break; } } if (!tpb) // if not set, then default threads per block to roughly square root of threads for (tpb = 1; tpb*tpb < nthreads; tpb *= 2) ; printf("Looking for %d-cycle on cuckoo%d(\"%s\",%d", PROOFSIZE, SIZESHIFT, header, nonce); if (range > 1) printf("-%d", nonce+range-1); printf(") with 50%% edges, %d trims, %d threads %d per block\n", trims, nthreads, tpb); cuckoo_ctx ctx(nthreads); char headernonce[HEADERLEN]; u32 hdrlen = strlen(header); memcpy(headernonce, header, hdrlen); memset(headernonce+hdrlen, 0, sizeof(headernonce)-hdrlen); u64 edgeBytes = HALFSIZE/8, nodeBytes = TWICE_WORDS*sizeof(u32); checkCudaErrors(hipMalloc((void**)&ctx.alive.bits, edgeBytes)); checkCudaErrors(hipMalloc((void**)&ctx.nonleaf.bits, nodeBytes)); int edgeUnit=0, nodeUnit=0; u64 eb = edgeBytes, nb = nodeBytes; for (; eb >= 1024; eb>>=10) edgeUnit++; for (; nb >= 1024; nb>>=10) nodeUnit++; printf("Using %d%cB edge and %d%cB node memory.\n", (int)eb, " KMGT"[edgeUnit], (int)nb, " KMGT"[nodeUnit]); cuckoo_ctx *device_ctx; checkCudaErrors(hipMalloc((void**)&device_ctx, sizeof(cuckoo_ctx))); hipEvent_t start, stop; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&stop)); for (int r = 0; r < range; r++) { hipEventRecord(start, NULL); checkCudaErrors(hipMemset(ctx.alive.bits, 0, edgeBytes)); ctx.setheadernonce(headernonce, nonce + r); hipMemcpy(device_ctx, &ctx, sizeof(cuckoo_ctx), hipMemcpyHostToDevice); for (u32 round=0; round < trims; round++) { for (u32 uorv = 0; uorv < 2; uorv++) { for (u32 part = 0; part <= PART_MASK; part++) { checkCudaErrors(hipMemset(ctx.nonleaf.bits, 0, nodeBytes)); hipLaunchKernelGGL(( count_node_deg), dim3(nthreads/tpb),dim3(tpb) , 0, 0, device_ctx, uorv, part); hipLaunchKernelGGL(( kill_leaf_edges), dim3(nthreads/tpb),dim3(tpb) , 0, 0, device_ctx, uorv, part); } } } u64 *bits; bits = (u64 *)calloc(HALFSIZE/64, sizeof(u64)); assert(bits != 0); hipMemcpy(bits, ctx.alive.bits, (HALFSIZE/64) * sizeof(u64), hipMemcpyDeviceToHost); hipEventRecord(stop, NULL); hipEventSynchronize(stop); float duration; hipEventElapsedTime(&duration, start, stop); u32 cnt = 0; for (int i = 0; i < HALFSIZE/64; i++) cnt += __builtin_popcountll(~bits[i]); u32 load = (u32)(100L * cnt / CUCKOO_SIZE); printf("nonce %d: %d trims completed in %.3f seconds final load %d%%\n", nonce+r, 
trims, duration / 1000.0f, load); if (load >= 90) { printf("overloaded! exiting..."); exit(0); } cuckoo_hash &cuckoo = *(new cuckoo_hash()); node_t us[MAXPATHLEN], vs[MAXPATHLEN]; for (nonce_t block = 0; block < HALFSIZE; block += 64) { u64 alive64 = ~bits[block/64]; for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs u32 ffs = __builtin_ffsll(alive64); nonce += ffs; alive64 >>= ffs; node_t u0=sipnode(&ctx.sip_keys, nonce, 0), v0=sipnode(&ctx.sip_keys, nonce, 1); if (u0) { u32 nu = path(cuckoo, u0, us), nv = path(cuckoo, v0, vs); if (us[nu] == vs[nv]) { u32 min = nu < nv ? nu : nv; for (nu -= min, nv -= min; us[nu] != vs[nv]; nu++, nv++) ; u32 len = nu + nv + 1; printf("%4d-cycle found at %d:%d%%\n", len, 0, (u32)(nonce*100L/HALFSIZE)); if (len == PROOFSIZE) { printf("Solution"); std::set<edge> cycle; u32 n = 0; cycle.insert(edge(*us, *vs)); while (nu--) cycle.insert(edge(us[(nu+1)&~1], us[nu|1])); // u's in even position; v's in odd while (nv--) cycle.insert(edge(vs[nv|1], vs[(nv+1)&~1])); // u's in odd position; v's in even for (nonce_t blk = 0; blk < HALFSIZE; blk += 64) { u64 alv64 = ~bits[blk/64]; for (nonce_t nce = blk-1; alv64; ) { // -1 compensates for 1-based ffs u32 ffs = __builtin_ffsll(alv64); nce += ffs; alv64 >>= ffs; edge e(sipnode(&ctx.sip_keys, nce, 0), sipnode(&ctx.sip_keys, nce, 1)); if (cycle.find(e) != cycle.end()) { printf(" %jx", (uintmax_t)nce); if (PROOFSIZE > 2) cycle.erase(e); n++; } if (ffs & 64) break; // can't shift by 64 } } assert(n==PROOFSIZE); printf("\n"); } } else if (nu < nv) { while (nu--) cuckoo.set(us[nu+1], us[nu]); cuckoo.set(u0, v0); } else { while (nv--) cuckoo.set(vs[nv+1], vs[nv]); cuckoo.set(v0, u0); } } if (ffs & 64) break; // can't shift by 64 } } } checkCudaErrors(hipFree(ctx.alive.bits)); checkCudaErrors(hipFree(ctx.nonleaf.bits)); return 0; }
ad8bc2c4bf759c739a1d937e4b6500b69c617103.cu
// Cuckoo Cycle, a memory-hard proof-of-work // Copyright (c) 2013-2016 John Tromp // The edge-trimming memory optimization is due to Dave Andersen // http://da-data.blogspot.com/2014/03/a-public-review-of-cuckoo-cycle.html #include <stdint.h> #include <string.h> #include "cuckoo.h" #if SIZESHIFT <= 32 typedef u32 nonce_t; typedef u32 node_t; #else typedef u64 nonce_t; typedef u64 node_t; #endif #include <openssl/sha.h> // d(evice s)ipnode #if (__CUDA_ARCH__ >= 320) // redefine ROTL to use funnel shifter, 3% speed gain static __device__ __forceinline__ uint2 operator^ (uint2 a, uint2 b) { return make_uint2(a.x ^ b.x, a.y ^ b.y); } static __device__ __forceinline__ void operator^= (uint2 &a, uint2 b) { a.x ^= b.x, a.y ^= b.y; } static __device__ __forceinline__ void operator+= (uint2 &a, uint2 b) { asm("{\n\tadd.cc.u32 %0,%2,%4;\n\taddc.u32 %1,%3,%5;\n\t}\n\t" : "=r"(a.x), "=r"(a.y) : "r"(a.x), "r"(a.y), "r"(b.x), "r"(b.y)); } #undef ROTL __inline__ __device__ uint2 ROTL(const uint2 a, const int offset) { uint2 result; if (offset >= 32) { asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.x), "r"(a.y), "r"(offset)); asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.y), "r"(a.x), "r"(offset)); } else { asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.x) : "r"(a.y), "r"(a.x), "r"(offset)); asm("shf.l.wrap.b32 %0, %1, %2, %3;" : "=r"(result.y) : "r"(a.x), "r"(a.y), "r"(offset)); } return result; } __device__ __forceinline__ uint2 vectorize(const uint64_t x) { uint2 result; asm("mov.b64 {%0,%1},%2; \n\t" : "=r"(result.x), "=r"(result.y) : "l"(x)); return result; } __device__ __forceinline__ uint64_t devectorize(uint2 x) { uint64_t result; asm("mov.b64 %0,{%1,%2}; \n\t" : "=l"(result) : "r"(x.x), "r"(x.y)); return result; } __device__ node_t dipnode(siphash_keys &keys, nonce_t nce, u32 uorv) { uint2 nonce = vectorize(2*nce + uorv); uint2 v0 = vectorize(keys.k0 ^ 0x736f6d6570736575ULL), v1 = vectorize(keys.k1 ^ 0x646f72616e646f6dULL), v2 = vectorize(keys.k0 ^ 0x6c7967656e657261ULL), v3 = vectorize(keys.k1 ^ 0x7465646279746573ULL) ^ nonce; SIPROUND; SIPROUND; v0 ^= nonce; v2 ^= vectorize(0xff); SIPROUND; SIPROUND; SIPROUND; SIPROUND; return devectorize(v0 ^ v1 ^ v2 ^ v3) & NODEMASK; } #else __device__ node_t dipnode(siphash_keys &keys, nonce_t nce, u32 uorv) { u64 nonce = 2*nce + uorv; u64 v0 = keys.k0 ^ 0x736f6d6570736575ULL, v1 = keys.k0 ^ 0x646f72616e646f6dULL, v2 = keys.k0 ^ 0x6c7967656e657261ULL, v3 = keys.k0 ^ 0x7465646279746573ULL ^ nonce; SIPROUND; SIPROUND; v0 ^= nonce; v2 ^= 0xff; SIPROUND; SIPROUND; SIPROUND; SIPROUND; return (v0 ^ v1 ^ v2 ^ v3) & NODEMASK; } #endif #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <set> // algorithm parameters #ifndef PART_BITS // #bits used to partition edge set processing to save memory // a value of 0 does no partitioning and is fastest // a value of 1 partitions in two, making twice_set the // same size as shrinkingset at about 33% slowdown // higher values are not that interesting #define PART_BITS 0 #endif #ifndef IDXSHIFT // we want sizeof(cuckoo_hash) == sizeof(twice_set), so // CUCKOO_SIZE * sizeof(u64) == TWICE_WORDS * sizeof(u32) // CUCKOO_SIZE * 2 == TWICE_WORDS // (SIZE >> IDXSHIFT) * 2 == 2 * ONCE_BITS / 32 // SIZE >> IDXSHIFT == HALFSIZE >> PART_BITS >> 5 // IDXSHIFT == 1 + PART_BITS + 5 #define IDXSHIFT (PART_BITS + 6) #endif // grow with cube root of size, hardly affected by trimming #define MAXPATHLEN (8 << (SIZESHIFT/3)) #define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); } 
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } // set that starts out full and gets reset by threads on disjoint words class shrinkingset { public: u32 *bits; __device__ void reset(nonce_t n) { bits[n/32] |= 1 << (n%32); } __device__ bool test(node_t n) const { return !((bits[n/32] >> (n%32)) & 1); } __device__ u32 block(node_t n) const { return ~bits[n/32]; } }; #define PART_MASK ((1 << PART_BITS) - 1) #define ONCE_BITS (HALFSIZE >> PART_BITS) #define TWICE_WORDS ((2 * ONCE_BITS) / 32) class twice_set { public: u32 *bits; __device__ void reset() { memset(bits, 0, TWICE_WORDS * sizeof(u32)); } __device__ void set(node_t u) { node_t idx = u/16; u32 bit = 1 << (2 * (u%16)); u32 old = atomicOr(&bits[idx], bit); u32 bit2 = bit<<1; if ((old & (bit2|bit)) == bit) atomicOr(&bits[idx], bit2); } __device__ u32 test(node_t u) const { return (bits[u/16] >> (2 * (u%16))) & 2; } }; #define CUCKOO_SIZE (SIZE >> IDXSHIFT) #define CUCKOO_MASK (CUCKOO_SIZE - 1) // number of (least significant) key bits that survives leftshift by SIZESHIFT #define KEYBITS (64-SIZESHIFT) #define KEYMASK ((1L << KEYBITS) - 1) #define MAXDRIFT (1L << (KEYBITS - IDXSHIFT)) class cuckoo_hash { public: u64 *cuckoo; cuckoo_hash() { cuckoo = (u64 *)calloc(CUCKOO_SIZE, sizeof(u64)); assert(cuckoo != 0); } ~cuckoo_hash() { free(cuckoo); } void set(node_t u, node_t v) { u64 niew = (u64)u << SIZESHIFT | v; for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) { #ifdef ATOMIC u64 old = 0; if (cuckoo[ui].compare_exchange_strong(old, niew, std::memory_order_relaxed)) return; if ((old >> SIZESHIFT) == (u & KEYMASK)) { cuckoo[ui].store(niew, std::memory_order_relaxed); #else u64 old = cuckoo[ui]; if (old == 0 || (old >> SIZESHIFT) == (u & KEYMASK)) { cuckoo[ui] = niew; #endif return; } } } node_t operator[](node_t u) const { for (node_t ui = u >> IDXSHIFT; ; ui = (ui+1) & CUCKOO_MASK) { #ifdef ATOMIC u64 cu = cuckoo[ui].load(std::memory_order_relaxed); #else u64 cu = cuckoo[ui]; #endif if (!cu) return 0; if ((cu >> SIZESHIFT) == (u & KEYMASK)) { assert(((ui - (u >> IDXSHIFT)) & CUCKOO_MASK) < MAXDRIFT); return (node_t)(cu & (SIZE-1)); } } } }; class cuckoo_ctx { public: siphash_keys sip_keys; shrinkingset alive; twice_set nonleaf; int nthreads; cuckoo_ctx(const u32 n_threads) { nthreads = n_threads; } void setheadernonce(char* headernonce, const u32 nonce) { ((u32 *)headernonce)[HEADERLEN/sizeof(u32)-1] = htole32(nonce); // place nonce at end setheader(&sip_keys, headernonce); } }; __global__ void count_node_deg(cuckoo_ctx *ctx, u32 uorv, u32 part) { shrinkingset &alive = ctx->alive; twice_set &nonleaf = ctx->nonleaf; siphash_keys sip_keys = ctx->sip_keys; // local copy sip context; 2.5% speed gain int id = blockIdx.x * blockDim.x + threadIdx.x; for (nonce_t block = id*32; block < HALFSIZE; block += ctx->nthreads*32) { u32 alive32 = alive.block(block); for (nonce_t nonce = block-1; alive32; ) { // -1 compensates for 1-based ffs u32 ffs = __ffs(alive32); nonce += ffs; alive32 >>= ffs; node_t u = dipnode(sip_keys, nonce, uorv); if ((u & PART_MASK) == part) { nonleaf.set(u >> PART_BITS); } } } } __global__ void kill_leaf_edges(cuckoo_ctx *ctx, u32 uorv, u32 part) { shrinkingset &alive = ctx->alive; twice_set &nonleaf = ctx->nonleaf; siphash_keys sip_keys = ctx->sip_keys; int id = blockIdx.x * blockDim.x + threadIdx.x; for (nonce_t block = id*32; block < HALFSIZE; 
block += ctx->nthreads*32) { u32 alive32 = alive.block(block); for (nonce_t nonce = block-1; alive32; ) { // -1 compensates for 1-based ffs u32 ffs = __ffs(alive32); nonce += ffs; alive32 >>= ffs; node_t u = dipnode(sip_keys, nonce, uorv); if ((u & PART_MASK) == part) { if (!nonleaf.test(u >> PART_BITS)) { alive.reset(nonce); } } } } } u32 path(cuckoo_hash &cuckoo, node_t u, node_t *us) { u32 nu; for (nu = 0; u; u = cuckoo[u]) { if (nu >= MAXPATHLEN) { while (nu-- && us[nu] != u) ; if (nu == ~0) printf("maximum path length exceeded\n"); else printf("illegal % 4d-cycle\n", MAXPATHLEN-nu); exit(0); } us[nu++] = u; } return nu-1; } typedef std::pair<node_t,node_t> edge; #include <unistd.h> int main(int argc, char **argv) { int nthreads = 16384; int trims = 32; int tpb = 0; int nonce = 0; int range = 1; const char *header = ""; int c; while ((c = getopt (argc, argv, "h:n:m:r:t:p:")) != -1) { switch (c) { case 'h': header = optarg; break; case 'n': nonce = atoi(optarg); break; case 'm': trims = atoi(optarg); break; case 't': nthreads = atoi(optarg); break; case 'p': tpb = atoi(optarg); break; case 'r': range = atoi(optarg); break; } } if (!tpb) // if not set, then default threads per block to roughly square root of threads for (tpb = 1; tpb*tpb < nthreads; tpb *= 2) ; printf("Looking for %d-cycle on cuckoo%d(\"%s\",%d", PROOFSIZE, SIZESHIFT, header, nonce); if (range > 1) printf("-%d", nonce+range-1); printf(") with 50%% edges, %d trims, %d threads %d per block\n", trims, nthreads, tpb); cuckoo_ctx ctx(nthreads); char headernonce[HEADERLEN]; u32 hdrlen = strlen(header); memcpy(headernonce, header, hdrlen); memset(headernonce+hdrlen, 0, sizeof(headernonce)-hdrlen); u64 edgeBytes = HALFSIZE/8, nodeBytes = TWICE_WORDS*sizeof(u32); checkCudaErrors(cudaMalloc((void**)&ctx.alive.bits, edgeBytes)); checkCudaErrors(cudaMalloc((void**)&ctx.nonleaf.bits, nodeBytes)); int edgeUnit=0, nodeUnit=0; u64 eb = edgeBytes, nb = nodeBytes; for (; eb >= 1024; eb>>=10) edgeUnit++; for (; nb >= 1024; nb>>=10) nodeUnit++; printf("Using %d%cB edge and %d%cB node memory.\n", (int)eb, " KMGT"[edgeUnit], (int)nb, " KMGT"[nodeUnit]); cuckoo_ctx *device_ctx; checkCudaErrors(cudaMalloc((void**)&device_ctx, sizeof(cuckoo_ctx))); cudaEvent_t start, stop; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&stop)); for (int r = 0; r < range; r++) { cudaEventRecord(start, NULL); checkCudaErrors(cudaMemset(ctx.alive.bits, 0, edgeBytes)); ctx.setheadernonce(headernonce, nonce + r); cudaMemcpy(device_ctx, &ctx, sizeof(cuckoo_ctx), cudaMemcpyHostToDevice); for (u32 round=0; round < trims; round++) { for (u32 uorv = 0; uorv < 2; uorv++) { for (u32 part = 0; part <= PART_MASK; part++) { checkCudaErrors(cudaMemset(ctx.nonleaf.bits, 0, nodeBytes)); count_node_deg<<<nthreads/tpb,tpb >>>(device_ctx, uorv, part); kill_leaf_edges<<<nthreads/tpb,tpb >>>(device_ctx, uorv, part); } } } u64 *bits; bits = (u64 *)calloc(HALFSIZE/64, sizeof(u64)); assert(bits != 0); cudaMemcpy(bits, ctx.alive.bits, (HALFSIZE/64) * sizeof(u64), cudaMemcpyDeviceToHost); cudaEventRecord(stop, NULL); cudaEventSynchronize(stop); float duration; cudaEventElapsedTime(&duration, start, stop); u32 cnt = 0; for (int i = 0; i < HALFSIZE/64; i++) cnt += __builtin_popcountll(~bits[i]); u32 load = (u32)(100L * cnt / CUCKOO_SIZE); printf("nonce %d: %d trims completed in %.3f seconds final load %d%%\n", nonce+r, trims, duration / 1000.0f, load); if (load >= 90) { printf("overloaded! 
exiting..."); exit(0); } cuckoo_hash &cuckoo = *(new cuckoo_hash()); node_t us[MAXPATHLEN], vs[MAXPATHLEN]; for (nonce_t block = 0; block < HALFSIZE; block += 64) { u64 alive64 = ~bits[block/64]; for (nonce_t nonce = block-1; alive64; ) { // -1 compensates for 1-based ffs u32 ffs = __builtin_ffsll(alive64); nonce += ffs; alive64 >>= ffs; node_t u0=sipnode(&ctx.sip_keys, nonce, 0), v0=sipnode(&ctx.sip_keys, nonce, 1); if (u0) { u32 nu = path(cuckoo, u0, us), nv = path(cuckoo, v0, vs); if (us[nu] == vs[nv]) { u32 min = nu < nv ? nu : nv; for (nu -= min, nv -= min; us[nu] != vs[nv]; nu++, nv++) ; u32 len = nu + nv + 1; printf("%4d-cycle found at %d:%d%%\n", len, 0, (u32)(nonce*100L/HALFSIZE)); if (len == PROOFSIZE) { printf("Solution"); std::set<edge> cycle; u32 n = 0; cycle.insert(edge(*us, *vs)); while (nu--) cycle.insert(edge(us[(nu+1)&~1], us[nu|1])); // u's in even position; v's in odd while (nv--) cycle.insert(edge(vs[nv|1], vs[(nv+1)&~1])); // u's in odd position; v's in even for (nonce_t blk = 0; blk < HALFSIZE; blk += 64) { u64 alv64 = ~bits[blk/64]; for (nonce_t nce = blk-1; alv64; ) { // -1 compensates for 1-based ffs u32 ffs = __builtin_ffsll(alv64); nce += ffs; alv64 >>= ffs; edge e(sipnode(&ctx.sip_keys, nce, 0), sipnode(&ctx.sip_keys, nce, 1)); if (cycle.find(e) != cycle.end()) { printf(" %jx", (uintmax_t)nce); if (PROOFSIZE > 2) cycle.erase(e); n++; } if (ffs & 64) break; // can't shift by 64 } } assert(n==PROOFSIZE); printf("\n"); } } else if (nu < nv) { while (nu--) cuckoo.set(us[nu+1], us[nu]); cuckoo.set(u0, v0); } else { while (nv--) cuckoo.set(vs[nv+1], vs[nv]); cuckoo.set(v0, u0); } } if (ffs & 64) break; // can't shift by 64 } } } checkCudaErrors(cudaFree(ctx.alive.bits)); checkCudaErrors(cudaFree(ctx.nonleaf.bits)); return 0; }
4fe9293e60a738f779e3f5d505ba0962f5cc9db1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" # include <stdio.h> # include "timer.hpp" # include <iostream> # include <numeric> # include <vector> #include <fstream> #include <string> #include "rocblas.h" __global__ void dot_pro(double *x, double *tmp, int N) { unsigned int ind = threadIdx.x + blockDim.x*blockIdx.x; unsigned int str = blockDim.x*gridDim.x; __shared__ double cache[256]; double tmpsum = 0.0; while(ind < N) { tmpsum += x[ind]*x[ind]; ind += str; } cache[threadIdx.x] = tmpsum; __syncthreads(); for(int i = blockDim.x/2; i>0; i/=2) { __syncthreads(); if(threadIdx.x < i) { cache[threadIdx.x] += cache[threadIdx.x + i]; } } if(threadIdx.x == 0) { tmp[blockIdx.x] = cache[0]; } } __global__ void mmsszz(double *x, double *dot, int N) { unsigned int ind = threadIdx.x + blockDim.x*blockIdx.x; unsigned int str = blockDim.x*gridDim.x; __shared__ double sumofallelements[256]; __shared__ double einsnorm[256]; __shared__ double zweisnorm[256]; __shared__ double maxnorm[256]; __shared__ double zeros[256]; double sum = 0; double einssum = 0; double zweissum = 0; double max = 0; double highNum = 0; double count = 0; while(ind < N) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { sum += x[i]; einssum += std::abs(x[i]); zweissum += x[i] * x[i]; if (x[i] == 0) { count = count + 1; } if (std::abs(x[i]) > highNum) { highNum = std::abs(x[i]); } } max = highNum; ind += str; } sumofallelements[threadIdx.x] = sum; einsnorm[threadIdx.x] = einssum; zweisnorm[threadIdx.x] = zweissum; maxnorm[threadIdx.x] = max; zeros[threadIdx.x] = count; __syncthreads(); for(int i = blockDim.x/2; i>0; i/=2) { __syncthreads(); if(threadIdx.x < i) { sumofallelements[threadIdx.x] += sumofallelements[threadIdx.x + i]; einsnorm[threadIdx.x] += einsnorm[threadIdx.x + i]; zweisnorm[threadIdx.x] += zweisnorm[threadIdx.x + i]; maxnorm[threadIdx.x] += maxnorm[threadIdx.x + i]; zeros[threadIdx.x] += zeros[threadIdx.x + i]; } } if(threadIdx.x == 0) { double highNum = 0; for (int i = 0; i < 256; i++) { if (std::abs(maxnorm[i]) > highNum) { highNum = std::abs(maxnorm[i]); } } atomicAdd(dot + 0,sumofallelements[0]); atomicAdd(dot + 1,einsnorm[0]); atomicAdd(dot + 2,std::sqrt(zweisnorm[0])); dot[3] = highNum; atomicAdd(dot + 4,zeros[0]); } } template <typename T> void toCSV(std::fstream& csv, T* array, int size) { csv << size; for (int i = 0; i < size; ++i) { csv << ";" << array[i]; } csv << std::endl; } int main (void) { std::vector<int> vec_Ns{100, 1000, 10000, 100000, 1000000,10000000, 100000000}; double *x, *cuda_x; double *result, *cuda_result; double *dot, *cuda_dot; double *cublas_dot, *cuda_cublas_dot; Timer timer; int anz = 100; std::vector<double> ownruntime; int shift = 0; std::fstream csv_times; std::string csv_times_name = "shared_times.csv"; csv_times.open(csv_times_name, std::fstream::out | std::fstream::trunc); std::string header = "N;time_shared;time_dot"; // to csv file csv_times << header << std::endl; for (int& N : vec_Ns) { // // init CUBLAS // hipblasHandle_t h; hipblasCreate(&h); hipblasSetPointerMode(h, HIPBLAS_POINTER_MODE_DEVICE); const size_t sz = sizeof(double) * (size_t)N; // // generates a random object: // srand (time(NULL)); std::cout << "N: " << N << std::endl; // // allocate host memory: // std::cout << "Allocating host arrays..." 
<< std::endl; x = (double*)malloc(sizeof(double) * N); result = (double*)malloc(sizeof(double) * 7); for (size_t i=0; i<7; ++i) { result[i] = 0; } for (size_t i=0; i<N; ++i) { x[i] = rand() % 10 + 1;; } // // allocate device memory // std::cout << "Allocating CUDA arrays..." << std::endl; hipMalloc((void **)(&cuda_x), sz); hipMalloc((void **)(&cuda_result), 7*sizeof(double)); hipMalloc((void **)(&cuda_cublas_dot), sizeof(double)); hipMemcpy(cuda_x, x, N*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(cuda_result, result, 7*sizeof(double), hipMemcpyHostToDevice); hipDeviceSynchronize(); timer.reset(); for (int i = 0; i < anz; i++) { hipLaunchKernelGGL(( mmsszz), dim3(256), dim3(256), 0, 0, cuda_x, cuda_result, N); } hipDeviceSynchronize(); hipMemcpy(result,cuda_result, 7*sizeof(double), hipMemcpyDeviceToHost); ownruntime.push_back (1000*timer.get()/anz); hipDeviceSynchronize(); timer.reset(); for (int i = 0; i < anz; i++) { hipLaunchKernelGGL(( dot_pro), dim3(256), dim3(256), 0, 0, cuda_x, cuda_dot, N); } hipDeviceSynchronize(); hipMemcpy(dot,cuda_dot, sizeof(double), hipMemcpyDeviceToHost); ownruntime.push_back (1000*timer.get()/anz); timer.reset(); for (int i = 0; i < anz; i++) { //hipblasSasum(h,N,cuda_x,1,cuda_x,cuda_cublas_dot); } hipDeviceSynchronize(); //hipMemcpy(dot,cuda_dot, sizeof(double), hipMemcpyDeviceToHost); ownruntime.push_back (1000*timer.get()/anz); std::cout << "Time: " << ownruntime[shift] << std::endl; std::cout << "Sum of all elements: " << result[0] << std::endl; std::cout << "1Norm: " << result[1] << std::endl; std::cout << "2Norm: " << result[2] << std::endl; std::cout << "MaxNorm: " << result[3] << std::endl; std::cout << "nomOfzeros: " << result[4] << std::endl; std::cout << " " << std::endl; std::cout << "Time DotProd: " << ownruntime[shift + 1] << std::endl; std::string sep = ";"; csv_times << N << sep << ownruntime[shift] << sep << ownruntime[shift + 1] << std::endl; hipFree(cuda_x); hipFree(cuda_result); free(x); free(result); std::cout << " " << std::endl; std::cout << " " << std::endl; std::cout << " " << std::endl; std::cout << " " << std::endl; shift = shift + 1; } csv_times.close(); std::cout << "\nRuntimes in csv form can be found here\nhttps://gtx1080.360252.org/2020/ex6/" + csv_times_name << std::endl; return EXIT_SUCCESS; }
4fe9293e60a738f779e3f5d505ba0962f5cc9db1.cu
# include <stdio.h> # include "timer.hpp" # include <iostream> # include <numeric> # include <vector> #include <fstream> #include <string> #include "cublas_v2.h" __global__ void dot_pro(double *x, double *tmp, int N) { unsigned int ind = threadIdx.x + blockDim.x*blockIdx.x; unsigned int str = blockDim.x*gridDim.x; __shared__ double cache[256]; double tmpsum = 0.0; while(ind < N) { tmpsum += x[ind]*x[ind]; ind += str; } cache[threadIdx.x] = tmpsum; __syncthreads(); for(int i = blockDim.x/2; i>0; i/=2) { __syncthreads(); if(threadIdx.x < i) { cache[threadIdx.x] += cache[threadIdx.x + i]; } } if(threadIdx.x == 0) { tmp[blockIdx.x] = cache[0]; } } __global__ void mmsszz(double *x, double *dot, int N) { unsigned int ind = threadIdx.x + blockDim.x*blockIdx.x; unsigned int str = blockDim.x*gridDim.x; __shared__ double sumofallelements[256]; __shared__ double einsnorm[256]; __shared__ double zweisnorm[256]; __shared__ double maxnorm[256]; __shared__ double zeros[256]; double sum = 0; double einssum = 0; double zweissum = 0; double max = 0; double highNum = 0; double count = 0; while(ind < N) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) { sum += x[i]; einssum += std::abs(x[i]); zweissum += x[i] * x[i]; if (x[i] == 0) { count = count + 1; } if (std::abs(x[i]) > highNum) { highNum = std::abs(x[i]); } } max = highNum; ind += str; } sumofallelements[threadIdx.x] = sum; einsnorm[threadIdx.x] = einssum; zweisnorm[threadIdx.x] = zweissum; maxnorm[threadIdx.x] = max; zeros[threadIdx.x] = count; __syncthreads(); for(int i = blockDim.x/2; i>0; i/=2) { __syncthreads(); if(threadIdx.x < i) { sumofallelements[threadIdx.x] += sumofallelements[threadIdx.x + i]; einsnorm[threadIdx.x] += einsnorm[threadIdx.x + i]; zweisnorm[threadIdx.x] += zweisnorm[threadIdx.x + i]; maxnorm[threadIdx.x] += maxnorm[threadIdx.x + i]; zeros[threadIdx.x] += zeros[threadIdx.x + i]; } } if(threadIdx.x == 0) { double highNum = 0; for (int i = 0; i < 256; i++) { if (std::abs(maxnorm[i]) > highNum) { highNum = std::abs(maxnorm[i]); } } atomicAdd(dot + 0,sumofallelements[0]); atomicAdd(dot + 1,einsnorm[0]); atomicAdd(dot + 2,std::sqrt(zweisnorm[0])); dot[3] = highNum; atomicAdd(dot + 4,zeros[0]); } } template <typename T> void toCSV(std::fstream& csv, T* array, int size) { csv << size; for (int i = 0; i < size; ++i) { csv << ";" << array[i]; } csv << std::endl; } int main (void) { std::vector<int> vec_Ns{100, 1000, 10000, 100000, 1000000,10000000, 100000000}; double *x, *cuda_x; double *result, *cuda_result; double *dot, *cuda_dot; double *cublas_dot, *cuda_cublas_dot; Timer timer; int anz = 100; std::vector<double> ownruntime; int shift = 0; std::fstream csv_times; std::string csv_times_name = "shared_times.csv"; csv_times.open(csv_times_name, std::fstream::out | std::fstream::trunc); std::string header = "N;time_shared;time_dot"; // to csv file csv_times << header << std::endl; for (int& N : vec_Ns) { // // init CUBLAS // cublasHandle_t h; cublasCreate(&h); cublasSetPointerMode(h, CUBLAS_POINTER_MODE_DEVICE); const size_t sz = sizeof(double) * (size_t)N; // // generates a random object: // srand (time(NULL)); std::cout << "N: " << N << std::endl; // // allocate host memory: // std::cout << "Allocating host arrays..." << std::endl; x = (double*)malloc(sizeof(double) * N); result = (double*)malloc(sizeof(double) * 7); for (size_t i=0; i<7; ++i) { result[i] = 0; } for (size_t i=0; i<N; ++i) { x[i] = rand() % 10 + 1;; } // // allocate device memory // std::cout << "Allocating CUDA arrays..." 
<< std::endl; cudaMalloc((void **)(&cuda_x), sz); cudaMalloc((void **)(&cuda_result), 7*sizeof(double)); cudaMalloc((void **)(&cuda_cublas_dot), sizeof(double)); cudaMemcpy(cuda_x, x, N*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(cuda_result, result, 7*sizeof(double), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); timer.reset(); for (int i = 0; i < anz; i++) { mmsszz<<<256, 256>>>(cuda_x, cuda_result, N); } cudaDeviceSynchronize(); cudaMemcpy(result,cuda_result, 7*sizeof(double), cudaMemcpyDeviceToHost); ownruntime.push_back (1000*timer.get()/anz); cudaDeviceSynchronize(); timer.reset(); for (int i = 0; i < anz; i++) { dot_pro<<<256, 256>>>(cuda_x, cuda_dot, N); } cudaDeviceSynchronize(); cudaMemcpy(dot,cuda_dot, sizeof(double), cudaMemcpyDeviceToHost); ownruntime.push_back (1000*timer.get()/anz); timer.reset(); for (int i = 0; i < anz; i++) { //cublasSasum(h,N,cuda_x,1,cuda_x,cuda_cublas_dot); } cudaDeviceSynchronize(); //cudaMemcpy(dot,cuda_dot, sizeof(double), cudaMemcpyDeviceToHost); ownruntime.push_back (1000*timer.get()/anz); std::cout << "Time: " << ownruntime[shift] << std::endl; std::cout << "Sum of all elements: " << result[0] << std::endl; std::cout << "1Norm: " << result[1] << std::endl; std::cout << "2Norm: " << result[2] << std::endl; std::cout << "MaxNorm: " << result[3] << std::endl; std::cout << "nomOfzeros: " << result[4] << std::endl; std::cout << " " << std::endl; std::cout << "Time DotProd: " << ownruntime[shift + 1] << std::endl; std::string sep = ";"; csv_times << N << sep << ownruntime[shift] << sep << ownruntime[shift + 1] << std::endl; cudaFree(cuda_x); cudaFree(cuda_result); free(x); free(result); std::cout << " " << std::endl; std::cout << " " << std::endl; std::cout << " " << std::endl; std::cout << " " << std::endl; shift = shift + 1; } csv_times.close(); std::cout << "\nRuntimes in csv form can be found here\nhttps://gtx1080.360252.org/2020/ex6/" + csv_times_name << std::endl; return EXIT_SUCCESS; }
c017e0c547b63b1154bc1c70254a0406b0c35516.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Simulate growing mesenchyme constrained by a planar wall #include <hiprand/hiprand_kernel.h> #include <thrust/execution_policy.h> #include <thrust/fill.h> #include <time.h> #include "../include/dtypes.cuh" #include "../include/inits.cuh" #include "../include/polarity.cuh" #include "../include/property.cuh" #include "../include/solvers.cuh" #include "../include/utils.cuh" #include "../include/vtk.cuh" const auto r_max = 1.0f; const auto mean_dist = 0.75f; const auto r_protrusion = 1.0f; const auto protrusion_strength = 0.15f; const auto prots_per_cell = 1; const auto n_0 = 500; const auto n_max = 100000; const auto dt = 0.1; auto n_time_steps = 500; auto skip_step = n_time_steps/100; auto update_prob = 0.5; auto prolif_rate = 0.005; enum Cell_types { wall_node, mesenchyme }; __device__ Cell_types* d_type; template<typename Pt> __device__ float wall_friction(Pt Xi, Pt r, float dist, int i, int j) { if (i == 0 or j == 0) return 0; if (dist < r_max) return 1; return 0; } __device__ float3 relu_force( float3 Xi, float3 r, float dist, int i, int j) { float3 dF{0}; // No one interacts with the wall node through pwints if(i==0 or j==0) return dF; if (i == j) return dF; if (dist > r_max) return dF; auto F = fmaxf(0.7 - dist, 0) - fmaxf(dist - 0.8, 0); dF.x += r.x * F / dist; dF.y += r.y * F / dist; dF.z += r.z * F / dist; return dF; } __global__ void proliferate(float rate, int n_cells, hiprandState_t* d_state, float3* d_X, float3* d_old_v, int* d_n_cells) { D_ASSERT(n_cells * rate <= n_max); auto i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= n_cells) return; // Dividing new cells is problematic! if(i == 0) return; // The wall node doesn't proliferate auto rnd = hiprand_uniform(&d_state[i]); if (rnd > rate) return; auto n = atomicAdd(d_n_cells, 1); auto theta = acosf(2. 
* hiprand_uniform(&d_state[i]) - 1); auto phi = hiprand_uniform(&d_state[i]) * 2 * M_PI; d_X[n].x = d_X[i].x + mean_dist / 4 * sinf(theta) * cosf(phi); d_X[n].y = d_X[i].y + mean_dist / 4 * sinf(theta) * sinf(phi); d_X[n].z = d_X[i].z + mean_dist / 4 * cosf(theta); d_type[n] = d_type[i]; d_old_v[n] = d_old_v[i]; } __global__ void update_protrusions_wall(const int n_cells, const Grid* __restrict__ d_grid, const float3* __restrict d_X, hiprandState_t* d_state, Link* d_link, float update_prob) { auto i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= (n_cells) * prots_per_cell) return; auto j = static_cast<int>((i + 0.5) / prots_per_cell); auto rand_nb_cube = d_grid->d_cube_id[j] + d_nhood[min(static_cast<int>(hiprand_uniform(&d_state[i]) * 27), 26)]; auto cells_in_cube = d_grid->d_cube_end[rand_nb_cube] - d_grid->d_cube_start[rand_nb_cube] + 1; if (cells_in_cube < 1) return; auto a = d_grid->d_point_id[j]; if (a==0) return; auto b = d_grid->d_point_id[d_grid->d_cube_start[rand_nb_cube] + min(static_cast<int>( hiprand_uniform(&d_state[i]) * cells_in_cube), cells_in_cube - 1)]; D_ASSERT(a >= 0); D_ASSERT(a < n_cells); D_ASSERT(b >= 0); D_ASSERT(b < n_cells); if (a == b) return; if (b == 0) return; auto new_r = d_X[a] - d_X[b]; auto new_dist = norm3df(new_r.x, new_r.y, new_r.z); if (new_dist > r_protrusion) return; auto link = &d_link[a * prots_per_cell + i % prots_per_cell]; auto not_initialized = link->a == link->b; auto old_r = d_X[link->a] - d_X[link->b]; auto old_dist = norm3df(old_r.x, old_r.y, old_r.z); auto noise = hiprand_uniform(&d_state[i]); auto updated = noise < update_prob; if (not_initialized or updated){ link->a = a; link->b = b; } } int main(int argc, const char* argv[]) { // Solution<float3, Grid_solver> cells{n_max}; Solution<float3, Gabriel_solver> cells{n_max}; *cells.h_n = n_0; cells.h_X[0].x = 0.0; cells.h_X[0].y = 0.0; cells.h_X[0].z = -mean_dist; random_sphere(0.5, cells, 1); for (auto i = 1; i < n_0; i++) { if(cells.h_X[i].z < 0.0f) cells.h_X[i].z *= -1.0f; } cells.copy_to_device(); Property<Cell_types> type{n_max}; hipMemcpyToSymbol(d_type, &type.d_prop, sizeof(d_type)); auto wall = [&](const int n, const float3* __restrict__ d_X, float3* d_dX){ return wall_forces<float3, xy_wall_relu_force>(n, d_X, d_dX, 0); }; type.h_prop[0] = wall_node; for (auto i = 1; i < *cells.h_n; i++) type.h_prop[i] = mesenchyme; type.copy_to_device(); cells.copy_to_device(); // Initial relaxation for (auto time_step = 0; time_step <= 100; time_step++) cells.take_step<relu_force, friction_on_background>(dt, wall); Links protrusions{n_max, protrusion_strength}; protrusions.set_d_n(n_0); auto interc_wall = [&](const int n, const float3* __restrict__ d_X, float3* d_dX){ return link_wall_forces<float3, linear_force, xy_wall_relu_force>(protrusions, n, d_X, d_dX, 0); }; Grid grid{n_max}; hiprandState_t* d_state; hipMalloc(&d_state, n_max * sizeof(hiprandState_t)); auto seed = time(NULL); hipLaunchKernelGGL(( setup_rand_states), dim3((n_max + 128 - 1) / 128), dim3(128), 0, 0, n_max, seed, d_state); // Simulate growth Vtk_output output{"growth_w_wall","output/", true}; for (auto time_step = 0; time_step <= n_time_steps; time_step++) { protrusions.set_d_n(cells.get_d_n() * prots_per_cell); grid.build(cells, r_protrusion); hipLaunchKernelGGL(( update_protrusions_wall), dim3((protrusions.get_d_n() + 32 - 1) / 32), dim3(32), 0, 0, cells.get_d_n(), grid.d_grid, cells.d_X, protrusions.d_state, protrusions.d_link, update_prob); cells.take_step<relu_force, wall_friction>(dt, interc_wall); 
hipLaunchKernelGGL(( proliferate), dim3((cells.get_d_n() + 128 - 1) / 128), dim3(128), 0, 0, prolif_rate, cells.get_d_n(), d_state, cells.d_X, cells.d_old_v, cells.d_n); if(time_step % skip_step == 0){ hipDeviceSynchronize(); cells.copy_to_host(); protrusions.copy_to_host(); type.copy_to_host(); output.write_positions(cells); output.write_links(protrusions); output.write_property(type); } } return 0; }
c017e0c547b63b1154bc1c70254a0406b0c35516.cu
// Simulate growing mesenchyme constrained by a planar wall
#include <curand_kernel.h>
#include <thrust/execution_policy.h>
#include <thrust/fill.h>
#include <time.h>

#include "../include/dtypes.cuh"
#include "../include/inits.cuh"
#include "../include/polarity.cuh"
#include "../include/property.cuh"
#include "../include/solvers.cuh"
#include "../include/utils.cuh"
#include "../include/vtk.cuh"

const auto r_max = 1.0f;
const auto mean_dist = 0.75f;
const auto r_protrusion = 1.0f;
const auto protrusion_strength = 0.15f;
const auto prots_per_cell = 1;
const auto n_0 = 500;
const auto n_max = 100000;
const auto dt = 0.1;
auto n_time_steps = 500;
auto skip_step = n_time_steps / 100;
auto update_prob = 0.5;
auto prolif_rate = 0.005;

enum Cell_types { wall_node, mesenchyme };

__device__ Cell_types* d_type;

template<typename Pt>
__device__ float wall_friction(Pt Xi, Pt r, float dist, int i, int j)
{
    if (i == 0 or j == 0) return 0;
    if (dist < r_max) return 1;
    return 0;
}

__device__ float3 relu_force(float3 Xi, float3 r, float dist, int i, int j)
{
    float3 dF{0};
    // No one interacts with the wall node through pairwise interactions
    if (i == 0 or j == 0) return dF;
    if (i == j) return dF;
    if (dist > r_max) return dF;

    auto F = fmaxf(0.7 - dist, 0) - fmaxf(dist - 0.8, 0);
    dF.x += r.x * F / dist;
    dF.y += r.y * F / dist;
    dF.z += r.z * F / dist;
    return dF;
}

__global__ void proliferate(float rate, int n_cells, curandState* d_state,
    float3* d_X, float3* d_old_v, int* d_n_cells)
{
    D_ASSERT(n_cells * rate <= n_max);
    auto i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n_cells) return;  // Dividing new cells is problematic!
    if (i == 0) return;        // The wall node doesn't proliferate

    auto rnd = curand_uniform(&d_state[i]);
    if (rnd > rate) return;

    auto n = atomicAdd(d_n_cells, 1);
    auto theta = acosf(2. * curand_uniform(&d_state[i]) - 1);
    auto phi = curand_uniform(&d_state[i]) * 2 * M_PI;
    d_X[n].x = d_X[i].x + mean_dist / 4 * sinf(theta) * cosf(phi);
    d_X[n].y = d_X[i].y + mean_dist / 4 * sinf(theta) * sinf(phi);
    d_X[n].z = d_X[i].z + mean_dist / 4 * cosf(theta);
    d_type[n] = d_type[i];
    d_old_v[n] = d_old_v[i];
}

__global__ void update_protrusions_wall(const int n_cells,
    const Grid* __restrict__ d_grid, const float3* __restrict__ d_X,
    curandState* d_state, Link* d_link, float update_prob)
{
    auto i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= (n_cells)*prots_per_cell) return;

    auto j = static_cast<int>((i + 0.5) / prots_per_cell);
    // Candidate partner: a random cell from a random neighbouring grid cube
    auto rand_nb_cube = d_grid->d_cube_id[j] +
        d_nhood[min(static_cast<int>(curand_uniform(&d_state[i]) * 27), 26)];
    auto cells_in_cube =
        d_grid->d_cube_end[rand_nb_cube] - d_grid->d_cube_start[rand_nb_cube] + 1;
    if (cells_in_cube < 1) return;

    auto a = d_grid->d_point_id[j];
    if (a == 0) return;
    auto b = d_grid->d_point_id[d_grid->d_cube_start[rand_nb_cube] +
        min(static_cast<int>(curand_uniform(&d_state[i]) * cells_in_cube),
            cells_in_cube - 1)];
    D_ASSERT(a >= 0);
    D_ASSERT(a < n_cells);
    D_ASSERT(b >= 0);
    D_ASSERT(b < n_cells);
    if (a == b) return;
    if (b == 0) return;

    auto new_r = d_X[a] - d_X[b];
    auto new_dist = norm3df(new_r.x, new_r.y, new_r.z);
    if (new_dist > r_protrusion) return;

    auto link = &d_link[a * prots_per_cell + i % prots_per_cell];
    auto not_initialized = link->a == link->b;
    auto old_r = d_X[link->a] - d_X[link->b];
    auto old_dist = norm3df(old_r.x, old_r.y, old_r.z);
    auto noise = curand_uniform(&d_state[i]);
    auto updated = noise < update_prob;
    if (not_initialized or updated) {
        link->a = a;
        link->b = b;
    }
}

int main(int argc, const char* argv[])
{
    // Solution<float3, Grid_solver> cells{n_max};
    Solution<float3, Gabriel_solver> cells{n_max};
    *cells.h_n = n_0;
    cells.h_X[0].x = 0.0;
    cells.h_X[0].y = 0.0;
    cells.h_X[0].z = -mean_dist;
    random_sphere(0.5, cells, 1);
    // Mirror the initial cells so all mesenchyme starts on one side of the wall
    for (auto i = 1; i < n_0; i++) {
        if (cells.h_X[i].z < 0.0f) cells.h_X[i].z *= -1.0f;
    }
    cells.copy_to_device();

    Property<Cell_types> type{n_max};
    cudaMemcpyToSymbol(d_type, &type.d_prop, sizeof(d_type));
    auto wall = [&](const int n, const float3* __restrict__ d_X, float3* d_dX) {
        return wall_forces<float3, xy_wall_relu_force>(n, d_X, d_dX, 0);
    };
    type.h_prop[0] = wall_node;
    for (auto i = 1; i < *cells.h_n; i++) type.h_prop[i] = mesenchyme;
    type.copy_to_device();
    cells.copy_to_device();

    // Initial relaxation
    for (auto time_step = 0; time_step <= 100; time_step++)
        cells.take_step<relu_force, friction_on_background>(dt, wall);

    Links protrusions{n_max, protrusion_strength};
    protrusions.set_d_n(n_0);
    auto interc_wall = [&](const int n, const float3* __restrict__ d_X, float3* d_dX) {
        return link_wall_forces<float3, linear_force, xy_wall_relu_force>(
            protrusions, n, d_X, d_dX, 0);
    };
    Grid grid{n_max};

    curandState* d_state;
    cudaMalloc(&d_state, n_max * sizeof(curandState));
    auto seed = time(NULL);
    setup_rand_states<<<(n_max + 128 - 1) / 128, 128>>>(n_max, seed, d_state);

    // Simulate growth
    Vtk_output output{"growth_w_wall", "output/", true};
    for (auto time_step = 0; time_step <= n_time_steps; time_step++) {
        protrusions.set_d_n(cells.get_d_n() * prots_per_cell);
        grid.build(cells, r_protrusion);
        update_protrusions_wall<<<(protrusions.get_d_n() + 32 - 1) / 32, 32>>>(
            cells.get_d_n(), grid.d_grid, cells.d_X, protrusions.d_state,
            protrusions.d_link, update_prob);
        cells.take_step<relu_force, wall_friction>(dt, interc_wall);
        proliferate<<<(cells.get_d_n() + 128 - 1) / 128, 128>>>(
            prolif_rate, cells.get_d_n(), d_state, cells.d_X, cells.d_old_v,
            cells.d_n);
        if (time_step % skip_step == 0) {
            cudaDeviceSynchronize();
            cells.copy_to_host();
            protrusions.copy_to_host();
            type.copy_to_host();
            output.write_positions(cells);
            output.write_links(protrusions);
            output.write_property(type);
        }
    }

    return 0;
}
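// A minimal, standalone sketch of the atomic-append pattern used by
// `proliferate` above: each dividing cell claims a fresh index with
// `atomicAdd(d_n_cells, 1)`, so concurrent divisions never write to the same
// slot. The program below shows only that pattern in isolation; the kernel
// name, filter condition and array names are illustrative assumptions and are
// not taken from the original example. Slot order is nondeterministic, which
// is why the original kernel copies the parent's type and velocity into the
// claimed slot instead of relying on any ordering.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void append_if_positive(const float* in, int n, float* out, int* out_n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    if (in[i] <= 0.0f) return;        // only some threads emit an element

    int slot = atomicAdd(out_n, 1);   // reserve a unique output position
    out[slot] = in[i];
}

int main()
{
    const int n = 1024;
    float h_in[n];
    for (int i = 0; i < n; ++i) h_in[i] = (i % 3 == 0) ? 1.0f : -1.0f;

    float *d_in, *d_out;
    int* d_out_n;
    cudaMalloc(&d_in, n * sizeof(float));
    cudaMalloc(&d_out, n * sizeof(float));
    cudaMalloc(&d_out_n, sizeof(int));
    cudaMemcpy(d_in, h_in, n * sizeof(float), cudaMemcpyHostToDevice);
    cudaMemset(d_out_n, 0, sizeof(int));  // counter starts at zero

    append_if_positive<<<(n + 128 - 1) / 128, 128>>>(d_in, n, d_out, d_out_n);

    int h_out_n;
    cudaMemcpy(&h_out_n, d_out_n, sizeof(int), cudaMemcpyDeviceToHost);
    printf("appended %d elements\n", h_out_n);

    cudaFree(d_in);
    cudaFree(d_out);
    cudaFree(d_out_n);
    return 0;
}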
31d5d9492a2b93e23fbae626ac33828149990e42.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "euclideandistancematrixgpu.h" #include <cmath> #include <float.h> #include <set> #include <algorithm> #define EMPTY_DIRECTION -1 #define SIDE_LEFT 0 #define SIDE_END 1 #define SIDE_RIGHT 2 #define LEFT_SON 0 #define RIGHT_SON 1 #define STUB_INIT_DIST 100000.0 #define STUB_INIT_ID -1 __global__ void normalizeDataStep1(float* dataTable_device, size_t dataTable_pitch, int numberOfEntities, int numberOfDimension){ __shared__ float mean[256]; int tid = threadIdx.x + blockIdx.x * blockDim.x; mean[threadIdx.x] = 0.0f; __syncthreads(); if(tid < numberOfDimension){ int number = 0; while(number < numberOfEntities){ float* pElement = (float*)((char*)dataTable_device + tid * dataTable_pitch) + number; mean[threadIdx.x] += *pElement; ++number; } mean[threadIdx.x] /= numberOfEntities; number = 0; while(number < numberOfEntities){ float* pElement = (float*)((char*)dataTable_device + tid * dataTable_pitch) + number; *pElement -= mean[threadIdx.x]; ++number; } } } __global__ void normalizeDataStep2(float* dataTable_device, size_t dataTable_pitch, int numberOfEntities, int numberOfDimension){ __shared__ float mX[256]; __shared__ float mXGlobal; int tid = threadIdx.x; int bias = blockDim.x; mX[tid] = 0.0f; if(tid == 0){ mXGlobal = 0.0; } __syncthreads(); if(tid < numberOfDimension){ int number = 0; while(number < numberOfEntities){ float* pElement = (float*)((char*)dataTable_device + tid * dataTable_pitch) + number; mX[threadIdx.x] = max(abs(*pElement), mX[threadIdx.x]); ++number; } tid += bias; } __syncthreads(); if(threadIdx.x == 0){ for(int i=0 ; i<256 ; ++i){ mXGlobal = max(mXGlobal, mX[i]); } } tid = threadIdx.x; __syncthreads(); if(tid < numberOfDimension){ int number = 0; while(number < numberOfEntities){ float* pElement = (float*)((char*)dataTable_device + tid * dataTable_pitch) + number; *pElement /= mXGlobal; ++number; } tid += bias; } } __global__ void findGraphTraversalStartPoint(int* graphTraversalStartPoint_device, int numberOfEntities, int numberOfNeighbors, TreeNode** trees_device, TreeNodeLeaf** treesLeafs_device, int* trees_size_device, int* treesLeafs_size_device, int numberOfTrees){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < numberOfEntities){ for(int tree=0 ; tree<numberOfTrees ; ++tree){ TreeNodeLeaf treeNodeLeaf = treesLeafs_device[tree][tid]; int parent = treeNodeLeaf.parent; int entityNumber = treeNodeLeaf.entityNumber; int numberOfElementsInNode = trees_device[tree][parent].numberOfEntities; while(numberOfElementsInNode < (numberOfNeighbors+1)){ parent = trees_device[tree][parent].parent; numberOfElementsInNode = trees_device[tree][parent].numberOfEntities; } graphTraversalStartPoint_device[tree*numberOfEntities+entityNumber] = parent; } } } __global__ void findInitialStateOfAproximatedEuclideanKNN(int* graphTraversalStartPoint_device, int numberOfEntities, int numberOfNeighbors, TreeNode** trees_device, TreeNodeLeaf** treesLeafs_device, int* trees_size_device, int* treesLeafs_size_device, int numberOfTrees, float* dataTable_device, size_t dataTable_Pitch, float* neighboursDistance_device, int* neighboursId_device, char* idxChecking_device, int dimensionOfEntity, int start, int end, int* indexes_device, float* distances_device, int dimensionOfIndexesAndDistances, short* marker_device, int minSize){ __shared__ float entity[256]; __shared__ int hist[256]; __shared__ int elementsToCheckInTreesLeafs_device; __shared__ int startPointForTreesLeafs_device; __shared__ int 
numbersToCheck; __shared__ int numbersToCheckInThisPart; __shared__ int numbersToCheckInThisPartRealPart; __shared__ int idOfBatch; __shared__ int elementsPerBatch; __shared__ int startOfTheBatch; __shared__ int endOfTheBatch; __shared__ int idxCheckingIdGlobal; __shared__ int idxCheckingIdGlobal2; __shared__ float biggestNumber[256]; __shared__ float smalestNumber[256]; __shared__ double bias; __shared__ double lengthOfBucket; __shared__ double maxValue; __shared__ double minValue; __shared__ double minimalSizeOfBucket; __shared__ int foundExactSolution; __shared__ int limitOfLengthOfBucketExceeded; __shared__ int alreadyFoundNumbers; __shared__ int interestingBucket; __shared__ int rewrittenNumbers; __shared__ int complement; if(threadIdx.x == 0){ idOfBatch = blockIdx.x; elementsPerBatch = ceil(float(end-start)/float(gridDim.x)); startOfTheBatch = elementsPerBatch*idOfBatch; endOfTheBatch = elementsPerBatch*(idOfBatch+1) <= (end-start) ? elementsPerBatch*(idOfBatch+1) : (end-start); startOfTheBatch += start; endOfTheBatch += start; idxCheckingIdGlobal = idOfBatch*numberOfEntities; idxCheckingIdGlobal2 = idOfBatch*dimensionOfIndexesAndDistances; minimalSizeOfBucket = 0.0078125; } __syncthreads(); for(int i=startOfTheBatch ; i<endOfTheBatch ; ++i){ //Zerujemy liczby do wyszukiwania dla danego punktu, dla ktorego liczymy for(int ii=threadIdx.x ; ii<numberOfEntities ; ii+=blockDim.x){ idxChecking_device[idxCheckingIdGlobal+ii] = 0x00; } __syncthreads(); //Wyszukujemy liczby do przeszukania for(int treeNo=0 ; treeNo<numberOfTrees ; ++treeNo){ //Dla danego drzewa wyszukujemy pukty, dla ktorych bedziemy szukac poczatku danych w treesLeafs_device if(threadIdx.x == 0){ int startPoint = graphTraversalStartPoint_device[treeNo*numberOfEntities+i]; TreeNode treeNode = trees_device[treeNo][startPoint]; elementsToCheckInTreesLeafs_device = treeNode.numberOfEntities; while(treeNode.rightChild != EMPTY_DIRECTION){ treeNode = trees_device[treeNo][treeNode.leftChild]; } startPointForTreesLeafs_device = treeNode.leftChild; } __syncthreads(); //Ustawiamy te liczby, dla ktorych mamy liczyc (te ktore wyszukalismy w treesLeafs_device) for(int kk=threadIdx.x ; kk<elementsToCheckInTreesLeafs_device ; kk+=blockDim.x){ TreeNodeLeaf tnl = treesLeafs_device[treeNo][kk+startPointForTreesLeafs_device]; int elem = tnl.entityNumber; idxChecking_device[idxCheckingIdGlobal+elem] = 0x01; } __syncthreads(); } __syncthreads(); //Zerujemy bit odpowiedzialny za liczbe dla ktorej robimy poszukiwania if(threadIdx.x == 0){ idxChecking_device[idxCheckingIdGlobal+i] = 0x00; numbersToCheck = 0; } __syncthreads(); for(int kk=threadIdx.x ; kk<numberOfEntities ; kk+=blockDim.x){ char idxPtr = idxChecking_device[idxCheckingIdGlobal+kk]; if(idxPtr == 0x01){ atomicAdd(&numbersToCheck, 1); } } __syncthreads(); //Przepisujemy te liczby do tablicy z wyszukiwaniem najblizszych sasiadow while(numbersToCheck > 0){ __syncthreads(); //Przepisujemy aktualne najblizsze liczby for(int kk=threadIdx.x ; kk<numberOfNeighbors ; kk+=blockDim.x){ indexes_device[idxCheckingIdGlobal2+kk] = neighboursId_device[i*numberOfNeighbors+kk]; distances_device[idxCheckingIdGlobal2+kk] = neighboursDistance_device[i*numberOfNeighbors+kk]; marker_device[idxCheckingIdGlobal2+kk] = 0; } //Dopisujemy te co aktualnie sprawdzamy if(threadIdx.x == 0){ numbersToCheck = 0; numbersToCheckInThisPart = numberOfNeighbors; numbersToCheckInThisPartRealPart = numberOfNeighbors; } __syncthreads(); int localTid = threadIdx.x; while(localTid < numberOfEntities){ char idxPtr = 
idxChecking_device[idxCheckingIdGlobal+localTid]; if(idxPtr == 0x01){ int pos = atomicAdd(&numbersToCheckInThisPart, 1); if(pos < dimensionOfIndexesAndDistances){ indexes_device[idxCheckingIdGlobal2+pos] = localTid; distances_device[idxCheckingIdGlobal2+pos] = 0.0f; marker_device[idxCheckingIdGlobal2+pos] = 0; idxChecking_device[idxCheckingIdGlobal+localTid] = 0x00; atomicAdd(&numbersToCheckInThisPartRealPart, 1); }else{ atomicAdd(&numbersToCheck, 1); } } localTid += blockDim.x; } __syncthreads(); //Wyznaczamy odleglosc do tych nowych liczb for(int d=0 ; d<dimensionOfEntity ; d+=256){ //wczytaj liczbe dla ktorej bedziemy liczyc odleglosci do innych liczb if((threadIdx.x < 256)&&(d+threadIdx.x < dimensionOfEntity)){ float* pElement = (float*)((char*)dataTable_device + (d+threadIdx.x) * dataTable_Pitch) + i; entity[threadIdx.x] = *pElement; } __syncthreads(); //wyznaczanie odleglosci do liczb for(int lp = numberOfNeighbors+threadIdx.x ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float distance = 0.0; for(int k=d ; (k<dimensionOfEntity)&&(k<(d+256)) ; ++k){ int lpp = indexes_device[idxCheckingIdGlobal2+lp]; float* pElement = (float*)((char*)dataTable_device + k * dataTable_Pitch) + lpp; float pElementVal = *pElement; distance += (entity[k-d]-pElementVal)*(entity[k-d]-pElementVal); } //zapisanie odleglosci do tablicy na bazie ktorej beda wyszukiwani najblizsi sasiedzi distances_device[idxCheckingIdGlobal2+lp] += distance; } __syncthreads(); } biggestNumber[threadIdx.x] = 0.0f; smalestNumber[threadIdx.x] = STUB_INIT_DIST; __syncthreads(); for(int lp = numberOfNeighbors+threadIdx.x ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float dist = sqrt(distances_device[idxCheckingIdGlobal2+lp]); distances_device[idxCheckingIdGlobal2+lp] = dist; } __syncthreads(); for(int lp = threadIdx.x ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float dist = distances_device[idxCheckingIdGlobal2+lp]; biggestNumber[threadIdx.x] = max(biggestNumber[threadIdx.x], ceil(dist)); smalestNumber[threadIdx.x] = min(smalestNumber[threadIdx.x], floor(dist)); } __syncthreads(); //wyszukiwanie najwiekszej liczby w rezultacie if(threadIdx.x < 32){ for(int ii=threadIdx.x ; ii<256 ; ii+=32){ biggestNumber[threadIdx.x] = max(biggestNumber[threadIdx.x], biggestNumber[ii]); smalestNumber[threadIdx.x] = min(smalestNumber[threadIdx.x], smalestNumber[ii]); } } if(threadIdx.x == 0){ #pragma unroll for(int c=0 ; c<32 ; ++c){ biggestNumber[0] = max(biggestNumber[0], biggestNumber[c]); smalestNumber[0] = min(smalestNumber[0], smalestNumber[c]); } } __syncthreads(); //Wyszukujemy k najmniejszych liczb if(threadIdx.x == 0){ bias = smalestNumber[0]; minValue = 0; maxValue = biggestNumber[0] - smalestNumber[0]; maxValue = pow(2.0, ceil(log(maxValue+1.0)/log(2.0))); lengthOfBucket = (maxValue-minValue)/256.0; foundExactSolution = FALSE; limitOfLengthOfBucketExceeded = FALSE; alreadyFoundNumbers = 0; rewrittenNumbers = 0; complement = 0; } __syncthreads(); while((foundExactSolution == FALSE) && (limitOfLengthOfBucketExceeded == FALSE)){ hist[threadIdx.x] = 0; if(threadIdx.x == 0){ interestingBucket = NON_OF_BUCKET_IN_INTEREST; } __syncthreads(); //wyznacz histogram dla aktualnego opisu minValue-maxValue for(int lp = threadIdx.x ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short marker = marker_device[idxCheckingIdGlobal2+lp]; if(marker == 0){ int idOfBucketInHist = (distances_device[idxCheckingIdGlobal2+lp]-bias-minValue)/lengthOfBucket; atomicAdd(&hist[idOfBucketInHist], 1); 
marker_device[idxCheckingIdGlobal2+lp] = idOfBucketInHist; } } __syncthreads(); //zsumuj histogram tak, ze hist(i) to suma od hist(0) do hist(i) if(threadIdx.x == 0){ for(int k=1 ; k<256 ; ++k){ hist[k] += hist[k-1]; } } __syncthreads(); if((hist[threadIdx.x]+alreadyFoundNumbers)>numberOfNeighbors){ atomicMin(&interestingBucket, threadIdx.x); } //jezeli znalezlismy dokladna liczbe to koncz if((threadIdx.x == 0) && (alreadyFoundNumbers == numberOfNeighbors)){ foundExactSolution = TRUE; } //Sprawdzamy czy nie znalezlismy juz rozwiazania przyblizonego int tmpSum = hist[threadIdx.x] + alreadyFoundNumbers; if(tmpSum == numberOfNeighbors){ foundExactSolution = TRUE; } //sprawdzamy czy czasami nie osigniemy juz zbyt malej szerokosci kubelka if((threadIdx.x == 0) && (lengthOfBucket < minimalSizeOfBucket)){ limitOfLengthOfBucketExceeded = TRUE; } __syncthreads(); //dla tych kubelkow z id>interestingBucket zaznaczamy, e nie sa interesujace, a dla id<interestingBucket ze sa w rozwiazaniu, dla id==interestingBucket, do rozpatrzenia w nastepnej iteracji for(int lp = threadIdx.x ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short marker = marker_device[idxCheckingIdGlobal2+lp]; if((marker < interestingBucket)&&(marker >= 0)){ marker_device[idxCheckingIdGlobal2+lp] = IN_SOLUTION; atomicAdd(&alreadyFoundNumbers, 1); }else if((marker > interestingBucket)&&(marker < 256)){ marker_device[idxCheckingIdGlobal2+lp] = OUT_OF_SOLUTION; }else if(marker == interestingBucket){ marker_device[idxCheckingIdGlobal2+lp] = 0; } } __syncthreads(); //przeliczenie zakresow if(threadIdx.x == 0){ bias = bias+interestingBucket*lengthOfBucket; minValue = 0.0; maxValue = lengthOfBucket; lengthOfBucket = (maxValue-minValue)/256.0; } __syncthreads(); } __syncthreads(); //przepisz rozwiazanie wynikowe przy wyszukiwaniu najblizszych liczb for(int lp = threadIdx.x ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short marker = marker_device[idxCheckingIdGlobal2+lp]; if(marker == IN_SOLUTION){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance_device[i*numberOfNeighbors+id] = distances_device[idxCheckingIdGlobal2+lp]; neighboursId_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } __syncthreads(); //jezeli zostal przekroczony limit kubelka to znajdz odpowiednie liczby dla dopelnienia rezultatu dla najblizszych liczb if((limitOfLengthOfBucketExceeded == TRUE)&&(foundExactSolution == FALSE)){ for(int lp = threadIdx.x ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short marker = marker_device[idxCheckingIdGlobal2+lp]; if(marker == 0){ int id2 = atomicAdd(&complement, 1); if((id2+alreadyFoundNumbers) < numberOfNeighbors){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance_device[i*numberOfNeighbors+id] = distances_device[idxCheckingIdGlobal2+lp]; neighboursId_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } } } __syncthreads(); } __syncthreads(); } } __global__ void findInitialStateOfAproximatedTaxicabKNN(int* graphTraversalStartPoint_device, int numberOfEntities, int numberOfNeighbors, TreeNode** trees_device, TreeNodeLeaf** treesLeafs_device, int* trees_size_device, int* treesLeafs_size_device, int numberOfTrees, float* dataTable_device, size_t dataTable_Pitch, float* neighboursDistance_device, int* neighboursId_device, char* idxChecking_device, int dimensionOfEntity, int start, int end, int* indexes_device, float* distances_device, int dimensionOfIndexesAndDistances, short* marker_device, int minSize){ __shared__ int 
startPointForTreesLeafs_device; __shared__ int elementsToCheckInTreesLeafs_device; __shared__ float entity[256]; __shared__ int numbersToCheck; __shared__ int numbersToCheckInThisPart; __shared__ int numbersToCheckInThisPartRealPart; __shared__ double bias; __shared__ double lengthOfBucket; __shared__ double maxValue; __shared__ double minValue; __shared__ int hist[256]; __shared__ float biggestNumber[256]; __shared__ float smalestNumber[256]; __shared__ int foundExactSolution; __shared__ int limitOfLengthOfBucketExceeded; __shared__ int alreadyFoundNumbers; __shared__ int interestingBucket; __shared__ int rewrittenNumbers; __shared__ int complement; int tid = threadIdx.x; int idOfBatch = blockIdx.x; int elementsPerBatch = ceil(float(end-start)/float(gridDim.x)); int startOfTheBatch = elementsPerBatch*idOfBatch; int endOfTheBatch = elementsPerBatch*(idOfBatch+1) <= (end-start) ? elementsPerBatch*(idOfBatch+1) : (end-start); startOfTheBatch += start; endOfTheBatch += start; int idxCheckingIdGlobal = idOfBatch*numberOfEntities; int idxCheckingIdGlobal2 = idOfBatch*dimensionOfIndexesAndDistances; double minimalSizeOfBucket = 0.000000059604644775; for(int i=startOfTheBatch ; i<endOfTheBatch ; ++i){ //Zerujemy bity do wyszukiwania dla danego punktu, dla ktorego liczymy for(int ii=tid ; ii<numberOfEntities ; ii+=blockDim.x){ idxChecking_device[idxCheckingIdGlobal+ii] = 0x00; } __syncthreads(); //Wyszukujemy liczby do przeszukania for(int treeNo=0 ; treeNo<numberOfTrees ; ++treeNo){ //Dla danego drzewa wyszukujemy pukty, dla ktorych bedziemy szukac poczatku danych w treesLeafs_device if(tid == 0){ TreeNode treeNode = trees_device[treeNo][graphTraversalStartPoint_device[treeNo*numberOfEntities+i]]; elementsToCheckInTreesLeafs_device = treeNode.numberOfEntities; while(treeNode.rightChild != EMPTY_DIRECTION){ treeNode = trees_device[treeNo][treeNode.leftChild]; } startPointForTreesLeafs_device = treeNode.leftChild; } __syncthreads(); //Ustawiamy te bity, dla ktorych mamy liczyc (te ktore wyszukalismy w treesLeafs_device) for(int kk=tid ; kk<elementsToCheckInTreesLeafs_device ; kk+=blockDim.x){ int elem = treesLeafs_device[treeNo][kk+startPointForTreesLeafs_device].entityNumber; idxChecking_device[idxCheckingIdGlobal+elem] = 0x01; } __syncthreads(); } __syncthreads(); //Zerujemy bit odpowiedzialny za liczbe dla ktorej robimy poszukiwania if(tid == 0){ idxChecking_device[idxCheckingIdGlobal+i] = 0x00; numbersToCheck = 0; } __syncthreads(); for(int kk=tid ; kk<numberOfEntities ; kk+=blockDim.x){ char idxPtr2 = idxChecking_device[idxCheckingIdGlobal+kk]; if(idxPtr2 == 0x01){ atomicAdd(&numbersToCheck, 1); } } __syncthreads(); //Przepisujemy te liczby do tablicy z wyszukiwaniem najblizszych sasiadow while(numbersToCheck > 0){ __syncthreads(); //Przepisujemy aktualne najblizsze liczby for(int kk=tid ; kk<numberOfNeighbors ; kk+=blockDim.x){ indexes_device[idxCheckingIdGlobal2+kk] = neighboursId_device[i*numberOfNeighbors+kk]; distances_device[idxCheckingIdGlobal2+kk] = neighboursDistance_device[i*numberOfNeighbors+kk]; marker_device[idxCheckingIdGlobal2+kk] = 0; } //Dopisujemy te co aktualnie sprawdzamy if(tid == 0){ numbersToCheck = 0; numbersToCheckInThisPart = numberOfNeighbors; numbersToCheckInThisPartRealPart = numberOfNeighbors; } __syncthreads(); int localTid = tid; while(localTid < numberOfEntities){ char idxPtr2 = idxChecking_device[idxCheckingIdGlobal+localTid]; if(idxPtr2 == 0x01){ int pos = atomicAdd(&numbersToCheckInThisPart, 1); if(pos < dimensionOfIndexesAndDistances){ 
indexes_device[idxCheckingIdGlobal2+pos] = localTid; distances_device[idxCheckingIdGlobal2+pos] = 0.0f; marker_device[idxCheckingIdGlobal2+pos] = 0; idxChecking_device[idxCheckingIdGlobal+localTid] = 0x00; atomicAdd(&numbersToCheckInThisPartRealPart, 1); }else{ atomicAdd(&numbersToCheck, 1); } } localTid += blockDim.x; } __syncthreads(); //Wyznaczamy odleglosc do tych nowych liczb for(int d=0 ; d<dimensionOfEntity ; d+=256){ //wczytaj liczbe dla ktorej bedziemy liczyc odleglosci do innych liczb if((tid < 256)&&(d+tid < dimensionOfEntity)){ float* pElement = (float*)((char*)dataTable_device + (d+tid) * dataTable_Pitch) + i; entity[tid] = *pElement; } __syncthreads(); //wyznaczanie odleglosci do liczb for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float distance = 0.0; for(int k=d ; (k<dimensionOfEntity)&&(k<(d+256)) ; ++k){ int lpp = indexes_device[idxCheckingIdGlobal2+lp]; float* pElement = (float*)((char*)dataTable_device + k * dataTable_Pitch) + lpp; float pElementVal = *pElement; distance += abs(entity[k-d]-pElementVal); } //zapisanie odleglosci do tablicy na bazie ktorej beda wyszukiwani najblizsi sasiedzi distances_device[idxCheckingIdGlobal2+lp] += distance; } __syncthreads(); } biggestNumber[tid] = 0.0f; smalestNumber[tid] = STUB_INIT_DIST; __syncthreads(); for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float dist = sqrt(distances_device[idxCheckingIdGlobal2+lp]); distances_device[idxCheckingIdGlobal2+lp] = dist; } __syncthreads(); for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float dist = distances_device[idxCheckingIdGlobal2+lp]; biggestNumber[tid] = max(biggestNumber[tid], ceil(dist)); smalestNumber[tid] = min(smalestNumber[tid], floor(dist)); } __syncthreads(); //wyszukiwanie najwiekszej liczby w rezultacie if(tid < 32){ for(int ii=tid ; ii<256 ; ii+=32){ biggestNumber[tid] = max(biggestNumber[tid], biggestNumber[ii]); smalestNumber[tid] = min(smalestNumber[tid], smalestNumber[ii]); } } if(tid == 0){ #pragma unroll for(int c=0 ; c<32 ; ++c){ biggestNumber[0] = max(biggestNumber[0], biggestNumber[c]); smalestNumber[0] = min(smalestNumber[0], smalestNumber[c]); } } __syncthreads(); //Wyszukujemy k najmniejszych liczb if(tid == 0){ bias = smalestNumber[0]; minValue = 0.0; maxValue = biggestNumber[0] - smalestNumber[0]; maxValue = pow(2.0, ceil(log(maxValue+1.0)/log(2.0))); lengthOfBucket = (maxValue-minValue)/256.0; foundExactSolution = FALSE; limitOfLengthOfBucketExceeded = FALSE; alreadyFoundNumbers = 0; rewrittenNumbers = 0; complement = 0; } __syncthreads(); while((foundExactSolution == FALSE) && (limitOfLengthOfBucketExceeded == FALSE)){ hist[tid] = 0; if(tid == 0){ interestingBucket = NON_OF_BUCKET_IN_INTEREST; } __syncthreads(); //wyznacz histogram dla aktualnego opisu minValue-maxValue for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == 0){ int idOfBucketInHist = (distances_device[idxCheckingIdGlobal2+lp]-bias-minValue)/lengthOfBucket; atomicAdd(&hist[idOfBucketInHist], 1); marker_device[idxCheckingIdGlobal2+lp] = idOfBucketInHist; } } __syncthreads(); //zsumuj histogram tak, ze hist(i) to suma od hist(0) do hist(i) if(tid == 0){ for(int k=1 ; k<256 ; ++k){ hist[k] += hist[k-1]; } } __syncthreads(); if((hist[tid]+alreadyFoundNumbers) > numberOfNeighbors){ atomicMin(&interestingBucket, tid); } //jezeli znalezlismy dokladna liczbe to koncz if((tid == 0) && 
(alreadyFoundNumbers == numberOfNeighbors)){ foundExactSolution = TRUE; } //Sprawdzamy czy nie znalezlismy juz rozwiazania przyblizonego int tmpSum = hist[tid] + alreadyFoundNumbers; if(tmpSum == numberOfNeighbors){ foundExactSolution = TRUE; } //sprawdzamy czy czasami nie osigniemy juz zbyt malej szerokosci kubelka if((tid == 0) && (lengthOfBucket < minimalSizeOfBucket)){ limitOfLengthOfBucketExceeded = TRUE; } __syncthreads(); //dla tych kubelkow z id>interestingBucket zaznaczamy, e nie sa interesujace, a dla id<interestingBucket ze sa w rozwiazaniu, dla id==interestingBucket, do rozpatrzenia w nastepnej iteracji for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if((mark < interestingBucket)&&(mark >= 0)){ marker_device[idxCheckingIdGlobal2+lp] = IN_SOLUTION; atomicAdd(&alreadyFoundNumbers, 1); }else if((mark > interestingBucket)&&(mark < 256)){ marker_device[idxCheckingIdGlobal2+lp] = OUT_OF_SOLUTION; }else if(mark == interestingBucket){ marker_device[idxCheckingIdGlobal2+lp] = 0; } } __syncthreads(); //przeliczenie zakresow if(tid == 0){ bias = bias+interestingBucket*lengthOfBucket; minValue = 0.0; maxValue = lengthOfBucket; lengthOfBucket = (maxValue-minValue)/256.0; } __syncthreads(); } __syncthreads(); //Wpisujemy k najmniejsze liczby jako nowe rozwiazanie do neighbours for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == IN_SOLUTION){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance_device[i*numberOfNeighbors+id] = distances_device[idxCheckingIdGlobal2+lp]; neighboursId_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } __syncthreads(); //jezeli zostal przekroczony limit kubelka to znajdz odpowiednie liczby dla dopelnienia rezultatu dla najblizszych liczb if((limitOfLengthOfBucketExceeded == TRUE)&&(foundExactSolution == FALSE)){ for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == 0){ int id2 = atomicAdd(&complement, 1); if((id2+alreadyFoundNumbers) < numberOfNeighbors){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance_device[i*numberOfNeighbors+id] = distances_device[idxCheckingIdGlobal2+lp]; neighboursId_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } } } __syncthreads(); } __syncthreads(); } } __global__ void findInitialStateOfAproximatedCosineKNN(int* graphTraversalStartPoint_device, int numberOfEntities, int numberOfNeighbors, TreeNode** trees_device, TreeNodeLeaf** treesLeafs_device, int* trees_size_device, int* treesLeafs_size_device, int numberOfTrees, float* dataTable_device, size_t dataTable_Pitch, float* neighboursDistance_device, int* neighboursId_device, char* idxChecking_device, int dimensionOfEntity, int start, int end, int* indexes_device, float* distances_device, float* distances2_device, float* distances3_device, int dimensionOfIndexesAndDistances, short* marker_device, int minSize){ __shared__ int startPointForTreesLeafs_device; __shared__ int elementsToCheckInTreesLeafs_device; __shared__ float entity[256]; __shared__ int numbersToCheck; __shared__ int numbersToCheckInThisPart; __shared__ int numbersToCheckInThisPartRealPart; __shared__ double bias; __shared__ double lengthOfBucket; __shared__ double maxValue; __shared__ double minValue; __shared__ int hist[256]; __shared__ float biggestNumber[256]; __shared__ float smalestNumber[256]; 
__shared__ int foundExactSolution; __shared__ int limitOfLengthOfBucketExceeded; __shared__ int alreadyFoundNumbers; __shared__ int interestingBucket; __shared__ int rewrittenNumbers; __shared__ int complement; int tid = threadIdx.x; int idOfBatch = blockIdx.x; int elementsPerBatch = ceil(float(end-start)/float(gridDim.x)); int startOfTheBatch = elementsPerBatch*idOfBatch; int endOfTheBatch = elementsPerBatch*(idOfBatch+1) <= (end-start) ? elementsPerBatch*(idOfBatch+1) : (end-start); startOfTheBatch += start; endOfTheBatch += start; int idxCheckingIdGlobal = idOfBatch*numberOfEntities; int idxCheckingIdGlobal2 = idOfBatch*dimensionOfIndexesAndDistances; double minimalSizeOfBucket = 0.000000059604644775; for(int i=startOfTheBatch ; i<endOfTheBatch ; ++i){ //Zerujemy bity do wyszukiwania dla danego punktu, dla ktorego liczymy for(int ii=tid ; ii<numberOfEntities ; ii+=blockDim.x){ idxChecking_device[idxCheckingIdGlobal+ii] = 0x00; } __syncthreads(); //Wyszukujemy liczby do przeszukania for(int treeNo=0 ; treeNo<numberOfTrees ; ++treeNo){ //Dla danego drzewa wyszukujemy pukty, dla ktorych bedziemy szukac poczatku danych w treesLeafs_device if(tid == 0){ TreeNode treeNode = trees_device[treeNo][graphTraversalStartPoint_device[treeNo*numberOfEntities+i]]; elementsToCheckInTreesLeafs_device = treeNode.numberOfEntities; while(treeNode.rightChild != EMPTY_DIRECTION){ treeNode = trees_device[treeNo][treeNode.leftChild]; } startPointForTreesLeafs_device = treeNode.leftChild; } __syncthreads(); //Ustawiamy te bity, dla ktorych mamy liczyc (te ktore wyszukalismy w treesLeafs_device) for(int kk=tid ; kk<elementsToCheckInTreesLeafs_device ; kk+=blockDim.x){ int elem = treesLeafs_device[treeNo][kk+startPointForTreesLeafs_device].entityNumber; idxChecking_device[idxCheckingIdGlobal+elem] = 0x01; } __syncthreads(); } __syncthreads(); //Zerujemy bit odpowiedzialny za liczbe dla ktorej robimy poszukiwania if(tid == 0){ idxChecking_device[idxCheckingIdGlobal+i] = 0x00; numbersToCheck = 0; } __syncthreads(); for(int kk=tid ; kk<numberOfEntities ; kk+=blockDim.x){ char idxPtr2 = idxChecking_device[idxCheckingIdGlobal+kk]; if(idxPtr2 == 0x01){ atomicAdd(&numbersToCheck, 1); } } __syncthreads(); //Przepisujemy te liczby do tablicy z wyszukiwaniem najblizszych sasiadow while(numbersToCheck > 0){ __syncthreads(); //Przepisujemy aktualne najblizsze liczby for(int kk=tid ; kk<numberOfNeighbors ; kk+=blockDim.x){ indexes_device[idxCheckingIdGlobal2+kk] = neighboursId_device[i*numberOfNeighbors+kk]; distances_device[idxCheckingIdGlobal2+kk] = neighboursDistance_device[i*numberOfNeighbors+kk]; marker_device[idxCheckingIdGlobal2+kk] = 0; } //Dopisujemy te co aktualnie sprawdzamy if(tid == 0){ numbersToCheck = 0; numbersToCheckInThisPart = numberOfNeighbors; numbersToCheckInThisPartRealPart = numberOfNeighbors; } __syncthreads(); int localTid = tid; while(localTid < numberOfEntities){ char idxPtr2 = idxChecking_device[idxCheckingIdGlobal+localTid]; if(idxPtr2 == 0x01){ int pos = atomicAdd(&numbersToCheckInThisPart, 1); if(pos < dimensionOfIndexesAndDistances){ indexes_device[idxCheckingIdGlobal2+pos] = localTid; distances_device[idxCheckingIdGlobal2+pos] = 0.0f; distances2_device[idxCheckingIdGlobal2+pos] = 0.0f; distances3_device[idxCheckingIdGlobal2+pos] = 0.0f; marker_device[idxCheckingIdGlobal2+pos] = 0; idxChecking_device[idxCheckingIdGlobal+localTid] = 0x00; atomicAdd(&numbersToCheckInThisPartRealPart, 1); }else{ atomicAdd(&numbersToCheck, 1); } } localTid += blockDim.x; } __syncthreads(); //Wyznaczamy odleglosc do 
tych nowych liczb for(int d=0 ; d<dimensionOfEntity ; d+=256){ //wczytaj liczbe dla ktorej bedziemy liczyc odleglosci do innych liczb if((tid < 256)&&(d+tid < dimensionOfEntity)){ float* pElement = (float*)((char*)dataTable_device + (d+tid) * dataTable_Pitch) + i; entity[tid] = *pElement; } __syncthreads(); //wyznaczanie odleglosci do liczb for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float distanceAB = 0.0; float distanceA = 0.0; float distanceB = 0.0; for(int k=d ; (k<dimensionOfEntity)&&(k<(d+256)) ; ++k){ int lpp = indexes_device[idxCheckingIdGlobal2+lp]; float* pElement = (float*)((char*)dataTable_device + k * dataTable_Pitch) + lpp; float pElementVal = *pElement; distanceAB += entity[k-d]*pElementVal; distanceA += entity[k-d]*entity[k-d]; distanceB += pElementVal*pElementVal; } //zapisanie odleglosci do tablicy na bazie ktorej beda wyszukiwani najblizsi sasiedzi distances_device[idxCheckingIdGlobal2+lp] += distanceAB; distances2_device[idxCheckingIdGlobal2+lp] += distanceA; distances3_device[idxCheckingIdGlobal2+lp] += distanceB; } __syncthreads(); } biggestNumber[tid] = 0.0f; smalestNumber[tid] = STUB_INIT_DIST; __syncthreads(); //wyznaczanie odleglosci do liczb najblizszych for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float distanceAB = distances_device[idxCheckingIdGlobal2+lp]; float distanceA = distances2_device[idxCheckingIdGlobal2+lp]; float distanceB = distances3_device[idxCheckingIdGlobal2+lp]; float distance = distanceAB/(sqrt(distanceA)*sqrt(distanceB)); distance = (-1.0*distance)+1.0; distances_device[idxCheckingIdGlobal2+lp] = distance; } __syncthreads(); //for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ //float dist = sqrt(distances_device[idxCheckingIdGlobal2+lp]); //distances_device[idxCheckingIdGlobal2+lp] = dist; //} //__syncthreads(); for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float dist = distances_device[idxCheckingIdGlobal2+lp]; //dist = (-1.0*dist)+1.0; //distances_device[idxCheckingIdGlobal2+lp] = dist; biggestNumber[tid] = max(biggestNumber[tid], ceil(dist)); smalestNumber[tid] = min(smalestNumber[tid], floor(dist)); } __syncthreads(); //wyszukiwanie najwiekszej liczby w rezultacie if(tid < 32){ for(int ii=tid ; ii<256 ; ii+=32){ biggestNumber[tid] = max(biggestNumber[tid], biggestNumber[ii]); smalestNumber[tid] = min(smalestNumber[tid], smalestNumber[ii]); } } if(tid == 0){ #pragma unroll for(int c=0 ; c<32 ; ++c){ biggestNumber[0] = max(biggestNumber[0], biggestNumber[c]); smalestNumber[0] = min(smalestNumber[0], smalestNumber[c]); } } __syncthreads(); //Wyszukujemy k najmniejszych liczb if(tid == 0){ bias = smalestNumber[0]; minValue = 0.0; maxValue = biggestNumber[0] - smalestNumber[0]; maxValue = pow(2.0, ceil(log(maxValue+1.0)/log(2.0))); lengthOfBucket = (maxValue-minValue)/256.0; foundExactSolution = FALSE; limitOfLengthOfBucketExceeded = FALSE; alreadyFoundNumbers = 0; rewrittenNumbers = 0; complement = 0; } __syncthreads(); while((foundExactSolution == FALSE) && (limitOfLengthOfBucketExceeded == FALSE)){ hist[tid] = 0; if(tid == 0){ interestingBucket = NON_OF_BUCKET_IN_INTEREST; } __syncthreads(); //wyznacz histogram dla aktualnego opisu minValue-maxValue for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == 0){ int idOfBucketInHist = 
(distances_device[idxCheckingIdGlobal2+lp]-bias-minValue)/lengthOfBucket; atomicAdd(&hist[idOfBucketInHist], 1); marker_device[idxCheckingIdGlobal2+lp] = idOfBucketInHist; } } __syncthreads(); //zsumuj histogram tak, ze hist(i) to suma od hist(0) do hist(i) if(tid == 0){ for(int k=1 ; k<256 ; ++k){ hist[k] += hist[k-1]; } } __syncthreads(); if((hist[tid]+alreadyFoundNumbers) > numberOfNeighbors){ atomicMin(&interestingBucket, tid); } //jezeli znalezlismy dokladna liczbe to koncz if((tid == 0) && (alreadyFoundNumbers == numberOfNeighbors)){ foundExactSolution = TRUE; } //Sprawdzamy czy nie znalezlismy juz rozwiazania przyblizonego int tmpSum = hist[tid] + alreadyFoundNumbers; if(tmpSum == numberOfNeighbors){ foundExactSolution = TRUE; } //sprawdzamy czy czasami nie osigniemy juz zbyt malej szerokosci kubelka if((tid == 0) && (lengthOfBucket < minimalSizeOfBucket)){ limitOfLengthOfBucketExceeded = TRUE; } __syncthreads(); //dla tych kubelkow z id>interestingBucket zaznaczamy, e nie sa interesujace, a dla id<interestingBucket ze sa w rozwiazaniu, dla id==interestingBucket, do rozpatrzenia w nastepnej iteracji for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if((mark < interestingBucket)&&(mark >= 0)){ marker_device[idxCheckingIdGlobal2+lp] = IN_SOLUTION; atomicAdd(&alreadyFoundNumbers, 1); }else if((mark > interestingBucket)&&(mark < 256)){ marker_device[idxCheckingIdGlobal2+lp] = OUT_OF_SOLUTION; }else if(mark == interestingBucket){ marker_device[idxCheckingIdGlobal2+lp] = 0; } } __syncthreads(); //przeliczenie zakresow if(tid == 0){ bias = bias+interestingBucket*lengthOfBucket; minValue = 0.0; maxValue = lengthOfBucket; lengthOfBucket = (maxValue-minValue)/256.0; } __syncthreads(); } __syncthreads(); //Wpisujemy k najmniejsze liczby jako nowe rozwiazanie do neighbours for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == IN_SOLUTION){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance_device[i*numberOfNeighbors+id] = -1.0*(distances_device[idxCheckingIdGlobal2+lp]-1.0); neighboursId_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } __syncthreads(); //jezeli zostal przekroczony limit kubelka to znajdz odpowiednie liczby dla dopelnienia rezultatu dla najblizszych liczb if((limitOfLengthOfBucketExceeded == TRUE)&&(foundExactSolution == FALSE)){ for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == 0){ int id2 = atomicAdd(&complement, 1); if((id2+alreadyFoundNumbers) < numberOfNeighbors){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance_device[i*numberOfNeighbors+id] = -1.0*(distances_device[idxCheckingIdGlobal2+lp]-1.0); neighboursId_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } } } __syncthreads(); } __syncthreads(); } } __global__ void propagateEuclideanKernel(int numberOfEntities, int numberOfNeighbors, float* dataTable_device, size_t dataTable_Pitch, float* neighboursDistance_device, float* neighboursDistance2_device, int* neighboursId_device, int* neighboursId2_device, char* idxChecking_device, int dimensionOfEntity, int start, int end, int* indexes_device, float* distances_device, int dimensionOfIndexesAndDistances, short* marker_device){ __shared__ float entity[256]; __shared__ int numbersToCheck; __shared__ int numbersToCheckInThisPart; __shared__ int 
numbersToCheckInThisPartRealPart; __shared__ double bias; __shared__ double lengthOfBucket; __shared__ double maxValue; __shared__ double minValue; __shared__ int hist[256]; __shared__ float biggestNumber[256]; __shared__ float smalestNumber[256]; __shared__ int foundExactSolution; __shared__ int limitOfLengthOfBucketExceeded; __shared__ int alreadyFoundNumbers; __shared__ int interestingBucket; __shared__ int rewrittenNumbers; __shared__ int complement; int tid = threadIdx.x; int idOfBatch = blockIdx.x; int elementsPerBatch = ceil(float(end-start)/float(gridDim.x)); int startOfTheBatch = elementsPerBatch*idOfBatch; int endOfTheBatch = elementsPerBatch*(idOfBatch+1) <= (end-start) ? elementsPerBatch*(idOfBatch+1) : (end-start); startOfTheBatch += start; endOfTheBatch += start; int idxCheckingIdGlobal = idOfBatch*numberOfEntities; int idxCheckingIdGlobal2 = idOfBatch*dimensionOfIndexesAndDistances; double minimalSizeOfBucket = 0.000000059604644775; for(int i=startOfTheBatch ; i<endOfTheBatch ; ++i){ //Zerujemy bity do wyszukiwania dla danego punktu, dla ktorego liczymy for(int ii=tid ; ii<numberOfEntities ; ii+=blockDim.x){ idxChecking_device[idxCheckingIdGlobal+ii] = 0x00; } __syncthreads(); //Wyszukujemy liczby do przeszukania for(int neighHi = 0 ; neighHi < numberOfNeighbors ; neighHi += 1){ for(int neighLo = tid ; neighLo < numberOfNeighbors ; neighLo+=blockDim.x){ int interestingNeighbour = neighboursId_device[i*numberOfNeighbors+neighHi]; int elem = neighboursId_device[interestingNeighbour*numberOfNeighbors+neighLo]; idxChecking_device[idxCheckingIdGlobal+elem] = 0x01; } __syncthreads(); } __syncthreads(); //Zerujemy bit odpowiedzialny za liczbe sama ze soba if(tid == 0){ idxChecking_device[idxCheckingIdGlobal+i] = 0x00; } __syncthreads(); //Zerujemy bity dla wlasnych sasiadow i przepisujemy aktualnie najblizszych for(int kk=tid ; kk<numberOfNeighbors ; kk+=blockDim.x){ int elem = neighboursId_device[i*numberOfNeighbors+kk]; idxChecking_device[idxCheckingIdGlobal+elem] = 0x00; neighboursId2_device[i*numberOfNeighbors+kk] = elem; neighboursDistance2_device[i*numberOfNeighbors+kk] = neighboursDistance_device[i*numberOfNeighbors+kk]; } __syncthreads(); //Liczymy najblizszych if(tid == 0){ numbersToCheck = 0; } __syncthreads(); for(int kk=tid ; kk<numberOfEntities ; kk+=blockDim.x){ if(idxChecking_device[idxCheckingIdGlobal+kk] == 0x01){ atomicAdd(&numbersToCheck, 1); } } __syncthreads(); //Przepisujemy te liczby do tablicy z wyszukiwaniem najblizszych sasiadow while(numbersToCheck > 0){ __syncthreads(); //Przepisujemy aktualne najblizsze liczby for(int kk=tid ; kk<numberOfNeighbors ; kk+=blockDim.x){ indexes_device[idxCheckingIdGlobal2+kk] = neighboursId2_device[i*numberOfNeighbors+kk]; distances_device[idxCheckingIdGlobal2+kk] = neighboursDistance2_device[i*numberOfNeighbors+kk]; marker_device[idxCheckingIdGlobal2+kk] = 0; } //Dopisujemy te co aktualnie sprawdzamy if(tid == 0){ numbersToCheck = 0; numbersToCheckInThisPart = numberOfNeighbors; numbersToCheckInThisPartRealPart = numberOfNeighbors; } __syncthreads(); int localTid = tid; while(localTid < numberOfEntities){ if(idxChecking_device[idxCheckingIdGlobal+localTid] == 0x01){ int pos = atomicAdd(&numbersToCheckInThisPart, 1); if(pos < dimensionOfIndexesAndDistances){ indexes_device[idxCheckingIdGlobal2+pos] = localTid; distances_device[idxCheckingIdGlobal2+pos] = 0.0f; marker_device[idxCheckingIdGlobal2+pos] = 0; idxChecking_device[idxCheckingIdGlobal+localTid] = 0x00; atomicAdd(&numbersToCheckInThisPartRealPart, 1); }else{ 
atomicAdd(&numbersToCheck, 1); } } localTid += blockDim.x; } __syncthreads(); //Wyznaczamy odleglosc do tych nowych liczb for(int d=0 ; d<dimensionOfEntity ; d+=256){ //wczytaj liczbe dla ktorej bedziemy liczyc odleglosci do innych liczb if((tid < 256)&&(d+tid < dimensionOfEntity)){ float* pElement = (float*)((char*)dataTable_device + (d+tid) * dataTable_Pitch) + i; entity[tid] = *pElement; } __syncthreads(); //wyznaczanie odleglosci do liczb for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float distance = 0.0; for(int k=d ; (k<dimensionOfEntity)&&(k<(d+256)) ; ++k){ int lpp = indexes_device[idxCheckingIdGlobal2+lp]; float* pElement = (float*)((char*)dataTable_device + k * dataTable_Pitch) + lpp; float pElementVal = *pElement; distance += (entity[k-d]-pElementVal)*(entity[k-d]-pElementVal); } //zapisanie odleglosci do tablicy na bazie ktorej beda wyszukiwani najblizsi sasiedzi distances_device[idxCheckingIdGlobal2+lp] += distance; } __syncthreads(); } biggestNumber[tid] = 0.0f; smalestNumber[tid] = STUB_INIT_DIST; __syncthreads(); for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float dist = sqrt(distances_device[idxCheckingIdGlobal2+lp]); distances_device[idxCheckingIdGlobal2+lp] = dist; } __syncthreads(); for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float dist = distances_device[idxCheckingIdGlobal2+lp]; biggestNumber[tid] = max(biggestNumber[tid], ceil(dist)); smalestNumber[tid] = min(smalestNumber[tid], floor(dist)); } __syncthreads(); //wyszukiwanie najwiekszej liczby w rezultacie if(tid < 32){ for(int ii=tid ; ii<256 ; ii+=32){ biggestNumber[tid] = max(biggestNumber[tid], biggestNumber[ii]); smalestNumber[tid] = min(smalestNumber[tid], smalestNumber[ii]); } } if(tid == 0){ #pragma unroll for(int c=0 ; c<32 ; ++c){ biggestNumber[0] = max(biggestNumber[0], biggestNumber[c]); smalestNumber[0] = min(smalestNumber[0], smalestNumber[c]); } } __syncthreads(); //Wyszukujemy k najmniejszych liczb if(tid == 0){ bias = smalestNumber[0]; minValue = 0.0; maxValue = biggestNumber[0] - smalestNumber[0]; maxValue = pow(2.0, ceil(log(maxValue+1.0)/log(2.0))); lengthOfBucket = (maxValue-minValue)/256.0; foundExactSolution = FALSE; limitOfLengthOfBucketExceeded = FALSE; alreadyFoundNumbers = 0; rewrittenNumbers = 0; complement = 0; } __syncthreads(); while((foundExactSolution == FALSE) && (limitOfLengthOfBucketExceeded == FALSE)){ hist[tid] = 0; if(tid == 0){ interestingBucket = NON_OF_BUCKET_IN_INTEREST; } __syncthreads(); //wyznacz histogram dla aktualnego opisu minValue-maxValue for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == 0){ int idOfBucketInHist = (distances_device[idxCheckingIdGlobal2+lp]-bias-minValue)/lengthOfBucket; atomicAdd(&hist[idOfBucketInHist], 1); marker_device[idxCheckingIdGlobal2+lp] = idOfBucketInHist; } } __syncthreads(); //zsumuj histogram tak, ze hist(i) to suma od hist(0) do hist(i) if(tid == 0){ for(int k=1 ; k<256 ; ++k){ hist[k] += hist[k-1]; } } __syncthreads(); if((hist[tid]+alreadyFoundNumbers) > numberOfNeighbors){ atomicMin(&interestingBucket, tid); } //jezeli znalezlismy dokladna liczbe to koncz if((tid == 0) && (alreadyFoundNumbers == numberOfNeighbors)){ foundExactSolution = TRUE; } //Sprawdzamy czy nie znalezlismy juz rozwiazania przyblizonego int tmpSum = hist[tid] + alreadyFoundNumbers; if(tmpSum == numberOfNeighbors){ foundExactSolution = TRUE; } //sprawdzamy 
czy czasami nie osigniemy juz zbyt malej szerokosci kubelka if((tid == 0) && (lengthOfBucket < minimalSizeOfBucket)){ limitOfLengthOfBucketExceeded = TRUE; } __syncthreads(); //dla tych kubelkow z id>interestingBucket zaznaczamy, e nie sa interesujace, a dla id<interestingBucket ze sa w rozwiazaniu, dla id==interestingBucket, do rozpatrzenia w nastepnej iteracji for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if((mark < interestingBucket)&&(mark >= 0)){ marker_device[idxCheckingIdGlobal2+lp] = IN_SOLUTION; atomicAdd(&alreadyFoundNumbers, 1); }else if((mark > interestingBucket)&&(mark < 256)){ marker_device[idxCheckingIdGlobal2+lp] = OUT_OF_SOLUTION; }else if(mark == interestingBucket){ marker_device[idxCheckingIdGlobal2+lp] = 0; } } __syncthreads(); //przeliczenie zakresow if(tid == 0){ bias = bias+interestingBucket*lengthOfBucket; minValue = 0.0; maxValue = lengthOfBucket; lengthOfBucket = (maxValue-minValue)/256.0; } __syncthreads(); } __syncthreads(); //Wpisujemy k najmniejsze liczby jako nowe rozwiazanie do neighbours for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == IN_SOLUTION){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance2_device[i*numberOfNeighbors+id] = distances_device[idxCheckingIdGlobal2+lp]; neighboursId2_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } __syncthreads(); //jezeli zostal przekroczony limit kubelka to znajdz odpowiednie liczby dla dopelnienia rezultatu dla najblizszych liczb if((limitOfLengthOfBucketExceeded == TRUE)&&(foundExactSolution == FALSE)){ for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == 0){ int id2 = atomicAdd(&complement, 1); if((id2+alreadyFoundNumbers) < numberOfNeighbors){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance2_device[i*numberOfNeighbors+id] = distances_device[idxCheckingIdGlobal2+lp]; neighboursId2_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } } } __syncthreads(); } __syncthreads(); } } __global__ void propagateTaxicabKernel(int numberOfEntities, int numberOfNeighbors, float* dataTable_device, size_t dataTable_Pitch, float* neighboursDistance_device, float* neighboursDistance2_device, int* neighboursId_device, int* neighboursId2_device, char* idxChecking_device, int dimensionOfEntity, int start, int end, int* indexes_device, float* distances_device, int dimensionOfIndexesAndDistances, short* marker_device){ __shared__ float entity[256]; __shared__ int numbersToCheck; __shared__ int numbersToCheckInThisPart; __shared__ int numbersToCheckInThisPartRealPart; __shared__ double bias; __shared__ double lengthOfBucket; __shared__ double maxValue; __shared__ double minValue; __shared__ int hist[256]; __shared__ float biggestNumber[256]; __shared__ float smalestNumber[256]; __shared__ int foundExactSolution; __shared__ int limitOfLengthOfBucketExceeded; __shared__ int alreadyFoundNumbers; __shared__ int interestingBucket; __shared__ int rewrittenNumbers; __shared__ int complement; int tid = threadIdx.x; int idOfBatch = blockIdx.x; int elementsPerBatch = ceil(float(end-start)/float(gridDim.x)); int startOfTheBatch = elementsPerBatch*idOfBatch; int endOfTheBatch = elementsPerBatch*(idOfBatch+1) <= (end-start) ? 
elementsPerBatch*(idOfBatch+1) : (end-start); startOfTheBatch += start; endOfTheBatch += start; int idxCheckingIdGlobal = idOfBatch*numberOfEntities; int idxCheckingIdGlobal2 = idOfBatch*dimensionOfIndexesAndDistances; double minimalSizeOfBucket = 0.000000059604644775; for(int i=startOfTheBatch ; i<endOfTheBatch ; ++i){ //Zerujemy bity do wyszukiwania dla danego punktu, dla ktorego liczymy for(int ii=tid ; ii<numberOfEntities ; ii+=blockDim.x){ idxChecking_device[idxCheckingIdGlobal+ii] = 0x00; } __syncthreads(); //Wyszukujemy liczby do przeszukania for(int neighHi = 0 ; neighHi < numberOfNeighbors ; neighHi += 1){ for(int neighLo = tid ; neighLo < numberOfNeighbors ; neighLo+=blockDim.x){ int interestingNeighbour = neighboursId_device[i*numberOfNeighbors+neighHi]; int elem = neighboursId_device[interestingNeighbour*numberOfNeighbors+neighLo]; idxChecking_device[idxCheckingIdGlobal+elem] = 0x01; } __syncthreads(); } __syncthreads(); //Zerujemy bit odpowiedzialny za liczbe sama ze soba if(tid == 0){ idxChecking_device[idxCheckingIdGlobal+i] = 0x00; } __syncthreads(); //Zerujemy bity dla wlasnych sasiadow i przepisujemy aktualnie najblizszych for(int kk=tid ; kk<numberOfNeighbors ; kk+=blockDim.x){ int elem = neighboursId_device[i*numberOfNeighbors+kk]; idxChecking_device[idxCheckingIdGlobal+elem] = 0x00; neighboursId2_device[i*numberOfNeighbors+kk] = elem; neighboursDistance2_device[i*numberOfNeighbors+kk] = neighboursDistance_device[i*numberOfNeighbors+kk]; } __syncthreads(); //Liczymy najblizszych if(tid == 0){ numbersToCheck = 0; } __syncthreads(); for(int kk=tid ; kk<numberOfEntities ; kk+=blockDim.x){ if(idxChecking_device[idxCheckingIdGlobal+kk] == 0x01){ atomicAdd(&numbersToCheck, 1); } } __syncthreads(); //Przepisujemy te liczby do tablicy z wyszukiwaniem najblizszych sasiadow while(numbersToCheck > 0){ __syncthreads(); //Przepisujemy aktualne najblizsze liczby for(int kk=tid ; kk<numberOfNeighbors ; kk+=blockDim.x){ indexes_device[idxCheckingIdGlobal2+kk] = neighboursId2_device[i*numberOfNeighbors+kk]; distances_device[idxCheckingIdGlobal2+kk] = neighboursDistance2_device[i*numberOfNeighbors+kk]; marker_device[idxCheckingIdGlobal2+kk] = 0; } //Dopisujemy te co aktualnie sprawdzamy if(tid == 0){ numbersToCheck = 0; numbersToCheckInThisPart = numberOfNeighbors; numbersToCheckInThisPartRealPart = numberOfNeighbors; } __syncthreads(); int localTid = tid; while(localTid < numberOfEntities){ if(idxChecking_device[idxCheckingIdGlobal+localTid] == 0x01){ int pos = atomicAdd(&numbersToCheckInThisPart, 1); if(pos < dimensionOfIndexesAndDistances){ indexes_device[idxCheckingIdGlobal2+pos] = localTid; distances_device[idxCheckingIdGlobal2+pos] = 0.0f; marker_device[idxCheckingIdGlobal2+pos] = 0; idxChecking_device[idxCheckingIdGlobal+localTid] = 0x00; atomicAdd(&numbersToCheckInThisPartRealPart, 1); }else{ atomicAdd(&numbersToCheck, 1); } } localTid += blockDim.x; } __syncthreads(); //Wyznaczamy odleglosc do tych nowych liczb for(int d=0 ; d<dimensionOfEntity ; d+=256){ //wczytaj liczbe dla ktorej bedziemy liczyc odleglosci do innych liczb if((tid < 256)&&(d+tid < dimensionOfEntity)){ float* pElement = (float*)((char*)dataTable_device + (d+tid) * dataTable_Pitch) + i; entity[tid] = *pElement; } __syncthreads(); //wyznaczanie odleglosci do liczb for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float distance = 0.0; for(int k=d ; (k<dimensionOfEntity)&&(k<(d+256)) ; ++k){ int lpp = indexes_device[idxCheckingIdGlobal2+lp]; float* pElement = 
(float*)((char*)dataTable_device + k * dataTable_Pitch) + lpp; float pElementVal = *pElement; distance += abs(entity[k-d]-pElementVal); } //zapisanie odleglosci do tablicy na bazie ktorej beda wyszukiwani najblizsi sasiedzi distances_device[idxCheckingIdGlobal2+lp] += distance; } __syncthreads(); } biggestNumber[tid] = 0.0f; smalestNumber[tid] = STUB_INIT_DIST; __syncthreads(); for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float dist = sqrt(distances_device[idxCheckingIdGlobal2+lp]); distances_device[idxCheckingIdGlobal2+lp] = dist; } __syncthreads(); for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float dist = distances_device[idxCheckingIdGlobal2+lp]; biggestNumber[tid] = max(biggestNumber[tid], ceil(dist)); smalestNumber[tid] = min(smalestNumber[tid], floor(dist)); } __syncthreads(); //wyszukiwanie najwiekszej liczby w rezultacie if(tid < 32){ for(int ii=tid ; ii<256 ; ii+=32){ biggestNumber[tid] = max(biggestNumber[tid], biggestNumber[ii]); smalestNumber[tid] = min(smalestNumber[tid], smalestNumber[ii]); } } if(tid == 0){ #pragma unroll for(int c=0 ; c<32 ; ++c){ biggestNumber[0] = max(biggestNumber[0], biggestNumber[c]); smalestNumber[0] = min(smalestNumber[0], smalestNumber[c]); } } __syncthreads(); //Wyszukujemy k najmniejszych liczb if(tid == 0){ bias = smalestNumber[0]; minValue = 0.0; maxValue = biggestNumber[0] - smalestNumber[0]; maxValue = pow(2.0, ceil(log(maxValue+1.0)/log(2.0))); lengthOfBucket = (maxValue-minValue)/256.0; foundExactSolution = FALSE; limitOfLengthOfBucketExceeded = FALSE; alreadyFoundNumbers = 0; rewrittenNumbers = 0; complement = 0; } __syncthreads(); while((foundExactSolution == FALSE) && (limitOfLengthOfBucketExceeded == FALSE)){ hist[tid] = 0; if(tid == 0){ interestingBucket = NON_OF_BUCKET_IN_INTEREST; } __syncthreads(); //wyznacz histogram dla aktualnego opisu minValue-maxValue for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == 0){ int idOfBucketInHist = (distances_device[idxCheckingIdGlobal2+lp]-bias-minValue)/lengthOfBucket; atomicAdd(&hist[idOfBucketInHist], 1); marker_device[idxCheckingIdGlobal2+lp] = idOfBucketInHist; } } __syncthreads(); //zsumuj histogram tak, ze hist(i) to suma od hist(0) do hist(i) if(tid == 0){ for(int k=1 ; k<256 ; ++k){ hist[k] += hist[k-1]; } } __syncthreads(); if((hist[tid]+alreadyFoundNumbers) > numberOfNeighbors){ atomicMin(&interestingBucket, tid); } //jezeli znalezlismy dokladna liczbe to koncz if((tid == 0) && (alreadyFoundNumbers == numberOfNeighbors)){ foundExactSolution = TRUE; } //Sprawdzamy czy nie znalezlismy juz rozwiazania przyblizonego int tmpSum = hist[tid] + alreadyFoundNumbers; if(tmpSum == numberOfNeighbors){ foundExactSolution = TRUE; } //sprawdzamy czy czasami nie osigniemy juz zbyt malej szerokosci kubelka if((tid == 0) && (lengthOfBucket < minimalSizeOfBucket)){ limitOfLengthOfBucketExceeded = TRUE; } __syncthreads(); //dla tych kubelkow z id>interestingBucket zaznaczamy, e nie sa interesujace, a dla id<interestingBucket ze sa w rozwiazaniu, dla id==interestingBucket, do rozpatrzenia w nastepnej iteracji for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if((mark < interestingBucket)&&(mark >= 0)){ marker_device[idxCheckingIdGlobal2+lp] = IN_SOLUTION; atomicAdd(&alreadyFoundNumbers, 1); }else if((mark > interestingBucket)&&(mark < 256)){ 
marker_device[idxCheckingIdGlobal2+lp] = OUT_OF_SOLUTION; }else if(mark == interestingBucket){ marker_device[idxCheckingIdGlobal2+lp] = 0; } } __syncthreads(); //przeliczenie zakresow if(tid == 0){ bias = bias+interestingBucket*lengthOfBucket; minValue = 0.0; maxValue = lengthOfBucket; lengthOfBucket = (maxValue-minValue)/256.0; } __syncthreads(); } __syncthreads(); //Wpisujemy k najmniejsze liczby jako nowe rozwiazanie do neighbours for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == IN_SOLUTION){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance2_device[i*numberOfNeighbors+id] = distances_device[idxCheckingIdGlobal2+lp]; neighboursId2_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } __syncthreads(); //jezeli zostal przekroczony limit kubelka to znajdz odpowiednie liczby dla dopelnienia rezultatu dla najblizszych liczb if((limitOfLengthOfBucketExceeded == TRUE)&&(foundExactSolution == FALSE)){ for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == 0){ int id2 = atomicAdd(&complement, 1); if((id2+alreadyFoundNumbers) < numberOfNeighbors){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance2_device[i*numberOfNeighbors+id] = distances_device[idxCheckingIdGlobal2+lp]; neighboursId2_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } } } __syncthreads(); } __syncthreads(); } } __global__ void propagateCosineKernel(int numberOfEntities, int numberOfNeighbors, float* dataTable_device, size_t dataTable_Pitch, float* neighboursDistance_device, float* neighboursDistance2_device, int* neighboursId_device, int* neighboursId2_device, char* idxChecking_device, int dimensionOfEntity, int start, int end, int* indexes_device, float* distances_device, float* distances2_device, float* distances3_device, int dimensionOfIndexesAndDistances, short* marker_device){ __shared__ float entity[256]; __shared__ int numbersToCheck; __shared__ int numbersToCheckInThisPart; __shared__ int numbersToCheckInThisPartRealPart; __shared__ double bias; __shared__ double lengthOfBucket; __shared__ double maxValue; __shared__ double minValue; __shared__ int hist[256]; __shared__ float biggestNumber[256]; __shared__ float smalestNumber[256]; __shared__ int foundExactSolution; __shared__ int limitOfLengthOfBucketExceeded; __shared__ int alreadyFoundNumbers; __shared__ int interestingBucket; __shared__ int rewrittenNumbers; __shared__ int complement; int tid = threadIdx.x; int idOfBatch = blockIdx.x; int elementsPerBatch = ceil(float(end-start)/float(gridDim.x)); int startOfTheBatch = elementsPerBatch*idOfBatch; int endOfTheBatch = elementsPerBatch*(idOfBatch+1) <= (end-start) ? 
elementsPerBatch*(idOfBatch+1) : (end-start); startOfTheBatch += start; endOfTheBatch += start; int idxCheckingIdGlobal = idOfBatch*numberOfEntities; int idxCheckingIdGlobal2 = idOfBatch*dimensionOfIndexesAndDistances; double minimalSizeOfBucket = 0.000000059604644775; for(int i=startOfTheBatch ; i<endOfTheBatch ; ++i){ //Zerujemy bity do wyszukiwania dla danego punktu, dla ktorego liczymy for(int ii=tid ; ii<numberOfEntities ; ii+=blockDim.x){ idxChecking_device[idxCheckingIdGlobal+ii] = 0x00; } __syncthreads(); //Wyszukujemy liczby do przeszukania for(int neighHi = 0 ; neighHi < numberOfNeighbors ; neighHi += 1){ for(int neighLo = tid ; neighLo < numberOfNeighbors ; neighLo+=blockDim.x){ int interestingNeighbour = neighboursId_device[i*numberOfNeighbors+neighHi]; int elem = neighboursId_device[interestingNeighbour*numberOfNeighbors+neighLo]; idxChecking_device[idxCheckingIdGlobal+elem] = 0x01; } __syncthreads(); } __syncthreads(); //Zerujemy bit odpowiedzialny za liczbe sama ze soba if(tid == 0){ idxChecking_device[idxCheckingIdGlobal+i] = 0x00; } __syncthreads(); //Zerujemy bity dla wlasnych sasiadow i przepisujemy aktualnie najblizszych for(int kk=tid ; kk<numberOfNeighbors ; kk+=blockDim.x){ int elem = neighboursId_device[i*numberOfNeighbors+kk]; idxChecking_device[idxCheckingIdGlobal+elem] = 0x00; neighboursId2_device[i*numberOfNeighbors+kk] = elem; neighboursDistance2_device[i*numberOfNeighbors+kk] = neighboursDistance_device[i*numberOfNeighbors+kk]; } __syncthreads(); //Liczymy najblizszych if(tid == 0){ numbersToCheck = 0; } __syncthreads(); for(int kk=tid ; kk<numberOfEntities ; kk+=blockDim.x){ if(idxChecking_device[idxCheckingIdGlobal+kk] == 0x01){ atomicAdd(&numbersToCheck, 1); } } __syncthreads(); //Przepisujemy te liczby do tablicy z wyszukiwaniem najblizszych sasiadow while(numbersToCheck > 0){ __syncthreads(); //Przepisujemy aktualne najblizsze liczby for(int kk=tid ; kk<numberOfNeighbors ; kk+=blockDim.x){ indexes_device[idxCheckingIdGlobal2+kk] = neighboursId2_device[i*numberOfNeighbors+kk]; distances_device[idxCheckingIdGlobal2+kk] = neighboursDistance2_device[i*numberOfNeighbors+kk]; marker_device[idxCheckingIdGlobal2+kk] = 0; } //Dopisujemy te co aktualnie sprawdzamy if(tid == 0){ numbersToCheck = 0; numbersToCheckInThisPart = numberOfNeighbors; numbersToCheckInThisPartRealPart = numberOfNeighbors; } __syncthreads(); int localTid = tid; while(localTid < numberOfEntities){ if(idxChecking_device[idxCheckingIdGlobal+localTid] == 0x01){ int pos = atomicAdd(&numbersToCheckInThisPart, 1); if(pos < dimensionOfIndexesAndDistances){ indexes_device[idxCheckingIdGlobal2+pos] = localTid; distances_device[idxCheckingIdGlobal2+pos] = 0.0f; marker_device[idxCheckingIdGlobal2+pos] = 0; idxChecking_device[idxCheckingIdGlobal+localTid] = 0x00; atomicAdd(&numbersToCheckInThisPartRealPart, 1); }else{ atomicAdd(&numbersToCheck, 1); } } localTid += blockDim.x; } __syncthreads(); //Wyznaczamy odleglosc do tych nowych liczb for(int d=0 ; d<dimensionOfEntity ; d+=256){ //wczytaj liczbe dla ktorej bedziemy liczyc odleglosci do innych liczb if((tid < 256)&&(d+tid < dimensionOfEntity)){ float* pElement = (float*)((char*)dataTable_device + (d+tid) * dataTable_Pitch) + i; entity[tid] = *pElement; } __syncthreads(); //wyznaczanie odleglosci do liczb for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float distanceAB = 0.0; float distanceA = 0.0; float distanceB = 0.0; for(int k=d ; (k<dimensionOfEntity)&&(k<(d+256)) ; ++k){ int lpp = 
indexes_device[idxCheckingIdGlobal2+lp]; float* pElement = (float*)((char*)dataTable_device + k * dataTable_Pitch) + lpp; float pElementVal = *pElement; distanceAB += entity[k-d]*pElementVal; distanceA += entity[k-d]*entity[k-d]; distanceB += pElementVal*pElementVal; } //zapisanie odleglosci do tablicy na bazie ktorej beda wyszukiwani najblizsi sasiedzi distances_device[idxCheckingIdGlobal2+lp] += distanceAB; distances2_device[idxCheckingIdGlobal2+lp] += distanceA; distances3_device[idxCheckingIdGlobal2+lp] += distanceB; } __syncthreads(); } biggestNumber[tid] = 0.0f; smalestNumber[tid] = STUB_INIT_DIST; __syncthreads(); //wyznaczanie odleglosci do liczb najblizszych for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float distanceAB = distances_device[idxCheckingIdGlobal2+lp]; float distanceA = distances2_device[idxCheckingIdGlobal2+lp]; float distanceB = distances3_device[idxCheckingIdGlobal2+lp]; float distance = distanceAB/(sqrt(distanceA)*sqrt(distanceB)); distance = (-1.0*distance)+1.0; distances_device[idxCheckingIdGlobal2+lp] = distance; } __syncthreads(); //for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ // float dist = sqrt(distances_device[idxCheckingIdGlobal2+lp]); // distances_device[idxCheckingIdGlobal2+lp] = dist; //} //__syncthreads(); for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float dist = distances_device[idxCheckingIdGlobal2+lp]; //dist = (-1.0*dist)+1.0; //distances_device[idxCheckingIdGlobal2+lp] = dist; biggestNumber[tid] = max(biggestNumber[tid], ceil(dist)); smalestNumber[tid] = min(smalestNumber[tid], floor(dist)); } __syncthreads(); //wyszukiwanie najwiekszej liczby w rezultacie if(tid < 32){ for(int ii=tid ; ii<256 ; ii+=32){ biggestNumber[tid] = max(biggestNumber[tid], biggestNumber[ii]); smalestNumber[tid] = min(smalestNumber[tid], smalestNumber[ii]); } } if(tid == 0){ #pragma unroll for(int c=0 ; c<32 ; ++c){ biggestNumber[0] = max(biggestNumber[0], biggestNumber[c]); smalestNumber[0] = min(smalestNumber[0], smalestNumber[c]); } } __syncthreads(); //Wyszukujemy k najmniejszych liczb if(tid == 0){ bias = smalestNumber[0]; minValue = 0.0; maxValue = biggestNumber[0] - smalestNumber[0]; maxValue = pow(2.0, ceil(log(maxValue+1.0)/log(2.0))); lengthOfBucket = (maxValue-minValue)/256.0; foundExactSolution = FALSE; limitOfLengthOfBucketExceeded = FALSE; alreadyFoundNumbers = 0; rewrittenNumbers = 0; complement = 0; } __syncthreads(); while((foundExactSolution == FALSE) && (limitOfLengthOfBucketExceeded == FALSE)){ hist[tid] = 0; if(tid == 0){ interestingBucket = NON_OF_BUCKET_IN_INTEREST; } __syncthreads(); //wyznacz histogram dla aktualnego opisu minValue-maxValue for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == 0){ int idOfBucketInHist = (distances_device[idxCheckingIdGlobal2+lp]-bias-minValue)/lengthOfBucket; atomicAdd(&hist[idOfBucketInHist], 1); marker_device[idxCheckingIdGlobal2+lp] = idOfBucketInHist; } } __syncthreads(); //zsumuj histogram tak, ze hist(i) to suma od hist(0) do hist(i) if(tid == 0){ for(int k=1 ; k<256 ; ++k){ hist[k] += hist[k-1]; } } __syncthreads(); if((hist[tid]+alreadyFoundNumbers) > numberOfNeighbors){ atomicMin(&interestingBucket, tid); } //jezeli znalezlismy dokladna liczbe to koncz if((tid == 0) && (alreadyFoundNumbers == numberOfNeighbors)){ foundExactSolution = TRUE; } //Sprawdzamy czy nie znalezlismy juz rozwiazania przyblizonego 
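// hist[] was prefix-summed above, so hist[tid] is the number of candidates falling into buckets 0..tid;
// if that count plus the neighbours already committed equals numberOfNeighbors, bucket tid closes the
// top-k selection exactly and the bucket-refinement loop can stop.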
int tmpSum = hist[tid] + alreadyFoundNumbers; if(tmpSum == numberOfNeighbors){ foundExactSolution = TRUE; } //sprawdzamy czy czasami nie osigniemy juz zbyt malej szerokosci kubelka if((tid == 0) && (lengthOfBucket < minimalSizeOfBucket)){ limitOfLengthOfBucketExceeded = TRUE; } __syncthreads(); //dla tych kubelkow z id>interestingBucket zaznaczamy, e nie sa interesujace, a dla id<interestingBucket ze sa w rozwiazaniu, dla id==interestingBucket, do rozpatrzenia w nastepnej iteracji for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if((mark < interestingBucket)&&(mark >= 0)){ marker_device[idxCheckingIdGlobal2+lp] = IN_SOLUTION; atomicAdd(&alreadyFoundNumbers, 1); }else if((mark > interestingBucket)&&(mark < 256)){ marker_device[idxCheckingIdGlobal2+lp] = OUT_OF_SOLUTION; }else if(mark == interestingBucket){ marker_device[idxCheckingIdGlobal2+lp] = 0; } } __syncthreads(); //przeliczenie zakresow if(tid == 0){ bias = bias+interestingBucket*lengthOfBucket; minValue = 0.0; maxValue = lengthOfBucket; lengthOfBucket = (maxValue-minValue)/256.0; } __syncthreads(); } __syncthreads(); //Wpisujemy k najmniejsze liczby jako nowe rozwiazanie do neighbours for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == IN_SOLUTION){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance2_device[i*numberOfNeighbors+id] = -1.0*(distances_device[idxCheckingIdGlobal2+lp]-1.0); neighboursId2_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } __syncthreads(); //jezeli zostal przekroczony limit kubelka to znajdz odpowiednie liczby dla dopelnienia rezultatu dla najblizszych liczb if((limitOfLengthOfBucketExceeded == TRUE)&&(foundExactSolution == FALSE)){ for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == 0){ int id2 = atomicAdd(&complement, 1); if((id2+alreadyFoundNumbers) < numberOfNeighbors){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance2_device[i*numberOfNeighbors+id] = -1.0*(distances_device[idxCheckingIdGlobal2+lp]-1.0); neighboursId2_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } } } __syncthreads(); } __syncthreads(); } } void EuclideanDistanceMatrixGPU::propagate(){ for(int prop=0 ; prop<numberOfPropagations ; ++prop){ cuCall(hipSetDevice(device)); std::cout<<"Urzadzenie "<<device<<" uruchamia zadanie propagacji dla punktow: "<<partition.start<<" - "<<partition.end-1<<" dla iteracji: "<<prop+1<<"\n"; dim3 grid2(this->numberOfMultiprocessors*this->numberOfBlocksPerMultiprocessors, 1); dim3 block2(256, 1); if(typeOfDistance == DISTANCE_EUCLIDEAN){ hipLaunchKernelGGL(( propagateEuclideanKernel), dim3(grid2), dim3(block2), 0, executionStreams, numberOfEntities, numberOfNeighbors, dataTable_device, dataTable_Pitch, neighboursDistance_device, neighboursDistance2_device, neighboursId_device, neighboursId2_device, idxChecking_device, dimensionOfEntity, partition.start, partition.end, indexes_device, distances_device, dimensionOfIndexesAndDistances, marker_device); }else if(typeOfDistance == DISTANCE_TAXICAB){ hipLaunchKernelGGL(( propagateTaxicabKernel), dim3(grid2), dim3(block2), 0, executionStreams, numberOfEntities, numberOfNeighbors, dataTable_device, dataTable_Pitch, neighboursDistance_device, neighboursDistance2_device, neighboursId_device, neighboursId2_device, idxChecking_device, 
dimensionOfEntity, partition.start, partition.end, indexes_device, distances_device, dimensionOfIndexesAndDistances, marker_device); }else if(typeOfDistance == DISTANCE_COSINE){ hipLaunchKernelGGL(( propagateCosineKernel), dim3(grid2), dim3(block2), 0, executionStreams, numberOfEntities, numberOfNeighbors, dataTable_device, dataTable_Pitch, neighboursDistance_device, neighboursDistance2_device, neighboursId_device, neighboursId2_device, idxChecking_device, dimensionOfEntity, partition.start, partition.end, indexes_device, distances_device, distances2_device, distances3_device, dimensionOfIndexesAndDistances, marker_device); }else{ std::cout<<"We do not have such type of distance\n"; } cuCall(hipStreamSynchronize(executionStreams)); hipError_t err = hipGetLastError(); if (hipSuccess != err){ std::cout<<"propagateKernel: "<<hipGetErrorString(err)<<"\n"; } float* tmp1 = neighboursDistance_device; neighboursDistance_device = neighboursDistance2_device; neighboursDistance2_device = tmp1; int* tmp2 = neighboursId_device; neighboursId_device = neighboursId2_device; neighboursId2_device = tmp2; } } bool compareByLengthMin(const DataPoint &a, const DataPoint &b){ return a.distance < b.distance; } bool compareByLengthMinCosine(const DataPoint &a, const DataPoint &b){ return a.distance > b.distance; } std::string trim(std::string const& str){ if(str.empty()) return str; std::size_t firstScan = str.find_first_not_of(' '); std::size_t first = firstScan == std::string::npos ? str.length() : firstScan; std::size_t last = str.find_last_not_of(' '); return str.substr(first, last-first+1); } bool EuclideanDistanceMatrixGPU::initilizeGPUStructuresForTrees(){ bool error = false; error |= cuCall(hipMalloc((void**)&trees_device, numberOfTrees*sizeof(TreeNode*))); error |= cuCall(hipHostMalloc((void**)&trees_device_pointer_for_cpu, numberOfTrees*sizeof(TreeNode*), hipHostMallocPortable)); for(int i=0 ; i<numberOfTrees ; ++i){ int elems = trees_host[i].size(); error |= cuCall(hipMalloc((void**)&trees_device_pointer_for_cpu[i], elems*sizeof(TreeNode))); } error |= cuCall(hipMemcpy((void*)trees_device, (void*)trees_device_pointer_for_cpu, numberOfTrees*sizeof(TreeNode*), hipMemcpyHostToDevice)); error |= cuCall(hipMalloc((void**)&treesLeafs_device, numberOfTrees*sizeof(TreeNodeLeaf*))); error |= cuCall(hipHostMalloc((void**)&treesLeafs_device_pointer_for_cpu, numberOfTrees*sizeof(TreeNodeLeaf*), hipHostMallocPortable)); for(int i=0 ; i<numberOfTrees ; ++i){ int elems = treesLeafs_host[i].size(); error |= cuCall(hipMalloc((void**)&treesLeafs_device_pointer_for_cpu[i], elems*sizeof(TreeNodeLeaf))); } error |= cuCall(hipMemcpy((void*)treesLeafs_device, (void*)treesLeafs_device_pointer_for_cpu, numberOfTrees*sizeof(TreeNodeLeaf*), hipMemcpyHostToDevice)); error |= cuCall(hipMalloc((void**)&trees_size_device, numberOfTrees*sizeof(int))); error |= cuCall(hipHostMalloc((void**)&trees_size_host, numberOfTrees*sizeof(int), hipHostMallocPortable)); error |= cuCall(hipMalloc((void**)&treesLeafs_size_device, numberOfTrees*sizeof(int))); error |= cuCall(hipHostMalloc((void**)&treesLeafs_size_host, numberOfTrees*sizeof(int), hipHostMallocPortable)); //Przekopiowanie vektorow for(int i=0 ; i<numberOfTrees ; ++i){ error |= cuCall(hipMemcpyAsync((void*)trees_device_pointer_for_cpu[i], (void*)trees_host[i].data(), trees_host[i].size()*sizeof(TreeNode), hipMemcpyHostToDevice, executionStreams)); error |= cuCall(hipMemcpyAsync((void*)treesLeafs_device_pointer_for_cpu[i], (void*)treesLeafs_host[i].data(), 
treesLeafs_host[i].size()*sizeof(TreeNodeLeaf), hipMemcpyHostToDevice, executionStreams)); trees_size_host[i] = trees_host[i].size(); treesLeafs_size_host[i] = treesLeafs_host[i].size(); } error |= cuCall(hipMemcpyAsync((void*)trees_size_device, (void*)trees_size_host, numberOfTrees*sizeof(int), hipMemcpyHostToDevice, executionStreams)); error |= cuCall(hipMemcpyAsync((void*)treesLeafs_size_device, (void*)treesLeafs_size_host, numberOfTrees*sizeof(int), hipMemcpyHostToDevice, executionStreams)); error |= cuCall(hipStreamSynchronize(executionStreams)); return error; } bool EuclideanDistanceMatrixGPU::deinitializeGPUStructuresForTrees(){ bool error = false; error |= cuCall(hipSetDevice(device)); error |= cuCall(hipDeviceSynchronize()); for(int i=0 ; i<numberOfTrees ; ++i){ error |= cuCall(hipFree((void*)trees_device_pointer_for_cpu[i])); } error |= cuCall(hipFree((void*)trees_device)); error |= cuCall(hipHostFree((void*)trees_device_pointer_for_cpu)); for(int i=0 ; i<numberOfTrees ; ++i){ error |= cuCall(hipFree((void*)treesLeafs_device_pointer_for_cpu[i])); } error |= cuCall(hipFree((void*)treesLeafs_device)); error |= cuCall(hipHostFree((void*)treesLeafs_device_pointer_for_cpu)); error |= cuCall(hipFree((void*)trees_size_device)); error |= cuCall(hipHostFree((void*)trees_size_host)); error |= cuCall(hipFree((void*)treesLeafs_size_device)); error |= cuCall(hipHostFree((void*)treesLeafs_size_host)); return error; } void EuclideanDistanceMatrixGPU::findInitialKNN(){ dim3 grid1(ceil(float(numberOfEntities)/256.0), 1); dim3 block1(256, 1); hipLaunchKernelGGL(( findGraphTraversalStartPoint), dim3(grid1), dim3(block1), 0, executionStreams, graphTraversalStartPoint_device, numberOfEntities, numberOfNeighbors, trees_device, treesLeafs_device, trees_size_device, treesLeafs_size_device, numberOfTrees); cuCall(hipStreamSynchronize(executionStreams)); hipError_t err1 = hipGetLastError(); if (hipSuccess != err1){ std::cout<<"findGraphTraversalStartPoint: "<<hipGetErrorString(err1)<<"\n"; } std::cout<<"Urzadzenie "<<device<<" uruchamia zadanie inicjalizacji kNN dla punktow: "<<partition.start<<" - "<<partition.end-1<<"\n"; dim3 grid2(this->numberOfMultiprocessors*this->numberOfBlocksPerMultiprocessors, 1); dim3 block2(256, 1); if(typeOfDistance == DISTANCE_EUCLIDEAN){ hipLaunchKernelGGL(( findInitialStateOfAproximatedEuclideanKNN), dim3(grid2), dim3(block2), 0, executionStreams, graphTraversalStartPoint_device, numberOfEntities, numberOfNeighbors, trees_device, treesLeafs_device, trees_size_device, treesLeafs_size_device, numberOfTrees, dataTable_device, dataTable_Pitch, neighboursDistance_device, neighboursId_device, idxChecking_device, dimensionOfEntity, partition.start, partition.end, indexes_device, distances_device, dimensionOfIndexesAndDistances, marker_device, minSize); }else if(typeOfDistance == DISTANCE_TAXICAB){ hipLaunchKernelGGL(( findInitialStateOfAproximatedTaxicabKNN), dim3(grid2), dim3(block2), 0, executionStreams, graphTraversalStartPoint_device, numberOfEntities, numberOfNeighbors, trees_device, treesLeafs_device, trees_size_device, treesLeafs_size_device, numberOfTrees, dataTable_device, dataTable_Pitch, neighboursDistance_device, neighboursId_device, idxChecking_device, dimensionOfEntity, partition.start, partition.end, indexes_device, distances_device, dimensionOfIndexesAndDistances, marker_device, minSize); }else if(typeOfDistance == DISTANCE_COSINE){ hipLaunchKernelGGL(( findInitialStateOfAproximatedCosineKNN), dim3(grid2), dim3(block2), 0, executionStreams, 
graphTraversalStartPoint_device, numberOfEntities, numberOfNeighbors, trees_device, treesLeafs_device, trees_size_device, treesLeafs_size_device, numberOfTrees, dataTable_device, dataTable_Pitch, neighboursDistance_device, neighboursId_device, idxChecking_device, dimensionOfEntity, partition.start, partition.end, indexes_device, distances_device, distances2_device, distances3_device, dimensionOfIndexesAndDistances, marker_device, minSize); }else{ std::cout<<"We do not have such type of distance\n"; } cuCall(hipStreamSynchronize(executionStreams)); hipError_t err2 = hipGetLastError(); if (hipSuccess != err2){ std::cout<<"findInitialStateOfAproximatedKNN: "<<hipGetErrorString(err2)<<"\n"; } } __global__ void stubInitializationKernel(int numberOfEntities, int numberOfNeighbors, float* neighboursDistance_device, int* neighboursId_device){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < (numberOfEntities*numberOfNeighbors)){ neighboursDistance_device[tid] = STUB_INIT_DIST; neighboursId_device[tid] = STUB_INIT_ID; } } void EuclideanDistanceMatrixGPU::stubInitialization(){ dim3 grid1(ceil(float(numberOfEntities*numberOfNeighbors)/256.0), 1); dim3 block1(256, 1); hipLaunchKernelGGL(( stubInitializationKernel), dim3(grid1), dim3(block1), 0, executionStreams, numberOfEntities, numberOfNeighbors, neighboursDistance_device, neighboursId_device); cuCall(hipStreamSynchronize(executionStreams)); hipError_t err2 = hipGetLastError(); if (hipSuccess != err2){ std::cout<<"stubInitializationKernel: "<<hipGetErrorString(err2)<<"\n"; } } __global__ void makePartitionOfLeaf0(int* treeNodeSizeDevice){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid == 0){ *treeNodeSizeDevice = 1; } } __global__ void makePartitionOfLeaf1(float* dataTable_device, size_t dataTable_pitch, int numberOfDimension, int numberOfEntities, int* elemsPerLeafInCurrentLevel, int* numberOfElemsPerLeafInCurrentLevel, int* biasOfElemsPerLeafInCurrentLevel, int* points1, int* points2, char* side_device, int minSize){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < numberOfEntities){ int checkedPoint = elemsPerLeafInCurrentLevel[tid]; int point1 = elemsPerLeafInCurrentLevel[biasOfElemsPerLeafInCurrentLevel[tid] + points1[tid]]; int point2 = elemsPerLeafInCurrentLevel[biasOfElemsPerLeafInCurrentLevel[tid] + points2[tid]]; int size = numberOfElemsPerLeafInCurrentLevel[tid]; if(size <= minSize){ return; } float sideSign = 0.0f; for(int dim = 0 ; dim < numberOfDimension ; ++dim){ float* pElementCheckedPoint = (float*)((char*)dataTable_device + dim * dataTable_pitch) + checkedPoint; float* pElementPoint1 = (float*)((char*)dataTable_device + dim * dataTable_pitch) + point1; float* pElementPoint2 = (float*)((char*)dataTable_device + dim * dataTable_pitch) + point2; sideSign += (*pElementCheckedPoint)*((*pElementPoint2)-(*pElementPoint1)); } if(sideSign < 0){ side_device[tid] = SIDE_LEFT; }else{ side_device[tid] = SIDE_RIGHT; } } } __global__ void makePartitionOfLeaf2(float* dataTable_device, size_t dataTable_pitch, int numberOfDimension, int numberOfEntities, int* elemsPerLeafInCurrentLevel, int* numberOfElemsPerLeafInCurrentLevel, int* biasOfElemsPerLeafInCurrentLevel, int* points1, int* points2, char* side_device, int minSize){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if((tid < numberOfEntities) && (tid == biasOfElemsPerLeafInCurrentLevel[tid])){ int point1 = biasOfElemsPerLeafInCurrentLevel[tid] + points1[tid]; int point2 = biasOfElemsPerLeafInCurrentLevel[tid] + points2[tid]; int size = 
numberOfElemsPerLeafInCurrentLevel[tid]; if(size <= minSize){ return; } side_device[point1] = SIDE_LEFT; side_device[point2] = SIDE_RIGHT; } } __global__ void makePartitionOfLeaf3(int numberOfEntities, int* elemsPerLeafInCurrentLevel, int* elemsPerLeafInCurrentLevel2, int* numberOfElemsPerLeafInCurrentLevel, int* numberOfElemsPerLeafInCurrentLevel2, int* biasOfElemsPerLeafInCurrentLevel, int* biasOfElemsPerLeafInCurrentLevel2, int* points1, int* points12, int* points2, int* points22, int* idOfLeafParent, int* idOfLeafParent2, char* side_device, char* side2_device, TreeNode* treeNodeDevice, int* treeNodeSizeDevice, int minSize){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < numberOfEntities){ int biasOfElement = biasOfElemsPerLeafInCurrentLevel[tid]; if(tid == biasOfElement){ int parent = idOfLeafParent[tid]; treeNodeDevice[parent].leftChild = 0; treeNodeDevice[parent].rightChild = 0; } } } __global__ void makePartitionOfLeaf4(int numberOfEntities, int* elemsPerLeafInCurrentLevel, int* elemsPerLeafInCurrentLevel2, int* numberOfElemsPerLeafInCurrentLevel, int* numberOfElemsPerLeafInCurrentLevel2, int* biasOfElemsPerLeafInCurrentLevel, int* biasOfElemsPerLeafInCurrentLevel2, int* points1, int* points12, int* points2, int* points22, int* idOfLeafParent, int* idOfLeafParent2, char* side_device, char* side2_device, TreeNode* treeNodeDevice, int* treeNodeSizeDevice, int minSize){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < numberOfEntities){ int element = elemsPerLeafInCurrentLevel[tid]; int biasOfElement = biasOfElemsPerLeafInCurrentLevel[tid]; int parent = idOfLeafParent[tid]; char side = side_device[tid]; unsigned int* numberOfElemsLeft = (unsigned int*)&treeNodeDevice[parent].leftChild; if(side == SIDE_LEFT){ int newPos = (int)atomicInc(numberOfElemsLeft, INT_MAX); elemsPerLeafInCurrentLevel2[biasOfElement + newPos] = element; side2_device[biasOfElement + newPos] = SIDE_LEFT; biasOfElemsPerLeafInCurrentLevel2[biasOfElement + newPos] = biasOfElement; idOfLeafParent2[biasOfElement + newPos] = parent; } } } __global__ void makePartitionOfLeaf5(int numberOfEntities, int* elemsPerLeafInCurrentLevel, int* elemsPerLeafInCurrentLevel2, int* numberOfElemsPerLeafInCurrentLevel, int* numberOfElemsPerLeafInCurrentLevel2, int* biasOfElemsPerLeafInCurrentLevel, int* biasOfElemsPerLeafInCurrentLevel2, int* points1, int* points12, int* points2, int* points22, int* idOfLeafParent, int* idOfLeafParent2, char* side_device, char* side2_device, TreeNode* treeNodeDevice, int* treeNodeSizeDevice, int minSize){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < numberOfEntities){ int element = elemsPerLeafInCurrentLevel[tid]; int biasOfElement = biasOfElemsPerLeafInCurrentLevel[tid]; int parent = idOfLeafParent[tid]; char side = side_device[tid]; int numberElemsLeft = treeNodeDevice[parent].leftChild; unsigned int* numberOfElemsRight = (unsigned int*)&treeNodeDevice[parent].rightChild; if(side == SIDE_RIGHT){ int newPos = (int)atomicInc(numberOfElemsRight, INT_MAX); elemsPerLeafInCurrentLevel2[biasOfElement + numberElemsLeft + newPos] = element; side2_device[biasOfElement + numberElemsLeft + newPos] = SIDE_RIGHT; biasOfElemsPerLeafInCurrentLevel2[biasOfElement + numberElemsLeft + newPos] = biasOfElement; idOfLeafParent2[biasOfElement + numberElemsLeft + newPos] = parent; } } } __global__ void makePartitionOfLeaf6(int numberOfEntities, int* elemsPerLeafInCurrentLevel, int* elemsPerLeafInCurrentLevel2, int* numberOfElemsPerLeafInCurrentLevel, int* numberOfElemsPerLeafInCurrentLevel2, 
int* biasOfElemsPerLeafInCurrentLevel, int* biasOfElemsPerLeafInCurrentLevel2, int* points1, int* points12, int* points2, int* points22, int* idOfLeafParent, int* idOfLeafParent2, char* side_device, char* side2_device, TreeNode* treeNodeDevice, int* treeNodeSizeDevice, int minSize){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < numberOfEntities){ int biasOfElement = biasOfElemsPerLeafInCurrentLevel2[tid]; int parent = idOfLeafParent2[tid]; char side = side2_device[tid]; int numberElemsLeft = treeNodeDevice[parent].leftChild; int numberElemsRight = treeNodeDevice[parent].rightChild; if(side == SIDE_LEFT){ numberOfElemsPerLeafInCurrentLevel2[tid] = numberElemsLeft; biasOfElemsPerLeafInCurrentLevel2[tid] = biasOfElement; } if(side == SIDE_RIGHT){ numberOfElemsPerLeafInCurrentLevel2[tid] = numberElemsRight; biasOfElemsPerLeafInCurrentLevel2[tid] = biasOfElement + numberElemsLeft; } } } __global__ void makePartitionOfLeaf7(int numberOfEntities, int* elemsPerLeafInCurrentLevel, int* elemsPerLeafInCurrentLevel2, int* numberOfElemsPerLeafInCurrentLevel, int* numberOfElemsPerLeafInCurrentLevel2, int* biasOfElemsPerLeafInCurrentLevel, int* biasOfElemsPerLeafInCurrentLevel2, int* points1, int* points12, int* points2, int* points22, int* idOfLeafParent, int* idOfLeafParent2, char* side_device, char* side2_device, TreeNode* treeNodeDevice, int* treeNodeSizeDevice, int minSize, int rand1, int rand2, int* thereWasDividing){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < numberOfEntities){ int biasOfElementLeft = biasOfElemsPerLeafInCurrentLevel[tid]; if(tid == biasOfElementLeft){ int parent = idOfLeafParent2[tid]; int numberElemsLeft = treeNodeDevice[parent].leftChild; int numberElemsRight = treeNodeDevice[parent].rightChild; if((numberElemsLeft > 0 ) && (numberElemsRight > 0)){ //bylo dzielenie atomicCAS(thereWasDividing, 0, 1); TreeNode treeNodeLeft = {parent, EMPTY_DIRECTION, EMPTY_DIRECTION, numberElemsLeft}; TreeNode treeNodeRight = {parent, EMPTY_DIRECTION, EMPTY_DIRECTION, numberElemsRight}; int idLeft = (int)atomicInc((unsigned int*)treeNodeSizeDevice, INT_MAX); int idRight = (int)atomicInc((unsigned int*)treeNodeSizeDevice, INT_MAX); treeNodeDevice[idLeft] = treeNodeLeft; treeNodeDevice[idRight] = treeNodeRight; treeNodeDevice[parent].leftChild = idLeft; treeNodeDevice[parent].rightChild = idRight; idOfLeafParent2[biasOfElementLeft] = idLeft; if(numberElemsLeft > 1){ int pointIdx1 = 0; int pointIdx2 = 0; int count = numberElemsLeft; pointIdx1 = int(((double)rand1/(RAND_MAX))*INT_MAX) % count; pointIdx2 = int(((double)rand2/(RAND_MAX))*INT_MAX) % count; if(pointIdx1 == pointIdx2){ pointIdx2 = (pointIdx1+1)%count; } points12[biasOfElementLeft] = pointIdx1; points22[biasOfElementLeft] = pointIdx2; }else{ points12[biasOfElementLeft] = 0; points22[biasOfElementLeft] = 0; } int biasOfElementRight = biasOfElementLeft + numberElemsLeft; idOfLeafParent2[biasOfElementRight] = idRight; if(numberElemsRight > 1){ int pointIdx1 = 0; int pointIdx2 = 0; int count = numberElemsRight; pointIdx1 = int(((double)rand1/(RAND_MAX))*INT_MAX) % count; pointIdx2 = int(((double)rand2/(RAND_MAX))*INT_MAX) % count; if(pointIdx1 == pointIdx2){ pointIdx2 = (pointIdx1+1)%count; } points12[biasOfElementRight] = pointIdx1; points22[biasOfElementRight] = pointIdx2; }else{ points12[biasOfElementRight] = 0; points22[biasOfElementRight] = 0; } }else{ //nie bylo dzielenia points12[tid] = 0; points22[tid] = 0; } } } } __global__ void makePartitionOfLeaf8(int numberOfEntities, int* elemsPerLeafInCurrentLevel, 
int* elemsPerLeafInCurrentLevel2, int* numberOfElemsPerLeafInCurrentLevel, int* numberOfElemsPerLeafInCurrentLevel2, int* biasOfElemsPerLeafInCurrentLevel, int* biasOfElemsPerLeafInCurrentLevel2, int* points1, int* points12, int* points2, int* points22, int* idOfLeafParent, int* idOfLeafParent2, char* side_device, char* side2_device, TreeNode* treeNodeDevice, int* treeNodeSizeDevice, int minSize){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < numberOfEntities){ int biasOfElement = biasOfElemsPerLeafInCurrentLevel2[tid]; int parent = idOfLeafParent2[biasOfElement]; int p1 = points12[biasOfElement]; int p2 = points22[biasOfElement]; idOfLeafParent2[tid] = parent; points12[tid] = p1; points22[tid] = p2; } } __global__ void makePartitionOfLeaf9(int numberOfEntities, int* elemsPerLeafInCurrentLevel, int* biasOfElemsPerLeafInCurrentLevel, int* idOfLeafParent_device, TreeNodeLeaf* treeNodesLeafs, TreeNode* treeNodeDevice){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < numberOfEntities){ TreeNodeLeaf treeNodeLeaf; treeNodeLeaf.parent = idOfLeafParent_device[tid]; treeNodeLeaf.entityNumber = elemsPerLeafInCurrentLevel[tid]; treeNodesLeafs[tid] = treeNodeLeaf; if(tid == biasOfElemsPerLeafInCurrentLevel[tid]){ treeNodeDevice[treeNodeLeaf.parent].leftChild = tid; treeNodeDevice[treeNodeLeaf.parent].rightChild = EMPTY_DIRECTION; } } } void EuclideanDistanceMatrixGPU::buildUpTheTrees(){ trees_host.clear(); treesLeafs_host.clear(); TreeNode* treeNodeDevice; TreeNodeLeaf* treeNodesLeafsDevice; int* treeNodeSizeDevice; int* thereWasDividing; cuCall(hipMalloc((void**)&treeNodeDevice, 2*numberOfEntities*sizeof(TreeNode))); cuCall(hipMalloc((void**)&treeNodesLeafsDevice, numberOfEntities*sizeof(TreeNodeLeaf))); cuCall(hipMalloc((void**)&treeNodeSizeDevice, sizeof(int))); cuCall(hipMalloc((void**)&thereWasDividing, sizeof(int))); int* elemsPerLeafInCurrentLevel_host; int* elemsPerLeafInCurrentLevel_device; int* elemsPerLeafInCurrentLevel2_device; int* numberOfElemsPerLeafInCurrentLevel_host; int* numberOfElemsPerLeafInCurrentLevel_device; int* numberOfElemsPerLeafInCurrentLevel2_device; int* biasOfElemsPerLeafInCurrentLevel_host; int* biasOfElemsPerLeafInCurrentLevel_device; int* biasOfElemsPerLeafInCurrentLevel2_device; int* points1_host; int* points1_device; int* points12_device; int* points2_host; int* points2_device; int* points22_device; int* idOfLeafParent_host; int* idOfLeafParent_device; int* idOfLeafParent2_device; char* side_host; char* side_device; char* side2_device; cuCall(hipHostMalloc((void**)&elemsPerLeafInCurrentLevel_host, numberOfEntities*sizeof(int), hipHostMallocPortable)); cuCall(hipMalloc((void**)&elemsPerLeafInCurrentLevel_device, numberOfEntities*sizeof(int))); cuCall(hipMalloc((void**)&elemsPerLeafInCurrentLevel2_device, numberOfEntities*sizeof(int))); cuCall(hipHostMalloc((void**)&numberOfElemsPerLeafInCurrentLevel_host, numberOfEntities*sizeof(int), hipHostMallocPortable)); cuCall(hipMalloc((void**)&numberOfElemsPerLeafInCurrentLevel_device, numberOfEntities*sizeof(int))); cuCall(hipMalloc((void**)&numberOfElemsPerLeafInCurrentLevel2_device, numberOfEntities*sizeof(int))); cuCall(hipHostMalloc((void**)&biasOfElemsPerLeafInCurrentLevel_host, numberOfEntities*sizeof(int), hipHostMallocPortable)); cuCall(hipMalloc((void**)&biasOfElemsPerLeafInCurrentLevel_device, numberOfEntities*sizeof(int))); cuCall(hipMalloc((void**)&biasOfElemsPerLeafInCurrentLevel2_device, numberOfEntities*sizeof(int))); cuCall(hipHostMalloc((void**)&points1_host, 
numberOfEntities*sizeof(int), hipHostMallocPortable)); cuCall(hipMalloc((void**)&points1_device, numberOfEntities*sizeof(int))); cuCall(hipMalloc((void**)&points12_device, numberOfEntities*sizeof(int))); cuCall(hipHostMalloc((void**)&points2_host, numberOfEntities*sizeof(int), hipHostMallocPortable)); cuCall(hipMalloc((void**)&points2_device, numberOfEntities*sizeof(int))); cuCall(hipMalloc((void**)&points22_device, numberOfEntities*sizeof(int))); cuCall(hipHostMalloc((void**)&idOfLeafParent_host, numberOfEntities*sizeof(int), hipHostMallocPortable)); cuCall(hipMalloc((void**)&idOfLeafParent_device, numberOfEntities*sizeof(int))); cuCall(hipMalloc((void**)&idOfLeafParent2_device, numberOfEntities*sizeof(int))); cuCall(hipHostMalloc((void**)&side_host, numberOfEntities*sizeof(char), hipHostMallocPortable)); cuCall(hipMalloc((void**)&side_device, numberOfEntities*sizeof(char))); cuCall(hipMalloc((void**)&side2_device, numberOfEntities*sizeof(char))); for(int i=0 ; i<numberOfTrees ; ++i){ std::cout<<"The tree with number: "<<i+1<<" is building\n"; //Inicjalizacja std::vector<TreeNode> treeNodes; trees_host[i] = treeNodes; std::vector<TreeNodeLeaf> treeNodesLeafs; treesLeafs_host[i] = treeNodesLeafs; TreeNode treeNode = {EMPTY_DIRECTION, EMPTY_DIRECTION, EMPTY_DIRECTION, numberOfEntities}; trees_host[i].push_back(treeNode); cuCall(hipMemcpyAsync((void*)treeNodeDevice, (void*)trees_host[i].data(), sizeof(TreeNode), hipMemcpyHostToDevice, executionStreams)); hipLaunchKernelGGL(( makePartitionOfLeaf0), dim3(1), dim3(1), 0, executionStreams, treeNodeSizeDevice); //Inicjalizacja tablic int pointIdx1 = 0; int pointIdx2 = 0; int count = numberOfEntities; pointIdx1 = int(((double)rand()/(RAND_MAX))*INT_MAX) % count; pointIdx2 = int(((double)rand()/(RAND_MAX))*INT_MAX) % count; if(pointIdx1 == pointIdx2){ pointIdx2 = (pointIdx1+1)%count; } for(int k=0 ; k<numberOfEntities ; ++k){ elemsPerLeafInCurrentLevel_host[k] = k; numberOfElemsPerLeafInCurrentLevel_host[k] = numberOfEntities; biasOfElemsPerLeafInCurrentLevel_host[k] = 0; points1_host[k] = pointIdx1; points2_host[k] = pointIdx2; idOfLeafParent_host[k] = 0; side_host[k] = SIDE_LEFT; } //Przeslanie na GPU odpowiednich tablic cuCall(hipMemcpyAsync((void*)elemsPerLeafInCurrentLevel_device, (void*)elemsPerLeafInCurrentLevel_host, numberOfEntities*sizeof(int), hipMemcpyHostToDevice, executionStreams)); cuCall(hipMemcpyAsync((void*)numberOfElemsPerLeafInCurrentLevel_device, (void*)numberOfElemsPerLeafInCurrentLevel_host, numberOfEntities*sizeof(int), hipMemcpyHostToDevice, executionStreams)); cuCall(hipMemcpyAsync((void*)biasOfElemsPerLeafInCurrentLevel_device, (void*)biasOfElemsPerLeafInCurrentLevel_host, numberOfEntities*sizeof(int), hipMemcpyHostToDevice, executionStreams)); cuCall(hipMemcpyAsync((void*)points1_device, (void*)points1_host, numberOfEntities*sizeof(int), hipMemcpyHostToDevice, executionStreams)); cuCall(hipMemcpyAsync((void*)points2_device, (void*)points2_host, numberOfEntities*sizeof(int), hipMemcpyHostToDevice, executionStreams)); cuCall(hipMemcpyAsync((void*)idOfLeafParent_device, (void*)idOfLeafParent_host, numberOfEntities*sizeof(int), hipMemcpyHostToDevice, executionStreams)); cuCall(hipMemcpyAsync((void*)side_device, (void*)side_host, numberOfEntities*sizeof(char), hipMemcpyHostToDevice, executionStreams)); //Dzielenie galezi bool treeeIsGoingToBeEdit = true; while(treeeIsGoingToBeEdit == true){ treeeIsGoingToBeEdit = false; //Przeliczenie dim3 grid(ceil(float(numberOfEntities)/256.0), 1); dim3 block(256, 1); hipLaunchKernelGGL(( 
makePartitionOfLeaf1), dim3(grid), dim3(block), 0, executionStreams, dataTable_device, dataTable_Pitch, dimensionOfEntity, numberOfEntities, elemsPerLeafInCurrentLevel_device, numberOfElemsPerLeafInCurrentLevel_device, biasOfElemsPerLeafInCurrentLevel_device, points1_device, points2_device, side_device, minSize); hipLaunchKernelGGL(( makePartitionOfLeaf2), dim3(grid), dim3(block), 0, executionStreams, dataTable_device, dataTable_Pitch, dimensionOfEntity, numberOfEntities, elemsPerLeafInCurrentLevel_device, numberOfElemsPerLeafInCurrentLevel_device, biasOfElemsPerLeafInCurrentLevel_device, points1_device, points2_device, side_device, minSize); //czasowo wszystko czyscimy cuCall(hipMemsetAsync((void*)elemsPerLeafInCurrentLevel2_device, 0, numberOfEntities*sizeof(int), executionStreams)); cuCall(hipMemsetAsync((void*)numberOfElemsPerLeafInCurrentLevel2_device, 0, numberOfEntities*sizeof(int), executionStreams)); cuCall(hipMemsetAsync((void*)biasOfElemsPerLeafInCurrentLevel2_device, 0, numberOfEntities*sizeof(int), executionStreams)); cuCall(hipMemsetAsync((void*)points12_device, 0, numberOfEntities*sizeof(int), executionStreams)); cuCall(hipMemsetAsync((void*)points22_device, 0, numberOfEntities*sizeof(int), executionStreams)); cuCall(hipMemsetAsync((void*)idOfLeafParent2_device, 0, numberOfEntities*sizeof(int), executionStreams)); cuCall(hipMemsetAsync((void*)side2_device, 0, numberOfEntities*sizeof(char), executionStreams)); cuCall(hipMemsetAsync((void*)thereWasDividing, 0, sizeof(int), executionStreams)); hipLaunchKernelGGL(( makePartitionOfLeaf3), dim3(grid), dim3(block), 0, executionStreams, numberOfEntities, elemsPerLeafInCurrentLevel_device, elemsPerLeafInCurrentLevel2_device, numberOfElemsPerLeafInCurrentLevel_device, numberOfElemsPerLeafInCurrentLevel2_device, biasOfElemsPerLeafInCurrentLevel_device, biasOfElemsPerLeafInCurrentLevel2_device, points1_device, points12_device, points2_device, points22_device, idOfLeafParent_device, idOfLeafParent2_device, side_device, side2_device, treeNodeDevice, treeNodeSizeDevice, minSize); hipLaunchKernelGGL(( makePartitionOfLeaf4), dim3(grid), dim3(block), 0, executionStreams, numberOfEntities, elemsPerLeafInCurrentLevel_device, elemsPerLeafInCurrentLevel2_device, numberOfElemsPerLeafInCurrentLevel_device, numberOfElemsPerLeafInCurrentLevel2_device, biasOfElemsPerLeafInCurrentLevel_device, biasOfElemsPerLeafInCurrentLevel2_device, points1_device, points12_device, points2_device, points22_device, idOfLeafParent_device, idOfLeafParent2_device, side_device, side2_device, treeNodeDevice, treeNodeSizeDevice, minSize); hipLaunchKernelGGL(( makePartitionOfLeaf5), dim3(grid), dim3(block), 0, executionStreams, numberOfEntities, elemsPerLeafInCurrentLevel_device, elemsPerLeafInCurrentLevel2_device, numberOfElemsPerLeafInCurrentLevel_device, numberOfElemsPerLeafInCurrentLevel2_device, biasOfElemsPerLeafInCurrentLevel_device, biasOfElemsPerLeafInCurrentLevel2_device, points1_device, points12_device, points2_device, points22_device, idOfLeafParent_device, idOfLeafParent2_device, side_device, side2_device, treeNodeDevice, treeNodeSizeDevice, minSize); hipLaunchKernelGGL(( makePartitionOfLeaf6), dim3(grid), dim3(block), 0, executionStreams, numberOfEntities, elemsPerLeafInCurrentLevel_device, elemsPerLeafInCurrentLevel2_device, numberOfElemsPerLeafInCurrentLevel_device, numberOfElemsPerLeafInCurrentLevel2_device, biasOfElemsPerLeafInCurrentLevel_device, biasOfElemsPerLeafInCurrentLevel2_device, points1_device, points12_device, points2_device, points22_device, 
idOfLeafParent_device, idOfLeafParent2_device, side_device, side2_device, treeNodeDevice, treeNodeSizeDevice, minSize); hipLaunchKernelGGL(( makePartitionOfLeaf7), dim3(grid), dim3(block), 0, executionStreams, numberOfEntities, elemsPerLeafInCurrentLevel_device, elemsPerLeafInCurrentLevel2_device, numberOfElemsPerLeafInCurrentLevel_device, numberOfElemsPerLeafInCurrentLevel2_device, biasOfElemsPerLeafInCurrentLevel_device, biasOfElemsPerLeafInCurrentLevel2_device, points1_device, points12_device, points2_device, points22_device, idOfLeafParent_device, idOfLeafParent2_device, side_device, side2_device, treeNodeDevice, treeNodeSizeDevice, minSize, rand(), rand(), thereWasDividing); hipLaunchKernelGGL(( makePartitionOfLeaf8), dim3(grid), dim3(block), 0, executionStreams, numberOfEntities, elemsPerLeafInCurrentLevel_device, elemsPerLeafInCurrentLevel2_device, numberOfElemsPerLeafInCurrentLevel_device, numberOfElemsPerLeafInCurrentLevel2_device, biasOfElemsPerLeafInCurrentLevel_device, biasOfElemsPerLeafInCurrentLevel2_device, points1_device, points12_device, points2_device, points22_device, idOfLeafParent_device, idOfLeafParent2_device, side_device, side2_device, treeNodeDevice, treeNodeSizeDevice, minSize); int thereWasDividingHost; cuCall(hipMemcpyAsync((void*)&thereWasDividingHost, (void*)thereWasDividing, sizeof(int), hipMemcpyDeviceToHost, executionStreams)); cuCall(hipStreamSynchronize(executionStreams)); int* tmp1 = elemsPerLeafInCurrentLevel_device; elemsPerLeafInCurrentLevel_device = elemsPerLeafInCurrentLevel2_device; elemsPerLeafInCurrentLevel2_device = tmp1; int* tmp2 = numberOfElemsPerLeafInCurrentLevel_device; numberOfElemsPerLeafInCurrentLevel_device = numberOfElemsPerLeafInCurrentLevel2_device; numberOfElemsPerLeafInCurrentLevel2_device = tmp2; int* tmp3 = biasOfElemsPerLeafInCurrentLevel_device; biasOfElemsPerLeafInCurrentLevel_device = biasOfElemsPerLeafInCurrentLevel2_device; biasOfElemsPerLeafInCurrentLevel2_device = tmp3; int* tmp4 = points1_device; points1_device = points12_device; points12_device = tmp4; int* tmp5 = points2_device; points2_device = points22_device; points22_device = tmp5; int* tmp6 = idOfLeafParent_device; idOfLeafParent_device = idOfLeafParent2_device; idOfLeafParent2_device = tmp6; char* tmp7 = side_device; side_device = side2_device; side2_device = tmp7; if(thereWasDividingHost != 0){ treeeIsGoingToBeEdit = true; } } //Utworzenie koncowych lisci z wlasciwymi elementami dim3 grid(ceil(float(numberOfEntities)/256.0), 1); dim3 block(256, 1); hipLaunchKernelGGL(( makePartitionOfLeaf9), dim3(grid), dim3(block), 0, executionStreams, numberOfEntities, elemsPerLeafInCurrentLevel_device, biasOfElemsPerLeafInCurrentLevel_device, idOfLeafParent_device, treeNodesLeafsDevice, treeNodeDevice); int treeNodeSizeHost; cuCall(hipMemcpyAsync((void*)&treeNodeSizeHost, (void*)treeNodeSizeDevice, sizeof(int), hipMemcpyDeviceToHost, executionStreams)); cuCall(hipStreamSynchronize(executionStreams)); trees_host[i].resize(treeNodeSizeHost); treesLeafs_host[i].resize(numberOfEntities); cuCall(hipMemcpyAsync((void*)trees_host[i].data(), (void*)treeNodeDevice, treeNodeSizeHost*sizeof(TreeNode), hipMemcpyDeviceToHost, executionStreams)); cuCall(hipMemcpyAsync((void*)treesLeafs_host[i].data(), (void*)treeNodesLeafsDevice, numberOfEntities*sizeof(TreeNodeLeaf), hipMemcpyDeviceToHost, executionStreams)); cuCall(hipStreamSynchronize(executionStreams)); std::cout<<"The tree with number: "<<i+1<<" has been built\n"; } cuCall(hipFree((void*)treeNodeDevice)); 
cuCall(hipFree((void*)treeNodesLeafsDevice)); cuCall(hipFree((void*)treeNodeSizeDevice)); cuCall(hipFree((void*)thereWasDividing)); cuCall(hipHostFree((void*)elemsPerLeafInCurrentLevel_host)); cuCall(hipFree((void*)elemsPerLeafInCurrentLevel_device)); cuCall(hipFree((void*)elemsPerLeafInCurrentLevel2_device)); cuCall(hipHostFree((void*)numberOfElemsPerLeafInCurrentLevel_host)); cuCall(hipFree((void*)numberOfElemsPerLeafInCurrentLevel_device)); cuCall(hipFree((void*)numberOfElemsPerLeafInCurrentLevel2_device)); cuCall(hipHostFree((void*)biasOfElemsPerLeafInCurrentLevel_host)); cuCall(hipFree((void*)biasOfElemsPerLeafInCurrentLevel_device)); cuCall(hipFree((void*)biasOfElemsPerLeafInCurrentLevel2_device)); cuCall(hipHostFree((void*)points1_host)); cuCall(hipFree((void*)points1_device)); cuCall(hipFree((void*)points12_device)); cuCall(hipHostFree((void*)points2_host)); cuCall(hipFree((void*)points2_device)); cuCall(hipFree((void*)points22_device)); cuCall(hipHostFree((void*)idOfLeafParent_host)); cuCall(hipFree((void*)idOfLeafParent_device)); cuCall(hipFree((void*)idOfLeafParent2_device)); cuCall(hipHostFree((void*)side_host)); cuCall(hipFree((void*)side_device)); cuCall(hipFree((void*)side2_device)); } EuclideanDistanceMatrixGPU::EuclideanDistanceMatrixGPU(){ typeOfDistance = DISTANCE_EUCLIDEAN; this->numberOfBlocksPerMultiprocessors = 10; this->numberOfMultiprocessors = 1; this->debugMode = false; this->minSize = 1; } EuclideanDistanceMatrixGPU::EuclideanDistanceMatrixGPU(bool debugMode){ typeOfDistance = DISTANCE_EUCLIDEAN; this->numberOfBlocksPerMultiprocessors = 10; this->numberOfMultiprocessors = 1; this->debugMode = debugMode; this->minSize = 1; } EuclideanDistanceMatrixGPU::~EuclideanDistanceMatrixGPU(){ } void EuclideanDistanceMatrixGPU::setDataFile(std::string nameOfFile){ this->inputFile = nameOfFile; } bool EuclideanDistanceMatrixGPU::loadData(){ std::ifstream myfile; myfile.open(this->inputFile.c_str()); if (myfile.is_open()){ std::cout<<"The datafile has been opened\n"; }else{ std::cout<<"Error opening the file\n"; return true; } std::string line; std::getline(myfile, line); std::getline(myfile, line); int idOfEntity = 0; char* lineChar; while ((std::getline(myfile, line))&&(idOfEntity<numberOfEntities)){ std::vector<std::string> cuttedString; lineChar = new char[line.length() + 1]; std::strcpy(lineChar, line.c_str()); std::string str; char* pch = strtok(lineChar,","); while (pch != NULL){ str = std::string(pch); str = trim(str); cuttedString.push_back(str); pch = strtok (NULL, ","); } delete [] lineChar; if(klaster){ for(int i=0 ; i<cuttedString.size()-1 ; ++i){ this->dataTable_host[idOfEntity+numberOfEntities*i] = atof(cuttedString[i].c_str()); } this->dataTableId_host[idOfEntity] = atoi(cuttedString[cuttedString.size()-1].c_str()); }else{ for(int i=0 ; i<cuttedString.size() ; ++i){ this->dataTable_host[idOfEntity+numberOfEntities*i] = atof(cuttedString[i].c_str()); } } idOfEntity++; } return false; } bool EuclideanDistanceMatrixGPU::initialize(int numberOfEntities, int dimensionOfEntity, int numberOfNeighbors, int device, int typeOfDistance, bool klaster, int numberOfTrees, int numberOfPropagations, int minSize){ this->typeOfDistance = typeOfDistance; this->klaster = klaster; this->numberOfEntities = numberOfEntities; this->numberOfNeighbors = numberOfNeighbors; this->dimensionOfEntity = dimensionOfEntity; this->numberOfTrees = numberOfTrees; this->numberOfPropagations = numberOfPropagations; this->dimensionOfIndexesAndDistances = 
min(numberOfNeighbors*numberOfNeighbors+numberOfNeighbors, numberOfEntities); this->minSize = minSize; this->device = device; bool error = false; error |= cuCall(hipSetDevice(device)); error |= cuCall(hipDeviceReset()); hipDeviceProp_t devProp; error |= cuCall(hipGetDeviceProperties(&devProp, device)); this->numberOfMultiprocessors = devProp.multiProcessorCount; error |= cuCall(hipHostMalloc((void**)&dataTable_host, numberOfEntities*dimensionOfEntity*sizeof(float), hipHostMallocPortable)); error |= cuCall(hipHostMalloc((void**)&dataTableId_host, numberOfEntities*sizeof(int), hipHostMallocPortable)); error |= cuCall(hipMallocPitch((void**)&dataTable_device, &dataTable_Pitch, numberOfEntities*sizeof(float), dimensionOfEntity)); error |= cuCall(hipHostMalloc((void**)&neighboursDistance_host, numberOfNeighbors*numberOfEntities*sizeof(float))); error |= cuCall(hipMalloc((void**)&neighboursDistance_device, numberOfNeighbors*numberOfEntities*sizeof(float))); error |= cuCall(hipMalloc((void**)&neighboursDistance2_device, numberOfNeighbors*numberOfEntities*sizeof(float))); error |= cuCall(hipHostMalloc((void**)&neighboursId_host, numberOfNeighbors*numberOfEntities*sizeof(int))); error |= cuCall(hipMalloc((void**)&neighboursId_device, numberOfNeighbors*numberOfEntities*sizeof(int))); error |= cuCall(hipMalloc((void**)&neighboursId2_device, numberOfNeighbors*numberOfEntities*sizeof(int))); error |= cuCall(hipStreamCreate(&executionStreams)); error |= cuCall(hipEventCreate(&startEvents)); error |= cuCall(hipEventCreate(&stopEvents)); error |= cuCall(hipMalloc((void**)&idxChecking_device, numberOfMultiprocessors*numberOfBlocksPerMultiprocessors*numberOfEntities*sizeof(char))); error |= cuCall(hipMalloc((void**)&indexes_device, numberOfBlocksPerMultiprocessors*numberOfMultiprocessors*dimensionOfIndexesAndDistances*sizeof(int))); error |= cuCall(hipMalloc((void**)&distances_device, numberOfBlocksPerMultiprocessors*numberOfMultiprocessors*dimensionOfIndexesAndDistances*sizeof(float))); error |= cuCall(hipMalloc((void**)&distances2_device, numberOfBlocksPerMultiprocessors*numberOfMultiprocessors*dimensionOfIndexesAndDistances*sizeof(float))); error |= cuCall(hipMalloc((void**)&distances3_device, numberOfBlocksPerMultiprocessors*numberOfMultiprocessors*dimensionOfIndexesAndDistances*sizeof(float))); error |= cuCall(hipMalloc((void**)&marker_device, numberOfBlocksPerMultiprocessors*numberOfMultiprocessors*dimensionOfIndexesAndDistances*sizeof(short))); error |= loadData(); //send data to GPU error |= cuCall(hipMemcpy2D((void*)dataTable_device, dataTable_Pitch, (void*)dataTable_host, numberOfEntities*sizeof(float), numberOfEntities*sizeof(float), dimensionOfEntity, hipMemcpyHostToDevice)); error |= cuCall(hipMalloc((void**)&graphTraversalStartPoint_device, numberOfTrees*numberOfEntities*sizeof(int))); Partition p = {0, numberOfEntities}; partition = p; return error; } bool EuclideanDistanceMatrixGPU::deinitialize(){ bool error = false; error |= cuCall(hipSetDevice(device)); error |= cuCall(hipDeviceSynchronize()); error |= cuCall(hipHostFree((void*)dataTable_host)); error |= cuCall(hipHostFree((void*)dataTableId_host)); error |= cuCall(hipFree((void*)dataTable_device)); error |= cuCall(hipHostFree((void*)neighboursDistance_host)); error |= cuCall(hipFree((void*)neighboursDistance_device)); error |= cuCall(hipFree((void*)neighboursDistance2_device)); error |= cuCall(hipHostFree((void*)neighboursId_host)); error |= cuCall(hipFree((void*)neighboursId_device)); error |= 
cuCall(hipFree((void*)neighboursId2_device)); error |= cuCall(hipStreamDestroy(executionStreams)); error |= cuCall(hipEventDestroy(startEvents)); error |= cuCall(hipEventDestroy(stopEvents)); error |= cuCall(hipFree((void*)idxChecking_device)); error |= cuCall(hipFree((void*)indexes_device)); error |= cuCall(hipFree((void*)distances_device)); error |= cuCall(hipFree((void*)distances2_device)); error |= cuCall(hipFree((void*)distances3_device)); error |= cuCall(hipFree((void*)marker_device)); error |= cuCall(hipFree((void*)graphTraversalStartPoint_device)); error |= cuCall(hipDeviceReset()); return error; } bool EuclideanDistanceMatrixGPU::calculate(){ bool error = false; std::cout<<"The device "<<device<<" is calculating the neighbours for: "<<partition.start<<" - "<<partition.end-1<<"\n"; error |= cuCall(hipSetDevice(device)); error |= cuCall(hipEventRecord(startEvents, executionStreams)); dim3 grid1(ceil(float(dimensionOfEntity)/256.0), 1); dim3 block1(256, 1); hipLaunchKernelGGL(( normalizeDataStep1), dim3(grid1), dim3(block1), 0, executionStreams, dataTable_device, dataTable_Pitch, numberOfEntities, dimensionOfEntity); dim3 grid2(1, 1); dim3 block2(256, 1); hipLaunchKernelGGL(( normalizeDataStep2), dim3(grid2), dim3(block2), 0, executionStreams, dataTable_device, dataTable_Pitch, numberOfEntities, dimensionOfEntity); buildUpTheTrees(); initilizeGPUStructuresForTrees(); stubInitialization(); findInitialKNN(); propagate(); deinitializeGPUStructuresForTrees(); error |= cuCall(hipEventRecord(stopEvents, executionStreams)); error |= cuCall(hipEventSynchronize(stopEvents)); float milliseconds = 0; hipEventElapsedTime(&milliseconds, startEvents, stopEvents); std::cout<<"The device "<<device<<": has done task in: "<<milliseconds<<" ms\n"; return error; } void EuclideanDistanceMatrixGPU::setResultsFile(std::string nameOfFile){ this->outputFile = nameOfFile; } template <typename T> std::string tostr(const T& t) { std::ostringstream os; os<<t; return os.str(); } bool EuclideanDistanceMatrixGPU::saveResultToResultFile(){ bool error = false; error |= cuCall(hipSetDevice(device)); error |= cuCall(hipMemcpyAsync((void*)neighboursDistance_host, (void*)neighboursDistance_device, numberOfNeighbors*numberOfEntities*sizeof(float), hipMemcpyDeviceToHost, executionStreams)); error |= cuCall(hipMemcpyAsync((void*)neighboursId_host, (void*)neighboursId_device, numberOfNeighbors*numberOfEntities*sizeof(int), hipMemcpyDeviceToHost, executionStreams)); error |= cuCall(hipStreamSynchronize(executionStreams)); //Zapisanie rezultatu do pliku std::ofstream ofs; ofs.open(outputFile.c_str(), std::ofstream::trunc | std::ofstream::binary); std::ofstream ofsDebug; if(debugMode){ ofsDebug.open((outputFile+"DEBUG").c_str(), std::ofstream::trunc | std::ofstream::binary); } bool validationSuccess = true; std::ofstream ofsValidation; ofsValidation.open((outputFile+"VALIDATION").c_str(), std::ofstream::trunc | std::ofstream::binary); if(ofs.is_open()){ ofs<<numberOfEntities<<";"<< numberOfNeighbors<<";"<<sizeof(long)<<"\n"; long l = 0x01020304; ofs.write((char*)&l, sizeof(long)); //zapisywanie punktow for(int lp=partition.start ; lp<partition.end ; ++lp){ std::vector<DataPoint> liczbyNear; for(int c=0 ; c<numberOfNeighbors ; ++c){ DataPoint dp = {neighboursId_host[lp*numberOfNeighbors+c], neighboursDistance_host[lp*numberOfNeighbors+c]}; liczbyNear.push_back(dp); } if(typeOfDistance == DISTANCE_COSINE){ std::sort(liczbyNear.begin(), liczbyNear.end(), compareByLengthMinCosine); }else{ std::sort(liczbyNear.begin(), 
liczbyNear.end(), compareByLengthMin); } for(std::vector<DataPoint>::iterator it = liczbyNear.begin() ; it != liczbyNear.end() ; ++it){ DataPoint f = *it; ofs.write((char*)&f.id, sizeof(long)); if((debugMode)&&(ofsDebug.is_open())){ ofsDebug<<"NEAR: <"<<lp<<", "<<f.id<<">("<<f.distance<<") "; } } /* for(std::vector<DataPoint>::iterator it = liczbyNear.begin() ; it != liczbyNear.end() ; ++it){ long tmp = -1; ofs.write((char*)&tmp, sizeof(long)); if((debugMode)&&(ofsDebug.is_open())){ ofsDebug<<"FAR: <"<<lp<<", "<<tmp<<">("<<FLT_MAX<<") "; } } */ if((debugMode)&&(ofsDebug.is_open())){ ofsDebug<<";\n"; } } ofs.close(); if((debugMode)&&(ofsDebug.is_open())){ ofsDebug.close(); } if(ofsValidation.is_open()){ if(validationSuccess){ ofsValidation<<"Everything is OK."; } ofsValidation.close(); } }else{ std::cout <<"Can not open the file for saving result.\n"; error |= true; } return error; }
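// ---------------------------------------------------------------------------
// Usage sketch (illustration only, not part of the original source): shows one
// way the public interface above could be driven end to end. The guard macro
// EDM_GPU_USAGE_EXAMPLE, the file names "data.csv"/"result.knn" and the numeric
// parameters are assumptions chosen for the example; everything else (method
// names, argument order, return conventions) follows the code in this file.
// setDataFile() must be called before initialize(), because initialize() invokes
// loadData() internally; a return value of true is treated as an error.
// ---------------------------------------------------------------------------
#ifdef EDM_GPU_USAGE_EXAMPLE
int main(){
    EuclideanDistanceMatrixGPU knn(true);            // debug mode: also writes the *DEBUG dump on save
    knn.setDataFile("data.csv");                     // CSV parsed by loadData() during initialize()
    knn.setResultsFile("result.knn");                // binary kNN graph written by saveResultToResultFile()
    // initialize(numberOfEntities, dimensionOfEntity, numberOfNeighbors, device,
    //            typeOfDistance, klaster, numberOfTrees, numberOfPropagations, minSize)
    bool error = knn.initialize(10000, 128, 10, 0, DISTANCE_EUCLIDEAN, false, 4, 3, 32);
    if(!error){
        error |= knn.calculate();                    // normalization, RP-trees, initial kNN, propagation
        error |= knn.saveResultToResultFile();
    }
    error |= knn.deinitialize();
    return error ? 1 : 0;
}
#endif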
31d5d9492a2b93e23fbae626ac33828149990e42.cu
#include "euclideandistancematrixgpu.h" #include <cmath> #include <float.h> #include <set> #include <algorithm> #define EMPTY_DIRECTION -1 #define SIDE_LEFT 0 #define SIDE_END 1 #define SIDE_RIGHT 2 #define LEFT_SON 0 #define RIGHT_SON 1 #define STUB_INIT_DIST 100000.0 #define STUB_INIT_ID -1 __global__ void normalizeDataStep1(float* dataTable_device, size_t dataTable_pitch, int numberOfEntities, int numberOfDimension){ __shared__ float mean[256]; int tid = threadIdx.x + blockIdx.x * blockDim.x; mean[threadIdx.x] = 0.0f; __syncthreads(); if(tid < numberOfDimension){ int number = 0; while(number < numberOfEntities){ float* pElement = (float*)((char*)dataTable_device + tid * dataTable_pitch) + number; mean[threadIdx.x] += *pElement; ++number; } mean[threadIdx.x] /= numberOfEntities; number = 0; while(number < numberOfEntities){ float* pElement = (float*)((char*)dataTable_device + tid * dataTable_pitch) + number; *pElement -= mean[threadIdx.x]; ++number; } } } __global__ void normalizeDataStep2(float* dataTable_device, size_t dataTable_pitch, int numberOfEntities, int numberOfDimension){ __shared__ float mX[256]; __shared__ float mXGlobal; int tid = threadIdx.x; int bias = blockDim.x; mX[tid] = 0.0f; if(tid == 0){ mXGlobal = 0.0; } __syncthreads(); if(tid < numberOfDimension){ int number = 0; while(number < numberOfEntities){ float* pElement = (float*)((char*)dataTable_device + tid * dataTable_pitch) + number; mX[threadIdx.x] = max(abs(*pElement), mX[threadIdx.x]); ++number; } tid += bias; } __syncthreads(); if(threadIdx.x == 0){ for(int i=0 ; i<256 ; ++i){ mXGlobal = max(mXGlobal, mX[i]); } } tid = threadIdx.x; __syncthreads(); if(tid < numberOfDimension){ int number = 0; while(number < numberOfEntities){ float* pElement = (float*)((char*)dataTable_device + tid * dataTable_pitch) + number; *pElement /= mXGlobal; ++number; } tid += bias; } } __global__ void findGraphTraversalStartPoint(int* graphTraversalStartPoint_device, int numberOfEntities, int numberOfNeighbors, TreeNode** trees_device, TreeNodeLeaf** treesLeafs_device, int* trees_size_device, int* treesLeafs_size_device, int numberOfTrees){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < numberOfEntities){ for(int tree=0 ; tree<numberOfTrees ; ++tree){ TreeNodeLeaf treeNodeLeaf = treesLeafs_device[tree][tid]; int parent = treeNodeLeaf.parent; int entityNumber = treeNodeLeaf.entityNumber; int numberOfElementsInNode = trees_device[tree][parent].numberOfEntities; while(numberOfElementsInNode < (numberOfNeighbors+1)){ parent = trees_device[tree][parent].parent; numberOfElementsInNode = trees_device[tree][parent].numberOfEntities; } graphTraversalStartPoint_device[tree*numberOfEntities+entityNumber] = parent; } } } __global__ void findInitialStateOfAproximatedEuclideanKNN(int* graphTraversalStartPoint_device, int numberOfEntities, int numberOfNeighbors, TreeNode** trees_device, TreeNodeLeaf** treesLeafs_device, int* trees_size_device, int* treesLeafs_size_device, int numberOfTrees, float* dataTable_device, size_t dataTable_Pitch, float* neighboursDistance_device, int* neighboursId_device, char* idxChecking_device, int dimensionOfEntity, int start, int end, int* indexes_device, float* distances_device, int dimensionOfIndexesAndDistances, short* marker_device, int minSize){ __shared__ float entity[256]; __shared__ int hist[256]; __shared__ int elementsToCheckInTreesLeafs_device; __shared__ int startPointForTreesLeafs_device; __shared__ int numbersToCheck; __shared__ int numbersToCheckInThisPart; __shared__ int 
numbersToCheckInThisPartRealPart; __shared__ int idOfBatch; __shared__ int elementsPerBatch; __shared__ int startOfTheBatch; __shared__ int endOfTheBatch; __shared__ int idxCheckingIdGlobal; __shared__ int idxCheckingIdGlobal2; __shared__ float biggestNumber[256]; __shared__ float smalestNumber[256]; __shared__ double bias; __shared__ double lengthOfBucket; __shared__ double maxValue; __shared__ double minValue; __shared__ double minimalSizeOfBucket; __shared__ int foundExactSolution; __shared__ int limitOfLengthOfBucketExceeded; __shared__ int alreadyFoundNumbers; __shared__ int interestingBucket; __shared__ int rewrittenNumbers; __shared__ int complement; if(threadIdx.x == 0){ idOfBatch = blockIdx.x; elementsPerBatch = ceil(float(end-start)/float(gridDim.x)); startOfTheBatch = elementsPerBatch*idOfBatch; endOfTheBatch = elementsPerBatch*(idOfBatch+1) <= (end-start) ? elementsPerBatch*(idOfBatch+1) : (end-start); startOfTheBatch += start; endOfTheBatch += start; idxCheckingIdGlobal = idOfBatch*numberOfEntities; idxCheckingIdGlobal2 = idOfBatch*dimensionOfIndexesAndDistances; minimalSizeOfBucket = 0.0078125; } __syncthreads(); for(int i=startOfTheBatch ; i<endOfTheBatch ; ++i){ //Zerujemy liczby do wyszukiwania dla danego punktu, dla ktorego liczymy for(int ii=threadIdx.x ; ii<numberOfEntities ; ii+=blockDim.x){ idxChecking_device[idxCheckingIdGlobal+ii] = 0x00; } __syncthreads(); //Wyszukujemy liczby do przeszukania for(int treeNo=0 ; treeNo<numberOfTrees ; ++treeNo){ //Dla danego drzewa wyszukujemy pukty, dla ktorych bedziemy szukac poczatku danych w treesLeafs_device if(threadIdx.x == 0){ int startPoint = graphTraversalStartPoint_device[treeNo*numberOfEntities+i]; TreeNode treeNode = trees_device[treeNo][startPoint]; elementsToCheckInTreesLeafs_device = treeNode.numberOfEntities; while(treeNode.rightChild != EMPTY_DIRECTION){ treeNode = trees_device[treeNo][treeNode.leftChild]; } startPointForTreesLeafs_device = treeNode.leftChild; } __syncthreads(); //Ustawiamy te liczby, dla ktorych mamy liczyc (te ktore wyszukalismy w treesLeafs_device) for(int kk=threadIdx.x ; kk<elementsToCheckInTreesLeafs_device ; kk+=blockDim.x){ TreeNodeLeaf tnl = treesLeafs_device[treeNo][kk+startPointForTreesLeafs_device]; int elem = tnl.entityNumber; idxChecking_device[idxCheckingIdGlobal+elem] = 0x01; } __syncthreads(); } __syncthreads(); //Zerujemy bit odpowiedzialny za liczbe dla ktorej robimy poszukiwania if(threadIdx.x == 0){ idxChecking_device[idxCheckingIdGlobal+i] = 0x00; numbersToCheck = 0; } __syncthreads(); for(int kk=threadIdx.x ; kk<numberOfEntities ; kk+=blockDim.x){ char idxPtr = idxChecking_device[idxCheckingIdGlobal+kk]; if(idxPtr == 0x01){ atomicAdd(&numbersToCheck, 1); } } __syncthreads(); //Przepisujemy te liczby do tablicy z wyszukiwaniem najblizszych sasiadow while(numbersToCheck > 0){ __syncthreads(); //Przepisujemy aktualne najblizsze liczby for(int kk=threadIdx.x ; kk<numberOfNeighbors ; kk+=blockDim.x){ indexes_device[idxCheckingIdGlobal2+kk] = neighboursId_device[i*numberOfNeighbors+kk]; distances_device[idxCheckingIdGlobal2+kk] = neighboursDistance_device[i*numberOfNeighbors+kk]; marker_device[idxCheckingIdGlobal2+kk] = 0; } //Dopisujemy te co aktualnie sprawdzamy if(threadIdx.x == 0){ numbersToCheck = 0; numbersToCheckInThisPart = numberOfNeighbors; numbersToCheckInThisPartRealPart = numberOfNeighbors; } __syncthreads(); int localTid = threadIdx.x; while(localTid < numberOfEntities){ char idxPtr = idxChecking_device[idxCheckingIdGlobal+localTid]; if(idxPtr == 0x01){ int pos = 
atomicAdd(&numbersToCheckInThisPart, 1); if(pos < dimensionOfIndexesAndDistances){ indexes_device[idxCheckingIdGlobal2+pos] = localTid; distances_device[idxCheckingIdGlobal2+pos] = 0.0f; marker_device[idxCheckingIdGlobal2+pos] = 0; idxChecking_device[idxCheckingIdGlobal+localTid] = 0x00; atomicAdd(&numbersToCheckInThisPartRealPart, 1); }else{ atomicAdd(&numbersToCheck, 1); } } localTid += blockDim.x; } __syncthreads(); //Wyznaczamy odleglosc do tych nowych liczb for(int d=0 ; d<dimensionOfEntity ; d+=256){ //wczytaj liczbe dla ktorej bedziemy liczyc odleglosci do innych liczb if((threadIdx.x < 256)&&(d+threadIdx.x < dimensionOfEntity)){ float* pElement = (float*)((char*)dataTable_device + (d+threadIdx.x) * dataTable_Pitch) + i; entity[threadIdx.x] = *pElement; } __syncthreads(); //wyznaczanie odleglosci do liczb for(int lp = numberOfNeighbors+threadIdx.x ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float distance = 0.0; for(int k=d ; (k<dimensionOfEntity)&&(k<(d+256)) ; ++k){ int lpp = indexes_device[idxCheckingIdGlobal2+lp]; float* pElement = (float*)((char*)dataTable_device + k * dataTable_Pitch) + lpp; float pElementVal = *pElement; distance += (entity[k-d]-pElementVal)*(entity[k-d]-pElementVal); } //zapisanie odleglosci do tablicy na bazie ktorej beda wyszukiwani najblizsi sasiedzi distances_device[idxCheckingIdGlobal2+lp] += distance; } __syncthreads(); } biggestNumber[threadIdx.x] = 0.0f; smalestNumber[threadIdx.x] = STUB_INIT_DIST; __syncthreads(); for(int lp = numberOfNeighbors+threadIdx.x ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float dist = sqrt(distances_device[idxCheckingIdGlobal2+lp]); distances_device[idxCheckingIdGlobal2+lp] = dist; } __syncthreads(); for(int lp = threadIdx.x ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float dist = distances_device[idxCheckingIdGlobal2+lp]; biggestNumber[threadIdx.x] = max(biggestNumber[threadIdx.x], ceil(dist)); smalestNumber[threadIdx.x] = min(smalestNumber[threadIdx.x], floor(dist)); } __syncthreads(); //wyszukiwanie najwiekszej liczby w rezultacie if(threadIdx.x < 32){ for(int ii=threadIdx.x ; ii<256 ; ii+=32){ biggestNumber[threadIdx.x] = max(biggestNumber[threadIdx.x], biggestNumber[ii]); smalestNumber[threadIdx.x] = min(smalestNumber[threadIdx.x], smalestNumber[ii]); } } if(threadIdx.x == 0){ #pragma unroll for(int c=0 ; c<32 ; ++c){ biggestNumber[0] = max(biggestNumber[0], biggestNumber[c]); smalestNumber[0] = min(smalestNumber[0], smalestNumber[c]); } } __syncthreads(); //Wyszukujemy k najmniejszych liczb if(threadIdx.x == 0){ bias = smalestNumber[0]; minValue = 0; maxValue = biggestNumber[0] - smalestNumber[0]; maxValue = pow(2.0, ceil(log(maxValue+1.0)/log(2.0))); lengthOfBucket = (maxValue-minValue)/256.0; foundExactSolution = FALSE; limitOfLengthOfBucketExceeded = FALSE; alreadyFoundNumbers = 0; rewrittenNumbers = 0; complement = 0; } __syncthreads(); while((foundExactSolution == FALSE) && (limitOfLengthOfBucketExceeded == FALSE)){ hist[threadIdx.x] = 0; if(threadIdx.x == 0){ interestingBucket = NON_OF_BUCKET_IN_INTEREST; } __syncthreads(); //wyznacz histogram dla aktualnego opisu minValue-maxValue for(int lp = threadIdx.x ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short marker = marker_device[idxCheckingIdGlobal2+lp]; if(marker == 0){ int idOfBucketInHist = (distances_device[idxCheckingIdGlobal2+lp]-bias-minValue)/lengthOfBucket; atomicAdd(&hist[idOfBucketInHist], 1); marker_device[idxCheckingIdGlobal2+lp] = idOfBucketInHist; } } __syncthreads(); //zsumuj histogram tak, ze 
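    // 256-bucket selection of the k smallest distances, illustrative walk-through (the numbers
    // below are made up, not taken from this code): with numberOfNeighbors = 5 and a cumulative
    // histogram of, say, 2, 4, 9, ..., the first bucket whose cumulative count exceeds 5 is
    // bucket 2, so interestingBucket = 2; the 4 values in buckets 0-1 are marked IN_SOLUTION,
    // everything above bucket 2 becomes OUT_OF_SOLUTION, and the values in bucket 2 are
    // re-bucketed over a range 256 times narrower. This repeats until exactly k values are
    // selected or lengthOfBucket falls below minimalSizeOfBucket, in which case the remainder
    // is filled from the still-unresolved bucket.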
hist(i) to suma od hist(0) do hist(i) if(threadIdx.x == 0){ for(int k=1 ; k<256 ; ++k){ hist[k] += hist[k-1]; } } __syncthreads(); if((hist[threadIdx.x]+alreadyFoundNumbers)>numberOfNeighbors){ atomicMin(&interestingBucket, threadIdx.x); } //jezeli znalezlismy dokladna liczbe to koncz if((threadIdx.x == 0) && (alreadyFoundNumbers == numberOfNeighbors)){ foundExactSolution = TRUE; } //Sprawdzamy czy nie znalezlismy juz rozwiazania przyblizonego int tmpSum = hist[threadIdx.x] + alreadyFoundNumbers; if(tmpSum == numberOfNeighbors){ foundExactSolution = TRUE; } //sprawdzamy czy czasami nie osigniemy juz zbyt malej szerokosci kubelka if((threadIdx.x == 0) && (lengthOfBucket < minimalSizeOfBucket)){ limitOfLengthOfBucketExceeded = TRUE; } __syncthreads(); //dla tych kubelkow z id>interestingBucket zaznaczamy, że nie sa interesujace, a dla id<interestingBucket ze sa w rozwiazaniu, dla id==interestingBucket, do rozpatrzenia w nastepnej iteracji for(int lp = threadIdx.x ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short marker = marker_device[idxCheckingIdGlobal2+lp]; if((marker < interestingBucket)&&(marker >= 0)){ marker_device[idxCheckingIdGlobal2+lp] = IN_SOLUTION; atomicAdd(&alreadyFoundNumbers, 1); }else if((marker > interestingBucket)&&(marker < 256)){ marker_device[idxCheckingIdGlobal2+lp] = OUT_OF_SOLUTION; }else if(marker == interestingBucket){ marker_device[idxCheckingIdGlobal2+lp] = 0; } } __syncthreads(); //przeliczenie zakresow if(threadIdx.x == 0){ bias = bias+interestingBucket*lengthOfBucket; minValue = 0.0; maxValue = lengthOfBucket; lengthOfBucket = (maxValue-minValue)/256.0; } __syncthreads(); } __syncthreads(); //przepisz rozwiazanie wynikowe przy wyszukiwaniu najblizszych liczb for(int lp = threadIdx.x ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short marker = marker_device[idxCheckingIdGlobal2+lp]; if(marker == IN_SOLUTION){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance_device[i*numberOfNeighbors+id] = distances_device[idxCheckingIdGlobal2+lp]; neighboursId_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } __syncthreads(); //jezeli zostal przekroczony limit kubelka to znajdz odpowiednie liczby dla dopelnienia rezultatu dla najblizszych liczb if((limitOfLengthOfBucketExceeded == TRUE)&&(foundExactSolution == FALSE)){ for(int lp = threadIdx.x ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short marker = marker_device[idxCheckingIdGlobal2+lp]; if(marker == 0){ int id2 = atomicAdd(&complement, 1); if((id2+alreadyFoundNumbers) < numberOfNeighbors){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance_device[i*numberOfNeighbors+id] = distances_device[idxCheckingIdGlobal2+lp]; neighboursId_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } } } __syncthreads(); } __syncthreads(); } } __global__ void findInitialStateOfAproximatedTaxicabKNN(int* graphTraversalStartPoint_device, int numberOfEntities, int numberOfNeighbors, TreeNode** trees_device, TreeNodeLeaf** treesLeafs_device, int* trees_size_device, int* treesLeafs_size_device, int numberOfTrees, float* dataTable_device, size_t dataTable_Pitch, float* neighboursDistance_device, int* neighboursId_device, char* idxChecking_device, int dimensionOfEntity, int start, int end, int* indexes_device, float* distances_device, int dimensionOfIndexesAndDistances, short* marker_device, int minSize){ __shared__ int startPointForTreesLeafs_device; __shared__ int elementsToCheckInTreesLeafs_device; __shared__ float entity[256]; 
__shared__ int numbersToCheck; __shared__ int numbersToCheckInThisPart; __shared__ int numbersToCheckInThisPartRealPart; __shared__ double bias; __shared__ double lengthOfBucket; __shared__ double maxValue; __shared__ double minValue; __shared__ int hist[256]; __shared__ float biggestNumber[256]; __shared__ float smalestNumber[256]; __shared__ int foundExactSolution; __shared__ int limitOfLengthOfBucketExceeded; __shared__ int alreadyFoundNumbers; __shared__ int interestingBucket; __shared__ int rewrittenNumbers; __shared__ int complement; int tid = threadIdx.x; int idOfBatch = blockIdx.x; int elementsPerBatch = ceil(float(end-start)/float(gridDim.x)); int startOfTheBatch = elementsPerBatch*idOfBatch; int endOfTheBatch = elementsPerBatch*(idOfBatch+1) <= (end-start) ? elementsPerBatch*(idOfBatch+1) : (end-start); startOfTheBatch += start; endOfTheBatch += start; int idxCheckingIdGlobal = idOfBatch*numberOfEntities; int idxCheckingIdGlobal2 = idOfBatch*dimensionOfIndexesAndDistances; double minimalSizeOfBucket = 0.000000059604644775; for(int i=startOfTheBatch ; i<endOfTheBatch ; ++i){ //Zerujemy bity do wyszukiwania dla danego punktu, dla ktorego liczymy for(int ii=tid ; ii<numberOfEntities ; ii+=blockDim.x){ idxChecking_device[idxCheckingIdGlobal+ii] = 0x00; } __syncthreads(); //Wyszukujemy liczby do przeszukania for(int treeNo=0 ; treeNo<numberOfTrees ; ++treeNo){ //Dla danego drzewa wyszukujemy pukty, dla ktorych bedziemy szukac poczatku danych w treesLeafs_device if(tid == 0){ TreeNode treeNode = trees_device[treeNo][graphTraversalStartPoint_device[treeNo*numberOfEntities+i]]; elementsToCheckInTreesLeafs_device = treeNode.numberOfEntities; while(treeNode.rightChild != EMPTY_DIRECTION){ treeNode = trees_device[treeNo][treeNode.leftChild]; } startPointForTreesLeafs_device = treeNode.leftChild; } __syncthreads(); //Ustawiamy te bity, dla ktorych mamy liczyc (te ktore wyszukalismy w treesLeafs_device) for(int kk=tid ; kk<elementsToCheckInTreesLeafs_device ; kk+=blockDim.x){ int elem = treesLeafs_device[treeNo][kk+startPointForTreesLeafs_device].entityNumber; idxChecking_device[idxCheckingIdGlobal+elem] = 0x01; } __syncthreads(); } __syncthreads(); //Zerujemy bit odpowiedzialny za liczbe dla ktorej robimy poszukiwania if(tid == 0){ idxChecking_device[idxCheckingIdGlobal+i] = 0x00; numbersToCheck = 0; } __syncthreads(); for(int kk=tid ; kk<numberOfEntities ; kk+=blockDim.x){ char idxPtr2 = idxChecking_device[idxCheckingIdGlobal+kk]; if(idxPtr2 == 0x01){ atomicAdd(&numbersToCheck, 1); } } __syncthreads(); //Przepisujemy te liczby do tablicy z wyszukiwaniem najblizszych sasiadow while(numbersToCheck > 0){ __syncthreads(); //Przepisujemy aktualne najblizsze liczby for(int kk=tid ; kk<numberOfNeighbors ; kk+=blockDim.x){ indexes_device[idxCheckingIdGlobal2+kk] = neighboursId_device[i*numberOfNeighbors+kk]; distances_device[idxCheckingIdGlobal2+kk] = neighboursDistance_device[i*numberOfNeighbors+kk]; marker_device[idxCheckingIdGlobal2+kk] = 0; } //Dopisujemy te co aktualnie sprawdzamy if(tid == 0){ numbersToCheck = 0; numbersToCheckInThisPart = numberOfNeighbors; numbersToCheckInThisPartRealPart = numberOfNeighbors; } __syncthreads(); int localTid = tid; while(localTid < numberOfEntities){ char idxPtr2 = idxChecking_device[idxCheckingIdGlobal+localTid]; if(idxPtr2 == 0x01){ int pos = atomicAdd(&numbersToCheckInThisPart, 1); if(pos < dimensionOfIndexesAndDistances){ indexes_device[idxCheckingIdGlobal2+pos] = localTid; distances_device[idxCheckingIdGlobal2+pos] = 0.0f; 
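    // Taxicab variant: the per-chunk loop accumulates |a_k - b_k| instead of squared
    // differences. Note that, mirroring the Euclidean kernel, sqrt() is still applied to the
    // accumulated sums before the bucket selection; sqrt is monotonic on non-negative values,
    // so the neighbour ranking is unaffected, but the values stored in
    // neighboursDistance_device are square roots of the L1 distances.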
marker_device[idxCheckingIdGlobal2+pos] = 0; idxChecking_device[idxCheckingIdGlobal+localTid] = 0x00; atomicAdd(&numbersToCheckInThisPartRealPart, 1); }else{ atomicAdd(&numbersToCheck, 1); } } localTid += blockDim.x; } __syncthreads(); //Wyznaczamy odleglosc do tych nowych liczb for(int d=0 ; d<dimensionOfEntity ; d+=256){ //wczytaj liczbe dla ktorej bedziemy liczyc odleglosci do innych liczb if((tid < 256)&&(d+tid < dimensionOfEntity)){ float* pElement = (float*)((char*)dataTable_device + (d+tid) * dataTable_Pitch) + i; entity[tid] = *pElement; } __syncthreads(); //wyznaczanie odleglosci do liczb for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float distance = 0.0; for(int k=d ; (k<dimensionOfEntity)&&(k<(d+256)) ; ++k){ int lpp = indexes_device[idxCheckingIdGlobal2+lp]; float* pElement = (float*)((char*)dataTable_device + k * dataTable_Pitch) + lpp; float pElementVal = *pElement; distance += abs(entity[k-d]-pElementVal); } //zapisanie odleglosci do tablicy na bazie ktorej beda wyszukiwani najblizsi sasiedzi distances_device[idxCheckingIdGlobal2+lp] += distance; } __syncthreads(); } biggestNumber[tid] = 0.0f; smalestNumber[tid] = STUB_INIT_DIST; __syncthreads(); for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float dist = sqrt(distances_device[idxCheckingIdGlobal2+lp]); distances_device[idxCheckingIdGlobal2+lp] = dist; } __syncthreads(); for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float dist = distances_device[idxCheckingIdGlobal2+lp]; biggestNumber[tid] = max(biggestNumber[tid], ceil(dist)); smalestNumber[tid] = min(smalestNumber[tid], floor(dist)); } __syncthreads(); //wyszukiwanie najwiekszej liczby w rezultacie if(tid < 32){ for(int ii=tid ; ii<256 ; ii+=32){ biggestNumber[tid] = max(biggestNumber[tid], biggestNumber[ii]); smalestNumber[tid] = min(smalestNumber[tid], smalestNumber[ii]); } } if(tid == 0){ #pragma unroll for(int c=0 ; c<32 ; ++c){ biggestNumber[0] = max(biggestNumber[0], biggestNumber[c]); smalestNumber[0] = min(smalestNumber[0], smalestNumber[c]); } } __syncthreads(); //Wyszukujemy k najmniejszych liczb if(tid == 0){ bias = smalestNumber[0]; minValue = 0.0; maxValue = biggestNumber[0] - smalestNumber[0]; maxValue = pow(2.0, ceil(log(maxValue+1.0)/log(2.0))); lengthOfBucket = (maxValue-minValue)/256.0; foundExactSolution = FALSE; limitOfLengthOfBucketExceeded = FALSE; alreadyFoundNumbers = 0; rewrittenNumbers = 0; complement = 0; } __syncthreads(); while((foundExactSolution == FALSE) && (limitOfLengthOfBucketExceeded == FALSE)){ hist[tid] = 0; if(tid == 0){ interestingBucket = NON_OF_BUCKET_IN_INTEREST; } __syncthreads(); //wyznacz histogram dla aktualnego opisu minValue-maxValue for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == 0){ int idOfBucketInHist = (distances_device[idxCheckingIdGlobal2+lp]-bias-minValue)/lengthOfBucket; atomicAdd(&hist[idOfBucketInHist], 1); marker_device[idxCheckingIdGlobal2+lp] = idOfBucketInHist; } } __syncthreads(); //zsumuj histogram tak, ze hist(i) to suma od hist(0) do hist(i) if(tid == 0){ for(int k=1 ; k<256 ; ++k){ hist[k] += hist[k-1]; } } __syncthreads(); if((hist[tid]+alreadyFoundNumbers) > numberOfNeighbors){ atomicMin(&interestingBucket, tid); } //jezeli znalezlismy dokladna liczbe to koncz if((tid == 0) && (alreadyFoundNumbers == numberOfNeighbors)){ foundExactSolution = TRUE; } //Sprawdzamy czy nie znalezlismy juz rozwiazania 
przyblizonego int tmpSum = hist[tid] + alreadyFoundNumbers; if(tmpSum == numberOfNeighbors){ foundExactSolution = TRUE; } //sprawdzamy czy czasami nie osigniemy juz zbyt malej szerokosci kubelka if((tid == 0) && (lengthOfBucket < minimalSizeOfBucket)){ limitOfLengthOfBucketExceeded = TRUE; } __syncthreads(); //dla tych kubelkow z id>interestingBucket zaznaczamy, że nie sa interesujace, a dla id<interestingBucket ze sa w rozwiazaniu, dla id==interestingBucket, do rozpatrzenia w nastepnej iteracji for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if((mark < interestingBucket)&&(mark >= 0)){ marker_device[idxCheckingIdGlobal2+lp] = IN_SOLUTION; atomicAdd(&alreadyFoundNumbers, 1); }else if((mark > interestingBucket)&&(mark < 256)){ marker_device[idxCheckingIdGlobal2+lp] = OUT_OF_SOLUTION; }else if(mark == interestingBucket){ marker_device[idxCheckingIdGlobal2+lp] = 0; } } __syncthreads(); //przeliczenie zakresow if(tid == 0){ bias = bias+interestingBucket*lengthOfBucket; minValue = 0.0; maxValue = lengthOfBucket; lengthOfBucket = (maxValue-minValue)/256.0; } __syncthreads(); } __syncthreads(); //Wpisujemy k najmniejsze liczby jako nowe rozwiazanie do neighbours for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == IN_SOLUTION){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance_device[i*numberOfNeighbors+id] = distances_device[idxCheckingIdGlobal2+lp]; neighboursId_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } __syncthreads(); //jezeli zostal przekroczony limit kubelka to znajdz odpowiednie liczby dla dopelnienia rezultatu dla najblizszych liczb if((limitOfLengthOfBucketExceeded == TRUE)&&(foundExactSolution == FALSE)){ for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == 0){ int id2 = atomicAdd(&complement, 1); if((id2+alreadyFoundNumbers) < numberOfNeighbors){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance_device[i*numberOfNeighbors+id] = distances_device[idxCheckingIdGlobal2+lp]; neighboursId_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } } } __syncthreads(); } __syncthreads(); } } __global__ void findInitialStateOfAproximatedCosineKNN(int* graphTraversalStartPoint_device, int numberOfEntities, int numberOfNeighbors, TreeNode** trees_device, TreeNodeLeaf** treesLeafs_device, int* trees_size_device, int* treesLeafs_size_device, int numberOfTrees, float* dataTable_device, size_t dataTable_Pitch, float* neighboursDistance_device, int* neighboursId_device, char* idxChecking_device, int dimensionOfEntity, int start, int end, int* indexes_device, float* distances_device, float* distances2_device, float* distances3_device, int dimensionOfIndexesAndDistances, short* marker_device, int minSize){ __shared__ int startPointForTreesLeafs_device; __shared__ int elementsToCheckInTreesLeafs_device; __shared__ float entity[256]; __shared__ int numbersToCheck; __shared__ int numbersToCheckInThisPart; __shared__ int numbersToCheckInThisPartRealPart; __shared__ double bias; __shared__ double lengthOfBucket; __shared__ double maxValue; __shared__ double minValue; __shared__ int hist[256]; __shared__ float biggestNumber[256]; __shared__ float smalestNumber[256]; __shared__ int foundExactSolution; __shared__ int limitOfLengthOfBucketExceeded; __shared__ int alreadyFoundNumbers; 
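    // Cosine variant: three accumulators are carried across the 256-dimension chunks,
    // distances_device (A.B), distances2_device (A.A) and distances3_device (B.B); they are
    // combined into cos(A,B) only after the last chunk. Internally the kernel works with
    // d = 1 - cos(A,B) so that the same "k smallest" bucket selection can be reused, and it
    // converts back to the similarity -1*(d - 1) = cos(A,B) when writing results. That is why
    // neighboursDistance_device holds similarities (larger = closer) for this metric and why
    // compareByLengthMinCosine, defined later in this file, sorts in descending order.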
__shared__ int interestingBucket; __shared__ int rewrittenNumbers; __shared__ int complement; int tid = threadIdx.x; int idOfBatch = blockIdx.x; int elementsPerBatch = ceil(float(end-start)/float(gridDim.x)); int startOfTheBatch = elementsPerBatch*idOfBatch; int endOfTheBatch = elementsPerBatch*(idOfBatch+1) <= (end-start) ? elementsPerBatch*(idOfBatch+1) : (end-start); startOfTheBatch += start; endOfTheBatch += start; int idxCheckingIdGlobal = idOfBatch*numberOfEntities; int idxCheckingIdGlobal2 = idOfBatch*dimensionOfIndexesAndDistances; double minimalSizeOfBucket = 0.000000059604644775; for(int i=startOfTheBatch ; i<endOfTheBatch ; ++i){ //Zerujemy bity do wyszukiwania dla danego punktu, dla ktorego liczymy for(int ii=tid ; ii<numberOfEntities ; ii+=blockDim.x){ idxChecking_device[idxCheckingIdGlobal+ii] = 0x00; } __syncthreads(); //Wyszukujemy liczby do przeszukania for(int treeNo=0 ; treeNo<numberOfTrees ; ++treeNo){ //Dla danego drzewa wyszukujemy pukty, dla ktorych bedziemy szukac poczatku danych w treesLeafs_device if(tid == 0){ TreeNode treeNode = trees_device[treeNo][graphTraversalStartPoint_device[treeNo*numberOfEntities+i]]; elementsToCheckInTreesLeafs_device = treeNode.numberOfEntities; while(treeNode.rightChild != EMPTY_DIRECTION){ treeNode = trees_device[treeNo][treeNode.leftChild]; } startPointForTreesLeafs_device = treeNode.leftChild; } __syncthreads(); //Ustawiamy te bity, dla ktorych mamy liczyc (te ktore wyszukalismy w treesLeafs_device) for(int kk=tid ; kk<elementsToCheckInTreesLeafs_device ; kk+=blockDim.x){ int elem = treesLeafs_device[treeNo][kk+startPointForTreesLeafs_device].entityNumber; idxChecking_device[idxCheckingIdGlobal+elem] = 0x01; } __syncthreads(); } __syncthreads(); //Zerujemy bit odpowiedzialny za liczbe dla ktorej robimy poszukiwania if(tid == 0){ idxChecking_device[idxCheckingIdGlobal+i] = 0x00; numbersToCheck = 0; } __syncthreads(); for(int kk=tid ; kk<numberOfEntities ; kk+=blockDim.x){ char idxPtr2 = idxChecking_device[idxCheckingIdGlobal+kk]; if(idxPtr2 == 0x01){ atomicAdd(&numbersToCheck, 1); } } __syncthreads(); //Przepisujemy te liczby do tablicy z wyszukiwaniem najblizszych sasiadow while(numbersToCheck > 0){ __syncthreads(); //Przepisujemy aktualne najblizsze liczby for(int kk=tid ; kk<numberOfNeighbors ; kk+=blockDim.x){ indexes_device[idxCheckingIdGlobal2+kk] = neighboursId_device[i*numberOfNeighbors+kk]; distances_device[idxCheckingIdGlobal2+kk] = neighboursDistance_device[i*numberOfNeighbors+kk]; marker_device[idxCheckingIdGlobal2+kk] = 0; } //Dopisujemy te co aktualnie sprawdzamy if(tid == 0){ numbersToCheck = 0; numbersToCheckInThisPart = numberOfNeighbors; numbersToCheckInThisPartRealPart = numberOfNeighbors; } __syncthreads(); int localTid = tid; while(localTid < numberOfEntities){ char idxPtr2 = idxChecking_device[idxCheckingIdGlobal+localTid]; if(idxPtr2 == 0x01){ int pos = atomicAdd(&numbersToCheckInThisPart, 1); if(pos < dimensionOfIndexesAndDistances){ indexes_device[idxCheckingIdGlobal2+pos] = localTid; distances_device[idxCheckingIdGlobal2+pos] = 0.0f; distances2_device[idxCheckingIdGlobal2+pos] = 0.0f; distances3_device[idxCheckingIdGlobal2+pos] = 0.0f; marker_device[idxCheckingIdGlobal2+pos] = 0; idxChecking_device[idxCheckingIdGlobal+localTid] = 0x00; atomicAdd(&numbersToCheckInThisPartRealPart, 1); }else{ atomicAdd(&numbersToCheck, 1); } } localTid += blockDim.x; } __syncthreads(); //Wyznaczamy odleglosc do tych nowych liczb for(int d=0 ; d<dimensionOfEntity ; d+=256){ //wczytaj liczbe dla ktorej bedziemy liczyc 
odleglosci do innych liczb if((tid < 256)&&(d+tid < dimensionOfEntity)){ float* pElement = (float*)((char*)dataTable_device + (d+tid) * dataTable_Pitch) + i; entity[tid] = *pElement; } __syncthreads(); //wyznaczanie odleglosci do liczb for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float distanceAB = 0.0; float distanceA = 0.0; float distanceB = 0.0; for(int k=d ; (k<dimensionOfEntity)&&(k<(d+256)) ; ++k){ int lpp = indexes_device[idxCheckingIdGlobal2+lp]; float* pElement = (float*)((char*)dataTable_device + k * dataTable_Pitch) + lpp; float pElementVal = *pElement; distanceAB += entity[k-d]*pElementVal; distanceA += entity[k-d]*entity[k-d]; distanceB += pElementVal*pElementVal; } //zapisanie odleglosci do tablicy na bazie ktorej beda wyszukiwani najblizsi sasiedzi distances_device[idxCheckingIdGlobal2+lp] += distanceAB; distances2_device[idxCheckingIdGlobal2+lp] += distanceA; distances3_device[idxCheckingIdGlobal2+lp] += distanceB; } __syncthreads(); } biggestNumber[tid] = 0.0f; smalestNumber[tid] = STUB_INIT_DIST; __syncthreads(); //wyznaczanie odleglosci do liczb najblizszych for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float distanceAB = distances_device[idxCheckingIdGlobal2+lp]; float distanceA = distances2_device[idxCheckingIdGlobal2+lp]; float distanceB = distances3_device[idxCheckingIdGlobal2+lp]; float distance = distanceAB/(sqrt(distanceA)*sqrt(distanceB)); distance = (-1.0*distance)+1.0; distances_device[idxCheckingIdGlobal2+lp] = distance; } __syncthreads(); //for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ //float dist = sqrt(distances_device[idxCheckingIdGlobal2+lp]); //distances_device[idxCheckingIdGlobal2+lp] = dist; //} //__syncthreads(); for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float dist = distances_device[idxCheckingIdGlobal2+lp]; //dist = (-1.0*dist)+1.0; //distances_device[idxCheckingIdGlobal2+lp] = dist; biggestNumber[tid] = max(biggestNumber[tid], ceil(dist)); smalestNumber[tid] = min(smalestNumber[tid], floor(dist)); } __syncthreads(); //wyszukiwanie najwiekszej liczby w rezultacie if(tid < 32){ for(int ii=tid ; ii<256 ; ii+=32){ biggestNumber[tid] = max(biggestNumber[tid], biggestNumber[ii]); smalestNumber[tid] = min(smalestNumber[tid], smalestNumber[ii]); } } if(tid == 0){ #pragma unroll for(int c=0 ; c<32 ; ++c){ biggestNumber[0] = max(biggestNumber[0], biggestNumber[c]); smalestNumber[0] = min(smalestNumber[0], smalestNumber[c]); } } __syncthreads(); //Wyszukujemy k najmniejszych liczb if(tid == 0){ bias = smalestNumber[0]; minValue = 0.0; maxValue = biggestNumber[0] - smalestNumber[0]; maxValue = pow(2.0, ceil(log(maxValue+1.0)/log(2.0))); lengthOfBucket = (maxValue-minValue)/256.0; foundExactSolution = FALSE; limitOfLengthOfBucketExceeded = FALSE; alreadyFoundNumbers = 0; rewrittenNumbers = 0; complement = 0; } __syncthreads(); while((foundExactSolution == FALSE) && (limitOfLengthOfBucketExceeded == FALSE)){ hist[tid] = 0; if(tid == 0){ interestingBucket = NON_OF_BUCKET_IN_INTEREST; } __syncthreads(); //wyznacz histogram dla aktualnego opisu minValue-maxValue for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == 0){ int idOfBucketInHist = (distances_device[idxCheckingIdGlobal2+lp]-bias-minValue)/lengthOfBucket; atomicAdd(&hist[idOfBucketInHist], 1); marker_device[idxCheckingIdGlobal2+lp] = idOfBucketInHist; } } 
__syncthreads(); //zsumuj histogram tak, ze hist(i) to suma od hist(0) do hist(i) if(tid == 0){ for(int k=1 ; k<256 ; ++k){ hist[k] += hist[k-1]; } } __syncthreads(); if((hist[tid]+alreadyFoundNumbers) > numberOfNeighbors){ atomicMin(&interestingBucket, tid); } //jezeli znalezlismy dokladna liczbe to koncz if((tid == 0) && (alreadyFoundNumbers == numberOfNeighbors)){ foundExactSolution = TRUE; } //Sprawdzamy czy nie znalezlismy juz rozwiazania przyblizonego int tmpSum = hist[tid] + alreadyFoundNumbers; if(tmpSum == numberOfNeighbors){ foundExactSolution = TRUE; } //sprawdzamy czy czasami nie osigniemy juz zbyt malej szerokosci kubelka if((tid == 0) && (lengthOfBucket < minimalSizeOfBucket)){ limitOfLengthOfBucketExceeded = TRUE; } __syncthreads(); //dla tych kubelkow z id>interestingBucket zaznaczamy, że nie sa interesujace, a dla id<interestingBucket ze sa w rozwiazaniu, dla id==interestingBucket, do rozpatrzenia w nastepnej iteracji for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if((mark < interestingBucket)&&(mark >= 0)){ marker_device[idxCheckingIdGlobal2+lp] = IN_SOLUTION; atomicAdd(&alreadyFoundNumbers, 1); }else if((mark > interestingBucket)&&(mark < 256)){ marker_device[idxCheckingIdGlobal2+lp] = OUT_OF_SOLUTION; }else if(mark == interestingBucket){ marker_device[idxCheckingIdGlobal2+lp] = 0; } } __syncthreads(); //przeliczenie zakresow if(tid == 0){ bias = bias+interestingBucket*lengthOfBucket; minValue = 0.0; maxValue = lengthOfBucket; lengthOfBucket = (maxValue-minValue)/256.0; } __syncthreads(); } __syncthreads(); //Wpisujemy k najmniejsze liczby jako nowe rozwiazanie do neighbours for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == IN_SOLUTION){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance_device[i*numberOfNeighbors+id] = -1.0*(distances_device[idxCheckingIdGlobal2+lp]-1.0); neighboursId_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } __syncthreads(); //jezeli zostal przekroczony limit kubelka to znajdz odpowiednie liczby dla dopelnienia rezultatu dla najblizszych liczb if((limitOfLengthOfBucketExceeded == TRUE)&&(foundExactSolution == FALSE)){ for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == 0){ int id2 = atomicAdd(&complement, 1); if((id2+alreadyFoundNumbers) < numberOfNeighbors){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance_device[i*numberOfNeighbors+id] = -1.0*(distances_device[idxCheckingIdGlobal2+lp]-1.0); neighboursId_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } } } __syncthreads(); } __syncthreads(); } } __global__ void propagateEuclideanKernel(int numberOfEntities, int numberOfNeighbors, float* dataTable_device, size_t dataTable_Pitch, float* neighboursDistance_device, float* neighboursDistance2_device, int* neighboursId_device, int* neighboursId2_device, char* idxChecking_device, int dimensionOfEntity, int start, int end, int* indexes_device, float* distances_device, int dimensionOfIndexesAndDistances, short* marker_device){ __shared__ float entity[256]; __shared__ int numbersToCheck; __shared__ int numbersToCheckInThisPart; __shared__ int numbersToCheckInThisPartRealPart; __shared__ double bias; __shared__ double lengthOfBucket; __shared__ double maxValue; __shared__ double minValue; __shared__ int hist[256]; __shared__ 
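    // propagate*Kernel: one refinement pass over the k-NN graph. For each point i the candidate
    // set is the set of neighbours of its current neighbours (read from neighboursId_device);
    // distances are recomputed with the same chunked loops and bucket selection as above, and
    // the improved lists are written to the second buffer (neighboursId2_device /
    // neighboursDistance2_device) so the input graph stays constant during the pass. The
    // host-side propagate() swaps the two buffers after every iteration.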
float biggestNumber[256]; __shared__ float smalestNumber[256]; __shared__ int foundExactSolution; __shared__ int limitOfLengthOfBucketExceeded; __shared__ int alreadyFoundNumbers; __shared__ int interestingBucket; __shared__ int rewrittenNumbers; __shared__ int complement; int tid = threadIdx.x; int idOfBatch = blockIdx.x; int elementsPerBatch = ceil(float(end-start)/float(gridDim.x)); int startOfTheBatch = elementsPerBatch*idOfBatch; int endOfTheBatch = elementsPerBatch*(idOfBatch+1) <= (end-start) ? elementsPerBatch*(idOfBatch+1) : (end-start); startOfTheBatch += start; endOfTheBatch += start; int idxCheckingIdGlobal = idOfBatch*numberOfEntities; int idxCheckingIdGlobal2 = idOfBatch*dimensionOfIndexesAndDistances; double minimalSizeOfBucket = 0.000000059604644775; for(int i=startOfTheBatch ; i<endOfTheBatch ; ++i){ //Zerujemy bity do wyszukiwania dla danego punktu, dla ktorego liczymy for(int ii=tid ; ii<numberOfEntities ; ii+=blockDim.x){ idxChecking_device[idxCheckingIdGlobal+ii] = 0x00; } __syncthreads(); //Wyszukujemy liczby do przeszukania for(int neighHi = 0 ; neighHi < numberOfNeighbors ; neighHi += 1){ for(int neighLo = tid ; neighLo < numberOfNeighbors ; neighLo+=blockDim.x){ int interestingNeighbour = neighboursId_device[i*numberOfNeighbors+neighHi]; int elem = neighboursId_device[interestingNeighbour*numberOfNeighbors+neighLo]; idxChecking_device[idxCheckingIdGlobal+elem] = 0x01; } __syncthreads(); } __syncthreads(); //Zerujemy bit odpowiedzialny za liczbe sama ze soba if(tid == 0){ idxChecking_device[idxCheckingIdGlobal+i] = 0x00; } __syncthreads(); //Zerujemy bity dla wlasnych sasiadow i przepisujemy aktualnie najblizszych for(int kk=tid ; kk<numberOfNeighbors ; kk+=blockDim.x){ int elem = neighboursId_device[i*numberOfNeighbors+kk]; idxChecking_device[idxCheckingIdGlobal+elem] = 0x00; neighboursId2_device[i*numberOfNeighbors+kk] = elem; neighboursDistance2_device[i*numberOfNeighbors+kk] = neighboursDistance_device[i*numberOfNeighbors+kk]; } __syncthreads(); //Liczymy najblizszych if(tid == 0){ numbersToCheck = 0; } __syncthreads(); for(int kk=tid ; kk<numberOfEntities ; kk+=blockDim.x){ if(idxChecking_device[idxCheckingIdGlobal+kk] == 0x01){ atomicAdd(&numbersToCheck, 1); } } __syncthreads(); //Przepisujemy te liczby do tablicy z wyszukiwaniem najblizszych sasiadow while(numbersToCheck > 0){ __syncthreads(); //Przepisujemy aktualne najblizsze liczby for(int kk=tid ; kk<numberOfNeighbors ; kk+=blockDim.x){ indexes_device[idxCheckingIdGlobal2+kk] = neighboursId2_device[i*numberOfNeighbors+kk]; distances_device[idxCheckingIdGlobal2+kk] = neighboursDistance2_device[i*numberOfNeighbors+kk]; marker_device[idxCheckingIdGlobal2+kk] = 0; } //Dopisujemy te co aktualnie sprawdzamy if(tid == 0){ numbersToCheck = 0; numbersToCheckInThisPart = numberOfNeighbors; numbersToCheckInThisPartRealPart = numberOfNeighbors; } __syncthreads(); int localTid = tid; while(localTid < numberOfEntities){ if(idxChecking_device[idxCheckingIdGlobal+localTid] == 0x01){ int pos = atomicAdd(&numbersToCheckInThisPart, 1); if(pos < dimensionOfIndexesAndDistances){ indexes_device[idxCheckingIdGlobal2+pos] = localTid; distances_device[idxCheckingIdGlobal2+pos] = 0.0f; marker_device[idxCheckingIdGlobal2+pos] = 0; idxChecking_device[idxCheckingIdGlobal+localTid] = 0x00; atomicAdd(&numbersToCheckInThisPartRealPart, 1); }else{ atomicAdd(&numbersToCheck, 1); } } localTid += blockDim.x; } __syncthreads(); //Wyznaczamy odleglosc do tych nowych liczb for(int d=0 ; d<dimensionOfEntity ; d+=256){ //wczytaj liczbe dla 
ktorej bedziemy liczyc odleglosci do innych liczb if((tid < 256)&&(d+tid < dimensionOfEntity)){ float* pElement = (float*)((char*)dataTable_device + (d+tid) * dataTable_Pitch) + i; entity[tid] = *pElement; } __syncthreads(); //wyznaczanie odleglosci do liczb for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float distance = 0.0; for(int k=d ; (k<dimensionOfEntity)&&(k<(d+256)) ; ++k){ int lpp = indexes_device[idxCheckingIdGlobal2+lp]; float* pElement = (float*)((char*)dataTable_device + k * dataTable_Pitch) + lpp; float pElementVal = *pElement; distance += (entity[k-d]-pElementVal)*(entity[k-d]-pElementVal); } //zapisanie odleglosci do tablicy na bazie ktorej beda wyszukiwani najblizsi sasiedzi distances_device[idxCheckingIdGlobal2+lp] += distance; } __syncthreads(); } biggestNumber[tid] = 0.0f; smalestNumber[tid] = STUB_INIT_DIST; __syncthreads(); for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float dist = sqrt(distances_device[idxCheckingIdGlobal2+lp]); distances_device[idxCheckingIdGlobal2+lp] = dist; } __syncthreads(); for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float dist = distances_device[idxCheckingIdGlobal2+lp]; biggestNumber[tid] = max(biggestNumber[tid], ceil(dist)); smalestNumber[tid] = min(smalestNumber[tid], floor(dist)); } __syncthreads(); //wyszukiwanie najwiekszej liczby w rezultacie if(tid < 32){ for(int ii=tid ; ii<256 ; ii+=32){ biggestNumber[tid] = max(biggestNumber[tid], biggestNumber[ii]); smalestNumber[tid] = min(smalestNumber[tid], smalestNumber[ii]); } } if(tid == 0){ #pragma unroll for(int c=0 ; c<32 ; ++c){ biggestNumber[0] = max(biggestNumber[0], biggestNumber[c]); smalestNumber[0] = min(smalestNumber[0], smalestNumber[c]); } } __syncthreads(); //Wyszukujemy k najmniejszych liczb if(tid == 0){ bias = smalestNumber[0]; minValue = 0.0; maxValue = biggestNumber[0] - smalestNumber[0]; maxValue = pow(2.0, ceil(log(maxValue+1.0)/log(2.0))); lengthOfBucket = (maxValue-minValue)/256.0; foundExactSolution = FALSE; limitOfLengthOfBucketExceeded = FALSE; alreadyFoundNumbers = 0; rewrittenNumbers = 0; complement = 0; } __syncthreads(); while((foundExactSolution == FALSE) && (limitOfLengthOfBucketExceeded == FALSE)){ hist[tid] = 0; if(tid == 0){ interestingBucket = NON_OF_BUCKET_IN_INTEREST; } __syncthreads(); //wyznacz histogram dla aktualnego opisu minValue-maxValue for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == 0){ int idOfBucketInHist = (distances_device[idxCheckingIdGlobal2+lp]-bias-minValue)/lengthOfBucket; atomicAdd(&hist[idOfBucketInHist], 1); marker_device[idxCheckingIdGlobal2+lp] = idOfBucketInHist; } } __syncthreads(); //zsumuj histogram tak, ze hist(i) to suma od hist(0) do hist(i) if(tid == 0){ for(int k=1 ; k<256 ; ++k){ hist[k] += hist[k-1]; } } __syncthreads(); if((hist[tid]+alreadyFoundNumbers) > numberOfNeighbors){ atomicMin(&interestingBucket, tid); } //jezeli znalezlismy dokladna liczbe to koncz if((tid == 0) && (alreadyFoundNumbers == numberOfNeighbors)){ foundExactSolution = TRUE; } //Sprawdzamy czy nie znalezlismy juz rozwiazania przyblizonego int tmpSum = hist[tid] + alreadyFoundNumbers; if(tmpSum == numberOfNeighbors){ foundExactSolution = TRUE; } //sprawdzamy czy czasami nie osigniemy juz zbyt malej szerokosci kubelka if((tid == 0) && (lengthOfBucket < minimalSizeOfBucket)){ limitOfLengthOfBucketExceeded = TRUE; } __syncthreads(); //dla tych 
kubelkow z id>interestingBucket zaznaczamy, że nie sa interesujace, a dla id<interestingBucket ze sa w rozwiazaniu, dla id==interestingBucket, do rozpatrzenia w nastepnej iteracji for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if((mark < interestingBucket)&&(mark >= 0)){ marker_device[idxCheckingIdGlobal2+lp] = IN_SOLUTION; atomicAdd(&alreadyFoundNumbers, 1); }else if((mark > interestingBucket)&&(mark < 256)){ marker_device[idxCheckingIdGlobal2+lp] = OUT_OF_SOLUTION; }else if(mark == interestingBucket){ marker_device[idxCheckingIdGlobal2+lp] = 0; } } __syncthreads(); //przeliczenie zakresow if(tid == 0){ bias = bias+interestingBucket*lengthOfBucket; minValue = 0.0; maxValue = lengthOfBucket; lengthOfBucket = (maxValue-minValue)/256.0; } __syncthreads(); } __syncthreads(); //Wpisujemy k najmniejsze liczby jako nowe rozwiazanie do neighbours for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == IN_SOLUTION){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance2_device[i*numberOfNeighbors+id] = distances_device[idxCheckingIdGlobal2+lp]; neighboursId2_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } __syncthreads(); //jezeli zostal przekroczony limit kubelka to znajdz odpowiednie liczby dla dopelnienia rezultatu dla najblizszych liczb if((limitOfLengthOfBucketExceeded == TRUE)&&(foundExactSolution == FALSE)){ for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == 0){ int id2 = atomicAdd(&complement, 1); if((id2+alreadyFoundNumbers) < numberOfNeighbors){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance2_device[i*numberOfNeighbors+id] = distances_device[idxCheckingIdGlobal2+lp]; neighboursId2_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } } } __syncthreads(); } __syncthreads(); } } __global__ void propagateTaxicabKernel(int numberOfEntities, int numberOfNeighbors, float* dataTable_device, size_t dataTable_Pitch, float* neighboursDistance_device, float* neighboursDistance2_device, int* neighboursId_device, int* neighboursId2_device, char* idxChecking_device, int dimensionOfEntity, int start, int end, int* indexes_device, float* distances_device, int dimensionOfIndexesAndDistances, short* marker_device){ __shared__ float entity[256]; __shared__ int numbersToCheck; __shared__ int numbersToCheckInThisPart; __shared__ int numbersToCheckInThisPartRealPart; __shared__ double bias; __shared__ double lengthOfBucket; __shared__ double maxValue; __shared__ double minValue; __shared__ int hist[256]; __shared__ float biggestNumber[256]; __shared__ float smalestNumber[256]; __shared__ int foundExactSolution; __shared__ int limitOfLengthOfBucketExceeded; __shared__ int alreadyFoundNumbers; __shared__ int interestingBucket; __shared__ int rewrittenNumbers; __shared__ int complement; int tid = threadIdx.x; int idOfBatch = blockIdx.x; int elementsPerBatch = ceil(float(end-start)/float(gridDim.x)); int startOfTheBatch = elementsPerBatch*idOfBatch; int endOfTheBatch = elementsPerBatch*(idOfBatch+1) <= (end-start) ? 
elementsPerBatch*(idOfBatch+1) : (end-start); startOfTheBatch += start; endOfTheBatch += start; int idxCheckingIdGlobal = idOfBatch*numberOfEntities; int idxCheckingIdGlobal2 = idOfBatch*dimensionOfIndexesAndDistances; double minimalSizeOfBucket = 0.000000059604644775; for(int i=startOfTheBatch ; i<endOfTheBatch ; ++i){ //Zerujemy bity do wyszukiwania dla danego punktu, dla ktorego liczymy for(int ii=tid ; ii<numberOfEntities ; ii+=blockDim.x){ idxChecking_device[idxCheckingIdGlobal+ii] = 0x00; } __syncthreads(); //Wyszukujemy liczby do przeszukania for(int neighHi = 0 ; neighHi < numberOfNeighbors ; neighHi += 1){ for(int neighLo = tid ; neighLo < numberOfNeighbors ; neighLo+=blockDim.x){ int interestingNeighbour = neighboursId_device[i*numberOfNeighbors+neighHi]; int elem = neighboursId_device[interestingNeighbour*numberOfNeighbors+neighLo]; idxChecking_device[idxCheckingIdGlobal+elem] = 0x01; } __syncthreads(); } __syncthreads(); //Zerujemy bit odpowiedzialny za liczbe sama ze soba if(tid == 0){ idxChecking_device[idxCheckingIdGlobal+i] = 0x00; } __syncthreads(); //Zerujemy bity dla wlasnych sasiadow i przepisujemy aktualnie najblizszych for(int kk=tid ; kk<numberOfNeighbors ; kk+=blockDim.x){ int elem = neighboursId_device[i*numberOfNeighbors+kk]; idxChecking_device[idxCheckingIdGlobal+elem] = 0x00; neighboursId2_device[i*numberOfNeighbors+kk] = elem; neighboursDistance2_device[i*numberOfNeighbors+kk] = neighboursDistance_device[i*numberOfNeighbors+kk]; } __syncthreads(); //Liczymy najblizszych if(tid == 0){ numbersToCheck = 0; } __syncthreads(); for(int kk=tid ; kk<numberOfEntities ; kk+=blockDim.x){ if(idxChecking_device[idxCheckingIdGlobal+kk] == 0x01){ atomicAdd(&numbersToCheck, 1); } } __syncthreads(); //Przepisujemy te liczby do tablicy z wyszukiwaniem najblizszych sasiadow while(numbersToCheck > 0){ __syncthreads(); //Przepisujemy aktualne najblizsze liczby for(int kk=tid ; kk<numberOfNeighbors ; kk+=blockDim.x){ indexes_device[idxCheckingIdGlobal2+kk] = neighboursId2_device[i*numberOfNeighbors+kk]; distances_device[idxCheckingIdGlobal2+kk] = neighboursDistance2_device[i*numberOfNeighbors+kk]; marker_device[idxCheckingIdGlobal2+kk] = 0; } //Dopisujemy te co aktualnie sprawdzamy if(tid == 0){ numbersToCheck = 0; numbersToCheckInThisPart = numberOfNeighbors; numbersToCheckInThisPartRealPart = numberOfNeighbors; } __syncthreads(); int localTid = tid; while(localTid < numberOfEntities){ if(idxChecking_device[idxCheckingIdGlobal+localTid] == 0x01){ int pos = atomicAdd(&numbersToCheckInThisPart, 1); if(pos < dimensionOfIndexesAndDistances){ indexes_device[idxCheckingIdGlobal2+pos] = localTid; distances_device[idxCheckingIdGlobal2+pos] = 0.0f; marker_device[idxCheckingIdGlobal2+pos] = 0; idxChecking_device[idxCheckingIdGlobal+localTid] = 0x00; atomicAdd(&numbersToCheckInThisPartRealPart, 1); }else{ atomicAdd(&numbersToCheck, 1); } } localTid += blockDim.x; } __syncthreads(); //Wyznaczamy odleglosc do tych nowych liczb for(int d=0 ; d<dimensionOfEntity ; d+=256){ //wczytaj liczbe dla ktorej bedziemy liczyc odleglosci do innych liczb if((tid < 256)&&(d+tid < dimensionOfEntity)){ float* pElement = (float*)((char*)dataTable_device + (d+tid) * dataTable_Pitch) + i; entity[tid] = *pElement; } __syncthreads(); //wyznaczanie odleglosci do liczb for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float distance = 0.0; for(int k=d ; (k<dimensionOfEntity)&&(k<(d+256)) ; ++k){ int lpp = indexes_device[idxCheckingIdGlobal2+lp]; float* pElement = 
(float*)((char*)dataTable_device + k * dataTable_Pitch) + lpp; float pElementVal = *pElement; distance += abs(entity[k-d]-pElementVal); } //zapisanie odleglosci do tablicy na bazie ktorej beda wyszukiwani najblizsi sasiedzi distances_device[idxCheckingIdGlobal2+lp] += distance; } __syncthreads(); } biggestNumber[tid] = 0.0f; smalestNumber[tid] = STUB_INIT_DIST; __syncthreads(); for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float dist = sqrt(distances_device[idxCheckingIdGlobal2+lp]); distances_device[idxCheckingIdGlobal2+lp] = dist; } __syncthreads(); for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float dist = distances_device[idxCheckingIdGlobal2+lp]; biggestNumber[tid] = max(biggestNumber[tid], ceil(dist)); smalestNumber[tid] = min(smalestNumber[tid], floor(dist)); } __syncthreads(); //wyszukiwanie najwiekszej liczby w rezultacie if(tid < 32){ for(int ii=tid ; ii<256 ; ii+=32){ biggestNumber[tid] = max(biggestNumber[tid], biggestNumber[ii]); smalestNumber[tid] = min(smalestNumber[tid], smalestNumber[ii]); } } if(tid == 0){ #pragma unroll for(int c=0 ; c<32 ; ++c){ biggestNumber[0] = max(biggestNumber[0], biggestNumber[c]); smalestNumber[0] = min(smalestNumber[0], smalestNumber[c]); } } __syncthreads(); //Wyszukujemy k najmniejszych liczb if(tid == 0){ bias = smalestNumber[0]; minValue = 0.0; maxValue = biggestNumber[0] - smalestNumber[0]; maxValue = pow(2.0, ceil(log(maxValue+1.0)/log(2.0))); lengthOfBucket = (maxValue-minValue)/256.0; foundExactSolution = FALSE; limitOfLengthOfBucketExceeded = FALSE; alreadyFoundNumbers = 0; rewrittenNumbers = 0; complement = 0; } __syncthreads(); while((foundExactSolution == FALSE) && (limitOfLengthOfBucketExceeded == FALSE)){ hist[tid] = 0; if(tid == 0){ interestingBucket = NON_OF_BUCKET_IN_INTEREST; } __syncthreads(); //wyznacz histogram dla aktualnego opisu minValue-maxValue for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == 0){ int idOfBucketInHist = (distances_device[idxCheckingIdGlobal2+lp]-bias-minValue)/lengthOfBucket; atomicAdd(&hist[idOfBucketInHist], 1); marker_device[idxCheckingIdGlobal2+lp] = idOfBucketInHist; } } __syncthreads(); //zsumuj histogram tak, ze hist(i) to suma od hist(0) do hist(i) if(tid == 0){ for(int k=1 ; k<256 ; ++k){ hist[k] += hist[k-1]; } } __syncthreads(); if((hist[tid]+alreadyFoundNumbers) > numberOfNeighbors){ atomicMin(&interestingBucket, tid); } //jezeli znalezlismy dokladna liczbe to koncz if((tid == 0) && (alreadyFoundNumbers == numberOfNeighbors)){ foundExactSolution = TRUE; } //Sprawdzamy czy nie znalezlismy juz rozwiazania przyblizonego int tmpSum = hist[tid] + alreadyFoundNumbers; if(tmpSum == numberOfNeighbors){ foundExactSolution = TRUE; } //sprawdzamy czy czasami nie osigniemy juz zbyt malej szerokosci kubelka if((tid == 0) && (lengthOfBucket < minimalSizeOfBucket)){ limitOfLengthOfBucketExceeded = TRUE; } __syncthreads(); //dla tych kubelkow z id>interestingBucket zaznaczamy, że nie sa interesujace, a dla id<interestingBucket ze sa w rozwiazaniu, dla id==interestingBucket, do rozpatrzenia w nastepnej iteracji for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if((mark < interestingBucket)&&(mark >= 0)){ marker_device[idxCheckingIdGlobal2+lp] = IN_SOLUTION; atomicAdd(&alreadyFoundNumbers, 1); }else if((mark > interestingBucket)&&(mark < 256)){ 
marker_device[idxCheckingIdGlobal2+lp] = OUT_OF_SOLUTION; }else if(mark == interestingBucket){ marker_device[idxCheckingIdGlobal2+lp] = 0; } } __syncthreads(); //przeliczenie zakresow if(tid == 0){ bias = bias+interestingBucket*lengthOfBucket; minValue = 0.0; maxValue = lengthOfBucket; lengthOfBucket = (maxValue-minValue)/256.0; } __syncthreads(); } __syncthreads(); //Wpisujemy k najmniejsze liczby jako nowe rozwiazanie do neighbours for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == IN_SOLUTION){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance2_device[i*numberOfNeighbors+id] = distances_device[idxCheckingIdGlobal2+lp]; neighboursId2_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } __syncthreads(); //jezeli zostal przekroczony limit kubelka to znajdz odpowiednie liczby dla dopelnienia rezultatu dla najblizszych liczb if((limitOfLengthOfBucketExceeded == TRUE)&&(foundExactSolution == FALSE)){ for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == 0){ int id2 = atomicAdd(&complement, 1); if((id2+alreadyFoundNumbers) < numberOfNeighbors){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance2_device[i*numberOfNeighbors+id] = distances_device[idxCheckingIdGlobal2+lp]; neighboursId2_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } } } __syncthreads(); } __syncthreads(); } } __global__ void propagateCosineKernel(int numberOfEntities, int numberOfNeighbors, float* dataTable_device, size_t dataTable_Pitch, float* neighboursDistance_device, float* neighboursDistance2_device, int* neighboursId_device, int* neighboursId2_device, char* idxChecking_device, int dimensionOfEntity, int start, int end, int* indexes_device, float* distances_device, float* distances2_device, float* distances3_device, int dimensionOfIndexesAndDistances, short* marker_device){ __shared__ float entity[256]; __shared__ int numbersToCheck; __shared__ int numbersToCheckInThisPart; __shared__ int numbersToCheckInThisPartRealPart; __shared__ double bias; __shared__ double lengthOfBucket; __shared__ double maxValue; __shared__ double minValue; __shared__ int hist[256]; __shared__ float biggestNumber[256]; __shared__ float smalestNumber[256]; __shared__ int foundExactSolution; __shared__ int limitOfLengthOfBucketExceeded; __shared__ int alreadyFoundNumbers; __shared__ int interestingBucket; __shared__ int rewrittenNumbers; __shared__ int complement; int tid = threadIdx.x; int idOfBatch = blockIdx.x; int elementsPerBatch = ceil(float(end-start)/float(gridDim.x)); int startOfTheBatch = elementsPerBatch*idOfBatch; int endOfTheBatch = elementsPerBatch*(idOfBatch+1) <= (end-start) ? 
elementsPerBatch*(idOfBatch+1) : (end-start); startOfTheBatch += start; endOfTheBatch += start; int idxCheckingIdGlobal = idOfBatch*numberOfEntities; int idxCheckingIdGlobal2 = idOfBatch*dimensionOfIndexesAndDistances; double minimalSizeOfBucket = 0.000000059604644775; for(int i=startOfTheBatch ; i<endOfTheBatch ; ++i){ //Zerujemy bity do wyszukiwania dla danego punktu, dla ktorego liczymy for(int ii=tid ; ii<numberOfEntities ; ii+=blockDim.x){ idxChecking_device[idxCheckingIdGlobal+ii] = 0x00; } __syncthreads(); //Wyszukujemy liczby do przeszukania for(int neighHi = 0 ; neighHi < numberOfNeighbors ; neighHi += 1){ for(int neighLo = tid ; neighLo < numberOfNeighbors ; neighLo+=blockDim.x){ int interestingNeighbour = neighboursId_device[i*numberOfNeighbors+neighHi]; int elem = neighboursId_device[interestingNeighbour*numberOfNeighbors+neighLo]; idxChecking_device[idxCheckingIdGlobal+elem] = 0x01; } __syncthreads(); } __syncthreads(); //Zerujemy bit odpowiedzialny za liczbe sama ze soba if(tid == 0){ idxChecking_device[idxCheckingIdGlobal+i] = 0x00; } __syncthreads(); //Zerujemy bity dla wlasnych sasiadow i przepisujemy aktualnie najblizszych for(int kk=tid ; kk<numberOfNeighbors ; kk+=blockDim.x){ int elem = neighboursId_device[i*numberOfNeighbors+kk]; idxChecking_device[idxCheckingIdGlobal+elem] = 0x00; neighboursId2_device[i*numberOfNeighbors+kk] = elem; neighboursDistance2_device[i*numberOfNeighbors+kk] = neighboursDistance_device[i*numberOfNeighbors+kk]; } __syncthreads(); //Liczymy najblizszych if(tid == 0){ numbersToCheck = 0; } __syncthreads(); for(int kk=tid ; kk<numberOfEntities ; kk+=blockDim.x){ if(idxChecking_device[idxCheckingIdGlobal+kk] == 0x01){ atomicAdd(&numbersToCheck, 1); } } __syncthreads(); //Przepisujemy te liczby do tablicy z wyszukiwaniem najblizszych sasiadow while(numbersToCheck > 0){ __syncthreads(); //Przepisujemy aktualne najblizsze liczby for(int kk=tid ; kk<numberOfNeighbors ; kk+=blockDim.x){ indexes_device[idxCheckingIdGlobal2+kk] = neighboursId2_device[i*numberOfNeighbors+kk]; distances_device[idxCheckingIdGlobal2+kk] = neighboursDistance2_device[i*numberOfNeighbors+kk]; marker_device[idxCheckingIdGlobal2+kk] = 0; } //Dopisujemy te co aktualnie sprawdzamy if(tid == 0){ numbersToCheck = 0; numbersToCheckInThisPart = numberOfNeighbors; numbersToCheckInThisPartRealPart = numberOfNeighbors; } __syncthreads(); int localTid = tid; while(localTid < numberOfEntities){ if(idxChecking_device[idxCheckingIdGlobal+localTid] == 0x01){ int pos = atomicAdd(&numbersToCheckInThisPart, 1); if(pos < dimensionOfIndexesAndDistances){ indexes_device[idxCheckingIdGlobal2+pos] = localTid; distances_device[idxCheckingIdGlobal2+pos] = 0.0f; marker_device[idxCheckingIdGlobal2+pos] = 0; idxChecking_device[idxCheckingIdGlobal+localTid] = 0x00; atomicAdd(&numbersToCheckInThisPartRealPart, 1); }else{ atomicAdd(&numbersToCheck, 1); } } localTid += blockDim.x; } __syncthreads(); //Wyznaczamy odleglosc do tych nowych liczb for(int d=0 ; d<dimensionOfEntity ; d+=256){ //wczytaj liczbe dla ktorej bedziemy liczyc odleglosci do innych liczb if((tid < 256)&&(d+tid < dimensionOfEntity)){ float* pElement = (float*)((char*)dataTable_device + (d+tid) * dataTable_Pitch) + i; entity[tid] = *pElement; } __syncthreads(); //wyznaczanie odleglosci do liczb for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float distanceAB = 0.0; float distanceA = 0.0; float distanceB = 0.0; for(int k=d ; (k<dimensionOfEntity)&&(k<(d+256)) ; ++k){ int lpp = 
indexes_device[idxCheckingIdGlobal2+lp]; float* pElement = (float*)((char*)dataTable_device + k * dataTable_Pitch) + lpp; float pElementVal = *pElement; distanceAB += entity[k-d]*pElementVal; distanceA += entity[k-d]*entity[k-d]; distanceB += pElementVal*pElementVal; } //zapisanie odleglosci do tablicy na bazie ktorej beda wyszukiwani najblizsi sasiedzi distances_device[idxCheckingIdGlobal2+lp] += distanceAB; distances2_device[idxCheckingIdGlobal2+lp] += distanceA; distances3_device[idxCheckingIdGlobal2+lp] += distanceB; } __syncthreads(); } biggestNumber[tid] = 0.0f; smalestNumber[tid] = STUB_INIT_DIST; __syncthreads(); //wyznaczanie odleglosci do liczb najblizszych for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float distanceAB = distances_device[idxCheckingIdGlobal2+lp]; float distanceA = distances2_device[idxCheckingIdGlobal2+lp]; float distanceB = distances3_device[idxCheckingIdGlobal2+lp]; float distance = distanceAB/(sqrt(distanceA)*sqrt(distanceB)); distance = (-1.0*distance)+1.0; distances_device[idxCheckingIdGlobal2+lp] = distance; } __syncthreads(); //for(int lp = numberOfNeighbors+tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ // float dist = sqrt(distances_device[idxCheckingIdGlobal2+lp]); // distances_device[idxCheckingIdGlobal2+lp] = dist; //} //__syncthreads(); for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ float dist = distances_device[idxCheckingIdGlobal2+lp]; //dist = (-1.0*dist)+1.0; //distances_device[idxCheckingIdGlobal2+lp] = dist; biggestNumber[tid] = max(biggestNumber[tid], ceil(dist)); smalestNumber[tid] = min(smalestNumber[tid], floor(dist)); } __syncthreads(); //wyszukiwanie najwiekszej liczby w rezultacie if(tid < 32){ for(int ii=tid ; ii<256 ; ii+=32){ biggestNumber[tid] = max(biggestNumber[tid], biggestNumber[ii]); smalestNumber[tid] = min(smalestNumber[tid], smalestNumber[ii]); } } if(tid == 0){ #pragma unroll for(int c=0 ; c<32 ; ++c){ biggestNumber[0] = max(biggestNumber[0], biggestNumber[c]); smalestNumber[0] = min(smalestNumber[0], smalestNumber[c]); } } __syncthreads(); //Wyszukujemy k najmniejszych liczb if(tid == 0){ bias = smalestNumber[0]; minValue = 0.0; maxValue = biggestNumber[0] - smalestNumber[0]; maxValue = pow(2.0, ceil(log(maxValue+1.0)/log(2.0))); lengthOfBucket = (maxValue-minValue)/256.0; foundExactSolution = FALSE; limitOfLengthOfBucketExceeded = FALSE; alreadyFoundNumbers = 0; rewrittenNumbers = 0; complement = 0; } __syncthreads(); while((foundExactSolution == FALSE) && (limitOfLengthOfBucketExceeded == FALSE)){ hist[tid] = 0; if(tid == 0){ interestingBucket = NON_OF_BUCKET_IN_INTEREST; } __syncthreads(); //wyznacz histogram dla aktualnego opisu minValue-maxValue for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == 0){ int idOfBucketInHist = (distances_device[idxCheckingIdGlobal2+lp]-bias-minValue)/lengthOfBucket; atomicAdd(&hist[idOfBucketInHist], 1); marker_device[idxCheckingIdGlobal2+lp] = idOfBucketInHist; } } __syncthreads(); //zsumuj histogram tak, ze hist(i) to suma od hist(0) do hist(i) if(tid == 0){ for(int k=1 ; k<256 ; ++k){ hist[k] += hist[k-1]; } } __syncthreads(); if((hist[tid]+alreadyFoundNumbers) > numberOfNeighbors){ atomicMin(&interestingBucket, tid); } //jezeli znalezlismy dokladna liczbe to koncz if((tid == 0) && (alreadyFoundNumbers == numberOfNeighbors)){ foundExactSolution = TRUE; } //Sprawdzamy czy nie znalezlismy juz rozwiazania przyblizonego 
int tmpSum = hist[tid] + alreadyFoundNumbers; if(tmpSum == numberOfNeighbors){ foundExactSolution = TRUE; } //sprawdzamy czy czasami nie osigniemy juz zbyt malej szerokosci kubelka if((tid == 0) && (lengthOfBucket < minimalSizeOfBucket)){ limitOfLengthOfBucketExceeded = TRUE; } __syncthreads(); //dla tych kubelkow z id>interestingBucket zaznaczamy, że nie sa interesujace, a dla id<interestingBucket ze sa w rozwiazaniu, dla id==interestingBucket, do rozpatrzenia w nastepnej iteracji for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if((mark < interestingBucket)&&(mark >= 0)){ marker_device[idxCheckingIdGlobal2+lp] = IN_SOLUTION; atomicAdd(&alreadyFoundNumbers, 1); }else if((mark > interestingBucket)&&(mark < 256)){ marker_device[idxCheckingIdGlobal2+lp] = OUT_OF_SOLUTION; }else if(mark == interestingBucket){ marker_device[idxCheckingIdGlobal2+lp] = 0; } } __syncthreads(); //przeliczenie zakresow if(tid == 0){ bias = bias+interestingBucket*lengthOfBucket; minValue = 0.0; maxValue = lengthOfBucket; lengthOfBucket = (maxValue-minValue)/256.0; } __syncthreads(); } __syncthreads(); //Wpisujemy k najmniejsze liczby jako nowe rozwiazanie do neighbours for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == IN_SOLUTION){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance2_device[i*numberOfNeighbors+id] = -1.0*(distances_device[idxCheckingIdGlobal2+lp]-1.0); neighboursId2_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } __syncthreads(); //jezeli zostal przekroczony limit kubelka to znajdz odpowiednie liczby dla dopelnienia rezultatu dla najblizszych liczb if((limitOfLengthOfBucketExceeded == TRUE)&&(foundExactSolution == FALSE)){ for(int lp = tid ; lp<numbersToCheckInThisPartRealPart ; lp+=blockDim.x){ short mark = marker_device[idxCheckingIdGlobal2+lp]; if(mark == 0){ int id2 = atomicAdd(&complement, 1); if((id2+alreadyFoundNumbers) < numberOfNeighbors){ int id = atomicAdd(&rewrittenNumbers, 1); neighboursDistance2_device[i*numberOfNeighbors+id] = -1.0*(distances_device[idxCheckingIdGlobal2+lp]-1.0); neighboursId2_device[i*numberOfNeighbors+id] = indexes_device[idxCheckingIdGlobal2+lp]; } } } } __syncthreads(); } __syncthreads(); } } void EuclideanDistanceMatrixGPU::propagate(){ for(int prop=0 ; prop<numberOfPropagations ; ++prop){ cuCall(cudaSetDevice(device)); std::cout<<"Urzadzenie "<<device<<" uruchamia zadanie propagacji dla punktow: "<<partition.start<<" - "<<partition.end-1<<" dla iteracji: "<<prop+1<<"\n"; dim3 grid2(this->numberOfMultiprocessors*this->numberOfBlocksPerMultiprocessors, 1); dim3 block2(256, 1); if(typeOfDistance == DISTANCE_EUCLIDEAN){ propagateEuclideanKernel<<<grid2, block2, 0, executionStreams>>>(numberOfEntities, numberOfNeighbors, dataTable_device, dataTable_Pitch, neighboursDistance_device, neighboursDistance2_device, neighboursId_device, neighboursId2_device, idxChecking_device, dimensionOfEntity, partition.start, partition.end, indexes_device, distances_device, dimensionOfIndexesAndDistances, marker_device); }else if(typeOfDistance == DISTANCE_TAXICAB){ propagateTaxicabKernel<<<grid2, block2, 0, executionStreams>>>(numberOfEntities, numberOfNeighbors, dataTable_device, dataTable_Pitch, neighboursDistance_device, neighboursDistance2_device, neighboursId_device, neighboursId2_device, idxChecking_device, dimensionOfEntity, partition.start, partition.end, indexes_device, 
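            // Each propagation iteration launches one metric-specific kernel over this device's
            // partition [partition.start, partition.end) with a grid of
            // numberOfMultiprocessors * numberOfBlocksPerMultiprocessors blocks of 256 threads,
            // waits on executionStreams, and then swaps the id/distance buffer pointers below so
            // the next iteration consumes the freshly refined graph.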
distances_device, dimensionOfIndexesAndDistances, marker_device); }else if(typeOfDistance == DISTANCE_COSINE){ propagateCosineKernel<<<grid2, block2, 0, executionStreams>>>(numberOfEntities, numberOfNeighbors, dataTable_device, dataTable_Pitch, neighboursDistance_device, neighboursDistance2_device, neighboursId_device, neighboursId2_device, idxChecking_device, dimensionOfEntity, partition.start, partition.end, indexes_device, distances_device, distances2_device, distances3_device, dimensionOfIndexesAndDistances, marker_device); }else{ std::cout<<"We do not have such type of distance\n"; } cuCall(cudaStreamSynchronize(executionStreams)); cudaError_t err = cudaGetLastError(); if (cudaSuccess != err){ std::cout<<"propagateKernel: "<<cudaGetErrorString(err)<<"\n"; } float* tmp1 = neighboursDistance_device; neighboursDistance_device = neighboursDistance2_device; neighboursDistance2_device = tmp1; int* tmp2 = neighboursId_device; neighboursId_device = neighboursId2_device; neighboursId2_device = tmp2; } } bool compareByLengthMin(const DataPoint &a, const DataPoint &b){ return a.distance < b.distance; } bool compareByLengthMinCosine(const DataPoint &a, const DataPoint &b){ return a.distance > b.distance; } std::string trim(std::string const& str){ if(str.empty()) return str; std::size_t firstScan = str.find_first_not_of(' '); std::size_t first = firstScan == std::string::npos ? str.length() : firstScan; std::size_t last = str.find_last_not_of(' '); return str.substr(first, last-first+1); } bool EuclideanDistanceMatrixGPU::initilizeGPUStructuresForTrees(){ bool error = false; error |= cuCall(cudaMalloc((void**)&trees_device, numberOfTrees*sizeof(TreeNode*))); error |= cuCall(cudaHostAlloc((void**)&trees_device_pointer_for_cpu, numberOfTrees*sizeof(TreeNode*), cudaHostAllocPortable)); for(int i=0 ; i<numberOfTrees ; ++i){ int elems = trees_host[i].size(); error |= cuCall(cudaMalloc((void**)&trees_device_pointer_for_cpu[i], elems*sizeof(TreeNode))); } error |= cuCall(cudaMemcpy((void*)trees_device, (void*)trees_device_pointer_for_cpu, numberOfTrees*sizeof(TreeNode*), cudaMemcpyHostToDevice)); error |= cuCall(cudaMalloc((void**)&treesLeafs_device, numberOfTrees*sizeof(TreeNodeLeaf*))); error |= cuCall(cudaHostAlloc((void**)&treesLeafs_device_pointer_for_cpu, numberOfTrees*sizeof(TreeNodeLeaf*), cudaHostAllocPortable)); for(int i=0 ; i<numberOfTrees ; ++i){ int elems = treesLeafs_host[i].size(); error |= cuCall(cudaMalloc((void**)&treesLeafs_device_pointer_for_cpu[i], elems*sizeof(TreeNodeLeaf))); } error |= cuCall(cudaMemcpy((void*)treesLeafs_device, (void*)treesLeafs_device_pointer_for_cpu, numberOfTrees*sizeof(TreeNodeLeaf*), cudaMemcpyHostToDevice)); error |= cuCall(cudaMalloc((void**)&trees_size_device, numberOfTrees*sizeof(int))); error |= cuCall(cudaHostAlloc((void**)&trees_size_host, numberOfTrees*sizeof(int), cudaHostAllocPortable)); error |= cuCall(cudaMalloc((void**)&treesLeafs_size_device, numberOfTrees*sizeof(int))); error |= cuCall(cudaHostAlloc((void**)&treesLeafs_size_host, numberOfTrees*sizeof(int), cudaHostAllocPortable)); //Przekopiowanie vektorow for(int i=0 ; i<numberOfTrees ; ++i){ error |= cuCall(cudaMemcpyAsync((void*)trees_device_pointer_for_cpu[i], (void*)trees_host[i].data(), trees_host[i].size()*sizeof(TreeNode), cudaMemcpyHostToDevice, executionStreams)); error |= cuCall(cudaMemcpyAsync((void*)treesLeafs_device_pointer_for_cpu[i], (void*)treesLeafs_host[i].data(), treesLeafs_host[i].size()*sizeof(TreeNodeLeaf), cudaMemcpyHostToDevice, executionStreams)); trees_size_host[i] 
= trees_host[i].size(); treesLeafs_size_host[i] = treesLeafs_host[i].size(); } error |= cuCall(cudaMemcpyAsync((void*)trees_size_device, (void*)trees_size_host, numberOfTrees*sizeof(int), cudaMemcpyHostToDevice, executionStreams)); error |= cuCall(cudaMemcpyAsync((void*)treesLeafs_size_device, (void*)treesLeafs_size_host, numberOfTrees*sizeof(int), cudaMemcpyHostToDevice, executionStreams)); error |= cuCall(cudaStreamSynchronize(executionStreams)); return error; } bool EuclideanDistanceMatrixGPU::deinitializeGPUStructuresForTrees(){ bool error = false; error |= cuCall(cudaSetDevice(device)); error |= cuCall(cudaDeviceSynchronize()); for(int i=0 ; i<numberOfTrees ; ++i){ error |= cuCall(cudaFree((void*)trees_device_pointer_for_cpu[i])); } error |= cuCall(cudaFree((void*)trees_device)); error |= cuCall(cudaFreeHost((void*)trees_device_pointer_for_cpu)); for(int i=0 ; i<numberOfTrees ; ++i){ error |= cuCall(cudaFree((void*)treesLeafs_device_pointer_for_cpu[i])); } error |= cuCall(cudaFree((void*)treesLeafs_device)); error |= cuCall(cudaFreeHost((void*)treesLeafs_device_pointer_for_cpu)); error |= cuCall(cudaFree((void*)trees_size_device)); error |= cuCall(cudaFreeHost((void*)trees_size_host)); error |= cuCall(cudaFree((void*)treesLeafs_size_device)); error |= cuCall(cudaFreeHost((void*)treesLeafs_size_host)); return error; } void EuclideanDistanceMatrixGPU::findInitialKNN(){ dim3 grid1(ceil(float(numberOfEntities)/256.0), 1); dim3 block1(256, 1); findGraphTraversalStartPoint<<<grid1, block1, 0, executionStreams>>>(graphTraversalStartPoint_device, numberOfEntities, numberOfNeighbors, trees_device, treesLeafs_device, trees_size_device, treesLeafs_size_device, numberOfTrees); cuCall(cudaStreamSynchronize(executionStreams)); cudaError_t err1 = cudaGetLastError(); if (cudaSuccess != err1){ std::cout<<"findGraphTraversalStartPoint: "<<cudaGetErrorString(err1)<<"\n"; } std::cout<<"Urzadzenie "<<device<<" uruchamia zadanie inicjalizacji kNN dla punktow: "<<partition.start<<" - "<<partition.end-1<<"\n"; dim3 grid2(this->numberOfMultiprocessors*this->numberOfBlocksPerMultiprocessors, 1); dim3 block2(256, 1); if(typeOfDistance == DISTANCE_EUCLIDEAN){ findInitialStateOfAproximatedEuclideanKNN<<<grid2, block2, 0, executionStreams>>>(graphTraversalStartPoint_device, numberOfEntities, numberOfNeighbors, trees_device, treesLeafs_device, trees_size_device, treesLeafs_size_device, numberOfTrees, dataTable_device, dataTable_Pitch, neighboursDistance_device, neighboursId_device, idxChecking_device, dimensionOfEntity, partition.start, partition.end, indexes_device, distances_device, dimensionOfIndexesAndDistances, marker_device, minSize); }else if(typeOfDistance == DISTANCE_TAXICAB){ findInitialStateOfAproximatedTaxicabKNN<<<grid2, block2, 0, executionStreams>>>(graphTraversalStartPoint_device, numberOfEntities, numberOfNeighbors, trees_device, treesLeafs_device, trees_size_device, treesLeafs_size_device, numberOfTrees, dataTable_device, dataTable_Pitch, neighboursDistance_device, neighboursId_device, idxChecking_device, dimensionOfEntity, partition.start, partition.end, indexes_device, distances_device, dimensionOfIndexesAndDistances, marker_device, minSize); }else if(typeOfDistance == DISTANCE_COSINE){ findInitialStateOfAproximatedCosineKNN<<<grid2, block2, 0, executionStreams>>>(graphTraversalStartPoint_device, numberOfEntities, numberOfNeighbors, trees_device, treesLeafs_device, trees_size_device, treesLeafs_size_device, numberOfTrees, dataTable_device, dataTable_Pitch, neighboursDistance_device, 
neighboursId_device, idxChecking_device, dimensionOfEntity, partition.start, partition.end, indexes_device, distances_device, distances2_device, distances3_device, dimensionOfIndexesAndDistances, marker_device, minSize); }else{ std::cout<<"We do not have such type of distance\n"; } cuCall(cudaStreamSynchronize(executionStreams)); cudaError_t err2 = cudaGetLastError(); if (cudaSuccess != err2){ std::cout<<"findInitialStateOfAproximatedKNN: "<<cudaGetErrorString(err2)<<"\n"; } } __global__ void stubInitializationKernel(int numberOfEntities, int numberOfNeighbors, float* neighboursDistance_device, int* neighboursId_device){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < (numberOfEntities*numberOfNeighbors)){ neighboursDistance_device[tid] = STUB_INIT_DIST; neighboursId_device[tid] = STUB_INIT_ID; } } void EuclideanDistanceMatrixGPU::stubInitialization(){ dim3 grid1(ceil(float(numberOfEntities*numberOfNeighbors)/256.0), 1); dim3 block1(256, 1); stubInitializationKernel<<<grid1, block1, 0, executionStreams>>>(numberOfEntities, numberOfNeighbors, neighboursDistance_device, neighboursId_device); cuCall(cudaStreamSynchronize(executionStreams)); cudaError_t err2 = cudaGetLastError(); if (cudaSuccess != err2){ std::cout<<"stubInitializationKernel: "<<cudaGetErrorString(err2)<<"\n"; } } __global__ void makePartitionOfLeaf0(int* treeNodeSizeDevice){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid == 0){ *treeNodeSizeDevice = 1; } } __global__ void makePartitionOfLeaf1(float* dataTable_device, size_t dataTable_pitch, int numberOfDimension, int numberOfEntities, int* elemsPerLeafInCurrentLevel, int* numberOfElemsPerLeafInCurrentLevel, int* biasOfElemsPerLeafInCurrentLevel, int* points1, int* points2, char* side_device, int minSize){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < numberOfEntities){ int checkedPoint = elemsPerLeafInCurrentLevel[tid]; int point1 = elemsPerLeafInCurrentLevel[biasOfElemsPerLeafInCurrentLevel[tid] + points1[tid]]; int point2 = elemsPerLeafInCurrentLevel[biasOfElemsPerLeafInCurrentLevel[tid] + points2[tid]]; int size = numberOfElemsPerLeafInCurrentLevel[tid]; if(size <= minSize){ return; } float sideSign = 0.0f; for(int dim = 0 ; dim < numberOfDimension ; ++dim){ float* pElementCheckedPoint = (float*)((char*)dataTable_device + dim * dataTable_pitch) + checkedPoint; float* pElementPoint1 = (float*)((char*)dataTable_device + dim * dataTable_pitch) + point1; float* pElementPoint2 = (float*)((char*)dataTable_device + dim * dataTable_pitch) + point2; sideSign += (*pElementCheckedPoint)*((*pElementPoint2)-(*pElementPoint1)); } if(sideSign < 0){ side_device[tid] = SIDE_LEFT; }else{ side_device[tid] = SIDE_RIGHT; } } } __global__ void makePartitionOfLeaf2(float* dataTable_device, size_t dataTable_pitch, int numberOfDimension, int numberOfEntities, int* elemsPerLeafInCurrentLevel, int* numberOfElemsPerLeafInCurrentLevel, int* biasOfElemsPerLeafInCurrentLevel, int* points1, int* points2, char* side_device, int minSize){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if((tid < numberOfEntities) && (tid == biasOfElemsPerLeafInCurrentLevel[tid])){ int point1 = biasOfElemsPerLeafInCurrentLevel[tid] + points1[tid]; int point2 = biasOfElemsPerLeafInCurrentLevel[tid] + points2[tid]; int size = numberOfElemsPerLeafInCurrentLevel[tid]; if(size <= minSize){ return; } side_device[point1] = SIDE_LEFT; side_device[point2] = SIDE_RIGHT; } } __global__ void makePartitionOfLeaf3(int numberOfEntities, int* elemsPerLeafInCurrentLevel, int* elemsPerLeafInCurrentLevel2, int* 
numberOfElemsPerLeafInCurrentLevel, int* numberOfElemsPerLeafInCurrentLevel2, int* biasOfElemsPerLeafInCurrentLevel, int* biasOfElemsPerLeafInCurrentLevel2, int* points1, int* points12, int* points2, int* points22, int* idOfLeafParent, int* idOfLeafParent2, char* side_device, char* side2_device, TreeNode* treeNodeDevice, int* treeNodeSizeDevice, int minSize){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < numberOfEntities){ int biasOfElement = biasOfElemsPerLeafInCurrentLevel[tid]; if(tid == biasOfElement){ int parent = idOfLeafParent[tid]; treeNodeDevice[parent].leftChild = 0; treeNodeDevice[parent].rightChild = 0; } } } __global__ void makePartitionOfLeaf4(int numberOfEntities, int* elemsPerLeafInCurrentLevel, int* elemsPerLeafInCurrentLevel2, int* numberOfElemsPerLeafInCurrentLevel, int* numberOfElemsPerLeafInCurrentLevel2, int* biasOfElemsPerLeafInCurrentLevel, int* biasOfElemsPerLeafInCurrentLevel2, int* points1, int* points12, int* points2, int* points22, int* idOfLeafParent, int* idOfLeafParent2, char* side_device, char* side2_device, TreeNode* treeNodeDevice, int* treeNodeSizeDevice, int minSize){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < numberOfEntities){ int element = elemsPerLeafInCurrentLevel[tid]; int biasOfElement = biasOfElemsPerLeafInCurrentLevel[tid]; int parent = idOfLeafParent[tid]; char side = side_device[tid]; unsigned int* numberOfElemsLeft = (unsigned int*)&treeNodeDevice[parent].leftChild; if(side == SIDE_LEFT){ int newPos = (int)atomicInc(numberOfElemsLeft, INT_MAX); elemsPerLeafInCurrentLevel2[biasOfElement + newPos] = element; side2_device[biasOfElement + newPos] = SIDE_LEFT; biasOfElemsPerLeafInCurrentLevel2[biasOfElement + newPos] = biasOfElement; idOfLeafParent2[biasOfElement + newPos] = parent; } } } __global__ void makePartitionOfLeaf5(int numberOfEntities, int* elemsPerLeafInCurrentLevel, int* elemsPerLeafInCurrentLevel2, int* numberOfElemsPerLeafInCurrentLevel, int* numberOfElemsPerLeafInCurrentLevel2, int* biasOfElemsPerLeafInCurrentLevel, int* biasOfElemsPerLeafInCurrentLevel2, int* points1, int* points12, int* points2, int* points22, int* idOfLeafParent, int* idOfLeafParent2, char* side_device, char* side2_device, TreeNode* treeNodeDevice, int* treeNodeSizeDevice, int minSize){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < numberOfEntities){ int element = elemsPerLeafInCurrentLevel[tid]; int biasOfElement = biasOfElemsPerLeafInCurrentLevel[tid]; int parent = idOfLeafParent[tid]; char side = side_device[tid]; int numberElemsLeft = treeNodeDevice[parent].leftChild; unsigned int* numberOfElemsRight = (unsigned int*)&treeNodeDevice[parent].rightChild; if(side == SIDE_RIGHT){ int newPos = (int)atomicInc(numberOfElemsRight, INT_MAX); elemsPerLeafInCurrentLevel2[biasOfElement + numberElemsLeft + newPos] = element; side2_device[biasOfElement + numberElemsLeft + newPos] = SIDE_RIGHT; biasOfElemsPerLeafInCurrentLevel2[biasOfElement + numberElemsLeft + newPos] = biasOfElement; idOfLeafParent2[biasOfElement + numberElemsLeft + newPos] = parent; } } } __global__ void makePartitionOfLeaf6(int numberOfEntities, int* elemsPerLeafInCurrentLevel, int* elemsPerLeafInCurrentLevel2, int* numberOfElemsPerLeafInCurrentLevel, int* numberOfElemsPerLeafInCurrentLevel2, int* biasOfElemsPerLeafInCurrentLevel, int* biasOfElemsPerLeafInCurrentLevel2, int* points1, int* points12, int* points2, int* points22, int* idOfLeafParent, int* idOfLeafParent2, char* side_device, char* side2_device, TreeNode* treeNodeDevice, int* treeNodeSizeDevice, int 
minSize){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < numberOfEntities){ int biasOfElement = biasOfElemsPerLeafInCurrentLevel2[tid]; int parent = idOfLeafParent2[tid]; char side = side2_device[tid]; int numberElemsLeft = treeNodeDevice[parent].leftChild; int numberElemsRight = treeNodeDevice[parent].rightChild; if(side == SIDE_LEFT){ numberOfElemsPerLeafInCurrentLevel2[tid] = numberElemsLeft; biasOfElemsPerLeafInCurrentLevel2[tid] = biasOfElement; } if(side == SIDE_RIGHT){ numberOfElemsPerLeafInCurrentLevel2[tid] = numberElemsRight; biasOfElemsPerLeafInCurrentLevel2[tid] = biasOfElement + numberElemsLeft; } } } __global__ void makePartitionOfLeaf7(int numberOfEntities, int* elemsPerLeafInCurrentLevel, int* elemsPerLeafInCurrentLevel2, int* numberOfElemsPerLeafInCurrentLevel, int* numberOfElemsPerLeafInCurrentLevel2, int* biasOfElemsPerLeafInCurrentLevel, int* biasOfElemsPerLeafInCurrentLevel2, int* points1, int* points12, int* points2, int* points22, int* idOfLeafParent, int* idOfLeafParent2, char* side_device, char* side2_device, TreeNode* treeNodeDevice, int* treeNodeSizeDevice, int minSize, int rand1, int rand2, int* thereWasDividing){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < numberOfEntities){ int biasOfElementLeft = biasOfElemsPerLeafInCurrentLevel[tid]; if(tid == biasOfElementLeft){ int parent = idOfLeafParent2[tid]; int numberElemsLeft = treeNodeDevice[parent].leftChild; int numberElemsRight = treeNodeDevice[parent].rightChild; if((numberElemsLeft > 0 ) && (numberElemsRight > 0)){ //bylo dzielenie atomicCAS(thereWasDividing, 0, 1); TreeNode treeNodeLeft = {parent, EMPTY_DIRECTION, EMPTY_DIRECTION, numberElemsLeft}; TreeNode treeNodeRight = {parent, EMPTY_DIRECTION, EMPTY_DIRECTION, numberElemsRight}; int idLeft = (int)atomicInc((unsigned int*)treeNodeSizeDevice, INT_MAX); int idRight = (int)atomicInc((unsigned int*)treeNodeSizeDevice, INT_MAX); treeNodeDevice[idLeft] = treeNodeLeft; treeNodeDevice[idRight] = treeNodeRight; treeNodeDevice[parent].leftChild = idLeft; treeNodeDevice[parent].rightChild = idRight; idOfLeafParent2[biasOfElementLeft] = idLeft; if(numberElemsLeft > 1){ int pointIdx1 = 0; int pointIdx2 = 0; int count = numberElemsLeft; pointIdx1 = int(((double)rand1/(RAND_MAX))*INT_MAX) % count; pointIdx2 = int(((double)rand2/(RAND_MAX))*INT_MAX) % count; if(pointIdx1 == pointIdx2){ pointIdx2 = (pointIdx1+1)%count; } points12[biasOfElementLeft] = pointIdx1; points22[biasOfElementLeft] = pointIdx2; }else{ points12[biasOfElementLeft] = 0; points22[biasOfElementLeft] = 0; } int biasOfElementRight = biasOfElementLeft + numberElemsLeft; idOfLeafParent2[biasOfElementRight] = idRight; if(numberElemsRight > 1){ int pointIdx1 = 0; int pointIdx2 = 0; int count = numberElemsRight; pointIdx1 = int(((double)rand1/(RAND_MAX))*INT_MAX) % count; pointIdx2 = int(((double)rand2/(RAND_MAX))*INT_MAX) % count; if(pointIdx1 == pointIdx2){ pointIdx2 = (pointIdx1+1)%count; } points12[biasOfElementRight] = pointIdx1; points22[biasOfElementRight] = pointIdx2; }else{ points12[biasOfElementRight] = 0; points22[biasOfElementRight] = 0; } }else{ //nie bylo dzielenia points12[tid] = 0; points22[tid] = 0; } } } } __global__ void makePartitionOfLeaf8(int numberOfEntities, int* elemsPerLeafInCurrentLevel, int* elemsPerLeafInCurrentLevel2, int* numberOfElemsPerLeafInCurrentLevel, int* numberOfElemsPerLeafInCurrentLevel2, int* biasOfElemsPerLeafInCurrentLevel, int* biasOfElemsPerLeafInCurrentLevel2, int* points1, int* points12, int* points2, int* points22, int* idOfLeafParent, 
int* idOfLeafParent2, char* side_device, char* side2_device, TreeNode* treeNodeDevice, int* treeNodeSizeDevice, int minSize){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < numberOfEntities){ int biasOfElement = biasOfElemsPerLeafInCurrentLevel2[tid]; int parent = idOfLeafParent2[biasOfElement]; int p1 = points12[biasOfElement]; int p2 = points22[biasOfElement]; idOfLeafParent2[tid] = parent; points12[tid] = p1; points22[tid] = p2; } } __global__ void makePartitionOfLeaf9(int numberOfEntities, int* elemsPerLeafInCurrentLevel, int* biasOfElemsPerLeafInCurrentLevel, int* idOfLeafParent_device, TreeNodeLeaf* treeNodesLeafs, TreeNode* treeNodeDevice){ int tid = threadIdx.x + blockIdx.x * blockDim.x; if(tid < numberOfEntities){ TreeNodeLeaf treeNodeLeaf; treeNodeLeaf.parent = idOfLeafParent_device[tid]; treeNodeLeaf.entityNumber = elemsPerLeafInCurrentLevel[tid]; treeNodesLeafs[tid] = treeNodeLeaf; if(tid == biasOfElemsPerLeafInCurrentLevel[tid]){ treeNodeDevice[treeNodeLeaf.parent].leftChild = tid; treeNodeDevice[treeNodeLeaf.parent].rightChild = EMPTY_DIRECTION; } } } void EuclideanDistanceMatrixGPU::buildUpTheTrees(){ trees_host.clear(); treesLeafs_host.clear(); TreeNode* treeNodeDevice; TreeNodeLeaf* treeNodesLeafsDevice; int* treeNodeSizeDevice; int* thereWasDividing; cuCall(cudaMalloc((void**)&treeNodeDevice, 2*numberOfEntities*sizeof(TreeNode))); cuCall(cudaMalloc((void**)&treeNodesLeafsDevice, numberOfEntities*sizeof(TreeNodeLeaf))); cuCall(cudaMalloc((void**)&treeNodeSizeDevice, sizeof(int))); cuCall(cudaMalloc((void**)&thereWasDividing, sizeof(int))); int* elemsPerLeafInCurrentLevel_host; int* elemsPerLeafInCurrentLevel_device; int* elemsPerLeafInCurrentLevel2_device; int* numberOfElemsPerLeafInCurrentLevel_host; int* numberOfElemsPerLeafInCurrentLevel_device; int* numberOfElemsPerLeafInCurrentLevel2_device; int* biasOfElemsPerLeafInCurrentLevel_host; int* biasOfElemsPerLeafInCurrentLevel_device; int* biasOfElemsPerLeafInCurrentLevel2_device; int* points1_host; int* points1_device; int* points12_device; int* points2_host; int* points2_device; int* points22_device; int* idOfLeafParent_host; int* idOfLeafParent_device; int* idOfLeafParent2_device; char* side_host; char* side_device; char* side2_device; cuCall(cudaHostAlloc((void**)&elemsPerLeafInCurrentLevel_host, numberOfEntities*sizeof(int), cudaHostAllocPortable)); cuCall(cudaMalloc((void**)&elemsPerLeafInCurrentLevel_device, numberOfEntities*sizeof(int))); cuCall(cudaMalloc((void**)&elemsPerLeafInCurrentLevel2_device, numberOfEntities*sizeof(int))); cuCall(cudaHostAlloc((void**)&numberOfElemsPerLeafInCurrentLevel_host, numberOfEntities*sizeof(int), cudaHostAllocPortable)); cuCall(cudaMalloc((void**)&numberOfElemsPerLeafInCurrentLevel_device, numberOfEntities*sizeof(int))); cuCall(cudaMalloc((void**)&numberOfElemsPerLeafInCurrentLevel2_device, numberOfEntities*sizeof(int))); cuCall(cudaHostAlloc((void**)&biasOfElemsPerLeafInCurrentLevel_host, numberOfEntities*sizeof(int), cudaHostAllocPortable)); cuCall(cudaMalloc((void**)&biasOfElemsPerLeafInCurrentLevel_device, numberOfEntities*sizeof(int))); cuCall(cudaMalloc((void**)&biasOfElemsPerLeafInCurrentLevel2_device, numberOfEntities*sizeof(int))); cuCall(cudaHostAlloc((void**)&points1_host, numberOfEntities*sizeof(int), cudaHostAllocPortable)); cuCall(cudaMalloc((void**)&points1_device, numberOfEntities*sizeof(int))); cuCall(cudaMalloc((void**)&points12_device, numberOfEntities*sizeof(int))); cuCall(cudaHostAlloc((void**)&points2_host, numberOfEntities*sizeof(int), 
cudaHostAllocPortable)); cuCall(cudaMalloc((void**)&points2_device, numberOfEntities*sizeof(int))); cuCall(cudaMalloc((void**)&points22_device, numberOfEntities*sizeof(int))); cuCall(cudaHostAlloc((void**)&idOfLeafParent_host, numberOfEntities*sizeof(int), cudaHostAllocPortable)); cuCall(cudaMalloc((void**)&idOfLeafParent_device, numberOfEntities*sizeof(int))); cuCall(cudaMalloc((void**)&idOfLeafParent2_device, numberOfEntities*sizeof(int))); cuCall(cudaHostAlloc((void**)&side_host, numberOfEntities*sizeof(char), cudaHostAllocPortable)); cuCall(cudaMalloc((void**)&side_device, numberOfEntities*sizeof(char))); cuCall(cudaMalloc((void**)&side2_device, numberOfEntities*sizeof(char))); for(int i=0 ; i<numberOfTrees ; ++i){ std::cout<<"The tree with number: "<<i+1<<" is building\n"; //Inicjalizacja std::vector<TreeNode> treeNodes; trees_host[i] = treeNodes; std::vector<TreeNodeLeaf> treeNodesLeafs; treesLeafs_host[i] = treeNodesLeafs; TreeNode treeNode = {EMPTY_DIRECTION, EMPTY_DIRECTION, EMPTY_DIRECTION, numberOfEntities}; trees_host[i].push_back(treeNode); cuCall(cudaMemcpyAsync((void*)treeNodeDevice, (void*)trees_host[i].data(), sizeof(TreeNode), cudaMemcpyHostToDevice, executionStreams)); makePartitionOfLeaf0<<<1, 1, 0, executionStreams>>>(treeNodeSizeDevice); //Inicjalizacja tablic int pointIdx1 = 0; int pointIdx2 = 0; int count = numberOfEntities; pointIdx1 = int(((double)rand()/(RAND_MAX))*INT_MAX) % count; pointIdx2 = int(((double)rand()/(RAND_MAX))*INT_MAX) % count; if(pointIdx1 == pointIdx2){ pointIdx2 = (pointIdx1+1)%count; } for(int k=0 ; k<numberOfEntities ; ++k){ elemsPerLeafInCurrentLevel_host[k] = k; numberOfElemsPerLeafInCurrentLevel_host[k] = numberOfEntities; biasOfElemsPerLeafInCurrentLevel_host[k] = 0; points1_host[k] = pointIdx1; points2_host[k] = pointIdx2; idOfLeafParent_host[k] = 0; side_host[k] = SIDE_LEFT; } //Przeslanie na GPU odpowiednich tablic cuCall(cudaMemcpyAsync((void*)elemsPerLeafInCurrentLevel_device, (void*)elemsPerLeafInCurrentLevel_host, numberOfEntities*sizeof(int), cudaMemcpyHostToDevice, executionStreams)); cuCall(cudaMemcpyAsync((void*)numberOfElemsPerLeafInCurrentLevel_device, (void*)numberOfElemsPerLeafInCurrentLevel_host, numberOfEntities*sizeof(int), cudaMemcpyHostToDevice, executionStreams)); cuCall(cudaMemcpyAsync((void*)biasOfElemsPerLeafInCurrentLevel_device, (void*)biasOfElemsPerLeafInCurrentLevel_host, numberOfEntities*sizeof(int), cudaMemcpyHostToDevice, executionStreams)); cuCall(cudaMemcpyAsync((void*)points1_device, (void*)points1_host, numberOfEntities*sizeof(int), cudaMemcpyHostToDevice, executionStreams)); cuCall(cudaMemcpyAsync((void*)points2_device, (void*)points2_host, numberOfEntities*sizeof(int), cudaMemcpyHostToDevice, executionStreams)); cuCall(cudaMemcpyAsync((void*)idOfLeafParent_device, (void*)idOfLeafParent_host, numberOfEntities*sizeof(int), cudaMemcpyHostToDevice, executionStreams)); cuCall(cudaMemcpyAsync((void*)side_device, (void*)side_host, numberOfEntities*sizeof(char), cudaMemcpyHostToDevice, executionStreams)); //Dzielenie galezi bool treeeIsGoingToBeEdit = true; while(treeeIsGoingToBeEdit == true){ treeeIsGoingToBeEdit = false; //Przeliczenie dim3 grid(ceil(float(numberOfEntities)/256.0), 1); dim3 block(256, 1); makePartitionOfLeaf1<<<grid, block, 0, executionStreams>>>(dataTable_device, dataTable_Pitch, dimensionOfEntity, numberOfEntities, elemsPerLeafInCurrentLevel_device, numberOfElemsPerLeafInCurrentLevel_device, biasOfElemsPerLeafInCurrentLevel_device, points1_device, points2_device, side_device, minSize); 
makePartitionOfLeaf2<<<grid, block, 0, executionStreams>>>(dataTable_device, dataTable_Pitch, dimensionOfEntity, numberOfEntities, elemsPerLeafInCurrentLevel_device, numberOfElemsPerLeafInCurrentLevel_device, biasOfElemsPerLeafInCurrentLevel_device, points1_device, points2_device, side_device, minSize); //czasowo wszystko czyscimy cuCall(cudaMemsetAsync((void*)elemsPerLeafInCurrentLevel2_device, 0, numberOfEntities*sizeof(int), executionStreams)); cuCall(cudaMemsetAsync((void*)numberOfElemsPerLeafInCurrentLevel2_device, 0, numberOfEntities*sizeof(int), executionStreams)); cuCall(cudaMemsetAsync((void*)biasOfElemsPerLeafInCurrentLevel2_device, 0, numberOfEntities*sizeof(int), executionStreams)); cuCall(cudaMemsetAsync((void*)points12_device, 0, numberOfEntities*sizeof(int), executionStreams)); cuCall(cudaMemsetAsync((void*)points22_device, 0, numberOfEntities*sizeof(int), executionStreams)); cuCall(cudaMemsetAsync((void*)idOfLeafParent2_device, 0, numberOfEntities*sizeof(int), executionStreams)); cuCall(cudaMemsetAsync((void*)side2_device, 0, numberOfEntities*sizeof(char), executionStreams)); cuCall(cudaMemsetAsync((void*)thereWasDividing, 0, sizeof(int), executionStreams)); makePartitionOfLeaf3<<<grid, block, 0, executionStreams>>>(numberOfEntities, elemsPerLeafInCurrentLevel_device, elemsPerLeafInCurrentLevel2_device, numberOfElemsPerLeafInCurrentLevel_device, numberOfElemsPerLeafInCurrentLevel2_device, biasOfElemsPerLeafInCurrentLevel_device, biasOfElemsPerLeafInCurrentLevel2_device, points1_device, points12_device, points2_device, points22_device, idOfLeafParent_device, idOfLeafParent2_device, side_device, side2_device, treeNodeDevice, treeNodeSizeDevice, minSize); makePartitionOfLeaf4<<<grid, block, 0, executionStreams>>>(numberOfEntities, elemsPerLeafInCurrentLevel_device, elemsPerLeafInCurrentLevel2_device, numberOfElemsPerLeafInCurrentLevel_device, numberOfElemsPerLeafInCurrentLevel2_device, biasOfElemsPerLeafInCurrentLevel_device, biasOfElemsPerLeafInCurrentLevel2_device, points1_device, points12_device, points2_device, points22_device, idOfLeafParent_device, idOfLeafParent2_device, side_device, side2_device, treeNodeDevice, treeNodeSizeDevice, minSize); makePartitionOfLeaf5<<<grid, block, 0, executionStreams>>>(numberOfEntities, elemsPerLeafInCurrentLevel_device, elemsPerLeafInCurrentLevel2_device, numberOfElemsPerLeafInCurrentLevel_device, numberOfElemsPerLeafInCurrentLevel2_device, biasOfElemsPerLeafInCurrentLevel_device, biasOfElemsPerLeafInCurrentLevel2_device, points1_device, points12_device, points2_device, points22_device, idOfLeafParent_device, idOfLeafParent2_device, side_device, side2_device, treeNodeDevice, treeNodeSizeDevice, minSize); makePartitionOfLeaf6<<<grid, block, 0, executionStreams>>>(numberOfEntities, elemsPerLeafInCurrentLevel_device, elemsPerLeafInCurrentLevel2_device, numberOfElemsPerLeafInCurrentLevel_device, numberOfElemsPerLeafInCurrentLevel2_device, biasOfElemsPerLeafInCurrentLevel_device, biasOfElemsPerLeafInCurrentLevel2_device, points1_device, points12_device, points2_device, points22_device, idOfLeafParent_device, idOfLeafParent2_device, side_device, side2_device, treeNodeDevice, treeNodeSizeDevice, minSize); makePartitionOfLeaf7<<<grid, block, 0, executionStreams>>>(numberOfEntities, elemsPerLeafInCurrentLevel_device, elemsPerLeafInCurrentLevel2_device, numberOfElemsPerLeafInCurrentLevel_device, numberOfElemsPerLeafInCurrentLevel2_device, biasOfElemsPerLeafInCurrentLevel_device, biasOfElemsPerLeafInCurrentLevel2_device, points1_device, 
points12_device, points2_device, points22_device, idOfLeafParent_device, idOfLeafParent2_device, side_device, side2_device, treeNodeDevice, treeNodeSizeDevice, minSize, rand(), rand(), thereWasDividing); makePartitionOfLeaf8<<<grid, block, 0, executionStreams>>>(numberOfEntities, elemsPerLeafInCurrentLevel_device, elemsPerLeafInCurrentLevel2_device, numberOfElemsPerLeafInCurrentLevel_device, numberOfElemsPerLeafInCurrentLevel2_device, biasOfElemsPerLeafInCurrentLevel_device, biasOfElemsPerLeafInCurrentLevel2_device, points1_device, points12_device, points2_device, points22_device, idOfLeafParent_device, idOfLeafParent2_device, side_device, side2_device, treeNodeDevice, treeNodeSizeDevice, minSize); int thereWasDividingHost; cuCall(cudaMemcpyAsync((void*)&thereWasDividingHost, (void*)thereWasDividing, sizeof(int), cudaMemcpyDeviceToHost, executionStreams)); cuCall(cudaStreamSynchronize(executionStreams)); int* tmp1 = elemsPerLeafInCurrentLevel_device; elemsPerLeafInCurrentLevel_device = elemsPerLeafInCurrentLevel2_device; elemsPerLeafInCurrentLevel2_device = tmp1; int* tmp2 = numberOfElemsPerLeafInCurrentLevel_device; numberOfElemsPerLeafInCurrentLevel_device = numberOfElemsPerLeafInCurrentLevel2_device; numberOfElemsPerLeafInCurrentLevel2_device = tmp2; int* tmp3 = biasOfElemsPerLeafInCurrentLevel_device; biasOfElemsPerLeafInCurrentLevel_device = biasOfElemsPerLeafInCurrentLevel2_device; biasOfElemsPerLeafInCurrentLevel2_device = tmp3; int* tmp4 = points1_device; points1_device = points12_device; points12_device = tmp4; int* tmp5 = points2_device; points2_device = points22_device; points22_device = tmp5; int* tmp6 = idOfLeafParent_device; idOfLeafParent_device = idOfLeafParent2_device; idOfLeafParent2_device = tmp6; char* tmp7 = side_device; side_device = side2_device; side2_device = tmp7; if(thereWasDividingHost != 0){ treeeIsGoingToBeEdit = true; } } //Utworzenie koncowych lisci z wlasciwymi elementami dim3 grid(ceil(float(numberOfEntities)/256.0), 1); dim3 block(256, 1); makePartitionOfLeaf9<<<grid, block, 0, executionStreams>>>(numberOfEntities, elemsPerLeafInCurrentLevel_device, biasOfElemsPerLeafInCurrentLevel_device, idOfLeafParent_device, treeNodesLeafsDevice, treeNodeDevice); int treeNodeSizeHost; cuCall(cudaMemcpyAsync((void*)&treeNodeSizeHost, (void*)treeNodeSizeDevice, sizeof(int), cudaMemcpyDeviceToHost, executionStreams)); cuCall(cudaStreamSynchronize(executionStreams)); trees_host[i].resize(treeNodeSizeHost); treesLeafs_host[i].resize(numberOfEntities); cuCall(cudaMemcpyAsync((void*)trees_host[i].data(), (void*)treeNodeDevice, treeNodeSizeHost*sizeof(TreeNode), cudaMemcpyDeviceToHost, executionStreams)); cuCall(cudaMemcpyAsync((void*)treesLeafs_host[i].data(), (void*)treeNodesLeafsDevice, numberOfEntities*sizeof(TreeNodeLeaf), cudaMemcpyDeviceToHost, executionStreams)); cuCall(cudaStreamSynchronize(executionStreams)); std::cout<<"The tree with number: "<<i+1<<" has been built\n"; } cuCall(cudaFree((void*)treeNodeDevice)); cuCall(cudaFree((void*)treeNodesLeafsDevice)); cuCall(cudaFree((void*)treeNodeSizeDevice)); cuCall(cudaFree((void*)thereWasDividing)); cuCall(cudaFreeHost((void*)elemsPerLeafInCurrentLevel_host)); cuCall(cudaFree((void*)elemsPerLeafInCurrentLevel_device)); cuCall(cudaFree((void*)elemsPerLeafInCurrentLevel2_device)); cuCall(cudaFreeHost((void*)numberOfElemsPerLeafInCurrentLevel_host)); cuCall(cudaFree((void*)numberOfElemsPerLeafInCurrentLevel_device)); cuCall(cudaFree((void*)numberOfElemsPerLeafInCurrentLevel2_device)); 
cuCall(cudaFreeHost((void*)biasOfElemsPerLeafInCurrentLevel_host)); cuCall(cudaFree((void*)biasOfElemsPerLeafInCurrentLevel_device)); cuCall(cudaFree((void*)biasOfElemsPerLeafInCurrentLevel2_device)); cuCall(cudaFreeHost((void*)points1_host)); cuCall(cudaFree((void*)points1_device)); cuCall(cudaFree((void*)points12_device)); cuCall(cudaFreeHost((void*)points2_host)); cuCall(cudaFree((void*)points2_device)); cuCall(cudaFree((void*)points22_device)); cuCall(cudaFreeHost((void*)idOfLeafParent_host)); cuCall(cudaFree((void*)idOfLeafParent_device)); cuCall(cudaFree((void*)idOfLeafParent2_device)); cuCall(cudaFreeHost((void*)side_host)); cuCall(cudaFree((void*)side_device)); cuCall(cudaFree((void*)side2_device)); } EuclideanDistanceMatrixGPU::EuclideanDistanceMatrixGPU(){ typeOfDistance = DISTANCE_EUCLIDEAN; this->numberOfBlocksPerMultiprocessors = 10; this->numberOfMultiprocessors = 1; this->debugMode = false; this->minSize = 1; } EuclideanDistanceMatrixGPU::EuclideanDistanceMatrixGPU(bool debugMode){ typeOfDistance = DISTANCE_EUCLIDEAN; this->numberOfBlocksPerMultiprocessors = 10; this->numberOfMultiprocessors = 1; this->debugMode = debugMode; this->minSize = 1; } EuclideanDistanceMatrixGPU::~EuclideanDistanceMatrixGPU(){ } void EuclideanDistanceMatrixGPU::setDataFile(std::string nameOfFile){ this->inputFile = nameOfFile; } bool EuclideanDistanceMatrixGPU::loadData(){ std::ifstream myfile; myfile.open(this->inputFile.c_str()); if (myfile.is_open()){ std::cout<<"The datafile has been opened\n"; }else{ std::cout<<"Error opening the file\n"; return true; } std::string line; std::getline(myfile, line); std::getline(myfile, line); int idOfEntity = 0; char* lineChar; while ((std::getline(myfile, line))&&(idOfEntity<numberOfEntities)){ std::vector<std::string> cuttedString; lineChar = new char[line.length() + 1]; std::strcpy(lineChar, line.c_str()); std::string str; char* pch = strtok(lineChar,","); while (pch != NULL){ str = std::string(pch); str = trim(str); cuttedString.push_back(str); pch = strtok (NULL, ","); } delete [] lineChar; if(klaster){ for(int i=0 ; i<cuttedString.size()-1 ; ++i){ this->dataTable_host[idOfEntity+numberOfEntities*i] = atof(cuttedString[i].c_str()); } this->dataTableId_host[idOfEntity] = atoi(cuttedString[cuttedString.size()-1].c_str()); }else{ for(int i=0 ; i<cuttedString.size() ; ++i){ this->dataTable_host[idOfEntity+numberOfEntities*i] = atof(cuttedString[i].c_str()); } } idOfEntity++; } return false; } bool EuclideanDistanceMatrixGPU::initialize(int numberOfEntities, int dimensionOfEntity, int numberOfNeighbors, int device, int typeOfDistance, bool klaster, int numberOfTrees, int numberOfPropagations, int minSize){ this->typeOfDistance = typeOfDistance; this->klaster = klaster; this->numberOfEntities = numberOfEntities; this->numberOfNeighbors = numberOfNeighbors; this->dimensionOfEntity = dimensionOfEntity; this->numberOfTrees = numberOfTrees; this->numberOfPropagations = numberOfPropagations; this->dimensionOfIndexesAndDistances = min(numberOfNeighbors*numberOfNeighbors+numberOfNeighbors, numberOfEntities); this->minSize = minSize; this->device = device; bool error = false; error |= cuCall(cudaSetDevice(device)); error |= cuCall(cudaDeviceReset()); cudaDeviceProp devProp; error |= cuCall(cudaGetDeviceProperties(&devProp, device)); this->numberOfMultiprocessors = devProp.multiProcessorCount; error |= cuCall(cudaHostAlloc((void**)&dataTable_host, numberOfEntities*dimensionOfEntity*sizeof(float), cudaHostAllocPortable)); error |= 
cuCall(cudaHostAlloc((void**)&dataTableId_host, numberOfEntities*sizeof(int), cudaHostAllocPortable)); error |= cuCall(cudaMallocPitch((void**)&dataTable_device, &dataTable_Pitch, numberOfEntities*sizeof(float), dimensionOfEntity)); error |= cuCall(cudaMallocHost((void**)&neighboursDistance_host, numberOfNeighbors*numberOfEntities*sizeof(float))); error |= cuCall(cudaMalloc((void**)&neighboursDistance_device, numberOfNeighbors*numberOfEntities*sizeof(float))); error |= cuCall(cudaMalloc((void**)&neighboursDistance2_device, numberOfNeighbors*numberOfEntities*sizeof(float))); error |= cuCall(cudaMallocHost((void**)&neighboursId_host, numberOfNeighbors*numberOfEntities*sizeof(int))); error |= cuCall(cudaMalloc((void**)&neighboursId_device, numberOfNeighbors*numberOfEntities*sizeof(int))); error |= cuCall(cudaMalloc((void**)&neighboursId2_device, numberOfNeighbors*numberOfEntities*sizeof(int))); error |= cuCall(cudaStreamCreate(&executionStreams)); error |= cuCall(cudaEventCreate(&startEvents)); error |= cuCall(cudaEventCreate(&stopEvents)); error |= cuCall(cudaMalloc((void**)&idxChecking_device, numberOfMultiprocessors*numberOfBlocksPerMultiprocessors*numberOfEntities*sizeof(char))); error |= cuCall(cudaMalloc((void**)&indexes_device, numberOfBlocksPerMultiprocessors*numberOfMultiprocessors*dimensionOfIndexesAndDistances*sizeof(int))); error |= cuCall(cudaMalloc((void**)&distances_device, numberOfBlocksPerMultiprocessors*numberOfMultiprocessors*dimensionOfIndexesAndDistances*sizeof(float))); error |= cuCall(cudaMalloc((void**)&distances2_device, numberOfBlocksPerMultiprocessors*numberOfMultiprocessors*dimensionOfIndexesAndDistances*sizeof(float))); error |= cuCall(cudaMalloc((void**)&distances3_device, numberOfBlocksPerMultiprocessors*numberOfMultiprocessors*dimensionOfIndexesAndDistances*sizeof(float))); error |= cuCall(cudaMalloc((void**)&marker_device, numberOfBlocksPerMultiprocessors*numberOfMultiprocessors*dimensionOfIndexesAndDistances*sizeof(short))); error |= loadData(); //send data to GPU error |= cuCall(cudaMemcpy2D((void*)dataTable_device, dataTable_Pitch, (void*)dataTable_host, numberOfEntities*sizeof(float), numberOfEntities*sizeof(float), dimensionOfEntity, cudaMemcpyHostToDevice)); error |= cuCall(cudaMalloc((void**)&graphTraversalStartPoint_device, numberOfTrees*numberOfEntities*sizeof(int))); Partition p = {0, numberOfEntities}; partition = p; return error; } bool EuclideanDistanceMatrixGPU::deinitialize(){ bool error = false; error |= cuCall(cudaSetDevice(device)); error |= cuCall(cudaDeviceSynchronize()); error |= cuCall(cudaFreeHost((void*)dataTable_host)); error |= cuCall(cudaFreeHost((void*)dataTableId_host)); error |= cuCall(cudaFree((void*)dataTable_device)); error |= cuCall(cudaFreeHost((void*)neighboursDistance_host)); error |= cuCall(cudaFree((void*)neighboursDistance_device)); error |= cuCall(cudaFree((void*)neighboursDistance2_device)); error |= cuCall(cudaFreeHost((void*)neighboursId_host)); error |= cuCall(cudaFree((void*)neighboursId_device)); error |= cuCall(cudaFree((void*)neighboursId2_device)); error |= cuCall(cudaStreamDestroy(executionStreams)); error |= cuCall(cudaEventDestroy(startEvents)); error |= cuCall(cudaEventDestroy(stopEvents)); error |= cuCall(cudaFree((void*)idxChecking_device)); error |= cuCall(cudaFree((void*)indexes_device)); error |= cuCall(cudaFree((void*)distances_device)); error |= cuCall(cudaFree((void*)distances2_device)); error |= cuCall(cudaFree((void*)distances3_device)); error |= cuCall(cudaFree((void*)marker_device)); error |= 
cuCall(cudaFree((void*)graphTraversalStartPoint_device)); error |= cuCall(cudaDeviceReset()); return error; } bool EuclideanDistanceMatrixGPU::calculate(){ bool error = false; std::cout<<"The device "<<device<<" is calculating the neighbours for: "<<partition.start<<" - "<<partition.end-1<<"\n"; error |= cuCall(cudaSetDevice(device)); error |= cuCall(cudaEventRecord(startEvents, executionStreams)); dim3 grid1(ceil(float(dimensionOfEntity)/256.0), 1); dim3 block1(256, 1); normalizeDataStep1<<<grid1, block1, 0, executionStreams>>>(dataTable_device, dataTable_Pitch, numberOfEntities, dimensionOfEntity); dim3 grid2(1, 1); dim3 block2(256, 1); normalizeDataStep2<<<grid2, block2, 0, executionStreams>>>(dataTable_device, dataTable_Pitch, numberOfEntities, dimensionOfEntity); buildUpTheTrees(); initilizeGPUStructuresForTrees(); stubInitialization(); findInitialKNN(); propagate(); deinitializeGPUStructuresForTrees(); error |= cuCall(cudaEventRecord(stopEvents, executionStreams)); error |= cuCall(cudaEventSynchronize(stopEvents)); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, startEvents, stopEvents); std::cout<<"The device "<<device<<": has done task in: "<<milliseconds<<" ms\n"; return error; } void EuclideanDistanceMatrixGPU::setResultsFile(std::string nameOfFile){ this->outputFile = nameOfFile; } template <typename T> std::string tostr(const T& t) { std::ostringstream os; os<<t; return os.str(); } bool EuclideanDistanceMatrixGPU::saveResultToResultFile(){ bool error = false; error |= cuCall(cudaSetDevice(device)); error |= cuCall(cudaMemcpyAsync((void*)neighboursDistance_host, (void*)neighboursDistance_device, numberOfNeighbors*numberOfEntities*sizeof(float), cudaMemcpyDeviceToHost, executionStreams)); error |= cuCall(cudaMemcpyAsync((void*)neighboursId_host, (void*)neighboursId_device, numberOfNeighbors*numberOfEntities*sizeof(int), cudaMemcpyDeviceToHost, executionStreams)); error |= cuCall(cudaStreamSynchronize(executionStreams)); //Zapisanie rezultatu do pliku std::ofstream ofs; ofs.open(outputFile.c_str(), std::ofstream::trunc | std::ofstream::binary); std::ofstream ofsDebug; if(debugMode){ ofsDebug.open((outputFile+"DEBUG").c_str(), std::ofstream::trunc | std::ofstream::binary); } bool validationSuccess = true; std::ofstream ofsValidation; ofsValidation.open((outputFile+"VALIDATION").c_str(), std::ofstream::trunc | std::ofstream::binary); if(ofs.is_open()){ ofs<<numberOfEntities<<";"<< numberOfNeighbors<<";"<<sizeof(long)<<"\n"; long l = 0x01020304; ofs.write((char*)&l, sizeof(long)); //zapisywanie punktow for(int lp=partition.start ; lp<partition.end ; ++lp){ std::vector<DataPoint> liczbyNear; for(int c=0 ; c<numberOfNeighbors ; ++c){ DataPoint dp = {neighboursId_host[lp*numberOfNeighbors+c], neighboursDistance_host[lp*numberOfNeighbors+c]}; liczbyNear.push_back(dp); } if(typeOfDistance == DISTANCE_COSINE){ std::sort(liczbyNear.begin(), liczbyNear.end(), compareByLengthMinCosine); }else{ std::sort(liczbyNear.begin(), liczbyNear.end(), compareByLengthMin); } for(std::vector<DataPoint>::iterator it = liczbyNear.begin() ; it != liczbyNear.end() ; ++it){ DataPoint f = *it; ofs.write((char*)&f.id, sizeof(long)); if((debugMode)&&(ofsDebug.is_open())){ ofsDebug<<"NEAR: <"<<lp<<", "<<f.id<<">("<<f.distance<<") "; } } /* for(std::vector<DataPoint>::iterator it = liczbyNear.begin() ; it != liczbyNear.end() ; ++it){ long tmp = -1; ofs.write((char*)&tmp, sizeof(long)); if((debugMode)&&(ofsDebug.is_open())){ ofsDebug<<"FAR: <"<<lp<<", "<<tmp<<">("<<FLT_MAX<<") "; } } */ 
if((debugMode)&&(ofsDebug.is_open())){ ofsDebug<<";\n"; } } ofs.close(); if((debugMode)&&(ofsDebug.is_open())){ ofsDebug.close(); } if(ofsValidation.is_open()){ if(validationSuccess){ ofsValidation<<"Everything is OK."; } ofsValidation.close(); } }else{ std::cout <<"Can not open the file for saving result.\n"; error |= true; } return error; }
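The class above is driven entirely from the host. The following is a minimal, hypothetical driver sketch, not part of the original sources: the header name, file paths, and every parameter value are assumptions, and it relies only on the public methods (setDataFile, initialize, setResultsFile, calculate, saveResultToResultFile, deinitialize) and the DISTANCE_* constants referenced in the implementation above.

// Hypothetical usage sketch -- names, paths, sizes and hyper-parameters are illustrative only.
#include "EuclideanDistanceMatrixGPU.h"   // assumed header declaring the class and the DISTANCE_* constants

int main() {
    EuclideanDistanceMatrixGPU knn(/*debugMode=*/false);
    knn.setDataFile("./data/points.csv");      // must be set before initialize(), which calls loadData()
    // numberOfEntities, dimensionOfEntity, numberOfNeighbors, device, typeOfDistance,
    // klaster, numberOfTrees, numberOfPropagations, minSize
    bool error = knn.initialize(10000, 64, 10, 0, DISTANCE_EUCLIDEAN, false, 4, 3, 32);
    knn.setResultsFile("./data/points.knn");
    if (!error) {
        error |= knn.calculate();              // builds the trees, finds the initial kNN, then propagates
        error |= knn.saveResultToResultFile();
    }
    error |= knn.deinitialize();
    return error ? 1 : 0;
}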
2880915b4cc7d3382f077c4939113fcf951473e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <math.h> #include <IL/il.h> #include <IL/ilu.h> __global__ void MinMax(unsigned char* edgemap, int width, int* mingrad, int* maxgrad) { __shared__ int blockmin; __shared__ int blockmax; if(threadIdx.x == 0 && threadIdx.y == 0){ blockmin = 50000; blockmax = -1; } __syncthreads(); int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; atomicMax(&blockmax, edgemap[i * width + j]); atomicMin(&blockmin, edgemap[i * width + j]); __syncthreads(); if(threadIdx.x == 0 && threadIdx.y == 0){ atomicMax(maxgrad, blockmax); atomicMin(mingrad, blockmin); } } __global__ void edgeMap(unsigned char* edgemap, unsigned char* bitmap, int width, int height){ int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; /* Prewitt operators' values used as it is, as storing it in an array implies use of local memory, which is slow to access. */ int tl, tm, tr, ml, mr, bl, bm, br; unsigned char val; int grad_x, grad_y; if (!i || !j || i == (height-1) || j == (width-1)) edgemap[width*i + j] = 0; else { tl = width*(i-1) + (j-1), tm = width*i+(j-1), tr = width*(i+1) + (j-1); ml = width*(i-1) + j, mr = width*(i+1) + j; bl = width*(i-1) + (j+1), bm = width*i+(j+1), br = width*(i+1) + (j+1); grad_x = (-1*(int)bitmap[tl]) + (1*(int)bitmap[tr]) + (-1*(int)bitmap[ml]) + (1*(int)bitmap[mr]) + (-1*(int)bitmap[bl]) + (1*(int)bitmap[br]); grad_y = (1*(int)bitmap[tl]) + (1*(int)bitmap[tm]) + (1*(int)bitmap[tr]) + (-1*(int)bitmap[bl]) + (-1*(int)bitmap[bm]) + (-1*(int)bitmap[br]); val = (int)ceil(sqrt((float)((grad_x*grad_x) + (grad_y*grad_y)))); edgemap[width*i+j] = val; } } __global__ void normalizeEdgemap(unsigned char* edgemap, int maxgrad, int mingrad, int width, int height){ int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; float pixval; pixval = (float)(edgemap[width*i + j] - mingrad)/(float)(maxgrad - mingrad); edgemap[width*i + j] = (unsigned char)ceil(pixval*256.0f); } void saveImage(const char* filename, int width, int height, unsigned char * bitmap) { ILuint imageID = ilGenImage(); ilBindImage(imageID); ilTexImage(width, height, 0, 1,IL_LUMINANCE, IL_UNSIGNED_BYTE, bitmap); iluFlipImage(); ilEnable(IL_FILE_OVERWRITE); ilSave(IL_PNG, filename); fprintf(stderr, "Image saved as: %s\n", filename); } ILuint loadImage(const char *filename, unsigned char ** bitmap, int &width, int &height) { ILuint imageID = ilGenImage(); ilBindImage(imageID); ILboolean success = ilLoadImage(filename); if (!success) return 0; width = ilGetInteger(IL_IMAGE_WIDTH); height = ilGetInteger(IL_IMAGE_HEIGHT); printf("Width: %d\t Height: %d\n", width, height); *bitmap = ilGetData(); return imageID; } int main() { int width, height; unsigned char *image, *edgemap; int *min_grad, *max_grad; unsigned char *cuda_img, *cuda_edgemap; int *cuda_mingrad, *cuda_maxgrad; ilInit(); ILuint image_id = loadImage("./images/wall256.png", &image, width, height); edgemap = (unsigned char*)malloc(width * height); min_grad = (int*)malloc(sizeof(int)); max_grad = (int*)malloc(sizeof(int)); min_grad[0] = 50000; max_grad[0] = -1; if(image_id == 0) {fprintf(stderr, "Error while reading image... 
aborting.\n"); exit(0);} hipMalloc((void**) &cuda_img, width * height); hipMalloc((void**) &cuda_edgemap, width * height); hipMalloc((void**) &cuda_mingrad, sizeof(int)); hipMalloc((void**) &cuda_maxgrad, sizeof(int)); hipMemcpy(cuda_img, image, width * height, hipMemcpyHostToDevice); hipMemset(cuda_edgemap, 0, width * height); hipMemcpy(cuda_maxgrad, max_grad, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(cuda_mingrad, min_grad, sizeof(int), hipMemcpyHostToDevice); int block_dim = 32; dim3 threadsPerBlock(block_dim, block_dim); dim3 numBlocks(width/block_dim, height/block_dim); // Compute edgemap hipLaunchKernelGGL(( edgeMap), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, cuda_edgemap, cuda_img, width, height); // Find min and max pixel values over the edgemap hipLaunchKernelGGL(( MinMax), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, cuda_edgemap, width, cuda_mingrad, cuda_maxgrad); hipMemcpy(min_grad, cuda_mingrad, sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(max_grad, cuda_maxgrad, sizeof(int), hipMemcpyDeviceToHost); // Normalize edgemap image using overall maximum and minimum hipLaunchKernelGGL(( normalizeEdgemap), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, cuda_edgemap, max_grad[0], min_grad[0], width, height); hipMemcpy(edgemap, cuda_edgemap, width * height, hipMemcpyDeviceToHost); saveImage("./ohho.png", width, height, edgemap); hipFree(cuda_img); hipFree(cuda_edgemap); hipFree(cuda_mingrad); hipFree(cuda_maxgrad); free(edgemap); free(max_grad); free(min_grad); ilBindImage(0); ilDeleteImage(image_id); return 0; }
2880915b4cc7d3382f077c4939113fcf951473e5.cu
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <IL/il.h> #include <IL/ilu.h> __global__ void MinMax(unsigned char* edgemap, int width, int* mingrad, int* maxgrad) { __shared__ int blockmin; __shared__ int blockmax; if(threadIdx.x == 0 && threadIdx.y == 0){ blockmin = 50000; blockmax = -1; } __syncthreads(); int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; atomicMax(&blockmax, edgemap[i * width + j]); atomicMin(&blockmin, edgemap[i * width + j]); __syncthreads(); if(threadIdx.x == 0 && threadIdx.y == 0){ atomicMax(maxgrad, blockmax); atomicMin(mingrad, blockmin); } } __global__ void edgeMap(unsigned char* edgemap, unsigned char* bitmap, int width, int height){ int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; /* Prewitt operators' values used as it is, as storing it in an array implies use of local memory, which is slow to access. */ int tl, tm, tr, ml, mr, bl, bm, br; unsigned char val; int grad_x, grad_y; if (!i || !j || i == (height-1) || j == (width-1)) edgemap[width*i + j] = 0; else { tl = width*(i-1) + (j-1), tm = width*i+(j-1), tr = width*(i+1) + (j-1); ml = width*(i-1) + j, mr = width*(i+1) + j; bl = width*(i-1) + (j+1), bm = width*i+(j+1), br = width*(i+1) + (j+1); grad_x = (-1*(int)bitmap[tl]) + (1*(int)bitmap[tr]) + (-1*(int)bitmap[ml]) + (1*(int)bitmap[mr]) + (-1*(int)bitmap[bl]) + (1*(int)bitmap[br]); grad_y = (1*(int)bitmap[tl]) + (1*(int)bitmap[tm]) + (1*(int)bitmap[tr]) + (-1*(int)bitmap[bl]) + (-1*(int)bitmap[bm]) + (-1*(int)bitmap[br]); val = (int)ceil(sqrt((float)((grad_x*grad_x) + (grad_y*grad_y)))); edgemap[width*i+j] = val; } } __global__ void normalizeEdgemap(unsigned char* edgemap, int maxgrad, int mingrad, int width, int height){ int i = (blockIdx.x * blockDim.x) + threadIdx.x; int j = (blockIdx.y * blockDim.y) + threadIdx.y; float pixval; pixval = (float)(edgemap[width*i + j] - mingrad)/(float)(maxgrad - mingrad); edgemap[width*i + j] = (unsigned char)ceil(pixval*256.0f); } void saveImage(const char* filename, int width, int height, unsigned char * bitmap) { ILuint imageID = ilGenImage(); ilBindImage(imageID); ilTexImage(width, height, 0, 1,IL_LUMINANCE, IL_UNSIGNED_BYTE, bitmap); iluFlipImage(); ilEnable(IL_FILE_OVERWRITE); ilSave(IL_PNG, filename); fprintf(stderr, "Image saved as: %s\n", filename); } ILuint loadImage(const char *filename, unsigned char ** bitmap, int &width, int &height) { ILuint imageID = ilGenImage(); ilBindImage(imageID); ILboolean success = ilLoadImage(filename); if (!success) return 0; width = ilGetInteger(IL_IMAGE_WIDTH); height = ilGetInteger(IL_IMAGE_HEIGHT); printf("Width: %d\t Height: %d\n", width, height); *bitmap = ilGetData(); return imageID; } int main() { int width, height; unsigned char *image, *edgemap; int *min_grad, *max_grad; unsigned char *cuda_img, *cuda_edgemap; int *cuda_mingrad, *cuda_maxgrad; ilInit(); ILuint image_id = loadImage("./images/wall256.png", &image, width, height); edgemap = (unsigned char*)malloc(width * height); min_grad = (int*)malloc(sizeof(int)); max_grad = (int*)malloc(sizeof(int)); min_grad[0] = 50000; max_grad[0] = -1; if(image_id == 0) {fprintf(stderr, "Error while reading image... 
aborting.\n"); exit(0);} cudaMalloc((void**) &cuda_img, width * height); cudaMalloc((void**) &cuda_edgemap, width * height); cudaMalloc((void**) &cuda_mingrad, sizeof(int)); cudaMalloc((void**) &cuda_maxgrad, sizeof(int)); cudaMemcpy(cuda_img, image, width * height, cudaMemcpyHostToDevice); cudaMemset(cuda_edgemap, 0, width * height); cudaMemcpy(cuda_maxgrad, max_grad, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(cuda_mingrad, min_grad, sizeof(int), cudaMemcpyHostToDevice); int block_dim = 32; dim3 threadsPerBlock(block_dim, block_dim); dim3 numBlocks(width/block_dim, height/block_dim); // Compute edgemap edgeMap<<<numBlocks, threadsPerBlock>>>(cuda_edgemap, cuda_img, width, height); // Find min and max pixel values over the edgemap MinMax<<<numBlocks, threadsPerBlock>>>(cuda_edgemap, width, cuda_mingrad, cuda_maxgrad); cudaMemcpy(min_grad, cuda_mingrad, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(max_grad, cuda_maxgrad, sizeof(int), cudaMemcpyDeviceToHost); // Normalize edgemap image using overall maximum and minimum normalizeEdgemap<<<numBlocks, threadsPerBlock>>>(cuda_edgemap, max_grad[0], min_grad[0], width, height); cudaMemcpy(edgemap, cuda_edgemap, width * height, cudaMemcpyDeviceToHost); saveImage("./ohho.png", width, height, edgemap); cudaFree(cuda_img); cudaFree(cuda_edgemap); cudaFree(cuda_mingrad); cudaFree(cuda_maxgrad); free(edgemap); free(max_grad); free(min_grad); ilBindImage(0); ilDeleteImage(image_id); return 0; }
4f6dcc9aea5fa2117c88d114f829611de7375787.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <utils.h> #include <algorithm> #include <common/cumlHandle.hpp> #include <common/device_buffer.hpp> #include <fstream> #include <iostream> #include <map> #include <numeric> #include "random/rng.h" #include "randomforest.h" namespace ML { /** * @brief Construct RF_metrics. * @param[in] cfg_accuracy: accuracy. */ RF_metrics::RF_metrics(float cfg_accuracy) : accuracy(cfg_accuracy){}; /** * @brief Print accuracy metric. */ void RF_metrics::print() { std::cout << "Accuracy: " << accuracy << std::endl; } /** * @brief Update labels so they are unique from 0 to n_unique_labels values. Create/update an old label to new label map per random forest. * @param[in] n_rows: number of rows (labels) * @param[in,out] labels: 1D labels array to be changed in-place. * @param[in,out] labels_map: map of old label values to new ones. * @param[in] verbose: debugging flag. */ void preprocess_labels(int n_rows, std::vector<int>& labels, std::map<int, int>& labels_map, bool verbose) { std::pair<std::map<int, int>::iterator, bool> ret; int n_unique_labels = 0; if (verbose) std::cout << "Preprocessing labels\n"; for (int i = 0; i < n_rows; i++) { ret = labels_map.insert(std::pair<int, int>(labels[i], n_unique_labels)); if (ret.second) { n_unique_labels += 1; } if (verbose) std::cout << "Mapping " << labels[i] << " to "; labels[i] = ret.first->second; //Update labels **IN-PLACE** if (verbose) std::cout << labels[i] << std::endl; } if (verbose) std::cout << "Finished preprocessing labels\n"; } /** * @brief Revert label preprocessing effect, if needed. * @param[in] n_rows: number of rows (labels) * @param[in,out] labels: 1D labels array to be changed in-place. * @param[in] labels_map: map of old to new label values used during preprocessing. * @param[in] verbose: debugging flag. */ void postprocess_labels(int n_rows, std::vector<int>& labels, std::map<int, int>& labels_map, bool verbose) { if (verbose) std::cout << "Postrocessing labels\n"; std::map<int, int>::iterator it; int n_unique_cnt = labels_map.size(); std::vector<int> reverse_map; reverse_map.resize(n_unique_cnt); for (auto it = labels_map.begin(); it != labels_map.end(); it++) { reverse_map[it->second] = it->first; } for (int i = 0; i < n_rows; i++) { if (verbose) std::cout << "Mapping " << labels[i] << " back to " << reverse_map[labels[i]] << std::endl; labels[i] = reverse_map[labels[i]]; } if (verbose) std::cout << "Finished postrocessing labels\n"; } /** * @brief Random forest default constructor. */ RF_params::RF_params() : n_trees(1) {} /** * @brief Random forest hyper-parameter object constructor to set n_trees member. */ RF_params::RF_params(int cfg_n_trees) : n_trees(cfg_n_trees) {} /** * @brief Random forest hyper-parameter object constructor to set bootstrap, bootstrap_features, n_trees and rows_sample members. 
*/ RF_params::RF_params(bool cfg_bootstrap, bool cfg_bootstrap_features, int cfg_n_trees, float cfg_rows_sample) : bootstrap(cfg_bootstrap), bootstrap_features(cfg_bootstrap_features), n_trees(cfg_n_trees), rows_sample(cfg_rows_sample) { tree_params.bootstrap_features = cfg_bootstrap_features; } /** * @brief Random forest hyper-parameter object constructor to set all RF_params members. */ RF_params::RF_params(bool cfg_bootstrap, bool cfg_bootstrap_features, int cfg_n_trees, float cfg_rows_sample, DecisionTree::DecisionTreeParams cfg_tree_params) : bootstrap(cfg_bootstrap), bootstrap_features(cfg_bootstrap_features), n_trees(cfg_n_trees), rows_sample(cfg_rows_sample), tree_params(cfg_tree_params) { tree_params.bootstrap_features = cfg_bootstrap_features; } /** * @brief Check validity of all random forest hyper-parameters. */ void RF_params::validity_check() const { ASSERT((n_trees > 0), "Invalid n_trees %d", n_trees); ASSERT((rows_sample > 0) && (rows_sample <= 1.0), "rows_sample value %f outside permitted (0, 1] range", rows_sample); tree_params.validity_check(); } /** * @brief Print all random forest hyper-parameters. */ void RF_params::print() const { std::cout << "bootstrap: " << bootstrap << std::endl; std::cout << "bootstrap features: " << bootstrap_features << std::endl; std::cout << "n_trees: " << n_trees << std::endl; std::cout << "rows_sample: " << rows_sample << std::endl; tree_params.print(); } /** * @brief Construct rf (random forest) object. * @tparam T: data type for input data (float or double). * @param[in] cfg_rf_params: Random forest hyper-parameter struct. * @param[in] cfg_rf_type: Random forest type. Only CLASSIFICATION is currently supported. */ template <typename T> rf<T>::rf(RF_params cfg_rf_params, int cfg_rf_type) : rf_params(cfg_rf_params), rf_type(cfg_rf_type), trees(nullptr) { rf_params.validity_check(); } /** * @brief Destructor for random forest object. * @tparam T: data type for input data (float or double). */ template <typename T> rf<T>::~rf() { delete[] trees; } /** * @brief Return number of trees in the forest. * @tparam T: data type for input data (float or double). */ template <typename T> int rf<T>::get_ntrees() { return rf_params.n_trees; } /** * @brief Print summary for all trees in the random forest. * @tparam T: data type for input data (float or double). */ template <typename T> void rf<T>::print_rf_summary() { if (!trees) { std::cout << "Empty forest" << std::endl; } else { std::cout << "Forest has " << rf_params.n_trees << " trees, max_depth " << rf_params.tree_params.max_depth; std::cout << ", and max_leaves " << rf_params.tree_params.max_leaves << std::endl; for (int i = 0; i < rf_params.n_trees; i++) { std::cout << "Tree #" << i << std::endl; trees[i].print_tree_summary(); } } } /** * @brief Print detailed view of all trees in the random forest. * @tparam T: data type for input data (float or double). */ template <typename T> void rf<T>::print_rf_detailed() { if (!trees) { std::cout << "Empty forest" << std::endl; } else { std::cout << "Forest has " << rf_params.n_trees << " trees, max_depth " << rf_params.tree_params.max_depth; std::cout << ", and max_leaves " << rf_params.tree_params.max_leaves << std::endl; for (int i = 0; i < rf_params.n_trees; i++) { std::cout << "Tree #" << i << std::endl; trees[i].print(); } } } /** * @brief Construct rfClassifier object. * @tparam T: data type for input data (float or double). * @param[in] cfg_rf_params: Random forest hyper-parameter struct. 
*/ template <typename T> rfClassifier<T>::rfClassifier(RF_params cfg_rf_params) : rf<T>::rf(cfg_rf_params, RF_type::CLASSIFICATION){}; /** * @brief Build (i.e., fit, train) random forest classifier for input data. * @tparam T: data type for input data (float or double). * @param[in] user_handle: cumlHandle * @param[in] input: train data (n_rows samples, n_cols features) in column major format, excluding labels. Device pointer. * @param[in] n_rows: number of training data samples. * @param[in] n_cols: number of features (i.e., columns) excluding target feature. * @param[in] labels: 1D array of target features (int only), with one label per training sample. Device pointer. Assumption: labels were preprocessed to map to ascending numbers from 0; needed for current gini impl in decision tree * @param[in] n_unique_labels: #unique label values (known during preprocessing) */ template <typename T> void rfClassifier<T>::fit(const cumlHandle& user_handle, T* input, int n_rows, int n_cols, int* labels, int n_unique_labels) { ASSERT(!this->trees, "Cannot fit an existing forest."); ASSERT((n_rows > 0), "Invalid n_rows %d", n_rows); ASSERT((n_cols > 0), "Invalid n_cols %d", n_cols); rfClassifier::trees = new DecisionTree::DecisionTreeClassifier<T>[this->rf_params.n_trees]; int n_sampled_rows = this->rf_params.rows_sample * n_rows; const cumlHandle_impl& handle = user_handle.getImpl(); hipStream_t stream = user_handle.getStream(); for (int i = 0; i < this->rf_params.n_trees; i++) { // Select n_sampled_rows (with replacement) numbers from [0, n_rows) per tree. // selected_rows: randomly generated IDs for bootstrapped samples (w/ replacement); a device ptr. MLCommon::device_buffer<unsigned int> selected_rows( handle.getDeviceAllocator(), stream, n_sampled_rows); if (this->rf_params.bootstrap) { MLCommon::Random::Rng r( i * 1000); // Ensure the seed for each tree is different and meaningful. r.uniformInt(selected_rows.data(), n_sampled_rows, (unsigned int)0, (unsigned int)n_rows, stream); } else { std::vector<unsigned int> h_selected_rows(n_rows); std::iota(h_selected_rows.begin(), h_selected_rows.end(), 0); std::random_shuffle(h_selected_rows.begin(), h_selected_rows.end()); h_selected_rows.resize(n_sampled_rows); MLCommon::updateDevice(selected_rows.data(), h_selected_rows.data(), n_sampled_rows, stream); } /* Build individual tree in the forest. - input is a pointer to orig data that have n_cols features and n_rows rows. - n_sampled_rows: # rows sampled for tree's bootstrap sample. - selected_rows: points to a list of row #s (w/ n_sampled_rows elements) used to build the bootstrapped sample. Expectation: Each tree node will contain (a) # n_sampled_rows and (b) a pointer to a list of row numbers w.r.t original data. */ this->trees[i].fit(user_handle, input, n_cols, n_rows, labels, selected_rows.data(), n_sampled_rows, n_unique_labels, this->rf_params.tree_params); //Cleanup selected_rows.release(stream); } } /** * @brief Predict target feature for input data; n-ary classification for single feature supported. * @tparam T: data type for input data (float or double). * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. 
*/ template <typename T> void rfClassifier<T>::predict(const cumlHandle& user_handle, const T* input, int n_rows, int n_cols, int* predictions, bool verbose) const { ASSERT(this->trees, "Cannot predict! No trees in the forest."); ASSERT((n_rows > 0), "Invalid n_rows %d", n_rows); ASSERT((n_cols > 0), "Invalid n_cols %d", n_cols); ASSERT(predictions != nullptr, "Error! User has not allocated memory for predictions."); int row_size = n_cols; for (int row_id = 0; row_id < n_rows; row_id++) { if (verbose) { std::cout << "\n\n"; std::cout << "Predict for sample: "; for (int i = 0; i < n_cols; i++) std::cout << input[row_id * row_size + i] << ", "; std::cout << std::endl; } std::map<int, int> prediction_to_cnt; std::pair<std::map<int, int>::iterator, bool> ret; int max_cnt_so_far = 0; int majority_prediction = -1; for (int i = 0; i < this->rf_params.n_trees; i++) { //Return prediction for one sample. if (verbose) { std::cout << "Printing tree " << i << std::endl; //this->trees[i].print(); } int prediction; this->trees[i].predict(user_handle, &input[row_id * row_size], 1, n_cols, &prediction, verbose); ret = prediction_to_cnt.insert(std::pair<int, int>(prediction, 1)); if (!(ret.second)) { ret.first->second += 1; } if (max_cnt_so_far < ret.first->second) { max_cnt_so_far = ret.first->second; majority_prediction = ret.first->first; } } predictions[row_id] = majority_prediction; } } /** * @brief Predict target feature for input data and validate against ref_labels. * @tparam T: data type for input data (float or double). * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] ref_labels: label values for cross validation (n_rows elements); CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ template <typename T> RF_metrics rfClassifier<T>::score(const cumlHandle& user_handle, const T* input, const int* ref_labels, int n_rows, int n_cols, int* predictions, bool verbose) const { predict(user_handle, input, n_rows, n_cols, predictions, verbose); unsigned long long correctly_predicted = 0ULL; for (int i = 0; i < n_rows; i++) { correctly_predicted += (predictions[i] == ref_labels[i]); } float accuracy = correctly_predicted * 1.0f / n_rows; RF_metrics stats(accuracy); if (verbose) stats.print(); /* TODO: Potentially augment RF_metrics w/ more metrics (e.g., precision, F1, etc.). For non binary classification problems (i.e., one target and > 2 labels), need avg for each of these metrics */ return stats; } template class rf<float>; template class rf<double>; template class rfClassifier<float>; template class rfClassifier<double>; // Stateless API functions: fit, predict and score /** * @brief Build (i.e., fit, train) random forest classifier for input data of type float. * @param[in] user_handle: cumlHandle * @param[in,out] rf_classifier: pointer to the rfClassifier object, previously constructed by the user. * @param[in] input: train data (n_rows samples, n_cols features) in column major format, excluding labels. Device pointer. * @param[in] n_rows: number of training data samples. * @param[in] n_cols: number of features (i.e., columns) excluding target feature. * @param[in] labels: 1D array of target features (int only), with one label per training sample. 
Device pointer. Assumption: labels were preprocessed to map to ascending numbers from 0; needed for current gini impl. in decision tree * @param[in] n_unique_labels: #unique label values (known during preprocessing) */ void fit(const cumlHandle& user_handle, rfClassifier<float>* rf_classifier, float* input, int n_rows, int n_cols, int* labels, int n_unique_labels) { rf_classifier->fit(user_handle, input, n_rows, n_cols, labels, n_unique_labels); } /** * @brief Build (i.e., fit, train) random forest classifier for input data of type double. * @param[in] user_handle: cumlHandle * @param[in,out] rf_classifier: pointer to the rfClassifier object, previously constructed by the user. * @param[in] input: train data (n_rows samples, n_cols features) in column major format, excluding labels. Device pointer. * @param[in] n_rows: number of training data samples. * @param[in] n_cols: number of features (i.e., columns) excluding target feature. * @param[in] labels: 1D array of target features (int only), with one label per training sample. Device pointer. Assumption: labels were preprocessed to map to ascending numbers from 0; needed for current gini impl. in decision tree * @param[in] n_unique_labels: #unique label values (known during preprocessing) */ void fit(const cumlHandle& user_handle, rfClassifier<double>* rf_classifier, double* input, int n_rows, int n_cols, int* labels, int n_unique_labels) { rf_classifier->fit(user_handle, input, n_rows, n_cols, labels, n_unique_labels); } /** * @brief Predict target feature for input data of type float; n-ary classification for single feature supported. * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] rf_classifier: pointer to the rfClassifier object. The user should have previously called fit to build the random forest. * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ void predict(const cumlHandle& user_handle, const rfClassifier<float>* rf_classifier, const float* input, int n_rows, int n_cols, int* predictions, bool verbose) { rf_classifier->predict(user_handle, input, n_rows, n_cols, predictions, verbose); } /** * @brief Predict target feature for input data of type double; n-ary classification for single feature supported. * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] rf_classifier: pointer to the rfClassifier object. The user should have previously called fit to build the random forest. * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ void predict(const cumlHandle& user_handle, const rfClassifier<double>* rf_classifier, const double* input, int n_rows, int n_cols, int* predictions, bool verbose) { rf_classifier->predict(user_handle, input, n_rows, n_cols, predictions, verbose); } /** * @brief Predict target feature for input data of type float and validate against ref_labels. 
* @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] rf_classifier: pointer to the rfClassifier object. The user should have previously called fit to build the random forest. * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] ref_labels: label values for cross validation (n_rows elements); CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ RF_metrics score(const cumlHandle& user_handle, const rfClassifier<float>* rf_classifier, const float* input, const int* ref_labels, int n_rows, int n_cols, int* predictions, bool verbose) { return rf_classifier->score(user_handle, input, ref_labels, n_rows, n_cols, predictions, verbose); } /** * @brief Predict target feature for input data of type double and validate against ref_labels. * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] rf_classifier: pointer to the rfClassifier object. The user should have previously called fit to build the random forest. * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] ref_labels: label values for cross validation (n_rows elements); CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ RF_metrics score(const cumlHandle& user_handle, const rfClassifier<double>* rf_classifier, const double* input, const int* ref_labels, int n_rows, int n_cols, int* predictions, bool verbose) { return rf_classifier->score(user_handle, input, ref_labels, n_rows, n_cols, predictions, verbose); } RF_params set_rf_class_obj(int max_depth, int max_leaves, float max_features, int n_bins, int split_algo, int min_rows_per_node, bool bootstrap_features, bool bootstrap, int n_trees, float rows_sample) { DecisionTree::DecisionTreeParams tree_params( max_depth, max_leaves, max_features, n_bins, split_algo, min_rows_per_node, bootstrap_features); RF_params rf_params(bootstrap, bootstrap_features, n_trees, rows_sample, tree_params); return rf_params; } }; // namespace ML // end namespace ML
4f6dcc9aea5fa2117c88d114f829611de7375787.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <utils.h> #include <algorithm> #include <common/cumlHandle.hpp> #include <common/device_buffer.hpp> #include <fstream> #include <iostream> #include <map> #include <numeric> #include "random/rng.h" #include "randomforest.h" namespace ML { /** * @brief Construct RF_metrics. * @param[in] cfg_accuracy: accuracy. */ RF_metrics::RF_metrics(float cfg_accuracy) : accuracy(cfg_accuracy){}; /** * @brief Print accuracy metric. */ void RF_metrics::print() { std::cout << "Accuracy: " << accuracy << std::endl; } /** * @brief Update labels so they are unique from 0 to n_unique_labels values. Create/update an old label to new label map per random forest. * @param[in] n_rows: number of rows (labels) * @param[in,out] labels: 1D labels array to be changed in-place. * @param[in,out] labels_map: map of old label values to new ones. * @param[in] verbose: debugging flag. */ void preprocess_labels(int n_rows, std::vector<int>& labels, std::map<int, int>& labels_map, bool verbose) { std::pair<std::map<int, int>::iterator, bool> ret; int n_unique_labels = 0; if (verbose) std::cout << "Preprocessing labels\n"; for (int i = 0; i < n_rows; i++) { ret = labels_map.insert(std::pair<int, int>(labels[i], n_unique_labels)); if (ret.second) { n_unique_labels += 1; } if (verbose) std::cout << "Mapping " << labels[i] << " to "; labels[i] = ret.first->second; //Update labels **IN-PLACE** if (verbose) std::cout << labels[i] << std::endl; } if (verbose) std::cout << "Finished preprocessing labels\n"; } /** * @brief Revert label preprocessing effect, if needed. * @param[in] n_rows: number of rows (labels) * @param[in,out] labels: 1D labels array to be changed in-place. * @param[in] labels_map: map of old to new label values used during preprocessing. * @param[in] verbose: debugging flag. */ void postprocess_labels(int n_rows, std::vector<int>& labels, std::map<int, int>& labels_map, bool verbose) { if (verbose) std::cout << "Postrocessing labels\n"; std::map<int, int>::iterator it; int n_unique_cnt = labels_map.size(); std::vector<int> reverse_map; reverse_map.resize(n_unique_cnt); for (auto it = labels_map.begin(); it != labels_map.end(); it++) { reverse_map[it->second] = it->first; } for (int i = 0; i < n_rows; i++) { if (verbose) std::cout << "Mapping " << labels[i] << " back to " << reverse_map[labels[i]] << std::endl; labels[i] = reverse_map[labels[i]]; } if (verbose) std::cout << "Finished postrocessing labels\n"; } /** * @brief Random forest default constructor. */ RF_params::RF_params() : n_trees(1) {} /** * @brief Random forest hyper-parameter object constructor to set n_trees member. */ RF_params::RF_params(int cfg_n_trees) : n_trees(cfg_n_trees) {} /** * @brief Random forest hyper-parameter object constructor to set bootstrap, bootstrap_features, n_trees and rows_sample members. 
*/ RF_params::RF_params(bool cfg_bootstrap, bool cfg_bootstrap_features, int cfg_n_trees, float cfg_rows_sample) : bootstrap(cfg_bootstrap), bootstrap_features(cfg_bootstrap_features), n_trees(cfg_n_trees), rows_sample(cfg_rows_sample) { tree_params.bootstrap_features = cfg_bootstrap_features; } /** * @brief Random forest hyper-parameter object constructor to set all RF_params members. */ RF_params::RF_params(bool cfg_bootstrap, bool cfg_bootstrap_features, int cfg_n_trees, float cfg_rows_sample, DecisionTree::DecisionTreeParams cfg_tree_params) : bootstrap(cfg_bootstrap), bootstrap_features(cfg_bootstrap_features), n_trees(cfg_n_trees), rows_sample(cfg_rows_sample), tree_params(cfg_tree_params) { tree_params.bootstrap_features = cfg_bootstrap_features; } /** * @brief Check validity of all random forest hyper-parameters. */ void RF_params::validity_check() const { ASSERT((n_trees > 0), "Invalid n_trees %d", n_trees); ASSERT((rows_sample > 0) && (rows_sample <= 1.0), "rows_sample value %f outside permitted (0, 1] range", rows_sample); tree_params.validity_check(); } /** * @brief Print all random forest hyper-parameters. */ void RF_params::print() const { std::cout << "bootstrap: " << bootstrap << std::endl; std::cout << "bootstrap features: " << bootstrap_features << std::endl; std::cout << "n_trees: " << n_trees << std::endl; std::cout << "rows_sample: " << rows_sample << std::endl; tree_params.print(); } /** * @brief Construct rf (random forest) object. * @tparam T: data type for input data (float or double). * @param[in] cfg_rf_params: Random forest hyper-parameter struct. * @param[in] cfg_rf_type: Random forest type. Only CLASSIFICATION is currently supported. */ template <typename T> rf<T>::rf(RF_params cfg_rf_params, int cfg_rf_type) : rf_params(cfg_rf_params), rf_type(cfg_rf_type), trees(nullptr) { rf_params.validity_check(); } /** * @brief Destructor for random forest object. * @tparam T: data type for input data (float or double). */ template <typename T> rf<T>::~rf() { delete[] trees; } /** * @brief Return number of trees in the forest. * @tparam T: data type for input data (float or double). */ template <typename T> int rf<T>::get_ntrees() { return rf_params.n_trees; } /** * @brief Print summary for all trees in the random forest. * @tparam T: data type for input data (float or double). */ template <typename T> void rf<T>::print_rf_summary() { if (!trees) { std::cout << "Empty forest" << std::endl; } else { std::cout << "Forest has " << rf_params.n_trees << " trees, max_depth " << rf_params.tree_params.max_depth; std::cout << ", and max_leaves " << rf_params.tree_params.max_leaves << std::endl; for (int i = 0; i < rf_params.n_trees; i++) { std::cout << "Tree #" << i << std::endl; trees[i].print_tree_summary(); } } } /** * @brief Print detailed view of all trees in the random forest. * @tparam T: data type for input data (float or double). */ template <typename T> void rf<T>::print_rf_detailed() { if (!trees) { std::cout << "Empty forest" << std::endl; } else { std::cout << "Forest has " << rf_params.n_trees << " trees, max_depth " << rf_params.tree_params.max_depth; std::cout << ", and max_leaves " << rf_params.tree_params.max_leaves << std::endl; for (int i = 0; i < rf_params.n_trees; i++) { std::cout << "Tree #" << i << std::endl; trees[i].print(); } } } /** * @brief Construct rfClassifier object. * @tparam T: data type for input data (float or double). * @param[in] cfg_rf_params: Random forest hyper-parameter struct. 
*/ template <typename T> rfClassifier<T>::rfClassifier(RF_params cfg_rf_params) : rf<T>::rf(cfg_rf_params, RF_type::CLASSIFICATION){}; /** * @brief Build (i.e., fit, train) random forest classifier for input data. * @tparam T: data type for input data (float or double). * @param[in] user_handle: cumlHandle * @param[in] input: train data (n_rows samples, n_cols features) in column major format, excluding labels. Device pointer. * @param[in] n_rows: number of training data samples. * @param[in] n_cols: number of features (i.e., columns) excluding target feature. * @param[in] labels: 1D array of target features (int only), with one label per training sample. Device pointer. Assumption: labels were preprocessed to map to ascending numbers from 0; needed for current gini impl in decision tree * @param[in] n_unique_labels: #unique label values (known during preprocessing) */ template <typename T> void rfClassifier<T>::fit(const cumlHandle& user_handle, T* input, int n_rows, int n_cols, int* labels, int n_unique_labels) { ASSERT(!this->trees, "Cannot fit an existing forest."); ASSERT((n_rows > 0), "Invalid n_rows %d", n_rows); ASSERT((n_cols > 0), "Invalid n_cols %d", n_cols); rfClassifier::trees = new DecisionTree::DecisionTreeClassifier<T>[this->rf_params.n_trees]; int n_sampled_rows = this->rf_params.rows_sample * n_rows; const cumlHandle_impl& handle = user_handle.getImpl(); cudaStream_t stream = user_handle.getStream(); for (int i = 0; i < this->rf_params.n_trees; i++) { // Select n_sampled_rows (with replacement) numbers from [0, n_rows) per tree. // selected_rows: randomly generated IDs for bootstrapped samples (w/ replacement); a device ptr. MLCommon::device_buffer<unsigned int> selected_rows( handle.getDeviceAllocator(), stream, n_sampled_rows); if (this->rf_params.bootstrap) { MLCommon::Random::Rng r( i * 1000); // Ensure the seed for each tree is different and meaningful. r.uniformInt(selected_rows.data(), n_sampled_rows, (unsigned int)0, (unsigned int)n_rows, stream); } else { std::vector<unsigned int> h_selected_rows(n_rows); std::iota(h_selected_rows.begin(), h_selected_rows.end(), 0); std::random_shuffle(h_selected_rows.begin(), h_selected_rows.end()); h_selected_rows.resize(n_sampled_rows); MLCommon::updateDevice(selected_rows.data(), h_selected_rows.data(), n_sampled_rows, stream); } /* Build individual tree in the forest. - input is a pointer to orig data that have n_cols features and n_rows rows. - n_sampled_rows: # rows sampled for tree's bootstrap sample. - selected_rows: points to a list of row #s (w/ n_sampled_rows elements) used to build the bootstrapped sample. Expectation: Each tree node will contain (a) # n_sampled_rows and (b) a pointer to a list of row numbers w.r.t original data. */ this->trees[i].fit(user_handle, input, n_cols, n_rows, labels, selected_rows.data(), n_sampled_rows, n_unique_labels, this->rf_params.tree_params); //Cleanup selected_rows.release(stream); } } /** * @brief Predict target feature for input data; n-ary classification for single feature supported. * @tparam T: data type for input data (float or double). * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. 
*/ template <typename T> void rfClassifier<T>::predict(const cumlHandle& user_handle, const T* input, int n_rows, int n_cols, int* predictions, bool verbose) const { ASSERT(this->trees, "Cannot predict! No trees in the forest."); ASSERT((n_rows > 0), "Invalid n_rows %d", n_rows); ASSERT((n_cols > 0), "Invalid n_cols %d", n_cols); ASSERT(predictions != nullptr, "Error! User has not allocated memory for predictions."); int row_size = n_cols; for (int row_id = 0; row_id < n_rows; row_id++) { if (verbose) { std::cout << "\n\n"; std::cout << "Predict for sample: "; for (int i = 0; i < n_cols; i++) std::cout << input[row_id * row_size + i] << ", "; std::cout << std::endl; } std::map<int, int> prediction_to_cnt; std::pair<std::map<int, int>::iterator, bool> ret; int max_cnt_so_far = 0; int majority_prediction = -1; for (int i = 0; i < this->rf_params.n_trees; i++) { //Return prediction for one sample. if (verbose) { std::cout << "Printing tree " << i << std::endl; //this->trees[i].print(); } int prediction; this->trees[i].predict(user_handle, &input[row_id * row_size], 1, n_cols, &prediction, verbose); ret = prediction_to_cnt.insert(std::pair<int, int>(prediction, 1)); if (!(ret.second)) { ret.first->second += 1; } if (max_cnt_so_far < ret.first->second) { max_cnt_so_far = ret.first->second; majority_prediction = ret.first->first; } } predictions[row_id] = majority_prediction; } } /** * @brief Predict target feature for input data and validate against ref_labels. * @tparam T: data type for input data (float or double). * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] ref_labels: label values for cross validation (n_rows elements); CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ template <typename T> RF_metrics rfClassifier<T>::score(const cumlHandle& user_handle, const T* input, const int* ref_labels, int n_rows, int n_cols, int* predictions, bool verbose) const { predict(user_handle, input, n_rows, n_cols, predictions, verbose); unsigned long long correctly_predicted = 0ULL; for (int i = 0; i < n_rows; i++) { correctly_predicted += (predictions[i] == ref_labels[i]); } float accuracy = correctly_predicted * 1.0f / n_rows; RF_metrics stats(accuracy); if (verbose) stats.print(); /* TODO: Potentially augment RF_metrics w/ more metrics (e.g., precision, F1, etc.). For non binary classification problems (i.e., one target and > 2 labels), need avg for each of these metrics */ return stats; } template class rf<float>; template class rf<double>; template class rfClassifier<float>; template class rfClassifier<double>; // Stateless API functions: fit, predict and score /** * @brief Build (i.e., fit, train) random forest classifier for input data of type float. * @param[in] user_handle: cumlHandle * @param[in,out] rf_classifier: pointer to the rfClassifier object, previously constructed by the user. * @param[in] input: train data (n_rows samples, n_cols features) in column major format, excluding labels. Device pointer. * @param[in] n_rows: number of training data samples. * @param[in] n_cols: number of features (i.e., columns) excluding target feature. * @param[in] labels: 1D array of target features (int only), with one label per training sample. 
Device pointer. Assumption: labels were preprocessed to map to ascending numbers from 0; needed for current gini impl. in decision tree * @param[in] n_unique_labels: #unique label values (known during preprocessing) */ void fit(const cumlHandle& user_handle, rfClassifier<float>* rf_classifier, float* input, int n_rows, int n_cols, int* labels, int n_unique_labels) { rf_classifier->fit(user_handle, input, n_rows, n_cols, labels, n_unique_labels); } /** * @brief Build (i.e., fit, train) random forest classifier for input data of type double. * @param[in] user_handle: cumlHandle * @param[in,out] rf_classifier: pointer to the rfClassifier object, previously constructed by the user. * @param[in] input: train data (n_rows samples, n_cols features) in column major format, excluding labels. Device pointer. * @param[in] n_rows: number of training data samples. * @param[in] n_cols: number of features (i.e., columns) excluding target feature. * @param[in] labels: 1D array of target features (int only), with one label per training sample. Device pointer. Assumption: labels were preprocessed to map to ascending numbers from 0; needed for current gini impl. in decision tree * @param[in] n_unique_labels: #unique label values (known during preprocessing) */ void fit(const cumlHandle& user_handle, rfClassifier<double>* rf_classifier, double* input, int n_rows, int n_cols, int* labels, int n_unique_labels) { rf_classifier->fit(user_handle, input, n_rows, n_cols, labels, n_unique_labels); } /** * @brief Predict target feature for input data of type float; n-ary classification for single feature supported. * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] rf_classifier: pointer to the rfClassifier object. The user should have previously called fit to build the random forest. * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ void predict(const cumlHandle& user_handle, const rfClassifier<float>* rf_classifier, const float* input, int n_rows, int n_cols, int* predictions, bool verbose) { rf_classifier->predict(user_handle, input, n_rows, n_cols, predictions, verbose); } /** * @brief Predict target feature for input data of type double; n-ary classification for single feature supported. * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] rf_classifier: pointer to the rfClassifier object. The user should have previously called fit to build the random forest. * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ void predict(const cumlHandle& user_handle, const rfClassifier<double>* rf_classifier, const double* input, int n_rows, int n_cols, int* predictions, bool verbose) { rf_classifier->predict(user_handle, input, n_rows, n_cols, predictions, verbose); } /** * @brief Predict target feature for input data of type float and validate against ref_labels. 
* @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] rf_classifier: pointer to the rfClassifier object. The user should have previously called fit to build the random forest. * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] ref_labels: label values for cross validation (n_rows elements); CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ RF_metrics score(const cumlHandle& user_handle, const rfClassifier<float>* rf_classifier, const float* input, const int* ref_labels, int n_rows, int n_cols, int* predictions, bool verbose) { return rf_classifier->score(user_handle, input, ref_labels, n_rows, n_cols, predictions, verbose); } /** * @brief Predict target feature for input data of type double and validate against ref_labels. * @param[in] user_handle: cumlHandle (currently unused; API placeholder) * @param[in] rf_classifier: pointer to the rfClassifier object. The user should have previously called fit to build the random forest. * @param[in] input: test data (n_rows samples, n_cols features) in row major format. CPU pointer. * @param[in] ref_labels: label values for cross validation (n_rows elements); CPU pointer. * @param[in] n_rows: number of data samples. * @param[in] n_cols: number of features (excluding target feature). * @param[in, out] predictions: n_rows predicted labels. CPU pointer, user allocated. * @param[in] verbose: flag for debugging purposes. */ RF_metrics score(const cumlHandle& user_handle, const rfClassifier<double>* rf_classifier, const double* input, const int* ref_labels, int n_rows, int n_cols, int* predictions, bool verbose) { return rf_classifier->score(user_handle, input, ref_labels, n_rows, n_cols, predictions, verbose); } RF_params set_rf_class_obj(int max_depth, int max_leaves, float max_features, int n_bins, int split_algo, int min_rows_per_node, bool bootstrap_features, bool bootstrap, int n_trees, float rows_sample) { DecisionTree::DecisionTreeParams tree_params( max_depth, max_leaves, max_features, n_bins, split_algo, min_rows_per_node, bootstrap_features); RF_params rf_params(bootstrap, bootstrap_features, n_trees, rows_sample, tree_params); return rf_params; } }; // namespace ML // end namespace ML
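The random forest pair above differs only in its CUDA runtime surface: the stream type and the device buffer handling around the per-tree bootstrap sample. The tree-fitting and prediction logic is plain host C++ and passes through hipify unchanged. The following is a minimal, self-contained HIP sketch (not part of either file above) that isolates those touch points; the buffer name, sample count, and overall structure are assumptions made for illustration, with the corresponding CUDA calls noted in comments.

#include <hip/hip_runtime.h>
#include <cstdio>
#include <vector>

/* Illustrative only: stand-ins for the per-tree bootstrap buffer used in fit().
   CUDA equivalents are noted inline; names and sizes are assumptions. */
int main() {
    const int n_sampled_rows = 1024;                      /* assumed sample count */
    hipStream_t stream;                                   /* CUDA: cudaStream_t */
    hipStreamCreate(&stream);                             /* CUDA: cudaStreamCreate */

    unsigned int *selected_rows = nullptr;
    hipMalloc((void **) &selected_rows,
              n_sampled_rows * sizeof(unsigned int));     /* CUDA: cudaMalloc */

    std::vector<unsigned int> h_rows(n_sampled_rows, 0u);
    hipMemcpyAsync(selected_rows, h_rows.data(),
                   n_sampled_rows * sizeof(unsigned int),
                   hipMemcpyHostToDevice, stream);        /* CUDA: cudaMemcpyAsync */
    hipStreamSynchronize(stream);                         /* CUDA: cudaStreamSynchronize */

    hipFree(selected_rows);                               /* CUDA: cudaFree */
    hipStreamDestroy(stream);                             /* CUDA: cudaStreamDestroy */
    printf("bootstrap buffer round trip done\n");
    return 0;
}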
9da723a173e9639abedccba7afd6af3a61dfa5d2.hip
// !!! This is a file automatically generated by hipify!!! /* Host code for the Jacobi method of solving a system of linear equations * by iteration. * Author: Naga Kandasamy * Date modified: May 13, 2019 */ #include <stdlib.h> #include <stdio.h> #include <sys/time.h> #include <string.h> #include <math.h> #include <hip/hip_runtime.h> #include "jacobi_iteration.h" /* Include the kernel code. */ #include "jacobi_iteration_kernel.hip" /* Uncomment the line below if you want the code to spit out debug information. */ /* #define DEBUG */ int main (int argc, char** argv) { struct timeval start, stop; if (argc > 1) { printf ("This program accepts no arguments\n"); exit (EXIT_FAILURE); } matrix_t A; /* N x N constant matrix. */ matrix_t B; /* N x 1 b matrix. */ matrix_t reference_x; /* Reference solution. */ matrix_t gpu_naive_solution_x; /* Solution computed by naive kernel. */ matrix_t gpu_opt_solution_x; /* Solution computed by optimized kernel. */ /* Initialize the random number generator. */ srand (time (NULL)); /* Generate diagonally dominant matrix. */ A = create_diagonally_dominant_matrix (MATRIX_SIZE, MATRIX_SIZE); if (A.elements == NULL) { printf ("Error creating matrix\n"); exit (EXIT_FAILURE); } /* Create the other vectors. */ B = allocate_matrix_on_host (MATRIX_SIZE, 1, 1); reference_x = allocate_matrix_on_host (MATRIX_SIZE, 1, 0); gpu_naive_solution_x = allocate_matrix_on_host (MATRIX_SIZE, 1, 0); gpu_opt_solution_x = allocate_matrix_on_host (MATRIX_SIZE, 1, 0); #ifdef DEBUG print_matrix (A); print_matrix (B); print_matrix (reference_x); #endif /* Compute the Jacobi solution on the CPU. */ printf ("Performing Jacobi iteration on the CPU\n"); gettimeofday(&start, NULL); compute_gold (A, reference_x, B); gettimeofday(&stop, NULL); printf ("Execution time for CPU = %fs. \n", (float)(stop.tv_sec - start.tv_sec +\ (stop.tv_usec - start.tv_usec)/(float)1000000)); display_jacobi_solution (A, reference_x, B); /* Display statistics. */ /* Compute the Jacobi solution on the GPU. The solutions are returned in gpu_naive_solution_x and gpu_opt_solution_x. */ printf ("\nPerforming Jacobi iteration on device. \n"); compute_on_device (A, gpu_naive_solution_x, gpu_opt_solution_x, B); /* Check GPU results for correctness */ printf("\nShowing results for gpu_naive_solution\n"); display_jacobi_solution (A, gpu_naive_solution_x, B); printf("\nShowing results for gpu_opt_solution\n"); display_jacobi_solution (A, gpu_opt_solution_x, B); free (A.elements); free (B.elements); free (reference_x.elements); free (gpu_naive_solution_x.elements); free (gpu_opt_solution_x.elements); exit (EXIT_SUCCESS); } /* Perform the Jacobi calculation on the GPU. 
*/ void compute_on_device (const matrix_t A, matrix_t gpu_naive_sol_x, matrix_t gpu_opt_sol_x, const matrix_t B) { int done = 0; int num_iter = 0; double ssd; double *d_ssd = NULL; double mse; int i; struct timeval start, stop; matrix_t new_x_naive = allocate_matrix_on_host (MATRIX_SIZE, 1, 0); matrix_t new_x_opt = allocate_matrix_on_host (MATRIX_SIZE, 1, 0); /* Initialize gpu x solutions */ for (i = 0; i < A.num_rows; i++) { float e = B.elements[i]; gpu_naive_sol_x.elements[i] = e; gpu_opt_sol_x.elements[i] = e; } /* Allocate space on GPU for the matricies */ matrix_t d_A = allocate_matrix_on_device(A); check_CUDA_error("Allocating matrix A"); matrix_t d_naive_sol_x = allocate_matrix_on_device(gpu_naive_sol_x); check_CUDA_error("Allocating matrix naive_sol_x"); matrix_t d_opt_sol_x = allocate_matrix_on_device(gpu_opt_sol_x); check_CUDA_error("Allocating matrix opt_sol_x"); matrix_t d_B = allocate_matrix_on_device(B); check_CUDA_error("Allocating matrix B"); matrix_t d_new_x_naive = allocate_matrix_on_device(new_x_naive); check_CUDA_error("Allocating new_x_naive"); matrix_t d_new_x_opt = allocate_matrix_on_device(new_x_opt); /* Copy over matricies A, B, and x solutions to GPU*/ copy_matrix_to_device(d_A, A); check_CUDA_error("Copying matrix A to device"); copy_matrix_to_device(d_B, B); check_CUDA_error("Copying matrix B to device"); copy_matrix_to_device(d_naive_sol_x, gpu_naive_sol_x); check_CUDA_error("Copying matrix naive_sol_x to device"); copy_matrix_to_device(d_opt_sol_x, gpu_opt_sol_x); check_CUDA_error("Copying matrix opt_sol_x to device"); /* Allocate space for the ssd on the GPU */ hipMalloc ((void**) &d_ssd, sizeof (double)); /* Allocate space for the lock on the GPU and initialize it. */ int *mutex_on_device = NULL; hipMalloc ((void **) &mutex_on_device, sizeof (int)); hipMemset (mutex_on_device, 0, sizeof (int)); /* Kernel setup */ dim3 thread_block (1, THREAD_BLOCK_SIZE, 1); dim3 grid (1, (A.num_rows + THREAD_BLOCK_SIZE - 1)/ THREAD_BLOCK_SIZE); printf ("Performing Jacobi naive solution\n"); gettimeofday(&start, NULL); while (!done) { hipMemset (d_ssd, 0.0, sizeof (double)); hipLaunchKernelGGL(( jacobi_iteration_kernel_naive), dim3(grid), dim3(thread_block), 0, 0, d_A, d_naive_sol_x, d_new_x_naive, d_B, mutex_on_device, d_ssd); check_CUDA_error("KERNEL FAILURE: jacobi_iteration_kernel_naive\n"); hipDeviceSynchronize (); hipLaunchKernelGGL(( jacobi_update_x), dim3(grid),dim3(thread_block), 0, 0, d_naive_sol_x, d_new_x_naive); check_CUDA_error("KERNEL FAILURE: jacobi_update_x"); hipDeviceSynchronize(); // Check for convergence hipMemcpy (&ssd, d_ssd, sizeof (double), hipMemcpyDeviceToHost); num_iter++; mse = sqrt(ssd); if (mse <= THRESHOLD) { done = 1; printf ("\nConvergence achieved after %d iterations \n", num_iter); } //printf ("Iteration: %d. MSE = %f\n", num_iter, mse); } gettimeofday(&stop, NULL); printf ("Execution time for GPU (naive) = %fs. 
\n", (float)(stop.tv_sec - start.tv_sec +\ (stop.tv_usec - start.tv_usec)/(float)1000000)); /* Modify kernel setup for optimized kernel */ thread_block.x = thread_block.y = TILE_SIZE; grid.x = 1; grid.y = (gpu_opt_sol_x.num_rows + TILE_SIZE - 1)/TILE_SIZE; printf("\nPerforming Jacobi optimized solution\n"); gettimeofday(&start, NULL); done = 0; num_iter = 0; while (!done) { hipMemset (d_ssd, 0.0, sizeof (double)); hipLaunchKernelGGL(( jacobi_iteration_kernel_optimized), dim3(grid), dim3(thread_block), 0, 0, d_A, d_opt_sol_x, d_new_x_opt, d_B, mutex_on_device, d_ssd); check_CUDA_error("KERNEL FAILURE: jacobi_iteration_kernel_optimized\n"); hipDeviceSynchronize (); hipLaunchKernelGGL(( jacobi_update_x), dim3(grid),dim3(thread_block), 0, 0, d_opt_sol_x, d_new_x_opt); check_CUDA_error("KERNEL FAILURE: jacobi_update_x"); hipDeviceSynchronize(); // Check for convergence hipMemcpy (&ssd, d_ssd, sizeof (double), hipMemcpyDeviceToHost); num_iter++; mse = sqrt(ssd); if (mse <= THRESHOLD) { done = 1; printf ("\nConvergence achieved after %d iterations \n", num_iter); } //printf ("Iteration: %d. MSE = %f\n", num_iter, mse); } gettimeofday(&stop, NULL); printf ("Execution time for GPU (optimized) = %fs. \n", (float)(stop.tv_sec - start.tv_sec +\ (stop.tv_usec - start.tv_usec)/(float)1000000)); /* Copy back solutions from GPU */ copy_matrix_from_device(gpu_naive_sol_x, d_naive_sol_x); check_CUDA_error("Copying matrix d_naive_sol_x from device"); copy_matrix_from_device(gpu_opt_sol_x, d_opt_sol_x); check_CUDA_error("Copying matrix d_opt_sol_x from device"); /* Free memory on GPU */ hipFree(d_A.elements); hipFree(d_B.elements); hipFree(d_naive_sol_x.elements); hipFree(d_opt_sol_x.elements); hipFree(d_ssd); hipFree(mutex_on_device); hipFree(d_new_x_naive.elements); hipFree(d_new_x_opt.elements); free (new_x_naive.elements); free (new_x_opt.elements); return; } /* Allocate matrix on the device of same size as M. */ matrix_t allocate_matrix_on_device (const matrix_t M) { matrix_t Mdevice = M; int size = M.num_rows * M.num_columns * sizeof(float); hipMalloc ((void **) &Mdevice.elements, size); return Mdevice; } /* Allocate a matrix of dimensions height * width. If init == 0, initialize to all zeroes. If init == 1, perform random initialization. */ matrix_t allocate_matrix_on_host (int num_rows, int num_columns, int init) { matrix_t M; M.num_columns = num_columns; M.num_rows = num_rows; int size = M.num_rows * M.num_columns; M.elements = (float *) malloc (size * sizeof (float)); for (unsigned int i = 0; i < size; i++) { if (init == 0) M.elements[i] = 0; else M.elements[i] = get_random_number (MIN_NUMBER, MAX_NUMBER); } return M; } /* Copy matrix to a device. */ void copy_matrix_to_device (matrix_t Mdevice, const matrix_t Mhost) { int size = Mhost.num_rows * Mhost.num_columns * sizeof (float); Mdevice.num_rows = Mhost.num_rows; Mdevice.num_columns = Mhost.num_columns; hipMemcpy (Mdevice.elements, Mhost.elements, size, hipMemcpyHostToDevice); return; } /* Copy matrix from device to host. */ void copy_matrix_from_device (matrix_t Mhost, const matrix_t Mdevice){ int size = Mdevice.num_rows * Mdevice.num_columns * sizeof (float); hipMemcpy (Mhost.elements, Mdevice.elements, size, hipMemcpyDeviceToHost); return; } /* Prints the matrix out to screen. 
*/ void print_matrix (const matrix_t M) { for (unsigned int i = 0; i < M.num_rows; i++) { for (unsigned int j = 0; j < M.num_columns; j++) { printf ("%f ", M.elements[i * M.num_rows + j]); } printf ("\n"); } printf ("\n"); return; } /* Returns a floating-point value between min and max values. */ float get_random_number (int min, int max) { float r = rand ()/(float) RAND_MAX; return (float) floor ((double) (min + (max - min + 1) * r)); } /* Check for errors in kernel execution. */ void check_CUDA_error (const char *msg) { hipError_t err = hipGetLastError (); if ( hipSuccess != err) { printf ("CUDA ERROR: %s (%s).\n", msg, hipGetErrorString(err)); exit (EXIT_FAILURE); } return; } /* Checks the reference and GPU results. */ int check_results (float *reference, float *gpu_result, int num_elements, float eps) { int check = 1; float max_eps = 0.0; for (int i = 0; i < num_elements; i++) { if (fabsf((reference[i] - gpu_result[i])/reference[i]) > eps) { check = 0; printf("Error at index %d\n",i); printf("Element r %.10f and g %.10f\n", reference[i] ,gpu_result[i]); break; } } int maxEle = 0; for (int i = 0; i < num_elements; i++) { if (fabsf((reference[i] - gpu_result[i])/reference[i]) > max_eps) { max_eps = fabsf ((reference[i] - gpu_result[i])/reference[i]); maxEle=i; } } printf ("Max epsilon = %f at i = %d value at cpu %f and gpu %f \n", max_eps, maxEle, reference[maxEle], gpu_result[maxEle]); return check; } /* Function checks if the matrix is diagonally dominant. */ int check_if_diagonal_dominant (const matrix_t M) { float diag_element; float sum; for (unsigned int i = 0; i < M.num_rows; i++) { sum = 0.0; diag_element = M.elements[i * M.num_rows + i]; for (unsigned int j = 0; j < M.num_columns; j++) { if (i != j) sum += abs(M.elements[i * M.num_rows + j]); } if (diag_element <= sum) return 0; } return 1; } /* Create a diagonally dominant matrix. */ matrix_t create_diagonally_dominant_matrix (unsigned int num_rows, unsigned int num_columns) { matrix_t M; M.num_columns = num_columns; M.num_rows = num_rows; unsigned int size = M.num_rows * M.num_columns; M.elements = (float *) malloc (size * sizeof (float)); /* Create a matrix with random numbers between [-.5 and .5]. */ unsigned int i, j; printf ("Generating %d x %d matrix with numbers between [-.5, .5]\n", num_rows, num_columns); for (i = 0; i < size; i++) // M.elements[i] = ((float)rand ()/(float)RAND_MAX) - 0.5; M.elements[i] = get_random_number (MIN_NUMBER, MAX_NUMBER); /* Make diagonal entries large with respect to the entries on each row. */ for (i = 0; i < num_rows; i++) { float row_sum = 0.0; for (j = 0; j < num_columns; j++) { row_sum += fabs (M.elements[i * M.num_rows + j]); } M.elements[i * M.num_rows + i] = 0.5 + row_sum; } /* Check if matrix is diagonal dominant. */ if (!check_if_diagonal_dominant (M)) { free (M.elements); M.elements = NULL; } return M; }
9da723a173e9639abedccba7afd6af3a61dfa5d2.cu
/* Host code for the Jacobi method of solving a system of linear equations * by iteration. * Author: Naga Kandasamy * Date modified: May 13, 2019 */ #include <stdlib.h> #include <stdio.h> #include <sys/time.h> #include <string.h> #include <math.h> #include <cuda_runtime.h> #include "jacobi_iteration.h" /* Include the kernel code. */ #include "jacobi_iteration_kernel.cu" /* Uncomment the line below if you want the code to spit out debug information. */ /* #define DEBUG */ int main (int argc, char** argv) { struct timeval start, stop; if (argc > 1) { printf ("This program accepts no arguments\n"); exit (EXIT_FAILURE); } matrix_t A; /* N x N constant matrix. */ matrix_t B; /* N x 1 b matrix. */ matrix_t reference_x; /* Reference solution. */ matrix_t gpu_naive_solution_x; /* Solution computed by naive kernel. */ matrix_t gpu_opt_solution_x; /* Solution computed by optimized kernel. */ /* Initialize the random number generator. */ srand (time (NULL)); /* Generate diagonally dominant matrix. */ A = create_diagonally_dominant_matrix (MATRIX_SIZE, MATRIX_SIZE); if (A.elements == NULL) { printf ("Error creating matrix\n"); exit (EXIT_FAILURE); } /* Create the other vectors. */ B = allocate_matrix_on_host (MATRIX_SIZE, 1, 1); reference_x = allocate_matrix_on_host (MATRIX_SIZE, 1, 0); gpu_naive_solution_x = allocate_matrix_on_host (MATRIX_SIZE, 1, 0); gpu_opt_solution_x = allocate_matrix_on_host (MATRIX_SIZE, 1, 0); #ifdef DEBUG print_matrix (A); print_matrix (B); print_matrix (reference_x); #endif /* Compute the Jacobi solution on the CPU. */ printf ("Performing Jacobi iteration on the CPU\n"); gettimeofday(&start, NULL); compute_gold (A, reference_x, B); gettimeofday(&stop, NULL); printf ("Execution time for CPU = %fs. \n", (float)(stop.tv_sec - start.tv_sec +\ (stop.tv_usec - start.tv_usec)/(float)1000000)); display_jacobi_solution (A, reference_x, B); /* Display statistics. */ /* Compute the Jacobi solution on the GPU. The solutions are returned in gpu_naive_solution_x and gpu_opt_solution_x. */ printf ("\nPerforming Jacobi iteration on device. \n"); compute_on_device (A, gpu_naive_solution_x, gpu_opt_solution_x, B); /* Check GPU results for correctness */ printf("\nShowing results for gpu_naive_solution\n"); display_jacobi_solution (A, gpu_naive_solution_x, B); printf("\nShowing results for gpu_opt_solution\n"); display_jacobi_solution (A, gpu_opt_solution_x, B); free (A.elements); free (B.elements); free (reference_x.elements); free (gpu_naive_solution_x.elements); free (gpu_opt_solution_x.elements); exit (EXIT_SUCCESS); } /* Perform the Jacobi calculation on the GPU. 
*/ void compute_on_device (const matrix_t A, matrix_t gpu_naive_sol_x, matrix_t gpu_opt_sol_x, const matrix_t B) { int done = 0; int num_iter = 0; double ssd; double *d_ssd = NULL; double mse; int i; struct timeval start, stop; matrix_t new_x_naive = allocate_matrix_on_host (MATRIX_SIZE, 1, 0); matrix_t new_x_opt = allocate_matrix_on_host (MATRIX_SIZE, 1, 0); /* Initialize gpu x solutions */ for (i = 0; i < A.num_rows; i++) { float e = B.elements[i]; gpu_naive_sol_x.elements[i] = e; gpu_opt_sol_x.elements[i] = e; } /* Allocate space on GPU for the matricies */ matrix_t d_A = allocate_matrix_on_device(A); check_CUDA_error("Allocating matrix A"); matrix_t d_naive_sol_x = allocate_matrix_on_device(gpu_naive_sol_x); check_CUDA_error("Allocating matrix naive_sol_x"); matrix_t d_opt_sol_x = allocate_matrix_on_device(gpu_opt_sol_x); check_CUDA_error("Allocating matrix opt_sol_x"); matrix_t d_B = allocate_matrix_on_device(B); check_CUDA_error("Allocating matrix B"); matrix_t d_new_x_naive = allocate_matrix_on_device(new_x_naive); check_CUDA_error("Allocating new_x_naive"); matrix_t d_new_x_opt = allocate_matrix_on_device(new_x_opt); /* Copy over matricies A, B, and x solutions to GPU*/ copy_matrix_to_device(d_A, A); check_CUDA_error("Copying matrix A to device"); copy_matrix_to_device(d_B, B); check_CUDA_error("Copying matrix B to device"); copy_matrix_to_device(d_naive_sol_x, gpu_naive_sol_x); check_CUDA_error("Copying matrix naive_sol_x to device"); copy_matrix_to_device(d_opt_sol_x, gpu_opt_sol_x); check_CUDA_error("Copying matrix opt_sol_x to device"); /* Allocate space for the ssd on the GPU */ cudaMalloc ((void**) &d_ssd, sizeof (double)); /* Allocate space for the lock on the GPU and initialize it. */ int *mutex_on_device = NULL; cudaMalloc ((void **) &mutex_on_device, sizeof (int)); cudaMemset (mutex_on_device, 0, sizeof (int)); /* Kernel setup */ dim3 thread_block (1, THREAD_BLOCK_SIZE, 1); dim3 grid (1, (A.num_rows + THREAD_BLOCK_SIZE - 1)/ THREAD_BLOCK_SIZE); printf ("Performing Jacobi naive solution\n"); gettimeofday(&start, NULL); while (!done) { cudaMemset (d_ssd, 0.0, sizeof (double)); jacobi_iteration_kernel_naive<<<grid, thread_block>>>(d_A, d_naive_sol_x, d_new_x_naive, d_B, mutex_on_device, d_ssd); check_CUDA_error("KERNEL FAILURE: jacobi_iteration_kernel_naive\n"); cudaDeviceSynchronize (); jacobi_update_x<<<grid,thread_block>>>(d_naive_sol_x, d_new_x_naive); check_CUDA_error("KERNEL FAILURE: jacobi_update_x"); cudaDeviceSynchronize(); // Check for convergence cudaMemcpy (&ssd, d_ssd, sizeof (double), cudaMemcpyDeviceToHost); num_iter++; mse = sqrt(ssd); if (mse <= THRESHOLD) { done = 1; printf ("\nConvergence achieved after %d iterations \n", num_iter); } //printf ("Iteration: %d. MSE = %f\n", num_iter, mse); } gettimeofday(&stop, NULL); printf ("Execution time for GPU (naive) = %fs. 
\n", (float)(stop.tv_sec - start.tv_sec +\ (stop.tv_usec - start.tv_usec)/(float)1000000)); /* Modify kernel setup for optimized kernel */ thread_block.x = thread_block.y = TILE_SIZE; grid.x = 1; grid.y = (gpu_opt_sol_x.num_rows + TILE_SIZE - 1)/TILE_SIZE; printf("\nPerforming Jacobi optimized solution\n"); gettimeofday(&start, NULL); done = 0; num_iter = 0; while (!done) { cudaMemset (d_ssd, 0.0, sizeof (double)); jacobi_iteration_kernel_optimized<<<grid, thread_block>>>(d_A, d_opt_sol_x, d_new_x_opt, d_B, mutex_on_device, d_ssd); check_CUDA_error("KERNEL FAILURE: jacobi_iteration_kernel_optimized\n"); cudaDeviceSynchronize (); jacobi_update_x<<<grid,thread_block>>>(d_opt_sol_x, d_new_x_opt); check_CUDA_error("KERNEL FAILURE: jacobi_update_x"); cudaDeviceSynchronize(); // Check for convergence cudaMemcpy (&ssd, d_ssd, sizeof (double), cudaMemcpyDeviceToHost); num_iter++; mse = sqrt(ssd); if (mse <= THRESHOLD) { done = 1; printf ("\nConvergence achieved after %d iterations \n", num_iter); } //printf ("Iteration: %d. MSE = %f\n", num_iter, mse); } gettimeofday(&stop, NULL); printf ("Execution time for GPU (optimized) = %fs. \n", (float)(stop.tv_sec - start.tv_sec +\ (stop.tv_usec - start.tv_usec)/(float)1000000)); /* Copy back solutions from GPU */ copy_matrix_from_device(gpu_naive_sol_x, d_naive_sol_x); check_CUDA_error("Copying matrix d_naive_sol_x from device"); copy_matrix_from_device(gpu_opt_sol_x, d_opt_sol_x); check_CUDA_error("Copying matrix d_opt_sol_x from device"); /* Free memory on GPU */ cudaFree(d_A.elements); cudaFree(d_B.elements); cudaFree(d_naive_sol_x.elements); cudaFree(d_opt_sol_x.elements); cudaFree(d_ssd); cudaFree(mutex_on_device); cudaFree(d_new_x_naive.elements); cudaFree(d_new_x_opt.elements); free (new_x_naive.elements); free (new_x_opt.elements); return; } /* Allocate matrix on the device of same size as M. */ matrix_t allocate_matrix_on_device (const matrix_t M) { matrix_t Mdevice = M; int size = M.num_rows * M.num_columns * sizeof(float); cudaMalloc ((void **) &Mdevice.elements, size); return Mdevice; } /* Allocate a matrix of dimensions height * width. If init == 0, initialize to all zeroes. If init == 1, perform random initialization. */ matrix_t allocate_matrix_on_host (int num_rows, int num_columns, int init) { matrix_t M; M.num_columns = num_columns; M.num_rows = num_rows; int size = M.num_rows * M.num_columns; M.elements = (float *) malloc (size * sizeof (float)); for (unsigned int i = 0; i < size; i++) { if (init == 0) M.elements[i] = 0; else M.elements[i] = get_random_number (MIN_NUMBER, MAX_NUMBER); } return M; } /* Copy matrix to a device. */ void copy_matrix_to_device (matrix_t Mdevice, const matrix_t Mhost) { int size = Mhost.num_rows * Mhost.num_columns * sizeof (float); Mdevice.num_rows = Mhost.num_rows; Mdevice.num_columns = Mhost.num_columns; cudaMemcpy (Mdevice.elements, Mhost.elements, size, cudaMemcpyHostToDevice); return; } /* Copy matrix from device to host. */ void copy_matrix_from_device (matrix_t Mhost, const matrix_t Mdevice){ int size = Mdevice.num_rows * Mdevice.num_columns * sizeof (float); cudaMemcpy (Mhost.elements, Mdevice.elements, size, cudaMemcpyDeviceToHost); return; } /* Prints the matrix out to screen. */ void print_matrix (const matrix_t M) { for (unsigned int i = 0; i < M.num_rows; i++) { for (unsigned int j = 0; j < M.num_columns; j++) { printf ("%f ", M.elements[i * M.num_rows + j]); } printf ("\n"); } printf ("\n"); return; } /* Returns a floating-point value between min and max values. 
*/ float get_random_number (int min, int max) { float r = rand ()/(float) RAND_MAX; return (float) floor ((double) (min + (max - min + 1) * r)); } /* Check for errors in kernel execution. */ void check_CUDA_error (const char *msg) { cudaError_t err = cudaGetLastError (); if ( cudaSuccess != err) { printf ("CUDA ERROR: %s (%s).\n", msg, cudaGetErrorString(err)); exit (EXIT_FAILURE); } return; } /* Checks the reference and GPU results. */ int check_results (float *reference, float *gpu_result, int num_elements, float eps) { int check = 1; float max_eps = 0.0; for (int i = 0; i < num_elements; i++) { if (fabsf((reference[i] - gpu_result[i])/reference[i]) > eps) { check = 0; printf("Error at index %d\n",i); printf("Element r %.10f and g %.10f\n", reference[i] ,gpu_result[i]); break; } } int maxEle = 0; for (int i = 0; i < num_elements; i++) { if (fabsf((reference[i] - gpu_result[i])/reference[i]) > max_eps) { max_eps = fabsf ((reference[i] - gpu_result[i])/reference[i]); maxEle=i; } } printf ("Max epsilon = %f at i = %d value at cpu %f and gpu %f \n", max_eps, maxEle, reference[maxEle], gpu_result[maxEle]); return check; } /* Function checks if the matrix is diagonally dominant. */ int check_if_diagonal_dominant (const matrix_t M) { float diag_element; float sum; for (unsigned int i = 0; i < M.num_rows; i++) { sum = 0.0; diag_element = M.elements[i * M.num_rows + i]; for (unsigned int j = 0; j < M.num_columns; j++) { if (i != j) sum += abs(M.elements[i * M.num_rows + j]); } if (diag_element <= sum) return 0; } return 1; } /* Create a diagonally dominant matrix. */ matrix_t create_diagonally_dominant_matrix (unsigned int num_rows, unsigned int num_columns) { matrix_t M; M.num_columns = num_columns; M.num_rows = num_rows; unsigned int size = M.num_rows * M.num_columns; M.elements = (float *) malloc (size * sizeof (float)); /* Create a matrix with random numbers between [-.5 and .5]. */ unsigned int i, j; printf ("Generating %d x %d matrix with numbers between [-.5, .5]\n", num_rows, num_columns); for (i = 0; i < size; i++) // M.elements[i] = ((float)rand ()/(float)RAND_MAX) - 0.5; M.elements[i] = get_random_number (MIN_NUMBER, MAX_NUMBER); /* Make diagonal entries large with respect to the entries on each row. */ for (i = 0; i < num_rows; i++) { float row_sum = 0.0; for (j = 0; j < num_columns; j++) { row_sum += fabs (M.elements[i * M.num_rows + j]); } M.elements[i * M.num_rows + i] = 0.5 + row_sum; } /* Check if matrix is diagonal dominant. */ if (!check_if_diagonal_dominant (M)) { free (M.elements); M.elements = NULL; } return M; }
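In the Jacobi pair above, the most visible rewrite is the kernel launch itself: the triple-chevron syntax in the .cu file becomes a hipLaunchKernelGGL call in the .hip file, with grid and block dimensions and kernel arguments carried over one to one. The stand-alone sketch below demonstrates that mapping with a trivial stand-in kernel; it is not the Jacobi kernel, and the names and sizes are assumptions for illustration.

#include <hip/hip_runtime.h>
#include <cstdio>

/* Trivial stand-in kernel, only here to show the launch syntax. */
__global__ void scale(float *x, float t, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        x[i] *= t;
}

int main() {
    const int n = 512;
    float *d_x;
    hipMalloc((void **) &d_x, n * sizeof(float));
    hipMemset(d_x, 0, n * sizeof(float));

    dim3 thread_block(256);
    dim3 grid((n + thread_block.x - 1) / thread_block.x);

    /* CUDA form:  scale<<<grid, thread_block>>>(d_x, 0.5f, n);               */
    /* HIP form produced by hipify (same argument order after the two dims): */
    hipLaunchKernelGGL(scale, grid, thread_block, 0, 0, d_x, 0.5f, n);
    hipDeviceSynchronize();

    hipFree(d_x);
    printf("launch done\n");
    return 0;
}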
10027a8236d473239332d57811613fd9bd971a1d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdlib> #include <iostream> #include <string> #include <time.h> #include "device_launch_parameters.h" #include "rocblas.h" #include "hiprand/hiprand.h" #define NONE_TRANS 'N' #define BLOCK_SIZE 32 __global__ void makePositiveOrientire( const int n, float* matrix ) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i < n ) matrix[ i * n + i ] += n; } bool isMethodSuccess( const int iter, const float error, const int nIters, const float epsilon ) { return nIters <= iter || error <= epsilon; } int main( int argc, char** argv ) { const int dim = 2048; const int nIters = 100; const float epsilon = 1e-5; hipblasInit(); hiprandGenerator_t generator; hiprandCreateGenerator( &generator, HIPRAND_RNG_PSEUDO_DEFAULT ); hiprandSetPseudoRandomGeneratorSeed( generator, 2000 ); float *devA, *devX, *devB; hipblasAlloc( dim*dim, sizeof(float), (void**) &devA ); hipblasAlloc( dim*1 , sizeof(float), (void**) &devX ); hipblasAlloc( dim*1 , sizeof(float), (void**) &devB ); hiprandGenerateUniform( generator, devA, dim*dim ); hiprandGenerateUniform( generator, devB, dim*1 ); hipLaunchKernelGGL(( makePositiveOrientire) , dim3(( dim + BLOCK_SIZE - 1 ) / BLOCK_SIZE), dim3(BLOCK_SIZE) , 0, 0, dim, devA ); hipDeviceSynchronize(); hipMemcpy((void*) devX, (const void*) devB, sizeof( float )*dim, hipMemcpyDeviceToDevice); int iter = 0; float error = 0.0f; float *devR, *devE; hipblasAlloc( dim*1, sizeof( float ), (void**) &devR ); hipblasAlloc( dim*1, sizeof( float ), (void**) &devE ); clock_t start = clock(); do { /* */ /* r = A*x */ hipblasSgemv( NONE_TRANS, dim, dim, 1.0f, (const float*) devA, dim, (const float*) devX, 1, 0.0f, devR, 1 ); /* r = r - b */ hipblasSaxpy( dim, -1.0f, (const float*) devB, 1, devR, 1 ); /* e = A*r */ hipblasSgemv( NONE_TRANS, dim, dim, 1.0f, (const float*) devA, dim, (const float*) devR, 1, 0.0f, devE, 1 ); float t = hipblasSdot( dim, (const float*) devE, 1, (const float*) devR, 1 ); t /= hipblasSdot( dim, (const float*) devE, 1, (const float*) devE, 1 ); /* x = x - t*r */ hipblasSaxpy( dim, -t, (const float*) devR, 1, devX, 1 ); /* */ /* e = A*x */ hipblasSgemv( NONE_TRANS, dim, dim, 1.0f, (const float*) devA, dim, (const float*) devX, 1, 0.0f, devE, 1 ); /* e = e - b */ hipblasSaxpy( dim, -1, (const float*) devB, 1, devE, 1 ); /* error = ||e|| */ error = hipblasSnrm2( dim, (const float*) devE, 1 ); } while ( !isMethodSuccess( ++iter, error, nIters, epsilon ) ); clock_t finish = clock(); float time = (float)(finish - start)/CLOCKS_PER_SEC; std::cout << "time: " << time << std::endl << "Accuracy of method: " << error << std::endl << "Iter: "<< iter << " of " << nIters << std::endl; hipblasFree( devR ); hipblasFree( devE ); hipblasFree( devA ); hipblasFree( devX ); hipblasFree( devB ); hipblasShutdown(); hiprandDestroyGenerator( generator ); return 0; }
10027a8236d473239332d57811613fd9bd971a1d.cu
#include <cstdlib>
#include <iostream>
#include <string>
#include <time.h>
#include "device_launch_parameters.h"
#include "cublas.h"
#include "curand.h"

#define NONE_TRANS 'N'
#define BLOCK_SIZE 32

__global__ void makePositiveOrientire( const int n, float* matrix )
{
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if ( i < n )
        matrix[ i * n + i ] += n;
}

bool isMethodSuccess( const int iter, const float error, const int nIters, const float epsilon )
{
    return nIters <= iter || error <= epsilon;
}

int main( int argc, char** argv )
{
    const int dim = 2048;
    const int nIters = 100;
    const float epsilon = 1e-5;

    cublasInit();

    curandGenerator_t generator;
    curandCreateGenerator( &generator, CURAND_RNG_PSEUDO_DEFAULT );
    curandSetPseudoRandomGeneratorSeed( generator, 2000 );

    float *devA, *devX, *devB;
    cublasAlloc( dim*dim, sizeof(float), (void**) &devA );
    cublasAlloc( dim*1 , sizeof(float), (void**) &devX );
    cublasAlloc( dim*1 , sizeof(float), (void**) &devB );

    curandGenerateUniform( generator, devA, dim*dim );
    curandGenerateUniform( generator, devB, dim*1 );

    makePositiveOrientire <<< ( dim + BLOCK_SIZE - 1 ) / BLOCK_SIZE, BLOCK_SIZE >>> ( dim, devA );
    cudaDeviceSynchronize();

    cudaMemcpy((void*) devX, (const void*) devB, sizeof( float )*dim, cudaMemcpyDeviceToDevice);

    int iter = 0;
    float error = 0.0f;

    float *devR, *devE;
    cublasAlloc( dim*1, sizeof( float ), (void**) &devR );
    cublasAlloc( dim*1, sizeof( float ), (void**) &devE );

    clock_t start = clock();
    do
    {
        /* computation */
        /* r = A*x */
        cublasSgemv( NONE_TRANS, dim, dim, 1.0f, (const float*) devA, dim, (const float*) devX, 1, 0.0f, devR, 1 );
        /* r = r - b */
        cublasSaxpy( dim, -1.0f, (const float*) devB, 1, devR, 1 );
        /* e = A*r */
        cublasSgemv( NONE_TRANS, dim, dim, 1.0f, (const float*) devA, dim, (const float*) devR, 1, 0.0f, devE, 1 );

        float t = cublasSdot( dim, (const float*) devE, 1, (const float*) devR, 1 );
        t /= cublasSdot( dim, (const float*) devE, 1, (const float*) devE, 1 );

        /* x = x - t*r */
        cublasSaxpy( dim, -t, (const float*) devR, 1, devX, 1 );

        /* verification */
        /* e = A*x */
        cublasSgemv( NONE_TRANS, dim, dim, 1.0f, (const float*) devA, dim, (const float*) devX, 1, 0.0f, devE, 1 );
        /* e = e - b */
        cublasSaxpy( dim, -1, (const float*) devB, 1, devE, 1 );
        /* error = ||e|| */
        error = cublasSnrm2( dim, (const float*) devE, 1 );
    }
    while ( !isMethodSuccess( ++iter, error, nIters, epsilon ) );
    clock_t finish = clock();

    float time = (float)(finish - start)/CLOCKS_PER_SEC;
    std::cout << "time: " << time << std::endl
              << "Accuracy of method: " << error << std::endl
              << "Iter: "<< iter << " of " << nIters << std::endl;

    cublasFree( devR );
    cublasFree( devE );
    cublasFree( devA );
    cublasFree( devX );
    cublasFree( devB );

    cublasShutdown();
    curandDestroyGenerator( generator );

    return 0;
}
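Both versions of the file above implement the same residual-minimisation iteration, which the BLAS calls can obscure. The sketch below restates one loop iteration on the CPU for clarity. It is illustrative only (the original runs entirely on the device) and uses row-major indexing for readability, whereas cuBLAS/hipBLAS treat the matrix as column-major; for this randomly generated system the distinction does not change the idea.

// CPU sketch of one loop iteration from the solver above (illustrative only).
#include <cmath>
#include <vector>

void iterate_once(const std::vector<float>& A, std::vector<float>& x,
                  const std::vector<float>& b, int n, float& error)
{
    std::vector<float> r(n), e(n);

    // r = A*x - b        (Sgemv followed by Saxpy in the original)
    for (int i = 0; i < n; ++i) {
        float s = 0.0f;
        for (int j = 0; j < n; ++j) s += A[i * n + j] * x[j];
        r[i] = s - b[i];
    }

    // e = A*r            (second Sgemv)
    for (int i = 0; i < n; ++i) {
        float s = 0.0f;
        for (int j = 0; j < n; ++j) s += A[i * n + j] * r[j];
        e[i] = s;
    }

    // t = <e, r> / <e, e>   (two Sdot calls)
    float er = 0.0f, ee = 0.0f;
    for (int i = 0; i < n; ++i) { er += e[i] * r[i]; ee += e[i] * e[i]; }
    const float t = er / ee;

    // x = x - t*r        (Saxpy), then error = ||A*x - b||  (Sgemv, Saxpy, Snrm2)
    for (int i = 0; i < n; ++i) x[i] -= t * r[i];
    error = 0.0f;
    for (int i = 0; i < n; ++i) {
        float s = 0.0f;
        for (int j = 0; j < n; ++j) s += A[i * n + j] * x[j];
        error += (s - b[i]) * (s - b[i]);
    }
    error = std::sqrt(error);
}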
236a895c1f9d41e30da2f494bf0405ce5d3e9e59.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#include <ctype.h>

__global__ void vectorMult(float *a, float *b, float *c, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    while (i < n) {
        c[i] = a[i] * b[i];
        i += blockDim.x * gridDim.x;
    }
}

int main(int argc, char **argv)
{
    float *a, *b, *c;
    float *d_a, *d_b, *d_c;
    int i;
    int size = 100000;
    int elapsed_time = 10;
    int option;

    while ((option = getopt (argc, argv, "s:t:")) != -1) {
        switch (option) {
        case 's':
            size = atoi(optarg);
            break;
        case 't':
            elapsed_time = atoi(optarg);
            break;
        case '?':
            if (optopt == 's' || optopt == 't')
                fprintf (stderr, "Option -%c requires an argument.\n", optopt);
            else if (isprint (optopt))
                fprintf (stderr, "Unknown option `-%c'.\n", optopt);
            else
                fprintf (stderr, "Unknown option character `\\x%x'.\n", optopt);
            return 1;
        default:
            abort ();
        }
    }

    time_t curTime, baseTime;

    a = (float*)malloc(size*sizeof(float));
    b = (float*)malloc(size*sizeof(float));
    c = (float*)malloc(size*sizeof(float));

    hipMalloc(&d_a, size*sizeof(float));
    hipMalloc(&d_b, size*sizeof(float));
    hipMalloc(&d_c, size*sizeof(float));

    for(i = 0; i < size; i++) {
        a[i] = b[i] = (float)i;
        c[i] = 0;
    }

    hipMemcpy(d_a, a, size*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, size*sizeof(float), hipMemcpyHostToDevice);
    hipMemcpy(d_c, c, size*sizeof(float), hipMemcpyHostToDevice);

    int count = 0;
    baseTime = curTime = time(NULL);
    while(curTime < baseTime + elapsed_time) {
        count++;
        hipDeviceSynchronize();
        hipLaunchKernelGGL(( vectorMult), dim3((size+511)/512), dim3(512) , 0, 0, d_a, d_b, d_c, size);
        curTime = time(NULL);
    }

    hipMemcpy(c, d_c, size*sizeof(float), hipMemcpyDeviceToHost);

    free(a);
    free(b);
    free(c);
    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_c);

    printf("Test Complete\n");
    return 0;
}
236a895c1f9d41e30da2f494bf0405ce5d3e9e59.cu
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>
#include <ctype.h>

__global__ void vectorMult(float *a, float *b, float *c, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    while (i < n) {
        c[i] = a[i] * b[i];
        i += blockDim.x * gridDim.x;
    }
}

int main(int argc, char **argv)
{
    float *a, *b, *c;
    float *d_a, *d_b, *d_c;
    int i;
    int size = 100000;
    int elapsed_time = 10;
    int option;

    while ((option = getopt (argc, argv, "s:t:")) != -1) {
        switch (option) {
        case 's':
            size = atoi(optarg);
            break;
        case 't':
            elapsed_time = atoi(optarg);
            break;
        case '?':
            if (optopt == 's' || optopt == 't')
                fprintf (stderr, "Option -%c requires an argument.\n", optopt);
            else if (isprint (optopt))
                fprintf (stderr, "Unknown option `-%c'.\n", optopt);
            else
                fprintf (stderr, "Unknown option character `\\x%x'.\n", optopt);
            return 1;
        default:
            abort ();
        }
    }

    time_t curTime, baseTime;

    a = (float*)malloc(size*sizeof(float));
    b = (float*)malloc(size*sizeof(float));
    c = (float*)malloc(size*sizeof(float));

    cudaMalloc(&d_a, size*sizeof(float));
    cudaMalloc(&d_b, size*sizeof(float));
    cudaMalloc(&d_c, size*sizeof(float));

    for(i = 0; i < size; i++) {
        a[i] = b[i] = (float)i;
        c[i] = 0;
    }

    cudaMemcpy(d_a, a, size*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, size*sizeof(float), cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, c, size*sizeof(float), cudaMemcpyHostToDevice);

    int count = 0;
    baseTime = curTime = time(NULL);
    while(curTime < baseTime + elapsed_time) {
        count++;
        cudaDeviceSynchronize();
        vectorMult<<< (size+511)/512, 512 >>>(d_a, d_b, d_c, size);
        curTime = time(NULL);
    }

    cudaMemcpy(c, d_c, size*sizeof(float), cudaMemcpyDeviceToHost);

    free(a);
    free(b);
    free(c);
    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_c);

    printf("Test Complete\n");
    return 0;
}
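vectorMult above uses a grid-stride loop, so correctness does not depend on launching exactly one thread per element. The wrapper below is an illustrative sketch, not part of the original test; it assumes the kernel and headers from the file above, caps the grid size at an arbitrary example value, and adds the launch-error check the original omits.

/* Illustrative launch wrapper (the 1024-block cap is an arbitrary example value). */
void launch_vector_mult(float *d_a, float *d_b, float *d_c, int n)
{
    int block = 512;
    int grid = (n + block - 1) / block;
    if (grid > 1024)
        grid = 1024;   /* the grid-stride loop still covers all n elements */

    vectorMult<<< grid, block >>>(d_a, d_b, d_c, n);

    cudaError_t err = cudaGetLastError();
    if (err != cudaSuccess)
        fprintf(stderr, "vectorMult launch failed: %s\n", cudaGetErrorString(err));
}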
85074e503d0c34781fcd7e36e47595cd527647b0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/SpatialAveragePooling.cu" #else #include "../common.h" static inline void THNN_(SpatialAveragePooling_shapeCheck)( THCState *state, THCTensor *input, THCTensor *gradOutput, int kH, int kW, int dH, int dW, int padH, int padW, bool ceil_mode) { THArgCheck(kW > 0 && kH > 0, 5, "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW); THArgCheck(dW > 0 && dH > 0, 8, "stride should be greater than zero, but got dH: %d dW: %d", dH, dW); int ndim = input->nDimension; int dimf = 0; int dimh = 1; int dimw = 2; if (ndim == 4) { dimf++; dimh++; dimw++; } THCUNN_argCheck(state, ndim == 3 || ndim == 4, 2, input, "3D or 4D input tensor expected but got: %s"); THArgCheck(kW/2 >= padW && kH/2 >= padH, 2, "pad should be smaller than half of kernel size, but got " "padW = %d, padH = %d, kW = %d, kH = %d", padW, padH, kW, kH); int64_t nInputPlane = input->size[dimh-1]; int64_t nInputRows = input->size[dimh]; int64_t nInputCols = input->size[dimw]; int64_t nOutputRows, nOutputCols; int64_t nOutputPlane = nInputPlane; if(ceil_mode) { nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1; nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1; } else { nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1; nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1; } if (padW || padH) { // ensure that the last pooling starts inside the image // needed to avoid problems in ceil mode if ((nOutputRows - 1)*dH >= nInputRows + padH) --nOutputRows; if ((nOutputCols - 1)*dW >= nInputCols + padW) --nOutputCols; } if (nOutputCols < 1 || nOutputRows < 1) THError("Given input size: (%dx%dx%d). " "Calculated output size: (%dx%dx%d). 
Output size is too small", nInputPlane,nInputRows,nInputCols,nInputPlane,nOutputRows,nOutputCols); if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane); THCUNN_check_dim_size(state, gradOutput, ndim, dimh, nOutputRows); THCUNN_check_dim_size(state, gradOutput, ndim, dimw, nOutputCols); } } void THNN_(SpatialAveragePooling_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode, bool count_include_pad) { THCUNN_assertSameGPU(state, 2, input, output); THNN_(SpatialAveragePooling_shapeCheck) (state, input, NULL, kH, kW, dH, dW, padH, padW, ceil_mode); int64_t nInputCols, nInputRows, nInputPlane, batchSize; int64_t nOutputCols, nOutputRows; if (input->nDimension == 3) { nInputCols = input->size[2]; nInputRows = input->size[1]; nInputPlane = input->size[0]; batchSize = 1; } else { nInputCols = input->size[3]; nInputRows = input->size[2]; nInputPlane = input->size[1]; batchSize = input->size[0]; } if(ceil_mode) { nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1; nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1; } else { nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1; nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1; } if (padW || padH) { // ensure that the last pooling starts inside the image // needed to avoid problems in ceil mode if ((nOutputRows - 1)*dH >= nInputRows + padH) --nOutputRows; if ((nOutputCols - 1)*dW >= nInputCols + padW) --nOutputCols; } input = THCTensor_(newContiguous)(state, input); real* input_data = THCTensor_(data)(state, input); THCTensor_(resize4d)(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols); real* output_data = THCTensor_(data)(state, output); int count = THCTensor_(nElement)(state, output); if(count_include_pad) hipLaunchKernelGGL(( AvePoolForward<real, accreal, true>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) , count, input_data, batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols, kH, kW, dH, dW, padH, padW, output_data); else hipLaunchKernelGGL(( AvePoolForward<real, accreal, false>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) , count, input_data, batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols, kH, kW, dH, dW, padH, padW, output_data); THCudaCheck(hipGetLastError()); if(input->nDimension == 3) THCTensor_(resize3d)(state, output, nInputPlane, nOutputRows, nOutputCols); THCTensor_(free)(state, input); } void THNN_(SpatialAveragePooling_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode, bool count_include_pad) { THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput); THNN_(SpatialAveragePooling_shapeCheck) (state, input, gradOutput, kH, kW, dH, dW, padH, padW, ceil_mode); input = THCTensor_(newContiguous)(state, input); gradOutput = THCTensor_(newContiguous)(state, gradOutput); int64_t nInputCols, nInputRows, nInputPlane, batchSize; int64_t nOutputCols, nOutputRows; int dimCol = 2; int dimRow = 1; if (input->nDimension == 3) { nInputPlane = input->size[0]; batchSize = 1; } else { dimCol = 3; dimRow = 2; nInputPlane = input->size[1]; batchSize = input->size[0]; } nInputCols = input->size[dimCol]; nInputRows = input->size[dimRow]; if(ceil_mode) { nOutputCols = ceil(float(nInputCols - kW + 2*padW) / 
float(dW)) + 1; nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1; } else { nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1; nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1; } if (padW || padH) { // ensure that the last pooling starts inside the image // needed to avoid problems in ceil mode if ((nOutputRows - 1)*dH >= nInputRows + padH) --nOutputRows; if ((nOutputCols - 1)*dW >= nInputCols + padW) --nOutputCols; } THCUNN_check_dim_size(state, gradOutput, input->nDimension, dimRow, nOutputRows); THCUNN_check_dim_size(state, gradOutput, input->nDimension, dimCol, nOutputCols); THCTensor_(resizeAs)(state, gradInput, input); int count = THCTensor_(nElement)(state, input); if(count_include_pad) hipLaunchKernelGGL(( AvePoolBackward<real, accreal, true>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) , count, THCTensor_(data)(state, gradOutput), batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols, kH, kW, dH, dW, padH, padW, THCTensor_(data)(state, gradInput)); else hipLaunchKernelGGL(( AvePoolBackward<real, accreal, false>) , dim3(GET_BLOCKS(count)), dim3(CUDA_NUM_THREADS), 0, THCState_getCurrentStream(state) , count, THCTensor_(data)(state, gradOutput), batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols, kH, kW, dH, dW, padH, padW, THCTensor_(data)(state, gradInput)); THCudaCheck(hipGetLastError()); // clean THCTensor_(free)(state, input); THCTensor_(free)(state, gradOutput); } #endif
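The only substantive difference between this .hip file and the .cu file that follows is the kernel-launch spelling. A minimal, self-contained sketch of the mapping hipify applies is shown below; the kernel, grid size, and function names are invented for illustration and are not part of the pooling code.

#include <hip/hip_runtime.h>

__global__ void scaleKernel(float* x, float s, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= s;
}

void launchScale(float* d_x, float s, int n, hipStream_t stream)
{
    // CUDA form:  scaleKernel<<<grid, block, sharedBytes, stream>>>(d_x, s, n);
    // HIP form:   the launch configuration becomes the 2nd-5th arguments of
    //             hipLaunchKernelGGL and the kernel arguments follow.
    hipLaunchKernelGGL(scaleKernel,
                       dim3((n + 255) / 256), dim3(256),
                       0, stream,
                       d_x, s, n);
}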
85074e503d0c34781fcd7e36e47595cd527647b0.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/SpatialAveragePooling.cu" #else #include "../common.h" static inline void THNN_(SpatialAveragePooling_shapeCheck)( THCState *state, THCTensor *input, THCTensor *gradOutput, int kH, int kW, int dH, int dW, int padH, int padW, bool ceil_mode) { THArgCheck(kW > 0 && kH > 0, 5, "kernel size should be greater than zero, but got kH: %d kW: %d", kH, kW); THArgCheck(dW > 0 && dH > 0, 8, "stride should be greater than zero, but got dH: %d dW: %d", dH, dW); int ndim = input->nDimension; int dimf = 0; int dimh = 1; int dimw = 2; if (ndim == 4) { dimf++; dimh++; dimw++; } THCUNN_argCheck(state, ndim == 3 || ndim == 4, 2, input, "3D or 4D input tensor expected but got: %s"); THArgCheck(kW/2 >= padW && kH/2 >= padH, 2, "pad should be smaller than half of kernel size, but got " "padW = %d, padH = %d, kW = %d, kH = %d", padW, padH, kW, kH); int64_t nInputPlane = input->size[dimh-1]; int64_t nInputRows = input->size[dimh]; int64_t nInputCols = input->size[dimw]; int64_t nOutputRows, nOutputCols; int64_t nOutputPlane = nInputPlane; if(ceil_mode) { nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1; nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1; } else { nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1; nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1; } if (padW || padH) { // ensure that the last pooling starts inside the image // needed to avoid problems in ceil mode if ((nOutputRows - 1)*dH >= nInputRows + padH) --nOutputRows; if ((nOutputCols - 1)*dW >= nInputCols + padW) --nOutputCols; } if (nOutputCols < 1 || nOutputRows < 1) THError("Given input size: (%dx%dx%d). " "Calculated output size: (%dx%dx%d). Output size is too small", nInputPlane,nInputRows,nInputCols,nInputPlane,nOutputRows,nOutputCols); if (gradOutput != NULL) { THCUNN_check_dim_size(state, gradOutput, ndim, dimf, nOutputPlane); THCUNN_check_dim_size(state, gradOutput, ndim, dimh, nOutputRows); THCUNN_check_dim_size(state, gradOutput, ndim, dimw, nOutputCols); } } void THNN_(SpatialAveragePooling_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode, bool count_include_pad) { THCUNN_assertSameGPU(state, 2, input, output); THNN_(SpatialAveragePooling_shapeCheck) (state, input, NULL, kH, kW, dH, dW, padH, padW, ceil_mode); int64_t nInputCols, nInputRows, nInputPlane, batchSize; int64_t nOutputCols, nOutputRows; if (input->nDimension == 3) { nInputCols = input->size[2]; nInputRows = input->size[1]; nInputPlane = input->size[0]; batchSize = 1; } else { nInputCols = input->size[3]; nInputRows = input->size[2]; nInputPlane = input->size[1]; batchSize = input->size[0]; } if(ceil_mode) { nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1; nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1; } else { nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1; nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1; } if (padW || padH) { // ensure that the last pooling starts inside the image // needed to avoid problems in ceil mode if ((nOutputRows - 1)*dH >= nInputRows + padH) --nOutputRows; if ((nOutputCols - 1)*dW >= nInputCols + padW) --nOutputCols; } input = THCTensor_(newContiguous)(state, input); real* input_data = THCTensor_(data)(state, input); THCTensor_(resize4d)(state, output, batchSize, nInputPlane, nOutputRows, nOutputCols); real* output_data = 
THCTensor_(data)(state, output); int count = THCTensor_(nElement)(state, output); if(count_include_pad) AvePoolForward<real, accreal, true> <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>( count, input_data, batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols, kH, kW, dH, dW, padH, padW, output_data); else AvePoolForward<real, accreal, false> <<<GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>>( count, input_data, batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols, kH, kW, dH, dW, padH, padW, output_data); THCudaCheck(cudaGetLastError()); if(input->nDimension == 3) THCTensor_(resize3d)(state, output, nInputPlane, nOutputRows, nOutputCols); THCTensor_(free)(state, input); } void THNN_(SpatialAveragePooling_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, int kW, int kH, int dW, int dH, int padW, int padH, bool ceil_mode, bool count_include_pad) { THCUNN_assertSameGPU(state, 3, input, gradOutput, gradInput); THNN_(SpatialAveragePooling_shapeCheck) (state, input, gradOutput, kH, kW, dH, dW, padH, padW, ceil_mode); input = THCTensor_(newContiguous)(state, input); gradOutput = THCTensor_(newContiguous)(state, gradOutput); int64_t nInputCols, nInputRows, nInputPlane, batchSize; int64_t nOutputCols, nOutputRows; int dimCol = 2; int dimRow = 1; if (input->nDimension == 3) { nInputPlane = input->size[0]; batchSize = 1; } else { dimCol = 3; dimRow = 2; nInputPlane = input->size[1]; batchSize = input->size[0]; } nInputCols = input->size[dimCol]; nInputRows = input->size[dimRow]; if(ceil_mode) { nOutputCols = ceil(float(nInputCols - kW + 2*padW) / float(dW)) + 1; nOutputRows = ceil(float(nInputRows - kH + 2*padH) / float(dH)) + 1; } else { nOutputCols = floor(float(nInputCols - kW + 2*padW) / float(dW)) + 1; nOutputRows = floor(float(nInputRows - kH + 2*padH) / float(dH)) + 1; } if (padW || padH) { // ensure that the last pooling starts inside the image // needed to avoid problems in ceil mode if ((nOutputRows - 1)*dH >= nInputRows + padH) --nOutputRows; if ((nOutputCols - 1)*dW >= nInputCols + padW) --nOutputCols; } THCUNN_check_dim_size(state, gradOutput, input->nDimension, dimRow, nOutputRows); THCUNN_check_dim_size(state, gradOutput, input->nDimension, dimCol, nOutputCols); THCTensor_(resizeAs)(state, gradInput, input); int count = THCTensor_(nElement)(state, input); if(count_include_pad) AvePoolBackward<real, accreal, true> <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>> (count, THCTensor_(data)(state, gradOutput), batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols, kH, kW, dH, dW, padH, padW, THCTensor_(data)(state, gradInput)); else AvePoolBackward<real, accreal, false> <<< GET_BLOCKS(count), CUDA_NUM_THREADS, 0, THCState_getCurrentStream(state) >>> (count, THCTensor_(data)(state, gradOutput), batchSize, nInputPlane, nInputRows, nInputCols, nOutputRows, nOutputCols, kH, kW, dH, dW, padH, padW, THCTensor_(data)(state, gradInput)); THCudaCheck(cudaGetLastError()); // clean THCTensor_(free)(state, input); THCTensor_(free)(state, gradOutput); } #endif
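Both pooling files above compute the output extent with the same ceil/floor rule and the same padding correction, once in the shape check and once in each of the forward and backward paths. The helper below is an illustrative restatement of that rule for a single dimension, not code from the original; the original applies the correction to both dimensions whenever either pad is nonzero.

#include <cmath>

// Pooled output size along one dimension, matching the logic repeated above.
inline long pooled_size(long inSize, int k, int stride, int pad, bool ceil_mode)
{
    double raw = double(inSize - k + 2 * pad) / double(stride);
    long out = (ceil_mode ? (long) std::ceil(raw) : (long) std::floor(raw)) + 1;

    // With padding, ensure the last pooling window starts inside the image
    // (the "--nOutputRows / --nOutputCols" correction in the code above).
    if (pad > 0 && (out - 1) * stride >= inSize + pad)
        --out;
    return out;
}

// Example: a 7-wide input with kernel 3, stride 2, pad 0 gives floor((7-3)/2)+1 = 3.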
025afe860995cf9edbeda806fbff1a4f9c70c985.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <hipcub/hipcub.hpp> #include "paddle/fluid/operators/math/cross_entropy.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/softmax_with_cross_entropy_op.h" #include "paddle/fluid/platform/for_range.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; namespace { template <typename T> __global__ void CrossEntropyGrad(T* logit_grad, const int64_t* labels, const int n, const int d, const int remain, const int ignore_index) { CUDA_KERNEL_LOOP(index, n * remain) { int idx_n = index / remain; int idx_remain = index % remain; int tmp = labels[index]; if (ignore_index != tmp) { int idx = idx_n * d + tmp * remain + idx_remain; logit_grad[idx] -= static_cast<T>(1.); } } } template <typename T> __global__ void Scale(T* logit_grad, const T* loss_grad, const int num, const int d, const int remain, const int64_t* labels, const int ignore_index) { CUDA_KERNEL_LOOP(index, num) { int idx_n = index / d; int idx_remain = index % remain; int idx_lbl = idx_n * remain + idx_remain; if (labels[idx_lbl] == ignore_index) { logit_grad[index] = static_cast<T>(0.); } else { logit_grad[index] *= loss_grad[idx_lbl]; } } } template <typename T> __global__ void SoftCrossEntropyGradientKernel(T* logit_grad, const T* loss_grad, const T* labels, const int n, const int d, const int remain) { int ids = blockIdx.x * blockDim.x + threadIdx.x; if (ids < n * d) { int idx_n = ids / d; int idx_remain = ids % remain; int idx_loss = idx_n * remain + idx_remain; logit_grad[ids] = loss_grad[idx_loss] * (logit_grad[ids] - labels[ids]); } } } // namespace static __device__ __forceinline__ platform::float16 exp_on_device( platform::float16 x) { return ::Eigen::numext::exp(x); } static __device__ __forceinline__ float exp_on_device(float x) { return expf(x); } static __device__ __forceinline__ double exp_on_device(double x) { return exp(x); } static __device__ __forceinline__ platform::float16 log_on_device( platform::float16 x) { return math::TolerableValue<platform::float16>()(::Eigen::numext::log(x)); } static __device__ __forceinline__ float log_on_device(float x) { return math::TolerableValue<float>()(logf(x)); } static __device__ __forceinline__ double log_on_device(double x) { return math::TolerableValue<double>()(log(x)); } /** In the following codes, 3 CUDA kernels are implemented to calculate softmax * and loss **/ /* Supposing the x is `logits` and y is `labels`, the equations are as followings: cross\_entropy_i = \sum_{j}[- y_i_j * log({e^{x_i_j}/\sum_{j}e^{x_i_j}})] = \sum_{j}[- y_i_j * log({e^{x_i_j - max_i}/\sum_{j}e^{x_i_j-max_i}})] = \sum_{j}[-y_i_j * (x_i_j - max_i - log\sum_{j}e^{x_i_j - max_i})] = \sum_{j}[-y_i_j * (x_i_j - max_i - logDiffMaxSum_i)] = \sum_{j}(-y_i_j * tmp_i_j) softmax_i_j = e^{tmp_i_j} where: max_i = \max_{j}{x_i_j} logDiffMaxSum_i = log\sum_{j}e^{x_i_j - max_i} 
tmp_i_j = x_i_j - max_i - logDiffMaxSum_i Therefore, the calculation can be separated into 3 steps: Step 1: row-wise operation to calculate max_i Step 2: row-wise operation to calculate logDiffMaxSum_i Step 3: calculate tmp_i_j, and finally get softmax_i_j and cross\_entropy_i To save memory, we can share memory among max_i, logDiffMaxSum_i and cross\_entropy_i. In this way, the 3 steps should be changed to: Step 1 (RowReductionForMax): row-wise operation to calculate max_i Step 2 (RowReductionForDiffMaxSum): calculate immediate result of softmax'_i_j = x_i_j - max_i, and row-wise operation to calculate logDiffMaxSum_i Step 3 (RowReductionForSoftmaxAndCrossEntropy): calculate tmp_i_j = softmax'_i_j - logDiffMaxSum_i, and finally get softmax_i_j and cross\_entropy_i */ // There are 3 kinds of reduce algorithms in cub: // BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY // BLOCK_REDUCE_RAKING // BLOCK_REDUCE_WARP_REDUCTIONS (default) template <typename T, int BlockDim> using BlockReduce = hipcub::BlockReduce<T, BlockDim /*, hipcub::BLOCK_REDUCE_WARP_REDUCTIONS*/>; template <typename T, int BlockDim> using BlockReduceTempStorage = typename BlockReduce<T, BlockDim>::TempStorage; // Make sure that BlockDim <= axis_dim // This kernel is used to calculate the max element of each row template <typename T, int BlockDim> static __global__ void RowReductionForMax(const T* logits_data, T* max_data, int d, int axis_dim) { __shared__ BlockReduceTempStorage<T, BlockDim> temp_storage; // logits_data view as [n, axis_dim, remain] // max_data view as [n, 1, remain] // blockDim = n * remain, split blockIdx to idx_n and idx_remain int remain = d / axis_dim; int idx_n = blockIdx.x / remain; int idx_remain = blockIdx.x % remain; int beg_idx = idx_n * d + threadIdx.x * remain + idx_remain; int end_idx = (idx_n + 1) * d; int step = BlockDim * remain; T cur_max = logits_data[beg_idx]; beg_idx += step; while (beg_idx < end_idx) { if (cur_max < logits_data[beg_idx]) { cur_max = logits_data[beg_idx]; } beg_idx += step; } cur_max = BlockReduce<T, BlockDim>(temp_storage).Reduce(cur_max, hipcub::Max()); if (threadIdx.x == 0) max_data[blockIdx.x] = cur_max; } // Make sure that BlockDim <= axis_dim template <typename T, int BlockDim, bool CalculateLogSoftmax = false> static __global__ void RowReductionForDiffMaxSum(const T* logits_data, T* max_data, T* softmax, int d, int axis_dim) { __shared__ BlockReduceTempStorage<T, BlockDim> temp_storage; // logits, softmax data view as [n, axis_dim, remain] // max_data view as [n, 1, remain] // blockDim = n * remain, split blockIdx to idx_n and idx_remain int remain = d / axis_dim; int idx_n = blockIdx.x / remain; int idx_remain = blockIdx.x % remain; int beg_idx = idx_n * d + threadIdx.x * remain + idx_remain; int end_idx = (idx_n + 1) * d; auto block_max = max_data[blockIdx.x]; int step = BlockDim * remain; // In numeric stable mode softmax_with_loss, we calc loss with // tmp_i_j = x_i_j - max_i - logDiffMaxSum_i, instead of // log(exp(x_i_j - max_i)/DiffMaxSum_i). Therefore, log(0) will not occur. // Also we calc softmax_i_j = e^{tmp_i_j}, the maximum and minimum value will // be 1.0 and 0.0, represent prob is 1.0 and 0.0. // So there is no need to clip on shift_softmax. 
softmax[beg_idx] = logits_data[beg_idx] - block_max; T diff_max_sum = exp_on_device(softmax[beg_idx]); auto idx = beg_idx + step; while (idx < end_idx) { softmax[idx] = logits_data[idx] - block_max; diff_max_sum += exp_on_device(softmax[idx]); idx += step; } diff_max_sum = BlockReduce<T, BlockDim>(temp_storage).Reduce(diff_max_sum, hipcub::Sum()); if (threadIdx.x == 0) max_data[blockIdx.x] = log_on_device(diff_max_sum); if (!CalculateLogSoftmax) return; __syncthreads(); diff_max_sum = max_data[blockIdx.x]; softmax[beg_idx] -= diff_max_sum; beg_idx += step; while (beg_idx < end_idx) { softmax[beg_idx] -= diff_max_sum; beg_idx += step; } // Note(zhiqiu): since different threads may use max_data[blockIdx.x] to // calculate diff_max_sum, __syncthreads() is needed here. __syncthreads(); if (threadIdx.x == 0) max_data[blockIdx.x] = 0; } // Make sure that BlockDim <= axis_dim template <typename T, int BlockDim> static __global__ void RowReductionForSoftmaxAndCrossEntropy( const T* logits_data, const T* labels_data, T* loss_data, T* softmax, int d, int axis_dim) { __shared__ BlockReduceTempStorage<T, BlockDim> temp_storage; // logits, softmax, labels data view as [n, axis_dim, remain] // loss_data view as [n, 1, remain] // blockDim = n * remain, split blockIdx to idx_n and idx_remain int remain = d / axis_dim; int idx_n = blockIdx.x / remain; int idx_remain = blockIdx.x % remain; int beg_idx = idx_n * d + threadIdx.x * remain + idx_remain; int end_idx = (idx_n + 1) * d; // log_diff_max_sum shares memory with loss auto block_log_diff_max_sum = loss_data[blockIdx.x]; auto tmp = softmax[beg_idx] - block_log_diff_max_sum; softmax[beg_idx] = exp_on_device(tmp); auto loss = -labels_data[beg_idx] * tmp; int step = BlockDim * remain; beg_idx += step; while (beg_idx < end_idx) { tmp = softmax[beg_idx] - block_log_diff_max_sum; softmax[beg_idx] = exp_on_device(tmp); loss -= (labels_data[beg_idx] * tmp); beg_idx += step; } loss = BlockReduce<T, BlockDim>(temp_storage).Reduce(loss, hipcub::Sum()); if (threadIdx.x == 0) loss_data[blockIdx.x] = loss; } template <typename T> struct HardLabelSoftmaxWithCrossEntropyFunctor { public: HardLabelSoftmaxWithCrossEntropyFunctor(const int64_t* labels, T* loss, T* log_softmax, int d, int axis_dim) : labels_(labels), loss_(loss), log_softmax_(log_softmax), d_(d), axis_dim_(axis_dim) {} __device__ void operator()(int idx) const { // logits view as [n, axis_dim, remain], where d = axis_dim * remain int remain = d_ / axis_dim_; int idx_n = idx / d_; int idx_axis = (idx % d_) / remain; int idx_remain = idx % remain; // labels, loss view as [n, remain] int idx_lbl = idx_n * remain + idx_remain; // It also would ignore labels not in range(class_num). 
if (idx_axis != labels_[idx_lbl]) { log_softmax_[idx] = exp_on_device(log_softmax_[idx]); } else { auto softmax = log_softmax_[idx]; log_softmax_[idx] = exp_on_device(softmax); loss_[idx_lbl] = -softmax; } } private: const int64_t* labels_; T* loss_; T* log_softmax_; int d_; int axis_dim_; }; template <typename T> struct HardLabelSoftmaxWithCrossEntropyFunctorWithIgnoreIdx { public: HardLabelSoftmaxWithCrossEntropyFunctorWithIgnoreIdx(const int64_t* labels, T* loss, T* log_softmax, int d, int axis_dim, int ignore_idx) : labels_(labels), loss_(loss), log_softmax_(log_softmax), d_(d), axis_dim_(axis_dim), ignore_idx_(ignore_idx) {} __device__ void operator()(int idx) const { // logits view as [n, axis_dim, remain], where d = axis_dim * remain int remain = d_ / axis_dim_; int idx_n = idx / d_; int idx_axis = (idx % d_) / remain; int idx_remain = idx % remain; // labels, loss view as [n, remain] int idx_lbl = idx_n * remain + idx_remain; if (idx_axis != labels_[idx_lbl] || idx_axis == ignore_idx_) { log_softmax_[idx] = exp_on_device(log_softmax_[idx]); } else { auto softmax = log_softmax_[idx]; log_softmax_[idx] = exp_on_device(softmax); loss_[idx_lbl] = -softmax; } } private: const int64_t* labels_; T* loss_; T* log_softmax_; int d_; int axis_dim_; int ignore_idx_; }; template <typename T> static void HardLabelSoftmaxWithCrossEntropy( const platform::CUDADeviceContext& ctx, const T* logits_data, const int64_t* labels_data, T* loss_data, T* softmax_data, int n, int d, int axis_dim, int ignore_idx) { constexpr int kMaxBlockDim = 512; int block_dim = axis_dim >= kMaxBlockDim ? kMaxBlockDim : (1 << static_cast<int>(std::log2(axis_dim))); int grid_dim = n * d / axis_dim; auto stream = ctx.stream(); #define CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(BlockDim) \ case BlockDim: { \ hipLaunchKernelGGL(( RowReductionForMax<T, BlockDim>), dim3(grid_dim), dim3(BlockDim), 0, stream, \ logits_data, loss_data, d, axis_dim); \ hipLaunchKernelGGL(( RowReductionForDiffMaxSum<T, BlockDim, \ true>), dim3(grid_dim), dim3(BlockDim), 0, stream, \ logits_data, loss_data, softmax_data, d, axis_dim); \ platform::ForRange<platform::CUDADeviceContext> for_range(ctx, n* d); \ if (ignore_idx >= 0 && ignore_idx < axis_dim) { \ for_range(HardLabelSoftmaxWithCrossEntropyFunctorWithIgnoreIdx<T>( \ labels_data, loss_data, softmax_data, d, axis_dim, ignore_idx)); \ } else { \ for_range(HardLabelSoftmaxWithCrossEntropyFunctor<T>( \ labels_data, loss_data, softmax_data, d, axis_dim)); \ } \ } break switch (block_dim) { CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(512); CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(256); CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(128); CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(64); CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(32); CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(16); CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(8); CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(4); CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(2); default: PADDLE_THROW(platform::errors::Unavailable( "Block Dimension must be 2^n in softmax_with_cross_entropy_op.")); break; } #undef CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL } template <typename T> static void SoftmaxWithCrossEntropyFusedKernel(const T* logits_data, const T* labels_data, T* softmax_data, T* loss_data, int n, int d, int axis_dim, hipStream_t stream) { constexpr int kMaxBlockDim = 512; int block_dim = axis_dim >= kMaxBlockDim ? 
kMaxBlockDim : (1 << static_cast<int>(std::log2(axis_dim))); int grid_dim = n * d / axis_dim; #define CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(BlockDim) \ case BlockDim: \ hipLaunchKernelGGL(( RowReductionForMax<T, BlockDim>), dim3(grid_dim), dim3(BlockDim), 0, stream, \ logits_data, loss_data, d, axis_dim); \ hipLaunchKernelGGL(( RowReductionForDiffMaxSum<T, BlockDim>), dim3(grid_dim), dim3(BlockDim), 0, stream, \ logits_data, loss_data, softmax_data, d, axis_dim); \ hipLaunchKernelGGL(( RowReductionForSoftmaxAndCrossEntropy< \ T, BlockDim>), dim3(grid_dim), dim3(BlockDim), 0, stream, \ logits_data, labels_data, loss_data, softmax_data, d, axis_dim); \ break switch (block_dim) { CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(512); CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(256); CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(128); CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(64); CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(32); CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(16); CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(8); CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(4); CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(2); default: PADDLE_THROW(platform::errors::Unavailable( "Block Dimension must be 2^n in softmax_with_cross_entropy_op.")); break; } #undef CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL } template <typename T> class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(context.GetPlace()), true, platform::errors::Unavailable("softmax_with_cross_entropy operator's " "CUDA kernel only runs on GPU device.")); const Tensor* logits = context.Input<Tensor>("Logits"); const Tensor* labels = context.Input<Tensor>("Label"); Tensor* softmax = context.Output<Tensor>("Softmax"); Tensor* loss = context.Output<Tensor>("Loss"); const int rank = logits->dims().size(); const int axis = CanonicalAxis(context.Attr<int>("axis"), rank); int axis_dim = logits->dims()[axis]; const int n = SizeToAxis(axis, logits->dims()); const int d = SizeFromAxis(axis, logits->dims()); auto* softmax_data = softmax->mutable_data<T>(context.GetPlace()); auto* loss_data = loss->mutable_data<T>(context.GetPlace()); if (axis_dim == 1) { math::SetConstant<platform::CUDADeviceContext, T> set_constant; set_constant(context.cuda_device_context(), softmax, static_cast<T>(1)); set_constant(context.cuda_device_context(), loss, static_cast<T>(0)); return; } auto soft_label = context.Attr<bool>("soft_label"); auto ignore_index = context.Attr<int>("ignore_index"); if (soft_label) { auto* logits_data = logits->data<T>(); auto* labels_data = labels->data<T>(); SoftmaxWithCrossEntropyFusedKernel( logits_data, labels_data, softmax_data, loss_data, n, d, axis_dim, context.cuda_device_context().stream()); } else { if (!context.Attr<bool>("numeric_stable_mode")) { // CUDNN kernel only suppoer 2-D tensor and perfome softmax on last dim Tensor logits_2d, softmax_2d, labels_2d, loss_2d; logits_2d.ShareDataWith(*logits).Resize({n, d}); softmax_2d.ShareDataWith(*softmax).Resize({n, d}); labels_2d.ShareDataWith(*labels).Resize({n, labels->numel() / n}); loss_2d.ShareDataWith(*loss).Resize({n, 1}); math::SoftmaxCUDNNFunctor<T>()(context.cuda_device_context(), &logits_2d, &softmax_2d); math::CrossEntropyFunctor<platform::CUDADeviceContext, T>()( context.cuda_device_context(), &loss_2d, &softmax_2d, &labels_2d, false, ignore_index, axis_dim); } else { auto* logits_data = logits->data<T>(); auto* 
labels_data = labels->data<int64_t>(); HardLabelSoftmaxWithCrossEntropy<T>( context.cuda_device_context(), logits_data, labels_data, loss_data, softmax_data, n, d, axis_dim, ignore_index); } } } }; template <typename T> class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(context.GetPlace()), true, platform::errors::Unavailable("softmax_with_cross_entropy operator's " "CUDA kernel only runs on GPU device.")); const Tensor* labels = context.Input<Tensor>("Label"); const T* loss_grad_data = context.Input<Tensor>(framework::GradVarName("Loss"))->data<T>(); Tensor* logit_grad = context.Output<Tensor>(framework::GradVarName("Logits")); const Tensor* softmax = context.Input<Tensor>("Softmax"); if (logit_grad != softmax) { framework::TensorCopy(*softmax, context.GetPlace(), context.device_context(), logit_grad); } T* logit_grad_data = logit_grad->data<T>(); const int rank = logit_grad->dims().size(); const int axis = CanonicalAxis(context.Attr<int>("axis"), rank); int axis_dim = logit_grad->dims()[axis]; const int n = SizeToAxis(axis, logit_grad->dims()); const int d = SizeFromAxis(axis, logit_grad->dims()); const int remain = d / axis_dim; int block = 512; auto stream = context.cuda_device_context().stream(); auto ignore_index = context.Attr<int>("ignore_index"); if (context.Attr<bool>("soft_label")) { int grid = (n * d + block - 1) / block; const T* label_data = labels->data<T>(); hipLaunchKernelGGL(( SoftCrossEntropyGradientKernel<T>), dim3(grid), dim3(block), 0, stream, logit_grad_data, loss_grad_data, label_data, n, d, remain); } else { int grid = (n * remain + block - 1) / block; const int64_t* label_data = labels->data<int64_t>(); hipLaunchKernelGGL(( CrossEntropyGrad<T>), dim3(grid), dim3(block), 0, stream, logit_grad_data, label_data, n, d, remain, ignore_index); int num = n * d; grid = (num + block - 1) / block; hipLaunchKernelGGL(( Scale<T>), dim3(grid), dim3(block), 0, stream, logit_grad_data, loss_grad_data, num, d, remain, label_data, ignore_index); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( softmax_with_cross_entropy, ops::SoftmaxWithCrossEntropyCUDAKernel<float>, ops::SoftmaxWithCrossEntropyCUDAKernel<paddle::platform::float16>, ops::SoftmaxWithCrossEntropyCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL( softmax_with_cross_entropy_grad, ops::SoftmaxWithCrossEntropyGradCUDAKernel<float>, ops::SoftmaxWithCrossEntropyGradCUDAKernel<paddle::platform::float16>, ops::SoftmaxWithCrossEntropyGradCUDAKernel<double>);
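One line in the launchers above is easy to misread: the block dimension is not the axis length itself but the axis length rounded down to a power of two and capped at 512, because the reduction kernels are only instantiated for the power-of-two BlockDim values listed in the switch. The snippet below is an illustrative restatement of that selection, not code from the operator.

#include <cmath>
#include <cstdio>

int pick_block_dim(int axis_dim)
{
    constexpr int kMaxBlockDim = 512;
    return axis_dim >= kMaxBlockDim
               ? kMaxBlockDim
               : (1 << static_cast<int>(std::log2(axis_dim)));
}

int main()
{
    // e.g. 1000 -> 512, 300 -> 256, 48 -> 32, 7 -> 4
    printf("%d %d %d %d\n", pick_block_dim(1000), pick_block_dim(300),
           pick_block_dim(48), pick_block_dim(7));
    return 0;
}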
025afe860995cf9edbeda806fbff1a4f9c70c985.cu
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cub/cub.cuh> #include "paddle/fluid/operators/math/cross_entropy.h" #include "paddle/fluid/operators/math/math_function.h" #include "paddle/fluid/operators/softmax_with_cross_entropy_op.h" #include "paddle/fluid/platform/for_range.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; namespace { template <typename T> __global__ void CrossEntropyGrad(T* logit_grad, const int64_t* labels, const int n, const int d, const int remain, const int ignore_index) { CUDA_KERNEL_LOOP(index, n * remain) { int idx_n = index / remain; int idx_remain = index % remain; int tmp = labels[index]; if (ignore_index != tmp) { int idx = idx_n * d + tmp * remain + idx_remain; logit_grad[idx] -= static_cast<T>(1.); } } } template <typename T> __global__ void Scale(T* logit_grad, const T* loss_grad, const int num, const int d, const int remain, const int64_t* labels, const int ignore_index) { CUDA_KERNEL_LOOP(index, num) { int idx_n = index / d; int idx_remain = index % remain; int idx_lbl = idx_n * remain + idx_remain; if (labels[idx_lbl] == ignore_index) { logit_grad[index] = static_cast<T>(0.); } else { logit_grad[index] *= loss_grad[idx_lbl]; } } } template <typename T> __global__ void SoftCrossEntropyGradientKernel(T* logit_grad, const T* loss_grad, const T* labels, const int n, const int d, const int remain) { int ids = blockIdx.x * blockDim.x + threadIdx.x; if (ids < n * d) { int idx_n = ids / d; int idx_remain = ids % remain; int idx_loss = idx_n * remain + idx_remain; logit_grad[ids] = loss_grad[idx_loss] * (logit_grad[ids] - labels[ids]); } } } // namespace static __device__ __forceinline__ platform::float16 exp_on_device( platform::float16 x) { return ::Eigen::numext::exp(x); } static __device__ __forceinline__ float exp_on_device(float x) { return expf(x); } static __device__ __forceinline__ double exp_on_device(double x) { return exp(x); } static __device__ __forceinline__ platform::float16 log_on_device( platform::float16 x) { return math::TolerableValue<platform::float16>()(::Eigen::numext::log(x)); } static __device__ __forceinline__ float log_on_device(float x) { return math::TolerableValue<float>()(logf(x)); } static __device__ __forceinline__ double log_on_device(double x) { return math::TolerableValue<double>()(log(x)); } /** In the following codes, 3 CUDA kernels are implemented to calculate softmax * and loss **/ /* Supposing the x is `logits` and y is `labels`, the equations are as followings: cross\_entropy_i = \sum_{j}[- y_i_j * log({e^{x_i_j}/\sum_{j}e^{x_i_j}})] = \sum_{j}[- y_i_j * log({e^{x_i_j - max_i}/\sum_{j}e^{x_i_j-max_i}})] = \sum_{j}[-y_i_j * (x_i_j - max_i - log\sum_{j}e^{x_i_j - max_i})] = \sum_{j}[-y_i_j * (x_i_j - max_i - logDiffMaxSum_i)] = \sum_{j}(-y_i_j * tmp_i_j) softmax_i_j = e^{tmp_i_j} where: max_i = \max_{j}{x_i_j} logDiffMaxSum_i = log\sum_{j}e^{x_i_j - max_i} tmp_i_j = x_i_j - max_i - logDiffMaxSum_i Therefore, the calculation can be separated into 3 steps: 
Step 1: row-wise operation to calculate max_i Step 2: row-wise operation to calculate logDiffMaxSum_i Step 3: calculate tmp_i_j, and finally get softmax_i_j and cross\_entropy_i To save memory, we can share memory among max_i, logDiffMaxSum_i and cross\_entropy_i. In this way, the 3 steps should be changed to: Step 1 (RowReductionForMax): row-wise operation to calculate max_i Step 2 (RowReductionForDiffMaxSum): calculate immediate result of softmax'_i_j = x_i_j - max_i, and row-wise operation to calculate logDiffMaxSum_i Step 3 (RowReductionForSoftmaxAndCrossEntropy): calculate tmp_i_j = softmax'_i_j - logDiffMaxSum_i, and finally get softmax_i_j and cross\_entropy_i */ // There are 3 kinds of reduce algorithms in cub: // BLOCK_REDUCE_RAKING_COMMUTATIVE_ONLY // BLOCK_REDUCE_RAKING // BLOCK_REDUCE_WARP_REDUCTIONS (default) template <typename T, int BlockDim> using BlockReduce = cub::BlockReduce<T, BlockDim /*, cub::BLOCK_REDUCE_WARP_REDUCTIONS*/>; template <typename T, int BlockDim> using BlockReduceTempStorage = typename BlockReduce<T, BlockDim>::TempStorage; // Make sure that BlockDim <= axis_dim // This kernel is used to calculate the max element of each row template <typename T, int BlockDim> static __global__ void RowReductionForMax(const T* logits_data, T* max_data, int d, int axis_dim) { __shared__ BlockReduceTempStorage<T, BlockDim> temp_storage; // logits_data view as [n, axis_dim, remain] // max_data view as [n, 1, remain] // blockDim = n * remain, split blockIdx to idx_n and idx_remain int remain = d / axis_dim; int idx_n = blockIdx.x / remain; int idx_remain = blockIdx.x % remain; int beg_idx = idx_n * d + threadIdx.x * remain + idx_remain; int end_idx = (idx_n + 1) * d; int step = BlockDim * remain; T cur_max = logits_data[beg_idx]; beg_idx += step; while (beg_idx < end_idx) { if (cur_max < logits_data[beg_idx]) { cur_max = logits_data[beg_idx]; } beg_idx += step; } cur_max = BlockReduce<T, BlockDim>(temp_storage).Reduce(cur_max, cub::Max()); if (threadIdx.x == 0) max_data[blockIdx.x] = cur_max; } // Make sure that BlockDim <= axis_dim template <typename T, int BlockDim, bool CalculateLogSoftmax = false> static __global__ void RowReductionForDiffMaxSum(const T* logits_data, T* max_data, T* softmax, int d, int axis_dim) { __shared__ BlockReduceTempStorage<T, BlockDim> temp_storage; // logits, softmax data view as [n, axis_dim, remain] // max_data view as [n, 1, remain] // blockDim = n * remain, split blockIdx to idx_n and idx_remain int remain = d / axis_dim; int idx_n = blockIdx.x / remain; int idx_remain = blockIdx.x % remain; int beg_idx = idx_n * d + threadIdx.x * remain + idx_remain; int end_idx = (idx_n + 1) * d; auto block_max = max_data[blockIdx.x]; int step = BlockDim * remain; // In numeric stable mode softmax_with_loss, we calc loss with // tmp_i_j = x_i_j - max_i - logDiffMaxSum_i, instead of // log(exp(x_i_j - max_i)/DiffMaxSum_i). Therefore, log(0) will not occur. // Also we calc softmax_i_j = e^{tmp_i_j}, the maximum and minimum value will // be 1.0 and 0.0, represent prob is 1.0 and 0.0. // So there is no need to clip on shift_softmax. 
softmax[beg_idx] = logits_data[beg_idx] - block_max; T diff_max_sum = exp_on_device(softmax[beg_idx]); auto idx = beg_idx + step; while (idx < end_idx) { softmax[idx] = logits_data[idx] - block_max; diff_max_sum += exp_on_device(softmax[idx]); idx += step; } diff_max_sum = BlockReduce<T, BlockDim>(temp_storage).Reduce(diff_max_sum, cub::Sum()); if (threadIdx.x == 0) max_data[blockIdx.x] = log_on_device(diff_max_sum); if (!CalculateLogSoftmax) return; __syncthreads(); diff_max_sum = max_data[blockIdx.x]; softmax[beg_idx] -= diff_max_sum; beg_idx += step; while (beg_idx < end_idx) { softmax[beg_idx] -= diff_max_sum; beg_idx += step; } // Note(zhiqiu): since different threads may use max_data[blockIdx.x] to // calculate diff_max_sum, __syncthreads() is needed here. __syncthreads(); if (threadIdx.x == 0) max_data[blockIdx.x] = 0; } // Make sure that BlockDim <= axis_dim template <typename T, int BlockDim> static __global__ void RowReductionForSoftmaxAndCrossEntropy( const T* logits_data, const T* labels_data, T* loss_data, T* softmax, int d, int axis_dim) { __shared__ BlockReduceTempStorage<T, BlockDim> temp_storage; // logits, softmax, labels data view as [n, axis_dim, remain] // loss_data view as [n, 1, remain] // blockDim = n * remain, split blockIdx to idx_n and idx_remain int remain = d / axis_dim; int idx_n = blockIdx.x / remain; int idx_remain = blockIdx.x % remain; int beg_idx = idx_n * d + threadIdx.x * remain + idx_remain; int end_idx = (idx_n + 1) * d; // log_diff_max_sum shares memory with loss auto block_log_diff_max_sum = loss_data[blockIdx.x]; auto tmp = softmax[beg_idx] - block_log_diff_max_sum; softmax[beg_idx] = exp_on_device(tmp); auto loss = -labels_data[beg_idx] * tmp; int step = BlockDim * remain; beg_idx += step; while (beg_idx < end_idx) { tmp = softmax[beg_idx] - block_log_diff_max_sum; softmax[beg_idx] = exp_on_device(tmp); loss -= (labels_data[beg_idx] * tmp); beg_idx += step; } loss = BlockReduce<T, BlockDim>(temp_storage).Reduce(loss, cub::Sum()); if (threadIdx.x == 0) loss_data[blockIdx.x] = loss; } template <typename T> struct HardLabelSoftmaxWithCrossEntropyFunctor { public: HardLabelSoftmaxWithCrossEntropyFunctor(const int64_t* labels, T* loss, T* log_softmax, int d, int axis_dim) : labels_(labels), loss_(loss), log_softmax_(log_softmax), d_(d), axis_dim_(axis_dim) {} __device__ void operator()(int idx) const { // logits view as [n, axis_dim, remain], where d = axis_dim * remain int remain = d_ / axis_dim_; int idx_n = idx / d_; int idx_axis = (idx % d_) / remain; int idx_remain = idx % remain; // labels, loss view as [n, remain] int idx_lbl = idx_n * remain + idx_remain; // It also would ignore labels not in range(class_num). 
if (idx_axis != labels_[idx_lbl]) { log_softmax_[idx] = exp_on_device(log_softmax_[idx]); } else { auto softmax = log_softmax_[idx]; log_softmax_[idx] = exp_on_device(softmax); loss_[idx_lbl] = -softmax; } } private: const int64_t* labels_; T* loss_; T* log_softmax_; int d_; int axis_dim_; }; template <typename T> struct HardLabelSoftmaxWithCrossEntropyFunctorWithIgnoreIdx { public: HardLabelSoftmaxWithCrossEntropyFunctorWithIgnoreIdx(const int64_t* labels, T* loss, T* log_softmax, int d, int axis_dim, int ignore_idx) : labels_(labels), loss_(loss), log_softmax_(log_softmax), d_(d), axis_dim_(axis_dim), ignore_idx_(ignore_idx) {} __device__ void operator()(int idx) const { // logits view as [n, axis_dim, remain], where d = axis_dim * remain int remain = d_ / axis_dim_; int idx_n = idx / d_; int idx_axis = (idx % d_) / remain; int idx_remain = idx % remain; // labels, loss view as [n, remain] int idx_lbl = idx_n * remain + idx_remain; if (idx_axis != labels_[idx_lbl] || idx_axis == ignore_idx_) { log_softmax_[idx] = exp_on_device(log_softmax_[idx]); } else { auto softmax = log_softmax_[idx]; log_softmax_[idx] = exp_on_device(softmax); loss_[idx_lbl] = -softmax; } } private: const int64_t* labels_; T* loss_; T* log_softmax_; int d_; int axis_dim_; int ignore_idx_; }; template <typename T> static void HardLabelSoftmaxWithCrossEntropy( const platform::CUDADeviceContext& ctx, const T* logits_data, const int64_t* labels_data, T* loss_data, T* softmax_data, int n, int d, int axis_dim, int ignore_idx) { constexpr int kMaxBlockDim = 512; int block_dim = axis_dim >= kMaxBlockDim ? kMaxBlockDim : (1 << static_cast<int>(std::log2(axis_dim))); int grid_dim = n * d / axis_dim; auto stream = ctx.stream(); #define CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(BlockDim) \ case BlockDim: { \ RowReductionForMax<T, BlockDim><<<grid_dim, BlockDim, 0, stream>>>( \ logits_data, loss_data, d, axis_dim); \ RowReductionForDiffMaxSum<T, BlockDim, \ true><<<grid_dim, BlockDim, 0, stream>>>( \ logits_data, loss_data, softmax_data, d, axis_dim); \ platform::ForRange<platform::CUDADeviceContext> for_range(ctx, n* d); \ if (ignore_idx >= 0 && ignore_idx < axis_dim) { \ for_range(HardLabelSoftmaxWithCrossEntropyFunctorWithIgnoreIdx<T>( \ labels_data, loss_data, softmax_data, d, axis_dim, ignore_idx)); \ } else { \ for_range(HardLabelSoftmaxWithCrossEntropyFunctor<T>( \ labels_data, loss_data, softmax_data, d, axis_dim)); \ } \ } break switch (block_dim) { CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(512); CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(256); CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(128); CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(64); CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(32); CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(16); CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(8); CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(4); CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(2); default: PADDLE_THROW(platform::errors::Unavailable( "Block Dimension must be 2^n in softmax_with_cross_entropy_op.")); break; } #undef CALL_HARD_LABEL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL } template <typename T> static void SoftmaxWithCrossEntropyFusedKernel(const T* logits_data, const T* labels_data, T* softmax_data, T* loss_data, int n, int d, int axis_dim, cudaStream_t stream) { constexpr int kMaxBlockDim = 512; int block_dim = axis_dim >= kMaxBlockDim ? 
kMaxBlockDim : (1 << static_cast<int>(std::log2(axis_dim))); int grid_dim = n * d / axis_dim; #define CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(BlockDim) \ case BlockDim: \ RowReductionForMax<T, BlockDim><<<grid_dim, BlockDim, 0, stream>>>( \ logits_data, loss_data, d, axis_dim); \ RowReductionForDiffMaxSum<T, BlockDim><<<grid_dim, BlockDim, 0, stream>>>( \ logits_data, loss_data, softmax_data, d, axis_dim); \ RowReductionForSoftmaxAndCrossEntropy< \ T, BlockDim><<<grid_dim, BlockDim, 0, stream>>>( \ logits_data, labels_data, loss_data, softmax_data, d, axis_dim); \ break switch (block_dim) { CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(512); CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(256); CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(128); CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(64); CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(32); CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(16); CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(8); CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(4); CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL(2); default: PADDLE_THROW(platform::errors::Unavailable( "Block Dimension must be 2^n in softmax_with_cross_entropy_op.")); break; } #undef CALL_SOFTMAX_WITH_CROSS_ENTROPY_FUSED_KERNEL } template <typename T> class SoftmaxWithCrossEntropyCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(context.GetPlace()), true, platform::errors::Unavailable("softmax_with_cross_entropy operator's " "CUDA kernel only runs on GPU device.")); const Tensor* logits = context.Input<Tensor>("Logits"); const Tensor* labels = context.Input<Tensor>("Label"); Tensor* softmax = context.Output<Tensor>("Softmax"); Tensor* loss = context.Output<Tensor>("Loss"); const int rank = logits->dims().size(); const int axis = CanonicalAxis(context.Attr<int>("axis"), rank); int axis_dim = logits->dims()[axis]; const int n = SizeToAxis(axis, logits->dims()); const int d = SizeFromAxis(axis, logits->dims()); auto* softmax_data = softmax->mutable_data<T>(context.GetPlace()); auto* loss_data = loss->mutable_data<T>(context.GetPlace()); if (axis_dim == 1) { math::SetConstant<platform::CUDADeviceContext, T> set_constant; set_constant(context.cuda_device_context(), softmax, static_cast<T>(1)); set_constant(context.cuda_device_context(), loss, static_cast<T>(0)); return; } auto soft_label = context.Attr<bool>("soft_label"); auto ignore_index = context.Attr<int>("ignore_index"); if (soft_label) { auto* logits_data = logits->data<T>(); auto* labels_data = labels->data<T>(); SoftmaxWithCrossEntropyFusedKernel( logits_data, labels_data, softmax_data, loss_data, n, d, axis_dim, context.cuda_device_context().stream()); } else { if (!context.Attr<bool>("numeric_stable_mode")) { // CUDNN kernel only suppoer 2-D tensor and perfome softmax on last dim Tensor logits_2d, softmax_2d, labels_2d, loss_2d; logits_2d.ShareDataWith(*logits).Resize({n, d}); softmax_2d.ShareDataWith(*softmax).Resize({n, d}); labels_2d.ShareDataWith(*labels).Resize({n, labels->numel() / n}); loss_2d.ShareDataWith(*loss).Resize({n, 1}); math::SoftmaxCUDNNFunctor<T>()(context.cuda_device_context(), &logits_2d, &softmax_2d); math::CrossEntropyFunctor<platform::CUDADeviceContext, T>()( context.cuda_device_context(), &loss_2d, &softmax_2d, &labels_2d, false, ignore_index, axis_dim); } else { auto* logits_data = logits->data<T>(); auto* labels_data = labels->data<int64_t>(); HardLabelSoftmaxWithCrossEntropy<T>( 
context.cuda_device_context(), logits_data, labels_data, loss_data, softmax_data, n, d, axis_dim, ignore_index); } } } }; template <typename T> class SoftmaxWithCrossEntropyGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& context) const override { PADDLE_ENFORCE_EQ( platform::is_gpu_place(context.GetPlace()), true, platform::errors::Unavailable("softmax_with_cross_entropy operator's " "CUDA kernel only runs on GPU device.")); const Tensor* labels = context.Input<Tensor>("Label"); const T* loss_grad_data = context.Input<Tensor>(framework::GradVarName("Loss"))->data<T>(); Tensor* logit_grad = context.Output<Tensor>(framework::GradVarName("Logits")); const Tensor* softmax = context.Input<Tensor>("Softmax"); if (logit_grad != softmax) { framework::TensorCopy(*softmax, context.GetPlace(), context.device_context(), logit_grad); } T* logit_grad_data = logit_grad->data<T>(); const int rank = logit_grad->dims().size(); const int axis = CanonicalAxis(context.Attr<int>("axis"), rank); int axis_dim = logit_grad->dims()[axis]; const int n = SizeToAxis(axis, logit_grad->dims()); const int d = SizeFromAxis(axis, logit_grad->dims()); const int remain = d / axis_dim; int block = 512; auto stream = context.cuda_device_context().stream(); auto ignore_index = context.Attr<int>("ignore_index"); if (context.Attr<bool>("soft_label")) { int grid = (n * d + block - 1) / block; const T* label_data = labels->data<T>(); SoftCrossEntropyGradientKernel<T><<<grid, block, 0, stream>>>( logit_grad_data, loss_grad_data, label_data, n, d, remain); } else { int grid = (n * remain + block - 1) / block; const int64_t* label_data = labels->data<int64_t>(); CrossEntropyGrad<T><<<grid, block, 0, stream>>>( logit_grad_data, label_data, n, d, remain, ignore_index); int num = n * d; grid = (num + block - 1) / block; Scale<T><<<grid, block, 0, stream>>>(logit_grad_data, loss_grad_data, num, d, remain, label_data, ignore_index); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( softmax_with_cross_entropy, ops::SoftmaxWithCrossEntropyCUDAKernel<float>, ops::SoftmaxWithCrossEntropyCUDAKernel<paddle::platform::float16>, ops::SoftmaxWithCrossEntropyCUDAKernel<double>); REGISTER_OP_CUDA_KERNEL( softmax_with_cross_entropy_grad, ops::SoftmaxWithCrossEntropyGradCUDAKernel<float>, ops::SoftmaxWithCrossEntropyGradCUDAKernel<paddle::platform::float16>, ops::SoftmaxWithCrossEntropyGradCUDAKernel<double>);
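The comment block in the files above derives the numerically stable form of the loss; the CPU sketch below restates what the three row-reduction kernels compute for a single row with soft labels. It is illustrative only and not part of the operator.

#include <algorithm>
#include <cmath>
#include <vector>

// One row of length C: returns the cross-entropy loss and fills softmax.
float soft_label_row(const std::vector<float>& x, const std::vector<float>& y,
                     std::vector<float>& softmax)
{
    // Step 1 (RowReductionForMax): row maximum.
    float mx = x[0];
    for (float v : x) mx = std::max(mx, v);

    // Step 2 (RowReductionForDiffMaxSum): log of the shifted exponential sum.
    float sum = 0.0f;
    for (float v : x) sum += std::exp(v - mx);
    const float log_sum = std::log(sum);

    // Step 3 (RowReductionForSoftmaxAndCrossEntropy): softmax and loss.
    float loss = 0.0f;
    softmax.resize(x.size());
    for (int j = 0; j < (int)x.size(); ++j) {
        const float tmp = x[j] - mx - log_sum;   // never takes log(0)
        softmax[j] = std::exp(tmp);
        loss -= y[j] * tmp;
    }
    return loss;
}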
16bc70cb43e889780598c8ab2d97fc8aaa2653b0.hip
// !!! This is a file automatically generated by hipify!!! #include "matmul.cuh" #include <cstdio> int main(int argc, char *argv[]) { unsigned int n = atol(argv[1]); unsigned int block_dim = atol(argv[2]); float *A, *B, *C; hipMallocManaged((void **)&A, n * n * sizeof(float)); hipMallocManaged((void **)&B, n * n * sizeof(float)); hipMallocManaged((void **)&C, n * n * sizeof(float)); for (size_t i = 0; i < n * n; ++i) { A[i] = 2; B[i] = 0.5; } hipEvent_t start; hipEvent_t stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start); matmul(A, B, C, n, block_dim); hipEventRecord(stop); hipEventSynchronize(stop); for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { if (C[i * n + j] != (float) n){ printf("Error: (%d, %d) is %f\n", i, j, C[i * n + j]); } } } float ms; hipEventElapsedTime(&ms, start, stop); printf("%f\n%f\n%f\n", C[0], C[n * n - 1], ms); hipFree(A); hipFree(B); hipFree(C); }
16bc70cb43e889780598c8ab2d97fc8aaa2653b0.cu
#include "matmul.cuh" #include <cstdio> int main(int argc, char *argv[]) { unsigned int n = atol(argv[1]); unsigned int block_dim = atol(argv[2]); float *A, *B, *C; cudaMallocManaged((void **)&A, n * n * sizeof(float)); cudaMallocManaged((void **)&B, n * n * sizeof(float)); cudaMallocManaged((void **)&C, n * n * sizeof(float)); for (size_t i = 0; i < n * n; ++i) { A[i] = 2; B[i] = 0.5; } cudaEvent_t start; cudaEvent_t stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start); matmul(A, B, C, n, block_dim); cudaEventRecord(stop); cudaEventSynchronize(stop); for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { if (C[i * n + j] != (float) n){ printf("Error: (%d, %d) is %f\n", i, j, C[i * n + j]); } } } float ms; cudaEventElapsedTime(&ms, start, stop); printf("%f\n%f\n%f\n", C[0], C[n * n - 1], ms); cudaFree(A); cudaFree(B); cudaFree(C); }
1b3e8617fb5bedea7e04c43bb9749cc3fbba6ac8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/lists/lists_column_view.hpp> #include <cudf/structs/structs_column_view.hpp> #include <cudf/table/table_device_view.cuh> #include <cudf/types.hpp> #include <thrust/optional.h> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> namespace cudf { namespace detail { namespace { /** * @brief Struct which contains per-column information necessary to * traverse a column hierarchy on the gpu. * * When `row_bit_count` is called, the input column hierarchy is flattened into a * vector of column_device_views. For each one of them, we store a column_info * struct. The `depth` field represents the depth of the column in the original * hierarchy. * * As we traverse the hierarchy for each input row, we maintain a span representing * the start and end rows for the current nesting depth. At depth 0, this span is * always just 1 row. As we cross list boundaries int the hierarchy, this span * grows. So for each column we visit we always know how many rows of it are relevant * and can compute it's contribution to the overall size. * * An example using a list<list<int>> column, computing the size of row 1. * * { {{1, 2}, {3, 4}, {5, 6}}, {{7}, {8, 9, 10}, {11, 12, 13, 14}} } * * L0 = List<List<int32_t>>: * Length : 2 * Offsets : 0, 3, 6 * L1 = List<int32_t>: * Length : 6 * Offsets : 0, 2, 4, 6, 7, 10, 14 * I = 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 * * * span0 = [1, 2] row 1 is represented by the span [1, 2] * span1 = [L0.offsets[span0[0]], L0.offsets[span0[1]]] expand by the offsets of L0 * span1 = [3, 6] span applied to children of L0 * span2 = [L1.offsets[span1[0]], L1.offsets[span1[1]]] expand by the offsets of L1 * span2 = [6, 14] span applied to children of L1 * * The total size of our row is computed as: * (span0[1] - span0[0]) * sizeof(int) the cost of the offsets for L0 * + * (span1[1] - span1[0]) * sizeof(int) the cost of the offsets for L1 * + * (span2[1] - span2[0]) * sizeof(int) the cost of the integers in I * * `depth` represents our depth in the source column hierarchy. * * "branches" within the spans can occur when we have lists inside of structs. * consider a case where we are entering a struct<list, float> with a span of [4, 8]. * The internal list column will change that span to something else, say [5, 9]. * But when we finish processing the list column, the final float column wants to * go back and use the original span [4, 8]. * * [4, 8] [5, 9] [4, 8] * struct< list<> float> * * To accomplish this we maintain a stack of spans. Pushing the current span * whenever we enter a branch, and popping a span whenever we leave a branch. 
* * `branch_depth_start` represents the branch depth as we reach a new column. * if `branch_depth_start` is < the last branch depth we saw, we are returning * from a branch and should pop off the stack. * * `branch_depth_end` represents the new branch depth caused by this column. * if branch_depth_end > branch_depth_start, we are branching and need to * push the current span on the stack. * */ struct column_info { size_type depth; size_type branch_depth_start; size_type branch_depth_end; }; /** * @brief Struct which contains hierarchy information precomputed on the host. * * If the input data contains only fixed-width types, this preprocess step * produces the value `simple_per_row_size` which is a constant for every * row in the output. We can use this value and skip the more complicated * processing for lists, structs and strings entirely if `complex_type_count` * is 0. * */ struct hierarchy_info { hierarchy_info() : simple_per_row_size(0), complex_type_count(0), max_branch_depth(0) {} // These two fields act as an optimization. If we find that the entire table // is just fixed-width types, we do not need to do the more expensive kernel call that // traverses the individual columns. So if complex_type_count is 0, we can just // return a column where every row contains the value simple_per_row_size size_type simple_per_row_size; // in bits size_type complex_type_count; // max depth of span branches present in the hierarchy. size_type max_branch_depth; }; /** * @brief Function which flattens the incoming column hierarchy into a vector * of column_views and produces accompanying column_info and hierarchy_info * metadata. * * @param begin: Beginning of a range of column views * @param end: End of a range of column views * @param out: (output) Flattened vector of output column_views * @param info: (output) Additional per-output column_view metadata needed by the gpu * @param h_info: (output) Information about the hierarchy * @param cur_depth: Current absolute depth in the hierarchy * @param cur_branch_depth: Current branch depth * @param parent_index: Index into `out` representing our owning parent column */ template <typename ColIter> void flatten_hierarchy(ColIter begin, ColIter end, std::vector<cudf::column_view>& out, std::vector<column_info>& info, hierarchy_info& h_info, rmm::cuda_stream_view stream, size_type cur_depth = 0, size_type cur_branch_depth = 0, thrust::optional<int> parent_index = {}); /** * @brief Type-dispatched functor called by flatten_hierarchy. * */ struct flatten_functor { rmm::cuda_stream_view stream; // fixed width template <typename T, std::enable_if_t<cudf::is_fixed_width<T>()>* = nullptr> void operator()(column_view const& col, std::vector<cudf::column_view>& out, std::vector<column_info>& info, hierarchy_info& h_info, rmm::cuda_stream_view, size_type cur_depth, size_type cur_branch_depth, thrust::optional<int>) { out.push_back(col); info.push_back({cur_depth, cur_branch_depth, cur_branch_depth}); h_info.simple_per_row_size += (sizeof(device_storage_type_t<T>) * CHAR_BIT) + (col.nullable() ? 
1 : 0); } // strings template <typename T, std::enable_if_t<std::is_same<T, string_view>::value>* = nullptr> void operator()(column_view const& col, std::vector<cudf::column_view>& out, std::vector<column_info>& info, hierarchy_info& h_info, rmm::cuda_stream_view, size_type cur_depth, size_type cur_branch_depth, thrust::optional<int>) { out.push_back(col); info.push_back({cur_depth, cur_branch_depth, cur_branch_depth}); h_info.complex_type_count++; } // lists template <typename T, std::enable_if_t<std::is_same<T, list_view>::value>* = nullptr> void operator()(column_view const& col, std::vector<cudf::column_view>& out, std::vector<column_info>& info, hierarchy_info& h_info, rmm::cuda_stream_view stream, size_type cur_depth, size_type cur_branch_depth, thrust::optional<int> parent_index) { // track branch depth as we reach this list and after we pass it size_type const branch_depth_start = cur_branch_depth; auto const is_list_inside_struct = parent_index && out[parent_index.value()].type().id() == type_id::STRUCT; if (is_list_inside_struct) { cur_branch_depth++; h_info.max_branch_depth = max(h_info.max_branch_depth, cur_branch_depth); } size_type const branch_depth_end = cur_branch_depth; out.push_back(col); info.push_back({cur_depth, branch_depth_start, branch_depth_end}); lists_column_view lcv(col); auto iter = cudf::detail::make_counting_transform_iterator( 0, [col = lcv.get_sliced_child(stream)](auto) { return col; }); h_info.complex_type_count++; flatten_hierarchy( iter, iter + 1, out, info, h_info, stream, cur_depth + 1, cur_branch_depth, out.size() - 1); } // structs template <typename T, std::enable_if_t<std::is_same<T, struct_view>::value>* = nullptr> void operator()(column_view const& col, std::vector<cudf::column_view>& out, std::vector<column_info>& info, hierarchy_info& h_info, rmm::cuda_stream_view stream, size_type cur_depth, size_type cur_branch_depth, thrust::optional<int>) { out.push_back(col); info.push_back({cur_depth, cur_branch_depth, cur_branch_depth}); h_info.simple_per_row_size += col.nullable() ? 1 : 0; structs_column_view scv(col); auto iter = cudf::detail::make_counting_transform_iterator( 0, [&scv](auto i) { return scv.get_sliced_child(i); }); flatten_hierarchy(iter, iter + scv.num_children(), out, info, h_info, stream, cur_depth + 1, cur_branch_depth, out.size() - 1); } // everything else template <typename T, typename... Args> std::enable_if_t<!cudf::is_fixed_width<T>() && !std::is_same<T, string_view>::value && !std::is_same<T, list_view>::value && !std::is_same<T, struct_view>::value, void> operator()(Args&&...) { CUDF_FAIL("Unsupported column type in row_bit_count"); } }; template <typename ColIter> void flatten_hierarchy(ColIter begin, ColIter end, std::vector<cudf::column_view>& out, std::vector<column_info>& info, hierarchy_info& h_info, rmm::cuda_stream_view stream, size_type cur_depth, size_type cur_branch_depth, thrust::optional<int> parent_index) { std::for_each(begin, end, [&](column_view const& col) { cudf::type_dispatcher(col.type(), flatten_functor{stream}, col, out, info, h_info, stream, cur_depth, cur_branch_depth, parent_index); }); } /** * @brief Struct representing a span of rows. * */ struct row_span { size_type row_start, row_end; }; /** * @brief Functor for computing the size, in bits, of a `row_span` of rows for a given * `column_device_view` * */ struct row_size_functor { /** * @brief Computes size in bits of a span of rows in a fixed-width column. 
* * Computed as : ((# of rows) * sizeof(data type) * 8) * + * 1 bit per row for validity if applicable. */ template <typename T> __device__ size_type operator()(column_device_view const& col, row_span const& span) { auto const num_rows{span.row_end - span.row_start}; auto const element_size = sizeof(device_storage_type_t<T>) * CHAR_BIT; auto const validity_size = col.nullable() ? 1 : 0; return (element_size + validity_size) * num_rows; } }; /** * @brief Computes size in bits of a span of rows in a strings column. * * Computed as : ((# of rows) * sizeof(offset) * 8) + (total # of characters * 8)) * + * 1 bit per row for validity if applicable. */ template <> __device__ size_type row_size_functor::operator()<string_view>(column_device_view const& col, row_span const& span) { column_device_view const& offsets = col.child(strings_column_view::offsets_column_index); auto const num_rows{span.row_end - span.row_start}; auto const row_start{span.row_start + col.offset()}; auto const row_end{span.row_end + col.offset()}; auto const offsets_size = sizeof(offset_type) * CHAR_BIT; auto const validity_size = col.nullable() ? 1 : 0; auto const chars_size = (offsets.data<offset_type>()[row_end] - offsets.data<offset_type>()[row_start]) * CHAR_BIT; return ((offsets_size + validity_size) * num_rows) + chars_size; } /** * @brief Computes size in bits of a span of rows in a list column. * * Computed as : ((# of rows) * sizeof(offset) * 8) * + * 1 bit per row for validity if applicable. */ template <> __device__ size_type row_size_functor::operator()<list_view>(column_device_view const& col, row_span const& span) { auto const num_rows{span.row_end - span.row_start}; auto const offsets_size = sizeof(offset_type) * CHAR_BIT; auto const validity_size = col.nullable() ? 1 : 0; return (offsets_size + validity_size) * num_rows; } /** * @brief Computes size in bits of a span of rows in a struct column. * * Computed as : 1 bit per row for validity if applicable. */ template <> __device__ size_type row_size_functor::operator()<struct_view>(column_device_view const& col, row_span const& span) { auto const num_rows{span.row_end - span.row_start}; return (col.nullable() ? 1 : 0) * num_rows; // cost of validity } /** * @brief Kernel for computing per-row sizes in bits. * * @param cols An span of column_device_views representing a column hierarchy * @param info An span of column_info structs corresponding the elements in `cols` * @param output Output span of size (# rows) where per-row bit sizes are stored * @param max_branch_depth Maximum depth of the span stack needed per-thread */ __global__ void compute_row_sizes(device_span<column_device_view const> cols, device_span<column_info const> info, device_span<size_type> output, size_type max_branch_depth) { extern __shared__ row_span thread_branch_stacks[]; int const tid = threadIdx.x + blockIdx.x * blockDim.x; auto const num_rows = output.size(); if (tid >= num_rows) { return; } // branch stack. points to the last list prior to branching. row_span* my_branch_stack = thread_branch_stacks + (tid * max_branch_depth); size_type branch_depth{0}; // current row span - always starts at 1 row. row_span cur_span{tid, tid + 1}; // output size size_type& size = output[tid]; size = 0; size_type last_branch_depth{0}; for (size_type idx = 0; idx < cols.size(); idx++) { column_device_view const& col = cols[idx]; // if we've returned from a branch if (info[idx].branch_depth_start < last_branch_depth) { cur_span = my_branch_stack[--branch_depth]; } // if we're entering a new branch. 
// NOTE: this case can happen (a pop and a push by the same column) // when we have a struct<list, list> if (info[idx].branch_depth_end > info[idx].branch_depth_start) { my_branch_stack[branch_depth++] = cur_span; } // if we're back at depth 0, this is a new top-level column, so reset // span info if (info[idx].depth == 0) { branch_depth = 0; last_branch_depth = 0; cur_span = row_span{tid, tid + 1}; } // add the contributing size of this row size += cudf::type_dispatcher(col.type(), row_size_functor{}, col, cur_span); // if this is a list column, update the working span from our offsets if (col.type().id() == type_id::LIST) { column_device_view const& offsets = col.child(lists_column_view::offsets_column_index); auto const base_offset = offsets.data<offset_type>()[col.offset()]; cur_span.row_start = offsets.data<offset_type>()[cur_span.row_start + col.offset()] - base_offset; cur_span.row_end = offsets.data<offset_type>()[cur_span.row_end + col.offset()] - base_offset; } last_branch_depth = info[idx].branch_depth_end; } } } // anonymous namespace /** * @copydoc cudf::detail::row_bit_count * */ std::unique_ptr<column> row_bit_count(table_view const& t, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // no rows if (t.num_rows() <= 0) { return cudf::make_empty_column(data_type{type_id::INT32}); } // flatten the hierarchy and determine some information about it. std::vector<cudf::column_view> cols; std::vector<column_info> info; hierarchy_info h_info; flatten_hierarchy(t.begin(), t.end(), cols, info, h_info, stream); CUDF_EXPECTS(info.size() == cols.size(), "Size/info mismatch"); // create output buffer and view auto output = cudf::make_fixed_width_column( data_type{type_id::INT32}, t.num_rows(), mask_state::UNALLOCATED, stream, mr); mutable_column_view mcv = output->mutable_view(); // simple case. if we have no complex types (lists, strings, etc), the per-row size is already // trivially computed if (h_info.complex_type_count <= 0) { thrust::fill(rmm::exec_policy(stream), mcv.begin<size_type>(), mcv.end<size_type>(), h_info.simple_per_row_size); return output; } // create a contiguous block of column_device_views auto d_cols = contiguous_copy_column_device_views<column_device_view>(cols, stream); // move stack info to the gpu rmm::device_uvector<column_info> d_info(info.size(), stream); CUDA_TRY(hipMemcpyAsync(d_info.data(), info.data(), sizeof(column_info) * info.size(), hipMemcpyHostToDevice, stream.value())); // each thread needs to maintain a stack of row spans of size max_branch_depth. we will use // shared memory to do this rather than allocating a potentially gigantic temporary buffer // of memory of size (# input rows * sizeof(row_span) * max_branch_depth). auto const shmem_per_thread = sizeof(row_span) * h_info.max_branch_depth; int device_id; CUDA_TRY(hipGetDevice(&device_id)); int shmem_limit_per_block; CUDA_TRY( hipDeviceGetAttribute(&shmem_limit_per_block, hipDeviceAttributeMaxSharedMemoryPerBlock, device_id)); constexpr int max_block_size = 256; auto const block_size = shmem_per_thread != 0 ? ::min(max_block_size, shmem_limit_per_block / static_cast<int>(shmem_per_thread)) : max_block_size; auto const shared_mem_size = shmem_per_thread * block_size; // should we be aborting if we reach some extremely small block size, or just if we hit 0? 
CUDF_EXPECTS(block_size > 0, "Encountered a column hierarchy too complex for row_bit_count"); cudf::detail::grid_1d grid{t.num_rows(), block_size, 1}; hipLaunchKernelGGL(( compute_row_sizes), dim3(grid.num_blocks), dim3(block_size), shared_mem_size, stream.value(), {std::get<1>(d_cols), cols.size()}, {d_info.data(), info.size()}, {mcv.data<size_type>(), static_cast<std::size_t>(t.num_rows())}, h_info.max_branch_depth); return output; } } // namespace detail /** * @copydoc cudf::row_bit_count * */ std::unique_ptr<column> row_bit_count(table_view const& t, rmm::mr::device_memory_resource* mr) { return detail::row_bit_count(t, rmm::cuda_stream_default, mr); } } // namespace cudf
1b3e8617fb5bedea7e04c43bb9749cc3fbba6ac8.cu
/* * Copyright (c) 2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_factories.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/lists/lists_column_view.hpp> #include <cudf/structs/structs_column_view.hpp> #include <cudf/table/table_device_view.cuh> #include <cudf/types.hpp> #include <thrust/optional.h> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> namespace cudf { namespace detail { namespace { /** * @brief Struct which contains per-column information necessary to * traverse a column hierarchy on the gpu. * * When `row_bit_count` is called, the input column hierarchy is flattened into a * vector of column_device_views. For each one of them, we store a column_info * struct. The `depth` field represents the depth of the column in the original * hierarchy. * * As we traverse the hierarchy for each input row, we maintain a span representing * the start and end rows for the current nesting depth. At depth 0, this span is * always just 1 row. As we cross list boundaries int the hierarchy, this span * grows. So for each column we visit we always know how many rows of it are relevant * and can compute it's contribution to the overall size. * * An example using a list<list<int>> column, computing the size of row 1. * * { {{1, 2}, {3, 4}, {5, 6}}, {{7}, {8, 9, 10}, {11, 12, 13, 14}} } * * L0 = List<List<int32_t>>: * Length : 2 * Offsets : 0, 3, 6 * L1 = List<int32_t>: * Length : 6 * Offsets : 0, 2, 4, 6, 7, 10, 14 * I = 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 * * * span0 = [1, 2] row 1 is represented by the span [1, 2] * span1 = [L0.offsets[span0[0]], L0.offsets[span0[1]]] expand by the offsets of L0 * span1 = [3, 6] span applied to children of L0 * span2 = [L1.offsets[span1[0]], L1.offsets[span1[1]]] expand by the offsets of L1 * span2 = [6, 14] span applied to children of L1 * * The total size of our row is computed as: * (span0[1] - span0[0]) * sizeof(int) the cost of the offsets for L0 * + * (span1[1] - span1[0]) * sizeof(int) the cost of the offsets for L1 * + * (span2[1] - span2[0]) * sizeof(int) the cost of the integers in I * * `depth` represents our depth in the source column hierarchy. * * "branches" within the spans can occur when we have lists inside of structs. * consider a case where we are entering a struct<list, float> with a span of [4, 8]. * The internal list column will change that span to something else, say [5, 9]. * But when we finish processing the list column, the final float column wants to * go back and use the original span [4, 8]. * * [4, 8] [5, 9] [4, 8] * struct< list<> float> * * To accomplish this we maintain a stack of spans. Pushing the current span * whenever we enter a branch, and popping a span whenever we leave a branch. * * `branch_depth_start` represents the branch depth as we reach a new column. 
* if `branch_depth_start` is < the last branch depth we saw, we are returning * from a branch and should pop off the stack. * * `branch_depth_end` represents the new branch depth caused by this column. * if branch_depth_end > branch_depth_start, we are branching and need to * push the current span on the stack. * */ struct column_info { size_type depth; size_type branch_depth_start; size_type branch_depth_end; }; /** * @brief Struct which contains hierarchy information precomputed on the host. * * If the input data contains only fixed-width types, this preprocess step * produces the value `simple_per_row_size` which is a constant for every * row in the output. We can use this value and skip the more complicated * processing for lists, structs and strings entirely if `complex_type_count` * is 0. * */ struct hierarchy_info { hierarchy_info() : simple_per_row_size(0), complex_type_count(0), max_branch_depth(0) {} // These two fields act as an optimization. If we find that the entire table // is just fixed-width types, we do not need to do the more expensive kernel call that // traverses the individual columns. So if complex_type_count is 0, we can just // return a column where every row contains the value simple_per_row_size size_type simple_per_row_size; // in bits size_type complex_type_count; // max depth of span branches present in the hierarchy. size_type max_branch_depth; }; /** * @brief Function which flattens the incoming column hierarchy into a vector * of column_views and produces accompanying column_info and hierarchy_info * metadata. * * @param begin: Beginning of a range of column views * @param end: End of a range of column views * @param out: (output) Flattened vector of output column_views * @param info: (output) Additional per-output column_view metadata needed by the gpu * @param h_info: (output) Information about the hierarchy * @param cur_depth: Current absolute depth in the hierarchy * @param cur_branch_depth: Current branch depth * @param parent_index: Index into `out` representing our owning parent column */ template <typename ColIter> void flatten_hierarchy(ColIter begin, ColIter end, std::vector<cudf::column_view>& out, std::vector<column_info>& info, hierarchy_info& h_info, rmm::cuda_stream_view stream, size_type cur_depth = 0, size_type cur_branch_depth = 0, thrust::optional<int> parent_index = {}); /** * @brief Type-dispatched functor called by flatten_hierarchy. * */ struct flatten_functor { rmm::cuda_stream_view stream; // fixed width template <typename T, std::enable_if_t<cudf::is_fixed_width<T>()>* = nullptr> void operator()(column_view const& col, std::vector<cudf::column_view>& out, std::vector<column_info>& info, hierarchy_info& h_info, rmm::cuda_stream_view, size_type cur_depth, size_type cur_branch_depth, thrust::optional<int>) { out.push_back(col); info.push_back({cur_depth, cur_branch_depth, cur_branch_depth}); h_info.simple_per_row_size += (sizeof(device_storage_type_t<T>) * CHAR_BIT) + (col.nullable() ? 
1 : 0); } // strings template <typename T, std::enable_if_t<std::is_same<T, string_view>::value>* = nullptr> void operator()(column_view const& col, std::vector<cudf::column_view>& out, std::vector<column_info>& info, hierarchy_info& h_info, rmm::cuda_stream_view, size_type cur_depth, size_type cur_branch_depth, thrust::optional<int>) { out.push_back(col); info.push_back({cur_depth, cur_branch_depth, cur_branch_depth}); h_info.complex_type_count++; } // lists template <typename T, std::enable_if_t<std::is_same<T, list_view>::value>* = nullptr> void operator()(column_view const& col, std::vector<cudf::column_view>& out, std::vector<column_info>& info, hierarchy_info& h_info, rmm::cuda_stream_view stream, size_type cur_depth, size_type cur_branch_depth, thrust::optional<int> parent_index) { // track branch depth as we reach this list and after we pass it size_type const branch_depth_start = cur_branch_depth; auto const is_list_inside_struct = parent_index && out[parent_index.value()].type().id() == type_id::STRUCT; if (is_list_inside_struct) { cur_branch_depth++; h_info.max_branch_depth = max(h_info.max_branch_depth, cur_branch_depth); } size_type const branch_depth_end = cur_branch_depth; out.push_back(col); info.push_back({cur_depth, branch_depth_start, branch_depth_end}); lists_column_view lcv(col); auto iter = cudf::detail::make_counting_transform_iterator( 0, [col = lcv.get_sliced_child(stream)](auto) { return col; }); h_info.complex_type_count++; flatten_hierarchy( iter, iter + 1, out, info, h_info, stream, cur_depth + 1, cur_branch_depth, out.size() - 1); } // structs template <typename T, std::enable_if_t<std::is_same<T, struct_view>::value>* = nullptr> void operator()(column_view const& col, std::vector<cudf::column_view>& out, std::vector<column_info>& info, hierarchy_info& h_info, rmm::cuda_stream_view stream, size_type cur_depth, size_type cur_branch_depth, thrust::optional<int>) { out.push_back(col); info.push_back({cur_depth, cur_branch_depth, cur_branch_depth}); h_info.simple_per_row_size += col.nullable() ? 1 : 0; structs_column_view scv(col); auto iter = cudf::detail::make_counting_transform_iterator( 0, [&scv](auto i) { return scv.get_sliced_child(i); }); flatten_hierarchy(iter, iter + scv.num_children(), out, info, h_info, stream, cur_depth + 1, cur_branch_depth, out.size() - 1); } // everything else template <typename T, typename... Args> std::enable_if_t<!cudf::is_fixed_width<T>() && !std::is_same<T, string_view>::value && !std::is_same<T, list_view>::value && !std::is_same<T, struct_view>::value, void> operator()(Args&&...) { CUDF_FAIL("Unsupported column type in row_bit_count"); } }; template <typename ColIter> void flatten_hierarchy(ColIter begin, ColIter end, std::vector<cudf::column_view>& out, std::vector<column_info>& info, hierarchy_info& h_info, rmm::cuda_stream_view stream, size_type cur_depth, size_type cur_branch_depth, thrust::optional<int> parent_index) { std::for_each(begin, end, [&](column_view const& col) { cudf::type_dispatcher(col.type(), flatten_functor{stream}, col, out, info, h_info, stream, cur_depth, cur_branch_depth, parent_index); }); } /** * @brief Struct representing a span of rows. * */ struct row_span { size_type row_start, row_end; }; /** * @brief Functor for computing the size, in bits, of a `row_span` of rows for a given * `column_device_view` * */ struct row_size_functor { /** * @brief Computes size in bits of a span of rows in a fixed-width column. 
* * Computed as : ((# of rows) * sizeof(data type) * 8) * + * 1 bit per row for validity if applicable. */ template <typename T> __device__ size_type operator()(column_device_view const& col, row_span const& span) { auto const num_rows{span.row_end - span.row_start}; auto const element_size = sizeof(device_storage_type_t<T>) * CHAR_BIT; auto const validity_size = col.nullable() ? 1 : 0; return (element_size + validity_size) * num_rows; } }; /** * @brief Computes size in bits of a span of rows in a strings column. * * Computed as : ((# of rows) * sizeof(offset) * 8) + (total # of characters * 8)) * + * 1 bit per row for validity if applicable. */ template <> __device__ size_type row_size_functor::operator()<string_view>(column_device_view const& col, row_span const& span) { column_device_view const& offsets = col.child(strings_column_view::offsets_column_index); auto const num_rows{span.row_end - span.row_start}; auto const row_start{span.row_start + col.offset()}; auto const row_end{span.row_end + col.offset()}; auto const offsets_size = sizeof(offset_type) * CHAR_BIT; auto const validity_size = col.nullable() ? 1 : 0; auto const chars_size = (offsets.data<offset_type>()[row_end] - offsets.data<offset_type>()[row_start]) * CHAR_BIT; return ((offsets_size + validity_size) * num_rows) + chars_size; } /** * @brief Computes size in bits of a span of rows in a list column. * * Computed as : ((# of rows) * sizeof(offset) * 8) * + * 1 bit per row for validity if applicable. */ template <> __device__ size_type row_size_functor::operator()<list_view>(column_device_view const& col, row_span const& span) { auto const num_rows{span.row_end - span.row_start}; auto const offsets_size = sizeof(offset_type) * CHAR_BIT; auto const validity_size = col.nullable() ? 1 : 0; return (offsets_size + validity_size) * num_rows; } /** * @brief Computes size in bits of a span of rows in a struct column. * * Computed as : 1 bit per row for validity if applicable. */ template <> __device__ size_type row_size_functor::operator()<struct_view>(column_device_view const& col, row_span const& span) { auto const num_rows{span.row_end - span.row_start}; return (col.nullable() ? 1 : 0) * num_rows; // cost of validity } /** * @brief Kernel for computing per-row sizes in bits. * * @param cols An span of column_device_views representing a column hierarchy * @param info An span of column_info structs corresponding the elements in `cols` * @param output Output span of size (# rows) where per-row bit sizes are stored * @param max_branch_depth Maximum depth of the span stack needed per-thread */ __global__ void compute_row_sizes(device_span<column_device_view const> cols, device_span<column_info const> info, device_span<size_type> output, size_type max_branch_depth) { extern __shared__ row_span thread_branch_stacks[]; int const tid = threadIdx.x + blockIdx.x * blockDim.x; auto const num_rows = output.size(); if (tid >= num_rows) { return; } // branch stack. points to the last list prior to branching. row_span* my_branch_stack = thread_branch_stacks + (tid * max_branch_depth); size_type branch_depth{0}; // current row span - always starts at 1 row. row_span cur_span{tid, tid + 1}; // output size size_type& size = output[tid]; size = 0; size_type last_branch_depth{0}; for (size_type idx = 0; idx < cols.size(); idx++) { column_device_view const& col = cols[idx]; // if we've returned from a branch if (info[idx].branch_depth_start < last_branch_depth) { cur_span = my_branch_stack[--branch_depth]; } // if we're entering a new branch. 
// NOTE: this case can happen (a pop and a push by the same column) // when we have a struct<list, list> if (info[idx].branch_depth_end > info[idx].branch_depth_start) { my_branch_stack[branch_depth++] = cur_span; } // if we're back at depth 0, this is a new top-level column, so reset // span info if (info[idx].depth == 0) { branch_depth = 0; last_branch_depth = 0; cur_span = row_span{tid, tid + 1}; } // add the contributing size of this row size += cudf::type_dispatcher(col.type(), row_size_functor{}, col, cur_span); // if this is a list column, update the working span from our offsets if (col.type().id() == type_id::LIST) { column_device_view const& offsets = col.child(lists_column_view::offsets_column_index); auto const base_offset = offsets.data<offset_type>()[col.offset()]; cur_span.row_start = offsets.data<offset_type>()[cur_span.row_start + col.offset()] - base_offset; cur_span.row_end = offsets.data<offset_type>()[cur_span.row_end + col.offset()] - base_offset; } last_branch_depth = info[idx].branch_depth_end; } } } // anonymous namespace /** * @copydoc cudf::detail::row_bit_count * */ std::unique_ptr<column> row_bit_count(table_view const& t, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { // no rows if (t.num_rows() <= 0) { return cudf::make_empty_column(data_type{type_id::INT32}); } // flatten the hierarchy and determine some information about it. std::vector<cudf::column_view> cols; std::vector<column_info> info; hierarchy_info h_info; flatten_hierarchy(t.begin(), t.end(), cols, info, h_info, stream); CUDF_EXPECTS(info.size() == cols.size(), "Size/info mismatch"); // create output buffer and view auto output = cudf::make_fixed_width_column( data_type{type_id::INT32}, t.num_rows(), mask_state::UNALLOCATED, stream, mr); mutable_column_view mcv = output->mutable_view(); // simple case. if we have no complex types (lists, strings, etc), the per-row size is already // trivially computed if (h_info.complex_type_count <= 0) { thrust::fill(rmm::exec_policy(stream), mcv.begin<size_type>(), mcv.end<size_type>(), h_info.simple_per_row_size); return output; } // create a contiguous block of column_device_views auto d_cols = contiguous_copy_column_device_views<column_device_view>(cols, stream); // move stack info to the gpu rmm::device_uvector<column_info> d_info(info.size(), stream); CUDA_TRY(cudaMemcpyAsync(d_info.data(), info.data(), sizeof(column_info) * info.size(), cudaMemcpyHostToDevice, stream.value())); // each thread needs to maintain a stack of row spans of size max_branch_depth. we will use // shared memory to do this rather than allocating a potentially gigantic temporary buffer // of memory of size (# input rows * sizeof(row_span) * max_branch_depth). auto const shmem_per_thread = sizeof(row_span) * h_info.max_branch_depth; int device_id; CUDA_TRY(cudaGetDevice(&device_id)); int shmem_limit_per_block; CUDA_TRY( cudaDeviceGetAttribute(&shmem_limit_per_block, cudaDevAttrMaxSharedMemoryPerBlock, device_id)); constexpr int max_block_size = 256; auto const block_size = shmem_per_thread != 0 ? std::min(max_block_size, shmem_limit_per_block / static_cast<int>(shmem_per_thread)) : max_block_size; auto const shared_mem_size = shmem_per_thread * block_size; // should we be aborting if we reach some extremely small block size, or just if we hit 0? 
CUDF_EXPECTS(block_size > 0, "Encountered a column hierarchy too complex for row_bit_count"); cudf::detail::grid_1d grid{t.num_rows(), block_size, 1}; compute_row_sizes<<<grid.num_blocks, block_size, shared_mem_size, stream.value()>>>( {std::get<1>(d_cols), cols.size()}, {d_info.data(), info.size()}, {mcv.data<size_type>(), static_cast<std::size_t>(t.num_rows())}, h_info.max_branch_depth); return output; } } // namespace detail /** * @copydoc cudf::row_bit_count * */ std::unique_ptr<column> row_bit_count(table_view const& t, rmm::mr::device_memory_resource* mr) { return detail::row_bit_count(t, rmm::cuda_stream_default, mr); } } // namespace cudf
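The comment block in this file walks through the list<list<int>> example by hand. A host-only sketch that reproduces that arithmetic for row 1 (offsets and leaf values both taken as 32-bit, no validity masks, as in the example), which yields 384 bits:

#include <cstdio>
#include <vector>

int main() {
  // { {{1,2},{3,4},{5,6}}, {{7},{8,9,10},{11,12,13,14}} }
  std::vector<int> l0_offsets{0, 3, 6};
  std::vector<int> l1_offsets{0, 2, 4, 6, 7, 10, 14};

  int row = 1;
  int span_start = row, span_end = row + 1;   // span0 = [1, 2]
  int bits = 0;

  bits += (span_end - span_start) * 32;       // offsets of L0 for this row
  span_start = l0_offsets[span_start];        // span1 = [3, 6]
  span_end = l0_offsets[span_end];

  bits += (span_end - span_start) * 32;       // offsets of L1
  span_start = l1_offsets[span_start];        // span2 = [6, 14]
  span_end = l1_offsets[span_end];

  bits += (span_end - span_start) * 32;       // leaf int32 values

  std::printf("row %d occupies %d bits\n", row, bits);  // prints 384
  return 0;
}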
822e1adccf7053548105f5b06e9ad8d6b00e5826.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <algorithm> #include <cuml/common/cuml_allocator.hpp> #include <iostream> #include <metrics/silhouetteScore.cuh> #include <random> #include "test_utils.h" namespace MLCommon { namespace Metrics { //parameter structure definition struct silhouetteScoreParam { int nRows; int nCols; int nLabels; int metric; double tolerance; }; //test fixture class template <typename LabelT, typename DataT> class silhouetteScoreTest : public ::testing::TestWithParam<silhouetteScoreParam> { protected: //the constructor void SetUp() override { //getting the parameters params = ::testing::TestWithParam<silhouetteScoreParam>::GetParam(); nRows = params.nRows; nCols = params.nCols; nLabels = params.nLabels; int nElements = nRows * nCols; //generating random value test input std::vector<double> h_X(nElements, 0.0); std::vector<int> h_labels(nRows, 0); std::random_device rd; std::default_random_engine dre(rd()); std::uniform_int_distribution<int> intGenerator(0, nLabels - 1); std::uniform_real_distribution<double> realGenerator(0, 100); std::generate(h_X.begin(), h_X.end(), [&]() { return realGenerator(dre); }); std::generate(h_labels.begin(), h_labels.end(), [&]() { return intGenerator(dre); }); //allocating and initializing memory to the GPU CUDA_CHECK(hipStreamCreate(&stream)); MLCommon::allocate(d_X, nElements, true); MLCommon::allocate(d_labels, nElements, true); MLCommon::allocate(sampleSilScore, nElements); MLCommon::updateDevice(d_X, &h_X[0], (int)nElements, stream); MLCommon::updateDevice(d_labels, &h_labels[0], (int)nElements, stream); std::shared_ptr<MLCommon::deviceAllocator> allocator( new defaultDeviceAllocator); //finding the distance matrix device_buffer<double> d_distanceMatrix(allocator, stream, nRows * nRows); device_buffer<char> workspace(allocator, stream, 1); double *h_distanceMatrix = (double *)malloc(nRows * nRows * sizeof(double *)); MLCommon::Distance::pairwiseDistance( d_X, d_X, d_distanceMatrix.data(), nRows, nRows, nCols, workspace, static_cast<Distance::DistanceType>(params.metric), stream); CUDA_CHECK(hipStreamSynchronize(stream)); MLCommon::updateHost(h_distanceMatrix, d_distanceMatrix.data(), nRows * nRows, stream); //finding the bincount array double *binCountArray = (double *)malloc(nLabels * sizeof(double *)); memset(binCountArray, 0, nLabels * sizeof(double)); for (int i = 0; i < nRows; ++i) { binCountArray[h_labels[i]] += 1; } //finding the average intra cluster distance for every element double *a = (double *)malloc(nRows * sizeof(double *)); for (int i = 0; i < nRows; ++i) { int myLabel = h_labels[i]; double sumOfIntraClusterD = 0; for (int j = 0; j < nRows; ++j) { if (h_labels[j] == myLabel) { sumOfIntraClusterD += h_distanceMatrix[i * nRows + j]; } } if (binCountArray[myLabel] <= 1) a[i] = -1; else a[i] = sumOfIntraClusterD / (binCountArray[myLabel] - 1); } //finding the 
average inter cluster distance for every element double *b = (double *)malloc(nRows * sizeof(double *)); for (int i = 0; i < nRows; ++i) { int myLabel = h_labels[i]; double minAvgInterCD = ULLONG_MAX; for (int j = 0; j < nLabels; ++j) { int curClLabel = j; if (curClLabel == myLabel) continue; double avgInterCD = 0; for (int k = 0; k < nRows; ++k) { if (h_labels[k] == curClLabel) { avgInterCD += h_distanceMatrix[i * nRows + k]; } } if (binCountArray[curClLabel]) avgInterCD /= binCountArray[curClLabel]; else avgInterCD = ULLONG_MAX; minAvgInterCD = min(minAvgInterCD, avgInterCD); } b[i] = minAvgInterCD; } //finding the silhouette score for every element double *truthSampleSilScore = (double *)malloc(nRows * sizeof(double *)); for (int i = 0; i < nRows; ++i) { if (a[i] == -1) truthSampleSilScore[i] = 0; else if (a[i] == 0 && b[i] == 0) truthSampleSilScore[i] = 0; else truthSampleSilScore[i] = (b[i] - a[i]) / max(a[i], b[i]); truthSilhouetteScore += truthSampleSilScore[i]; } truthSilhouetteScore /= nRows; //calling the silhouetteScore CUDA implementation computedSilhouetteScore = MLCommon::Metrics::silhouetteScore( d_X, nRows, nCols, d_labels, nLabels, sampleSilScore, allocator, stream, params.metric); } //the destructor void TearDown() override { CUDA_CHECK(hipFree(d_X)); CUDA_CHECK(hipFree(d_labels)); CUDA_CHECK(hipStreamDestroy(stream)); } //declaring the data values silhouetteScoreParam params; int nLabels; DataT *d_X = nullptr; DataT *sampleSilScore = nullptr; LabelT *d_labels = nullptr; int nRows; int nCols; double truthSilhouetteScore = 0; double computedSilhouetteScore = 0; hipStream_t stream; }; //setting test parameter values const std::vector<silhouetteScoreParam> inputs = { {4, 2, 3, 0, 0.00001}, {4, 2, 2, 5, 0.00001}, {8, 8, 3, 4, 0.00001}, {11, 2, 5, 0, 0.00001}, {40, 2, 8, 0, 0.00001}, {12, 7, 3, 2, 0.00001}, {7, 5, 5, 3, 0.00001}}; //writing the test suite typedef silhouetteScoreTest<int, double> silhouetteScoreTestClass; TEST_P(silhouetteScoreTestClass, Result) { ASSERT_NEAR(computedSilhouetteScore, truthSilhouetteScore, params.tolerance); } INSTANTIATE_TEST_CASE_P(silhouetteScore, silhouetteScoreTestClass, ::testing::ValuesIn(inputs)); } //end namespace Metrics } //end namespace MLCommon
822e1adccf7053548105f5b06e9ad8d6b00e5826.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <common/cudart_utils.h> #include <gtest/gtest.h> #include <algorithm> #include <cuml/common/cuml_allocator.hpp> #include <iostream> #include <metrics/silhouetteScore.cuh> #include <random> #include "test_utils.h" namespace MLCommon { namespace Metrics { //parameter structure definition struct silhouetteScoreParam { int nRows; int nCols; int nLabels; int metric; double tolerance; }; //test fixture class template <typename LabelT, typename DataT> class silhouetteScoreTest : public ::testing::TestWithParam<silhouetteScoreParam> { protected: //the constructor void SetUp() override { //getting the parameters params = ::testing::TestWithParam<silhouetteScoreParam>::GetParam(); nRows = params.nRows; nCols = params.nCols; nLabels = params.nLabels; int nElements = nRows * nCols; //generating random value test input std::vector<double> h_X(nElements, 0.0); std::vector<int> h_labels(nRows, 0); std::random_device rd; std::default_random_engine dre(rd()); std::uniform_int_distribution<int> intGenerator(0, nLabels - 1); std::uniform_real_distribution<double> realGenerator(0, 100); std::generate(h_X.begin(), h_X.end(), [&]() { return realGenerator(dre); }); std::generate(h_labels.begin(), h_labels.end(), [&]() { return intGenerator(dre); }); //allocating and initializing memory to the GPU CUDA_CHECK(cudaStreamCreate(&stream)); MLCommon::allocate(d_X, nElements, true); MLCommon::allocate(d_labels, nElements, true); MLCommon::allocate(sampleSilScore, nElements); MLCommon::updateDevice(d_X, &h_X[0], (int)nElements, stream); MLCommon::updateDevice(d_labels, &h_labels[0], (int)nElements, stream); std::shared_ptr<MLCommon::deviceAllocator> allocator( new defaultDeviceAllocator); //finding the distance matrix device_buffer<double> d_distanceMatrix(allocator, stream, nRows * nRows); device_buffer<char> workspace(allocator, stream, 1); double *h_distanceMatrix = (double *)malloc(nRows * nRows * sizeof(double *)); MLCommon::Distance::pairwiseDistance( d_X, d_X, d_distanceMatrix.data(), nRows, nRows, nCols, workspace, static_cast<Distance::DistanceType>(params.metric), stream); CUDA_CHECK(cudaStreamSynchronize(stream)); MLCommon::updateHost(h_distanceMatrix, d_distanceMatrix.data(), nRows * nRows, stream); //finding the bincount array double *binCountArray = (double *)malloc(nLabels * sizeof(double *)); memset(binCountArray, 0, nLabels * sizeof(double)); for (int i = 0; i < nRows; ++i) { binCountArray[h_labels[i]] += 1; } //finding the average intra cluster distance for every element double *a = (double *)malloc(nRows * sizeof(double *)); for (int i = 0; i < nRows; ++i) { int myLabel = h_labels[i]; double sumOfIntraClusterD = 0; for (int j = 0; j < nRows; ++j) { if (h_labels[j] == myLabel) { sumOfIntraClusterD += h_distanceMatrix[i * nRows + j]; } } if (binCountArray[myLabel] <= 1) a[i] = -1; else a[i] = sumOfIntraClusterD / (binCountArray[myLabel] - 1); } //finding the average inter cluster distance for every element double *b 
= (double *)malloc(nRows * sizeof(double *)); for (int i = 0; i < nRows; ++i) { int myLabel = h_labels[i]; double minAvgInterCD = ULLONG_MAX; for (int j = 0; j < nLabels; ++j) { int curClLabel = j; if (curClLabel == myLabel) continue; double avgInterCD = 0; for (int k = 0; k < nRows; ++k) { if (h_labels[k] == curClLabel) { avgInterCD += h_distanceMatrix[i * nRows + k]; } } if (binCountArray[curClLabel]) avgInterCD /= binCountArray[curClLabel]; else avgInterCD = ULLONG_MAX; minAvgInterCD = min(minAvgInterCD, avgInterCD); } b[i] = minAvgInterCD; } //finding the silhouette score for every element double *truthSampleSilScore = (double *)malloc(nRows * sizeof(double *)); for (int i = 0; i < nRows; ++i) { if (a[i] == -1) truthSampleSilScore[i] = 0; else if (a[i] == 0 && b[i] == 0) truthSampleSilScore[i] = 0; else truthSampleSilScore[i] = (b[i] - a[i]) / max(a[i], b[i]); truthSilhouetteScore += truthSampleSilScore[i]; } truthSilhouetteScore /= nRows; //calling the silhouetteScore CUDA implementation computedSilhouetteScore = MLCommon::Metrics::silhouetteScore( d_X, nRows, nCols, d_labels, nLabels, sampleSilScore, allocator, stream, params.metric); } //the destructor void TearDown() override { CUDA_CHECK(cudaFree(d_X)); CUDA_CHECK(cudaFree(d_labels)); CUDA_CHECK(cudaStreamDestroy(stream)); } //declaring the data values silhouetteScoreParam params; int nLabels; DataT *d_X = nullptr; DataT *sampleSilScore = nullptr; LabelT *d_labels = nullptr; int nRows; int nCols; double truthSilhouetteScore = 0; double computedSilhouetteScore = 0; cudaStream_t stream; }; //setting test parameter values const std::vector<silhouetteScoreParam> inputs = { {4, 2, 3, 0, 0.00001}, {4, 2, 2, 5, 0.00001}, {8, 8, 3, 4, 0.00001}, {11, 2, 5, 0, 0.00001}, {40, 2, 8, 0, 0.00001}, {12, 7, 3, 2, 0.00001}, {7, 5, 5, 3, 0.00001}}; //writing the test suite typedef silhouetteScoreTest<int, double> silhouetteScoreTestClass; TEST_P(silhouetteScoreTestClass, Result) { ASSERT_NEAR(computedSilhouetteScore, truthSilhouetteScore, params.tolerance); } INSTANTIATE_TEST_CASE_P(silhouetteScore, silhouetteScoreTestClass, ::testing::ValuesIn(inputs)); } //end namespace Metrics } //end namespace MLCommon
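The host-side reference in this fixture boils down to the standard per-sample silhouette formula s = (b - a) / max(a, b), forced to 0 when the sample is alone in its cluster or when both means are 0. A tiny standalone illustration with made-up values for a (mean intra-cluster distance) and b (smallest mean distance to another cluster):

#include <algorithm>
#include <cstdio>

int main() {
  double a = 2.0;   // mean distance to points in the same cluster
  double b = 5.0;   // smallest mean distance to points of another cluster
  double s = (a == 0.0 && b == 0.0) ? 0.0 : (b - a) / std::max(a, b);
  std::printf("silhouette = %f\n", s);  // prints 0.6
  return 0;
}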
bddb1a4e5480bc72d4a254ceb5a6c5adad39165a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "compute_squared_norm.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *array = NULL; hipMalloc(&array, XSIZE*YSIZE*sizeof(float)); int width = XSIZE; int pitch = 2; int height = YSIZE; float *norm = NULL; hipMalloc(&norm, XSIZE*YSIZE*sizeof(float)); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( compute_squared_norm), dim3(gridBlock),dim3(threadBlock), 0, 0, array,width,pitch,height,norm); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( compute_squared_norm), dim3(gridBlock),dim3(threadBlock), 0, 0, array,width,pitch,height,norm); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( compute_squared_norm), dim3(gridBlock),dim3(threadBlock), 0, 0, array,width,pitch,height,norm); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
bddb1a4e5480bc72d4a254ceb5a6c5adad39165a.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "compute_squared_norm.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *array = NULL; cudaMalloc(&array, XSIZE*YSIZE*sizeof(float)); int width = XSIZE; int pitch = 2; int height = YSIZE; float *norm = NULL; cudaMalloc(&norm, XSIZE*YSIZE*sizeof(float)); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); compute_squared_norm<<<gridBlock,threadBlock>>>(array,width,pitch,height,norm); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { compute_squared_norm<<<gridBlock,threadBlock>>>(array,width,pitch,height,norm); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { compute_squared_norm<<<gridBlock,threadBlock>>>(array,width,pitch,height,norm); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
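Both benchmark harnesses above include compute_squared_norm.cu, which is not part of this pair, so only the call signature (array, width, pitch, height, norm) is known. A hedged guess at what such a kernel could look like — per-column squared norms of a pitched, row-major matrix; the real kernel may partition the work differently:

// One thread per column j: norm[j] = sum_i array[i * pitch + j]^2.
// pitch is taken to be the row stride in elements. Extra threads in y from
// the generic 2D launch above recompute the same value, which is harmless.
__global__ void compute_squared_norm(const float *array, int width, int pitch,
                                     int height, float *norm) {
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (j >= width) return;
    float acc = 0.0f;
    for (int i = 0; i < height; ++i) {
        float v = array[i * pitch + j];
        acc += v * v;
    }
    norm[j] = acc;
}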
cb26edcbe712a91d3080458af0fd862f9e320b7c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "helper_cuda.h" #include <stdio.h> #include <stdarg.h> #include <ctype.h> #include <assert.h> #include <windows.h> __global__ void getTid(int* myTid, int* maxTidPerBlock) { int tid = (blockDim.x * blockIdx.x) + threadIdx.x; // write my own tid myTid[tid] = tid; // TODO: fill myTid with max tid from each block // TODO: write max tid from block into maxTidPerBlock } __global__ void getMaxTid(int* maxTids) { // TODO: find max tid from among all blocks } void __cdecl odprintf(const char *format, ...) { char buf[4096], *p = buf; va_list args; int n; va_start(args, format); n = _vsnprintf(p, sizeof buf - 3, format, args); // buf-3 is room for CR/LF/NUL va_end(args); p += (n < 0) ? sizeof buf - 3 : n; while (p > buf && isspace(p[-1])) *--p = '\0'; *p++ = '\r'; *p++ = '\n'; *p = '\0'; OutputDebugString(buf); } int main() { const int BLOCKS = 4; const int THREADS_PER_BLOCK = 16; hipError_t cudaStatus; int* mtpb = (int*)malloc(BLOCKS * sizeof(int)); assert(NULL != mtpb); // malloc, and zero, device buffers int *d_myTid, *d_mtpb; cudaStatus = hipMalloc(&d_myTid, BLOCKS * THREADS_PER_BLOCK * sizeof(int)); checkCudaErrors(cudaStatus); cudaStatus = hipMemset(d_myTid, 0, BLOCKS * THREADS_PER_BLOCK * sizeof(int)); checkCudaErrors(cudaStatus); cudaStatus = hipMalloc(&d_mtpb, BLOCKS * sizeof(int)); checkCudaErrors(cudaStatus); cudaStatus = hipMemset(d_mtpb, 0, BLOCKS * sizeof(int)); checkCudaErrors(cudaStatus); // launch first kernel hipLaunchKernelGGL(( getTid), dim3(BLOCKS), dim3(THREADS_PER_BLOCK), 0, 0, d_myTid, d_mtpb); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); checkCudaErrors(cudaStatus); // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); checkCudaErrors(cudaStatus); // launch second kernel hipLaunchKernelGGL(( getMaxTid), dim3(1), dim3(BLOCKS), 0, 0, d_mtpb); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); checkCudaErrors(cudaStatus); // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); checkCudaErrors(cudaStatus); cudaStatus = hipMemcpy(mtpb, d_mtpb, BLOCKS * sizeof(int), hipMemcpyDeviceToHost); checkCudaErrors(cudaStatus); for (int i = 0; i < BLOCKS; i++) { odprintf("%d ", mtpb[i]); } // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); checkCudaErrors(cudaStatus); return 0; }
cb26edcbe712a91d3080458af0fd862f9e320b7c.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "helper_cuda.h" #include <stdio.h> #include <stdarg.h> #include <ctype.h> #include <assert.h> #include <windows.h> __global__ void getTid(int* myTid, int* maxTidPerBlock) { int tid = (blockDim.x * blockIdx.x) + threadIdx.x; // write my own tid myTid[tid] = tid; // TODO: fill myTid with max tid from each block // TODO: write max tid from block into maxTidPerBlock } __global__ void getMaxTid(int* maxTids) { // TODO: find max tid from among all blocks } void __cdecl odprintf(const char *format, ...) { char buf[4096], *p = buf; va_list args; int n; va_start(args, format); n = _vsnprintf(p, sizeof buf - 3, format, args); // buf-3 is room for CR/LF/NUL va_end(args); p += (n < 0) ? sizeof buf - 3 : n; while (p > buf && isspace(p[-1])) *--p = '\0'; *p++ = '\r'; *p++ = '\n'; *p = '\0'; OutputDebugString(buf); } int main() { const int BLOCKS = 4; const int THREADS_PER_BLOCK = 16; cudaError_t cudaStatus; int* mtpb = (int*)malloc(BLOCKS * sizeof(int)); assert(NULL != mtpb); // malloc, and zero, device buffers int *d_myTid, *d_mtpb; cudaStatus = cudaMalloc(&d_myTid, BLOCKS * THREADS_PER_BLOCK * sizeof(int)); checkCudaErrors(cudaStatus); cudaStatus = cudaMemset(d_myTid, 0, BLOCKS * THREADS_PER_BLOCK * sizeof(int)); checkCudaErrors(cudaStatus); cudaStatus = cudaMalloc(&d_mtpb, BLOCKS * sizeof(int)); checkCudaErrors(cudaStatus); cudaStatus = cudaMemset(d_mtpb, 0, BLOCKS * sizeof(int)); checkCudaErrors(cudaStatus); // launch first kernel getTid<<<BLOCKS, THREADS_PER_BLOCK>>>(d_myTid, d_mtpb); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); checkCudaErrors(cudaStatus); // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); checkCudaErrors(cudaStatus); // launch second kernel getMaxTid<<<1, BLOCKS>>>(d_mtpb); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); checkCudaErrors(cudaStatus); // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); checkCudaErrors(cudaStatus); cudaStatus = cudaMemcpy(mtpb, d_mtpb, BLOCKS * sizeof(int), cudaMemcpyDeviceToHost); checkCudaErrors(cudaStatus); for (int i = 0; i < BLOCKS; i++) { odprintf("%d ", mtpb[i]); } // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); checkCudaErrors(cudaStatus); return 0; }
6829dc62a0ef7185daba56b1d9d0eb0c783efd16.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void default_function_kernel0(void* __restrict__ A, void* __restrict__ B, void* __restrict__ compute) {
  float compute_local[64];
  __shared__ float A_shared[2048];
  __shared__ float B_shared[2048];
  float A_shared_local[32];
  float B_shared_local[2];
  for (int i_c_init = 0; i_c_init < 32; ++i_c_init) {
    for (int j_c_init = 0; j_c_init < 2; ++j_c_init) {
      compute_local[(((i_c_init * 2) + j_c_init))] = 0.000000e+00f;
    }
  }
  for (int k_outer = 0; k_outer < 16; ++k_outer) {
    __syncthreads();
    #pragma unroll
    for (int ax1_inner = 0; ax1_inner < 32; ++ax1_inner) {
      A_shared[((((((int)threadIdx.y) * 1024) + (ax1_inner * 32)) + ((int)threadIdx.x)))] = ((float*)A)[(((((((int)threadIdx.y) * 16384) + (ax1_inner * 512)) + (k_outer * 32)) + ((int)threadIdx.x)))];
    }
    #pragma unroll
    for (int ax1_inner1 = 0; ax1_inner1 < 32; ++ax1_inner1) {
      B_shared[((((((int)threadIdx.y) * 1024) + (ax1_inner1 * 32)) + ((int)threadIdx.x)))] = ((float*)B)[((((((((int)blockIdx.x) * 32768) + (((int)threadIdx.y) * 16384)) + (ax1_inner1 * 512)) + (k_outer * 32)) + ((int)threadIdx.x)))];
    }
    __syncthreads();
    for (int k_inner = 0; k_inner < 32; ++k_inner) {
      #pragma unroll
      for (int ax1 = 0; ax1 < 32; ++ax1) {
        A_shared_local[(ax1)] = A_shared[((((((int)threadIdx.y) * 1024) + (ax1 * 32)) + k_inner))];
      }
      #pragma unroll
      for (int ax11 = 0; ax11 < 2; ++ax11) {
        B_shared_local[(ax11)] = B_shared[((((((int)threadIdx.x) * 64) + (ax11 * 32)) + k_inner))];
      }
      #pragma unroll
      for (int i_c = 0; i_c < 32; ++i_c) {
        #pragma unroll
        for (int j_c = 0; j_c < 2; ++j_c) {
          compute_local[(((i_c * 2) + j_c))] = (compute_local[(((i_c * 2) + j_c))] + (A_shared_local[(i_c)] * B_shared_local[(j_c)]));
        }
      }
    }
  }
  #pragma unroll
  for (int i_inner_inner = 0; i_inner_inner < 32; ++i_inner_inner) {
    #pragma unroll
    for (int j_inner_inner = 0; j_inner_inner < 2; ++j_inner_inner) {
      ((float*)compute)[((((((((int)threadIdx.y) * 524288) + (i_inner_inner * 16384)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + j_inner_inner))] = compute_local[(((i_inner_inner * 2) + j_inner_inner))];
    }
  }
}
6829dc62a0ef7185daba56b1d9d0eb0c783efd16.cu
extern "C" __global__ void default_function_kernel0(void* __restrict__ A, void* __restrict__ B, void* __restrict__ compute) { float compute_local[64]; __shared__ float A_shared[2048]; __shared__ float B_shared[2048]; float A_shared_local[32]; float B_shared_local[2]; for (int i_c_init = 0; i_c_init < 32; ++i_c_init) { for (int j_c_init = 0; j_c_init < 2; ++j_c_init) { compute_local[(((i_c_init * 2) + j_c_init))] = 0.000000e+00f; } } for (int k_outer = 0; k_outer < 16; ++k_outer) { __syncthreads(); #pragma unroll for (int ax1_inner = 0; ax1_inner < 32; ++ax1_inner) { A_shared[((((((int)threadIdx.y) * 1024) + (ax1_inner * 32)) + ((int)threadIdx.x)))] = ((float*)A)[(((((((int)threadIdx.y) * 16384) + (ax1_inner * 512)) + (k_outer * 32)) + ((int)threadIdx.x)))]; } #pragma unroll for (int ax1_inner1 = 0; ax1_inner1 < 32; ++ax1_inner1) { B_shared[((((((int)threadIdx.y) * 1024) + (ax1_inner1 * 32)) + ((int)threadIdx.x)))] = ((float*)B)[((((((((int)blockIdx.x) * 32768) + (((int)threadIdx.y) * 16384)) + (ax1_inner1 * 512)) + (k_outer * 32)) + ((int)threadIdx.x)))]; } __syncthreads(); for (int k_inner = 0; k_inner < 32; ++k_inner) { #pragma unroll for (int ax1 = 0; ax1 < 32; ++ax1) { A_shared_local[(ax1)] = A_shared[((((((int)threadIdx.y) * 1024) + (ax1 * 32)) + k_inner))]; } #pragma unroll for (int ax11 = 0; ax11 < 2; ++ax11) { B_shared_local[(ax11)] = B_shared[((((((int)threadIdx.x) * 64) + (ax11 * 32)) + k_inner))]; } #pragma unroll for (int i_c = 0; i_c < 32; ++i_c) { #pragma unroll for (int j_c = 0; j_c < 2; ++j_c) { compute_local[(((i_c * 2) + j_c))] = (compute_local[(((i_c * 2) + j_c))] + (A_shared_local[(i_c)] * B_shared_local[(j_c)])); } } } } #pragma unroll for (int i_inner_inner = 0; i_inner_inner < 32; ++i_inner_inner) { #pragma unroll for (int j_inner_inner = 0; j_inner_inner < 2; ++j_inner_inner) { ((float*)compute)[((((((((int)threadIdx.y) * 524288) + (i_inner_inner * 16384)) + (((int)blockIdx.x) * 64)) + (((int)threadIdx.x) * 2)) + j_inner_inner))] = compute_local[(((i_inner_inner * 2) + j_inner_inner))]; } } }
c77f2191c97d7b5b8f822f61c7f07b5dead5f6b6.hip
// !!! This is a file automatically generated by hipify!!! #define BLOCKDIMX 16 #define BLOCKDIMY 8 #define BLOCKDIMZ 8 #define BLOCKSIZEX 32 #define BLOCKSIZEY 128 #define BLOCKSIZEZ 16 // #define BLOCKDIMX 64 // #define BLOCKDIMY 2 // #define BLOCKDIMZ 1 // #define BLOCKSIZEX 128 // #define BLOCKSIZEY 4 // #define BLOCKSIZEZ 2 // Use all constants to debug and get the performance #define DIMX 512 #define DIMY 512 #define DIMZ 512 #define TOTAL (DIMX*DIMY*DIMZ) #define NUMTHREADS (BLOCKDIMX*BLOCKDIMY*BLOCKDIMZ) #define HALO 1 #define OPENEDDIMX (BLOCKSIZEX+2*HALO) #define OPENEDDIMY (BLOCKSIZEY+2*HALO) #define OPENEDDIMZ (BLOCKSIZEZ+2*HALO) #define OPENEDDIMXY (OPENEDDIMX*OPENEDDIMY) #define OPENEDDIMXYZ (OPENEDDIMX*OPENEDDIMY*OPENEDDIMZ) #define CLOSEDDIMX (BLOCKSIZEX) #define CLOSEDDIMY (BLOCKSIZEY) #define CLOSEDDIMZ (BLOCKSIZEZ) #define CLOSEDDIMXY (CLOSEDDIMX*CLOSEDDIMY) #define CLOSEDDIMXYZ (CLOSEDDIMX*CLOSEDDIMY*CLOSEDDIMZ) #define NUMREADING ((OPENEDDIMXYZ / NUMTHREADS) + ((OPENEDDIMXYZ%NUMTHREADS)?1:0)) #define NUMWRITING ((CLOSEDDIMXYZ / NUMTHREADS) + ((CLOSEDDIMXYZ%NUMTHREADS)?1:0)) // #define CORRECTNESS_DATA #define CORRECTNESS_HEAT // #define myclamp(x, value, tx, fx) {return ((x)==(value)) ? (tx):(fx)} #define C0 0.25f #define C1 0.50f #include <iostream> #include <fstream> #include <sstream> #include <iomanip> // std::setfill, std::setw #include <string> // #include <sys/ioctl.h> #include <hip/hip_runtime.h> #include <helper_math.h> // #include <gpu_timer.hpp> using namespace std; //////////////////////////////////////////////////////////////////////////////////////////////////// #define checkLastError() { \ hipError_t error = hipGetLastError(); \ int id; \ hipGetDevice(&id); \ if(error != hipSuccess) { \ printf("Cuda failure error in file '%s' in line %i: '%s' at device %d \n", \ __FILE__,__LINE__, hipGetErrorString(error), id); \ exit(EXIT_FAILURE); \ } \ } //////////////////////////////////////////////////////////////////////////////////////////////////// #define checkReadFile(filename, pData, size) { \ fstream *fs = new fstream; \ fs->open(filename, ios::in|ios::binary); \ if (!fs->is_open()) \ { \ printf("Cannot open file '%s' in file '%s' at line %i\n", \ filename, __FILE__, __LINE__); \ return 1; \ } \ fs->read(reinterpret_cast<char*>(pData), size); \ fs->close(); \ delete fs; \ } //////////////////////////////////////////////////////////////////////////////////////////////////// #define checkWriteFile(filename, pData, size) { \ fstream *fs = new fstream; \ fs->open(filename, ios::out|ios::binary); \ if (!fs->is_open()) \ { \ fprintf(stderr, "Cannot open file '%s' in file '%s' at line %i\n", \ filename, __FILE__, __LINE__); \ return 1; \ } \ fs->write(reinterpret_cast<char*>(pData), size); \ fs->close(); \ delete fs; \ } //////////////////////////////////////////////////////////////////////////////////////////////////// #define at(x, y, z, DIMX, DIMY, DIMZ) ( clamp((int)(z), 0, DIMZ-1)*DIMY*DIMX + \ clamp((int)(y), 0, DIMY-1)*DIMX + \ clamp((int)(x), 0, DIMX-1) ) //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void heatflow_global(float *src, float *dst) { int closed_index_1d, offset_index_1d, global_index_1d; int3 closed_index_3d, offset_index_3d, global_index_3d; offset_index_3d = make_int3(blockIdx.x * BLOCKSIZEX, blockIdx.y * BLOCKSIZEY, blockIdx.z * BLOCKSIZEZ); int index = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; #pragma unroll for(int thisWriting=0; 
thisWriting<NUMWRITING; thisWriting++) { // closed_index_1d = threadIdx.z * blockDim.y * blockDim.x + // threadIdx.y * blockDim.x + // threadIdx.x + closed_index_1d = index + thisWriting*NUMTHREADS; closed_index_3d = make_int3((closed_index_1d % CLOSEDDIMXY % CLOSEDDIMX), (closed_index_1d % CLOSEDDIMXY / CLOSEDDIMX), (closed_index_1d / CLOSEDDIMXY) ); global_index_3d = make_int3((offset_index_3d.x + closed_index_3d.x), (offset_index_3d.y + closed_index_3d.y), (offset_index_3d.z + closed_index_3d.z) ); global_index_1d = global_index_3d.z * DIMY * DIMX + global_index_3d.y * DIMX + global_index_3d.x; if(global_index_3d.z > 0 && global_index_3d.z < (DIMZ-1) && global_index_3d.y > 0 && global_index_3d.y < (DIMY-1) && global_index_3d.x > 0 && global_index_3d.x < (DIMX-1) ) { // dst[at(global_index_3d.x, global_index_3d.y, global_index_3d.z, DIMX, DIMY, DIMZ)] dst[global_index_1d] = C0 * (src[at(global_index_3d.x+0, global_index_3d.y+0, global_index_3d.z+0, DIMX, DIMY, DIMZ)])+ C1 * (src[at(global_index_3d.x-1, global_index_3d.y+0, global_index_3d.z+0, DIMX, DIMY, DIMZ)] + src[at(global_index_3d.x+1, global_index_3d.y+0, global_index_3d.z+0, DIMX, DIMY, DIMZ)] + src[at(global_index_3d.x+0, global_index_3d.y-1, global_index_3d.z+0, DIMX, DIMY, DIMZ)] + src[at(global_index_3d.x+0, global_index_3d.y+1, global_index_3d.z+0, DIMX, DIMY, DIMZ)] + src[at(global_index_3d.x+0, global_index_3d.y+0, global_index_3d.z-1, DIMX, DIMY, DIMZ)] + src[at(global_index_3d.x+0, global_index_3d.y+0, global_index_3d.z+1, DIMX, DIMY, DIMZ)]); } } } //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void heatflow_shared(float *src, float *dst) { int opened_index_1d, closed_index_1d, offset_index_1d, global_index_1d; int3 opened_index_3d, closed_index_3d, offset_index_3d, global_index_3d; offset_index_3d = make_int3(blockIdx.x * BLOCKSIZEX, blockIdx.y * BLOCKSIZEY, blockIdx.z * BLOCKSIZEZ); __shared__ float sharedMem[OPENEDDIMZ][OPENEDDIMY][OPENEDDIMX]; float result; int index = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; #pragma unroll for(int thisReading=0; thisReading<NUMREADING; thisReading++) { // opened_index_1d = threadIdx.z * blockDim.y * blockDim.x + // threadIdx.y * blockDim.x + // threadIdx.x + opened_index_1d = index + thisReading * NUMTHREADS; opened_index_3d = make_int3((opened_index_1d % OPENEDDIMXY % OPENEDDIMX), (opened_index_1d % OPENEDDIMXY / OPENEDDIMX), (opened_index_1d / OPENEDDIMXY) ); global_index_3d = make_int3((offset_index_3d.x + opened_index_3d.x - HALO), (offset_index_3d.y + opened_index_3d.y - HALO), (offset_index_3d.z + opened_index_3d.z - HALO) ); global_index_1d = global_index_3d.z * DIMY * DIMX + global_index_3d.y * DIMX + global_index_3d.x; if(opened_index_3d.z < OPENEDDIMZ) { if(global_index_3d.z >= 0 && global_index_3d.z < (DIMZ) && global_index_3d.y >= 0 && global_index_3d.y < (DIMY) && global_index_3d.x >= 0 && global_index_3d.x < (DIMX) ) { sharedMem[opened_index_3d.z][opened_index_3d.y][opened_index_3d.x] = src[global_index_1d]; } } } __syncthreads(); #pragma unroll for(int thisWriting=0; thisWriting<NUMWRITING; thisWriting++) { // closed_index_1d = threadIdx.z * blockDim.y * blockDim.x + // threadIdx.y * blockDim.x + // threadIdx.x + closed_index_1d = index + thisWriting * NUMTHREADS; closed_index_3d = make_int3((closed_index_1d % CLOSEDDIMXY % CLOSEDDIMX), (closed_index_1d % CLOSEDDIMXY / CLOSEDDIMX), (closed_index_1d / CLOSEDDIMXY) ); global_index_3d = make_int3((offset_index_3d.x + 
closed_index_3d.x), (offset_index_3d.y + closed_index_3d.y), (offset_index_3d.z + closed_index_3d.z) ); global_index_1d = global_index_3d.z * DIMY * DIMX + global_index_3d.y * DIMX + global_index_3d.x; result = C0 * (sharedMem[closed_index_3d.z+HALO+0][closed_index_3d.y+HALO+0][closed_index_3d.x+HALO+0])+ C1 * (sharedMem[closed_index_3d.z+HALO+0][closed_index_3d.y+HALO+0][closed_index_3d.x+HALO-1] + sharedMem[closed_index_3d.z+HALO+0][closed_index_3d.y+HALO+0][closed_index_3d.x+HALO+1] + sharedMem[closed_index_3d.z+HALO+0][closed_index_3d.y+HALO-1][closed_index_3d.x+HALO+0] + sharedMem[closed_index_3d.z+HALO+0][closed_index_3d.y+HALO+1][closed_index_3d.x+HALO+0] + sharedMem[closed_index_3d.z+HALO-1][closed_index_3d.y+HALO+0][closed_index_3d.x+HALO+0] + sharedMem[closed_index_3d.z+HALO+1][closed_index_3d.y+HALO+0][closed_index_3d.x+HALO+0]); if(global_index_3d.z > 0 && global_index_3d.z < (DIMZ-1) && global_index_3d.y > 0 && global_index_3d.y < (DIMY-1) && global_index_3d.x > 0 && global_index_3d.x < (DIMX-1) ) { dst[global_index_1d] = result; } } } //////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { printf("-----------------------------------------------------------------------\n"); srand(time(NULL)); // for random number generator hipSetDevice(3);checkLastError(); hipDeviceReset();checkLastError(); // Specify dimensions // Allocate host memory float *h_src = new float[TOTAL]; float *h_dst = new float[TOTAL]; // Allocate device memory float *d_src; float *d_dst; hipMalloc((void**)&d_src, TOTAL*sizeof(float)); checkLastError(); hipMalloc((void**)&d_dst, TOTAL*sizeof(float)); checkLastError(); // Initialize the image source for(int z=0; z<DIMZ; z++) { for(int y=0; y<DIMY; y++) { for(int x=0; x<DIMX; x++) { h_src[z*DIMY*DIMX+y*DIMX+x] = (float)( (int)rand() % 10); // 7; } } } // Transferring to the device memory hipMemcpy(d_src, h_src, TOTAL*sizeof(float), hipMemcpyHostToDevice); checkLastError(); hipMemset(d_dst, 0, TOTAL*sizeof(float));checkLastError(); // parameters for performance eval double flops, gbps, nops, nbp; nbp = 8*4; // # of bytes transferred per point nops = 8.; // # of flops per point int iter = 20; int rightData = 0; int rightHeat = 0; /// Verify the correctness of data // #ifdef CORRECTNESS_DATA hipMemcpy(d_dst, d_src, TOTAL*sizeof(float), hipMemcpyDeviceToDevice); checkLastError(); hipMemcpy(h_dst, d_dst, TOTAL*sizeof(float), hipMemcpyDeviceToHost); checkLastError(); for(int z=0; z<DIMZ && rightData; z++) { for(int y=0; y<DIMY && rightData; y++) { for(int x=0; x<DIMX && rightData; x++) { if(h_src[z*DIMY*DIMX+y*DIMX+x] != h_dst[z*DIMY*DIMX+y*DIMX+x]) { printf("Data does not match at x: %d, y: %d, z: %d\n", x, y, z); rightData = 0; // goto cleanup_data; } } } } if(rightData) printf("Data is correct.\n"); // cleanup_data: // #endif // grid construction dim3 numThreads(BLOCKDIMX, BLOCKDIMY, BLOCKDIMZ); //Dim dim3 numBlocks((DIMX/BLOCKSIZEX)+((DIMX%BLOCKSIZEX)?1:0), //Size for ILP (DIMY/BLOCKSIZEY)+((DIMY%BLOCKSIZEY)?1:0), (DIMZ/BLOCKSIZEZ)+((DIMZ%BLOCKSIZEZ)?1:0)); hipMemset(d_dst, 0, TOTAL*sizeof(float));checkLastError(); // Reset the result memset(h_dst, 0, TOTAL*sizeof(float)); printf("Blockdim (%03d, %03d, %03d); Blocksize (%03d, %03d, %03d);\n", BLOCKDIMX, BLOCKDIMY, BLOCKDIMZ, BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ); // launch kernel // GpuTimer gpu_timer; // gpu_timer.Start(); hipEvent_t begin, end; hipEventCreate(&begin); hipEventCreate(&end); hipEventRecord(begin, 0); for(int n=0; n<iter; 
n++) { // heatflow_global<<<numBlocks, numThreads>>>(d_src, d_dst); hipLaunchKernelGGL(( heatflow_shared), dim3(numBlocks), dim3(numThreads), 0, 0, d_src, d_dst); } // gpu_timer.Stop(); hipDeviceSynchronize(); hipEventRecord(end, 0); hipEventSynchronize(end); float msec; hipEventElapsedTime(&msec, begin, end); checkLastError(); // float msec = gpu_timer.Elapsed(); gbps = nbp*DIMX*DIMY*DIMZ/(msec/1000.)/(1024.*1024.*1024.)*(double)iter; flops = nops*DIMX*DIMY*DIMZ/(msec/1000.)/(1024.*1024.*1024.)*(double)iter; printf("Computing time : %.3f msec, Device memory bandwidth : %.3f GB/s, GFLOPS : %.3f\n", msec, gbps, flops); float* h_ref = new float[DIMX*DIMY*DIMZ]; float tmp, result; // #ifdef CORRECTNESS_HEAT /// Verify the correctness of heat flow, no check at boundary // Golden result for(int z=1; z<(DIMZ-1); z++) { for(int y=1; y<(DIMY-1); y++) { for(int x=1; x<(DIMX-1); x++) { result = C0 * (h_src[at(x+0, y+0, z+0, DIMX, DIMY, DIMZ)])+ C1 * (h_src[at(x-1, y+0, z+0, DIMX, DIMY, DIMZ)] + h_src[at(x+1, y+0, z+0, DIMX, DIMY, DIMZ)] + h_src[at(x+0, y-1, z+0, DIMX, DIMY, DIMZ)] + h_src[at(x+0, y+1, z+0, DIMX, DIMY, DIMZ)] + h_src[at(x+0, y+0, z-1, DIMX, DIMY, DIMZ)] + h_src[at(x+0, y+0, z+1, DIMX, DIMY, DIMZ)]); h_ref[at(x+0, y+0, z+0, DIMX, DIMY, DIMZ)] = result; } } } // Transferring to the host memory hipMemcpy(h_dst, d_dst, TOTAL*sizeof(float), hipMemcpyDeviceToHost); checkLastError(); // Compare result for(int z=1; z<(DIMZ-1) && rightHeat; z++) { for(int y=1; y<(DIMY-1) && rightHeat; y++) { for(int x=1; x<(DIMX-1) && rightHeat; x++) { if(h_ref[z*DIMY*DIMX+y*DIMX+x] != h_dst[z*DIMY*DIMX+y*DIMX+x]) { printf("Solution does not match at x: %d, y: %d, z: %d\n", x, y, z); printf("h_ref (%04.4f), h_dst (%04.4f)\n", h_ref[z*DIMY*DIMX+y*DIMX+x], h_dst[z*DIMY*DIMX+y*DIMX+x]); rightHeat = 0; // goto cleanup_heat; } } } } if(rightHeat) printf("Solution is correct.\n"); // cleanup_heat: // #endif ///!!! Print line // struct winsize w; // ioctl(0, TIOCGWINSZ, &w); // for(int k=0; k<w.ws_col; k++) // printf("-"); printf("\n"); checkLastError(); // cleanup: hipFree(d_src); hipFree(d_dst); free(h_src); free(h_dst); free(h_ref); return 0; }
c77f2191c97d7b5b8f822f61c7f07b5dead5f6b6.cu
#define BLOCKDIMX 16 #define BLOCKDIMY 8 #define BLOCKDIMZ 8 #define BLOCKSIZEX 32 #define BLOCKSIZEY 128 #define BLOCKSIZEZ 16 // #define BLOCKDIMX 64 // #define BLOCKDIMY 2 // #define BLOCKDIMZ 1 // #define BLOCKSIZEX 128 // #define BLOCKSIZEY 4 // #define BLOCKSIZEZ 2 // Use all constants to debug and get the performance #define DIMX 512 #define DIMY 512 #define DIMZ 512 #define TOTAL (DIMX*DIMY*DIMZ) #define NUMTHREADS (BLOCKDIMX*BLOCKDIMY*BLOCKDIMZ) #define HALO 1 #define OPENEDDIMX (BLOCKSIZEX+2*HALO) #define OPENEDDIMY (BLOCKSIZEY+2*HALO) #define OPENEDDIMZ (BLOCKSIZEZ+2*HALO) #define OPENEDDIMXY (OPENEDDIMX*OPENEDDIMY) #define OPENEDDIMXYZ (OPENEDDIMX*OPENEDDIMY*OPENEDDIMZ) #define CLOSEDDIMX (BLOCKSIZEX) #define CLOSEDDIMY (BLOCKSIZEY) #define CLOSEDDIMZ (BLOCKSIZEZ) #define CLOSEDDIMXY (CLOSEDDIMX*CLOSEDDIMY) #define CLOSEDDIMXYZ (CLOSEDDIMX*CLOSEDDIMY*CLOSEDDIMZ) #define NUMREADING ((OPENEDDIMXYZ / NUMTHREADS) + ((OPENEDDIMXYZ%NUMTHREADS)?1:0)) #define NUMWRITING ((CLOSEDDIMXYZ / NUMTHREADS) + ((CLOSEDDIMXYZ%NUMTHREADS)?1:0)) // #define CORRECTNESS_DATA #define CORRECTNESS_HEAT // #define myclamp(x, value, tx, fx) {return ((x)==(value)) ? (tx):(fx)} #define C0 0.25f #define C1 0.50f #include <iostream> #include <fstream> #include <sstream> #include <iomanip> // std::setfill, std::setw #include <string> // #include <sys/ioctl.h> #include <cuda.h> #include <helper_math.h> // #include <gpu_timer.hpp> using namespace std; //////////////////////////////////////////////////////////////////////////////////////////////////// #define checkLastError() { \ cudaError_t error = cudaGetLastError(); \ int id; \ cudaGetDevice(&id); \ if(error != cudaSuccess) { \ printf("Cuda failure error in file '%s' in line %i: '%s' at device %d \n", \ __FILE__,__LINE__, cudaGetErrorString(error), id); \ exit(EXIT_FAILURE); \ } \ } //////////////////////////////////////////////////////////////////////////////////////////////////// #define checkReadFile(filename, pData, size) { \ fstream *fs = new fstream; \ fs->open(filename, ios::in|ios::binary); \ if (!fs->is_open()) \ { \ printf("Cannot open file '%s' in file '%s' at line %i\n", \ filename, __FILE__, __LINE__); \ return 1; \ } \ fs->read(reinterpret_cast<char*>(pData), size); \ fs->close(); \ delete fs; \ } //////////////////////////////////////////////////////////////////////////////////////////////////// #define checkWriteFile(filename, pData, size) { \ fstream *fs = new fstream; \ fs->open(filename, ios::out|ios::binary); \ if (!fs->is_open()) \ { \ fprintf(stderr, "Cannot open file '%s' in file '%s' at line %i\n", \ filename, __FILE__, __LINE__); \ return 1; \ } \ fs->write(reinterpret_cast<char*>(pData), size); \ fs->close(); \ delete fs; \ } //////////////////////////////////////////////////////////////////////////////////////////////////// #define at(x, y, z, DIMX, DIMY, DIMZ) ( clamp((int)(z), 0, DIMZ-1)*DIMY*DIMX + \ clamp((int)(y), 0, DIMY-1)*DIMX + \ clamp((int)(x), 0, DIMX-1) ) //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void heatflow_global(float *src, float *dst) { int closed_index_1d, offset_index_1d, global_index_1d; int3 closed_index_3d, offset_index_3d, global_index_3d; offset_index_3d = make_int3(blockIdx.x * BLOCKSIZEX, blockIdx.y * BLOCKSIZEY, blockIdx.z * BLOCKSIZEZ); int index = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; #pragma unroll for(int thisWriting=0; thisWriting<NUMWRITING; thisWriting++) { // closed_index_1d = threadIdx.z 
* blockDim.y * blockDim.x + // threadIdx.y * blockDim.x + // threadIdx.x + closed_index_1d = index + thisWriting*NUMTHREADS; closed_index_3d = make_int3((closed_index_1d % CLOSEDDIMXY % CLOSEDDIMX), (closed_index_1d % CLOSEDDIMXY / CLOSEDDIMX), (closed_index_1d / CLOSEDDIMXY) ); global_index_3d = make_int3((offset_index_3d.x + closed_index_3d.x), (offset_index_3d.y + closed_index_3d.y), (offset_index_3d.z + closed_index_3d.z) ); global_index_1d = global_index_3d.z * DIMY * DIMX + global_index_3d.y * DIMX + global_index_3d.x; if(global_index_3d.z > 0 && global_index_3d.z < (DIMZ-1) && global_index_3d.y > 0 && global_index_3d.y < (DIMY-1) && global_index_3d.x > 0 && global_index_3d.x < (DIMX-1) ) { // dst[at(global_index_3d.x, global_index_3d.y, global_index_3d.z, DIMX, DIMY, DIMZ)] dst[global_index_1d] = C0 * (src[at(global_index_3d.x+0, global_index_3d.y+0, global_index_3d.z+0, DIMX, DIMY, DIMZ)])+ C1 * (src[at(global_index_3d.x-1, global_index_3d.y+0, global_index_3d.z+0, DIMX, DIMY, DIMZ)] + src[at(global_index_3d.x+1, global_index_3d.y+0, global_index_3d.z+0, DIMX, DIMY, DIMZ)] + src[at(global_index_3d.x+0, global_index_3d.y-1, global_index_3d.z+0, DIMX, DIMY, DIMZ)] + src[at(global_index_3d.x+0, global_index_3d.y+1, global_index_3d.z+0, DIMX, DIMY, DIMZ)] + src[at(global_index_3d.x+0, global_index_3d.y+0, global_index_3d.z-1, DIMX, DIMY, DIMZ)] + src[at(global_index_3d.x+0, global_index_3d.y+0, global_index_3d.z+1, DIMX, DIMY, DIMZ)]); } } } //////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void heatflow_shared(float *src, float *dst) { int opened_index_1d, closed_index_1d, offset_index_1d, global_index_1d; int3 opened_index_3d, closed_index_3d, offset_index_3d, global_index_3d; offset_index_3d = make_int3(blockIdx.x * BLOCKSIZEX, blockIdx.y * BLOCKSIZEY, blockIdx.z * BLOCKSIZEZ); __shared__ float sharedMem[OPENEDDIMZ][OPENEDDIMY][OPENEDDIMX]; float result; int index = threadIdx.z * blockDim.y * blockDim.x + threadIdx.y * blockDim.x + threadIdx.x; #pragma unroll for(int thisReading=0; thisReading<NUMREADING; thisReading++) { // opened_index_1d = threadIdx.z * blockDim.y * blockDim.x + // threadIdx.y * blockDim.x + // threadIdx.x + opened_index_1d = index + thisReading * NUMTHREADS; opened_index_3d = make_int3((opened_index_1d % OPENEDDIMXY % OPENEDDIMX), (opened_index_1d % OPENEDDIMXY / OPENEDDIMX), (opened_index_1d / OPENEDDIMXY) ); global_index_3d = make_int3((offset_index_3d.x + opened_index_3d.x - HALO), (offset_index_3d.y + opened_index_3d.y - HALO), (offset_index_3d.z + opened_index_3d.z - HALO) ); global_index_1d = global_index_3d.z * DIMY * DIMX + global_index_3d.y * DIMX + global_index_3d.x; if(opened_index_3d.z < OPENEDDIMZ) { if(global_index_3d.z >= 0 && global_index_3d.z < (DIMZ) && global_index_3d.y >= 0 && global_index_3d.y < (DIMY) && global_index_3d.x >= 0 && global_index_3d.x < (DIMX) ) { sharedMem[opened_index_3d.z][opened_index_3d.y][opened_index_3d.x] = src[global_index_1d]; } } } __syncthreads(); #pragma unroll for(int thisWriting=0; thisWriting<NUMWRITING; thisWriting++) { // closed_index_1d = threadIdx.z * blockDim.y * blockDim.x + // threadIdx.y * blockDim.x + // threadIdx.x + closed_index_1d = index + thisWriting * NUMTHREADS; closed_index_3d = make_int3((closed_index_1d % CLOSEDDIMXY % CLOSEDDIMX), (closed_index_1d % CLOSEDDIMXY / CLOSEDDIMX), (closed_index_1d / CLOSEDDIMXY) ); global_index_3d = make_int3((offset_index_3d.x + closed_index_3d.x), (offset_index_3d.y + closed_index_3d.y), 
(offset_index_3d.z + closed_index_3d.z) ); global_index_1d = global_index_3d.z * DIMY * DIMX + global_index_3d.y * DIMX + global_index_3d.x; result = C0 * (sharedMem[closed_index_3d.z+HALO+0][closed_index_3d.y+HALO+0][closed_index_3d.x+HALO+0])+ C1 * (sharedMem[closed_index_3d.z+HALO+0][closed_index_3d.y+HALO+0][closed_index_3d.x+HALO-1] + sharedMem[closed_index_3d.z+HALO+0][closed_index_3d.y+HALO+0][closed_index_3d.x+HALO+1] + sharedMem[closed_index_3d.z+HALO+0][closed_index_3d.y+HALO-1][closed_index_3d.x+HALO+0] + sharedMem[closed_index_3d.z+HALO+0][closed_index_3d.y+HALO+1][closed_index_3d.x+HALO+0] + sharedMem[closed_index_3d.z+HALO-1][closed_index_3d.y+HALO+0][closed_index_3d.x+HALO+0] + sharedMem[closed_index_3d.z+HALO+1][closed_index_3d.y+HALO+0][closed_index_3d.x+HALO+0]); if(global_index_3d.z > 0 && global_index_3d.z < (DIMZ-1) && global_index_3d.y > 0 && global_index_3d.y < (DIMY-1) && global_index_3d.x > 0 && global_index_3d.x < (DIMX-1) ) { dst[global_index_1d] = result; } } } //////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { printf("-----------------------------------------------------------------------\n"); srand(time(NULL)); // for random number generator cudaSetDevice(3);checkLastError(); cudaDeviceReset();checkLastError(); // Specify dimensions // Allocate host memory float *h_src = new float[TOTAL]; float *h_dst = new float[TOTAL]; // Allocate device memory float *d_src; float *d_dst; cudaMalloc((void**)&d_src, TOTAL*sizeof(float)); checkLastError(); cudaMalloc((void**)&d_dst, TOTAL*sizeof(float)); checkLastError(); // Initialize the image source for(int z=0; z<DIMZ; z++) { for(int y=0; y<DIMY; y++) { for(int x=0; x<DIMX; x++) { h_src[z*DIMY*DIMX+y*DIMX+x] = (float)( (int)rand() % 10); // 7; } } } // Transferring to the device memory cudaMemcpy(d_src, h_src, TOTAL*sizeof(float), cudaMemcpyHostToDevice); checkLastError(); cudaMemset(d_dst, 0, TOTAL*sizeof(float));checkLastError(); // parameters for performance eval double flops, gbps, nops, nbp; nbp = 8*4; // # of bytes transferred per point nops = 8.; // # of flops per point int iter = 20; int rightData = 0; int rightHeat = 0; /// Verify the correctness of data // #ifdef CORRECTNESS_DATA cudaMemcpy(d_dst, d_src, TOTAL*sizeof(float), cudaMemcpyDeviceToDevice); checkLastError(); cudaMemcpy(h_dst, d_dst, TOTAL*sizeof(float), cudaMemcpyDeviceToHost); checkLastError(); for(int z=0; z<DIMZ && rightData; z++) { for(int y=0; y<DIMY && rightData; y++) { for(int x=0; x<DIMX && rightData; x++) { if(h_src[z*DIMY*DIMX+y*DIMX+x] != h_dst[z*DIMY*DIMX+y*DIMX+x]) { printf("Data does not match at x: %d, y: %d, z: %d\n", x, y, z); rightData = 0; // goto cleanup_data; } } } } if(rightData) printf("Data is correct.\n"); // cleanup_data: // #endif // grid construction dim3 numThreads(BLOCKDIMX, BLOCKDIMY, BLOCKDIMZ); //Dim dim3 numBlocks((DIMX/BLOCKSIZEX)+((DIMX%BLOCKSIZEX)?1:0), //Size for ILP (DIMY/BLOCKSIZEY)+((DIMY%BLOCKSIZEY)?1:0), (DIMZ/BLOCKSIZEZ)+((DIMZ%BLOCKSIZEZ)?1:0)); cudaMemset(d_dst, 0, TOTAL*sizeof(float));checkLastError(); // Reset the result memset(h_dst, 0, TOTAL*sizeof(float)); printf("Blockdim (%03d, %03d, %03d); Blocksize (%03d, %03d, %03d);\n", BLOCKDIMX, BLOCKDIMY, BLOCKDIMZ, BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ); // launch kernel // GpuTimer gpu_timer; // gpu_timer.Start(); cudaEvent_t begin, end; cudaEventCreate(&begin); cudaEventCreate(&end); cudaEventRecord(begin, 0); for(int n=0; n<iter; n++) { // heatflow_global<<<numBlocks, 
numThreads>>>(d_src, d_dst); heatflow_shared<<<numBlocks, numThreads>>>(d_src, d_dst); } // gpu_timer.Stop(); cudaDeviceSynchronize(); cudaEventRecord(end, 0); cudaEventSynchronize(end); float msec; cudaEventElapsedTime(&msec, begin, end); checkLastError(); // float msec = gpu_timer.Elapsed(); gbps = nbp*DIMX*DIMY*DIMZ/(msec/1000.)/(1024.*1024.*1024.)*(double)iter; flops = nops*DIMX*DIMY*DIMZ/(msec/1000.)/(1024.*1024.*1024.)*(double)iter; printf("Computing time : %.3f msec, Device memory bandwidth : %.3f GB/s, GFLOPS : %.3f\n", msec, gbps, flops); float* h_ref = new float[DIMX*DIMY*DIMZ]; float tmp, result; // #ifdef CORRECTNESS_HEAT /// Verify the correctness of heat flow, no check at boundary // Golden result for(int z=1; z<(DIMZ-1); z++) { for(int y=1; y<(DIMY-1); y++) { for(int x=1; x<(DIMX-1); x++) { result = C0 * (h_src[at(x+0, y+0, z+0, DIMX, DIMY, DIMZ)])+ C1 * (h_src[at(x-1, y+0, z+0, DIMX, DIMY, DIMZ)] + h_src[at(x+1, y+0, z+0, DIMX, DIMY, DIMZ)] + h_src[at(x+0, y-1, z+0, DIMX, DIMY, DIMZ)] + h_src[at(x+0, y+1, z+0, DIMX, DIMY, DIMZ)] + h_src[at(x+0, y+0, z-1, DIMX, DIMY, DIMZ)] + h_src[at(x+0, y+0, z+1, DIMX, DIMY, DIMZ)]); h_ref[at(x+0, y+0, z+0, DIMX, DIMY, DIMZ)] = result; } } } // Transferring to the host memory cudaMemcpy(h_dst, d_dst, TOTAL*sizeof(float), cudaMemcpyDeviceToHost); checkLastError(); // Compare result for(int z=1; z<(DIMZ-1) && rightHeat; z++) { for(int y=1; y<(DIMY-1) && rightHeat; y++) { for(int x=1; x<(DIMX-1) && rightHeat; x++) { if(h_ref[z*DIMY*DIMX+y*DIMX+x] != h_dst[z*DIMY*DIMX+y*DIMX+x]) { printf("Solution does not match at x: %d, y: %d, z: %d\n", x, y, z); printf("h_ref (%04.4f), h_dst (%04.4f)\n", h_ref[z*DIMY*DIMX+y*DIMX+x], h_dst[z*DIMY*DIMX+y*DIMX+x]); rightHeat = 0; // goto cleanup_heat; } } } } if(rightHeat) printf("Solution is correct.\n"); // cleanup_heat: // #endif ///!!! Print line // struct winsize w; // ioctl(0, TIOCGWINSZ, &w); // for(int k=0; k<w.ws_col; k++) // printf("-"); printf("\n"); checkLastError(); // cleanup: cudaFree(d_src); cudaFree(d_dst); free(h_src); free(h_dst); free(h_ref); return 0; }
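// A small host-only sanity check of the tiling arithmetic used by heatflow_shared
// above, assuming the same macro values (BLOCKDIM* = 16x8x8 threads per block,
// BLOCKSIZE* = 32x128x16 output points per block, HALO = 1). It only reproduces the
// NUMWRITING / NUMREADING computation; nothing here is part of the original file.
#include <cstdio>

int main() {
  const int numThreads = 16 * 8 * 8;                        // BLOCKDIMX * BLOCKDIMY * BLOCKDIMZ
  const int closed     = 32 * 128 * 16;                     // points each block writes
  const int opened     = (32 + 2) * (128 + 2) * (16 + 2);   // points each block reads (tile + halo)
  printf("writes per thread (NUMWRITING): %d\n", closed / numThreads + ((closed % numThreads) ? 1 : 0));
  printf("reads  per thread (NUMREADING): %d\n", opened / numThreads + ((opened % numThreads) ? 1 : 0));
  return 0;
}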
adf1fa0da6dca1e221c00143d3d10acad8a22940.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "thread_block.hpp"

namespace gpu_planning {

__host__ __device__ ThreadBlock1d::ThreadBlock1d() : x_{0}, dim_x_{0} {}

__host__ __device__ ThreadBlock1d::ThreadBlock1d(int x, int dim_x)
    : x_{x}, dim_x_{dim_x} {}

__host__ __device__ int ThreadBlock1d::x() const { return x_; }

__host__ __device__ int ThreadBlock1d::dim_x() const { return dim_x_; }

__host__ __device__ void ThreadBlock1d::sync() const {
#ifdef __CUDA_ARCH__
  __syncthreads();
#endif
}

__host__ __device__ ThreadBlock2d::ThreadBlock2d()
    : x_{0}, y_{0}, dim_x_{0}, dim_y_{0} {}

__host__ __device__ ThreadBlock2d::ThreadBlock2d(int x, int y, int dim_x, int dim_y)
    : x_{x}, y_{y}, dim_x_{dim_x}, dim_y_{dim_y} {}

__host__ __device__ int ThreadBlock2d::x() const { return x_; }

__host__ __device__ int ThreadBlock2d::y() const { return y_; }

__host__ __device__ int ThreadBlock2d::dim_x() const { return dim_x_; }

__host__ __device__ int ThreadBlock2d::dim_y() const { return dim_y_; }

__host__ __device__ void ThreadBlock2d::sync() const {
#ifdef __CUDA_ARCH__
  __syncthreads();
#endif
}

__host__ __device__ ThreadBlock3d::ThreadBlock3d()
    : x_{0}, y_{0}, z_{0}, dim_x_{0}, dim_y_{0}, dim_z_{0} {}

__host__ __device__ ThreadBlock3d::ThreadBlock3d(int x, int y, int z, int dim_x, int dim_y, int dim_z)
    : x_{x}, y_{y}, z_{z}, dim_x_{dim_x}, dim_y_{dim_y}, dim_z_{dim_z} {}

__host__ ThreadBlock3d ThreadBlock3d::host() { return ThreadBlock3d(0, 0, 0, 1, 1, 1); }

__device__ ThreadBlock3d ThreadBlock3d::device_current() {
  return ThreadBlock3d(threadIdx.x, threadIdx.y, threadIdx.z, blockDim.x, blockDim.y, blockDim.z);
}

__host__ __device__ int ThreadBlock3d::x() const { return x_; }

__host__ __device__ int ThreadBlock3d::y() const { return y_; }

__host__ __device__ int ThreadBlock3d::z() const { return z_; }

__host__ __device__ int ThreadBlock3d::dim_x() const { return dim_x_; }

__host__ __device__ int ThreadBlock3d::dim_y() const { return dim_y_; }

__host__ __device__ int ThreadBlock3d::dim_z() const { return dim_z_; }

__host__ __device__ ThreadBlock1d ThreadBlock3d::to_1d() const {
  return ThreadBlock1d((z_ * dim_y_ + y_) * dim_x_ + x_, dim_x_ * dim_y_ * dim_z_);
}

__host__ __device__ void ThreadBlock3d::sync() const {
#ifdef __CUDA_ARCH__
  __syncthreads();
#endif
}

__host__ __device__ ThreadBlock2d ThreadBlock3d::slice_z() const {
  return ThreadBlock2d(x_, y_, dim_x_, dim_y_);
}

}  // namespace gpu_planning
adf1fa0da6dca1e221c00143d3d10acad8a22940.cu
#include "thread_block.hpp" namespace gpu_planning { __host__ __device__ ThreadBlock1d::ThreadBlock1d() : x_{0}, dim_x_{0} {} __host__ __device__ ThreadBlock1d::ThreadBlock1d(int x, int dim_x) : x_{x}, dim_x_{dim_x} {} __host__ __device__ int ThreadBlock1d::x() const { return x_; } __host__ __device__ int ThreadBlock1d::dim_x() const { return dim_x_; } __host__ __device__ void ThreadBlock1d::sync() const { #ifdef __CUDA_ARCH__ __syncthreads(); #endif } __host__ __device__ ThreadBlock2d::ThreadBlock2d() : x_{0}, y_{0}, dim_x_{0}, dim_y_{0} {} __host__ __device__ ThreadBlock2d::ThreadBlock2d(int x, int y, int dim_x, int dim_y) : x_{x}, y_{y}, dim_x_{dim_x}, dim_y_{dim_y} {} __host__ __device__ int ThreadBlock2d::x() const { return x_; } __host__ __device__ int ThreadBlock2d::y() const { return y_; } __host__ __device__ int ThreadBlock2d::dim_x() const { return dim_x_; } __host__ __device__ int ThreadBlock2d::dim_y() const { return dim_y_; } __host__ __device__ void ThreadBlock2d::sync() const { #ifdef __CUDA_ARCH__ __syncthreads(); #endif } __host__ __device__ ThreadBlock3d::ThreadBlock3d() : x_{0}, y_{0}, z_{0}, dim_x_{0}, dim_y_{0}, dim_z_{0} {} __host__ __device__ ThreadBlock3d::ThreadBlock3d(int x, int y, int z, int dim_x, int dim_y, int dim_z) : x_{x}, y_{y}, z_{z}, dim_x_{dim_x}, dim_y_{dim_y}, dim_z_{dim_z} {} __host__ ThreadBlock3d ThreadBlock3d::host() { return ThreadBlock3d(0, 0, 0, 1, 1, 1); } __device__ ThreadBlock3d ThreadBlock3d::device_current() { return ThreadBlock3d(threadIdx.x, threadIdx.y, threadIdx.z, blockDim.x, blockDim.y, blockDim.z); } __host__ __device__ int ThreadBlock3d::x() const { return x_; } __host__ __device__ int ThreadBlock3d::y() const { return y_; } __host__ __device__ int ThreadBlock3d::z() const { return z_; } __host__ __device__ int ThreadBlock3d::dim_x() const { return dim_x_; } __host__ __device__ int ThreadBlock3d::dim_y() const { return dim_y_; } __host__ __device__ int ThreadBlock3d::dim_z() const { return dim_z_; } __host__ __device__ ThreadBlock1d ThreadBlock3d::to_1d() const { return ThreadBlock1d((z_ * dim_y_ + y_) * dim_x_ + x_, dim_x_ * dim_y_ * dim_z_); } __host__ __device__ void ThreadBlock3d::sync() const { #ifdef __CUDA_ARCH__ __syncthreads(); #endif } __host__ __device__ ThreadBlock2d ThreadBlock3d::slice_z() const { return ThreadBlock2d(x_, y_, dim_x_, dim_y_); } } // namespace gpu_planning
10e651e9ec714c823b9a506109ecd5f130350911.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // A very basic raytracer example. // [compile] // nvcc -o raytracer_cuda raytracer_cuda.cu // [/compile] // [ignore] // Copyright (C) 2012 www.scratchapixel.com // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. // [/ignore] #include <cstdlib> #include <cstdio> #include "math.h" #include <fstream> #include <vector> #include <iostream> #include <cassert> #include <time.h> using namespace std; #define GIG 1000000000 #define CPG 2.9 // Cycles per GHz -- Adjust to your computer // Assertion to check for errors #define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } #if defined __linux__ || defined __APPLE__ // "Compiled for Linux #else // Windows doesn't define these values by default, Linux does #define M_PI 3.141592653589793 #define INFINITY 1e8 #endif #ifdef __HIPCC__ #define CUDA_CALLABLE_MEMBER __host__ __device__ #else #define CUDA_CALLABLE_MEMBER #endif #define PRINT_TIME 1 // This variable controls the maximum recursion depth #define MAX_RAY_DEPTH 5 template<typename T> class Vec3 { public: T x, y, z; CUDA_CALLABLE_MEMBER Vec3() : x(T(0)), y(T(0)), z(T(0)) {} CUDA_CALLABLE_MEMBER Vec3(T xx) : x(xx), y(xx), z(xx) {} CUDA_CALLABLE_MEMBER Vec3(T xx, T yy, T zz) : x(xx), y(yy), z(zz) {} CUDA_CALLABLE_MEMBER Vec3& normalize() { T nor2 = length2(); if (nor2 > 0) { T invNor = 1 / sqrt(nor2); x *= invNor, y *= invNor, z *= invNor; } return *this; } CUDA_CALLABLE_MEMBER Vec3<T> operator * (const T &f) const { return Vec3<T>(x * f, y * f, z * f); } CUDA_CALLABLE_MEMBER Vec3<T> operator * (const Vec3<T> &v) const { return Vec3<T>(x * v.x, y * v.y, z * v.z); } CUDA_CALLABLE_MEMBER T dot(const Vec3<T> &v) const { return x * v.x + y * v.y + z * v.z; } CUDA_CALLABLE_MEMBER Vec3<T> operator - (const Vec3<T> &v) const { return Vec3<T>(x - v.x, y - v.y, z - v.z); } CUDA_CALLABLE_MEMBER Vec3<T> operator + (const Vec3<T> &v) const { return Vec3<T>(x + v.x, y + v.y, z + v.z); } CUDA_CALLABLE_MEMBER Vec3<T>& operator += (const Vec3<T> &v) { x += v.x, y += v.y, z += v.z; return *this; } CUDA_CALLABLE_MEMBER Vec3<T>& operator *= (const Vec3<T> &v) { x *= v.x, y *= v.y, z *= v.z; return *this; } CUDA_CALLABLE_MEMBER Vec3<T> operator - () const { return Vec3<T>(-x, -y, -z); } CUDA_CALLABLE_MEMBER T length2() const { return x * x + y * y + z * z; } CUDA_CALLABLE_MEMBER T length() const { return sqrt(length2()); } CUDA_CALLABLE_MEMBER friend std::ostream & operator << (std::ostream &os, const Vec3<T> &v) { os << "[" << v.x << " " << v.y << " " << v.z << "]"; return os; } }; typedef Vec3<float> Vec3f; class Sphere { public: Vec3f center; /// position of the sphere float radius, radius2; /// sphere radius and radius^2 
Vec3f surfaceColor, emissionColor; /// surface color and emission (light) float transparency, reflection; /// surface transparency and reflectivity CUDA_CALLABLE_MEMBER Sphere( const Vec3f &c, // consts here const float &r, const Vec3f &sc, const float &refl = 0, const float &transp = 0, const Vec3f &ec = 0) : center(c), radius(r), radius2(r * r), surfaceColor(sc), emissionColor(ec), transparency(transp), reflection(refl) { /* empty */ } // Compute a ray-sphere intersection using the geometric solution CUDA_CALLABLE_MEMBER bool intersect(const Vec3f &rayorig, const Vec3f &raydir, float &t0, float &t1) const { Vec3f l = center - rayorig; float tca = l.dot(raydir); if (tca < 0) return false; float d2 = l.dot(l) - tca * tca; if (d2 > radius2) return false; float thc = sqrt(radius2 - d2); t0 = tca - thc; t1 = tca + thc; return true; } }; //cpu time calculation struct timespec diff(struct timespec start, struct timespec end) { struct timespec temp; if ((end.tv_nsec-start.tv_nsec)<0) { temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec; } else { temp.tv_sec = end.tv_sec-start.tv_sec; temp.tv_nsec = end.tv_nsec-start.tv_nsec; } return temp; } double timeInSeconds(struct timespec* t) { return (t->tv_sec + 1.0e-9 * (t->tv_nsec)); } __host__ __device__ float mix(const float &a, const float &b, const float &mix) { return b * mix + a * (1 - mix); } /**************************************************************************************************************************************************/ /***************************THIS IS THE CPU HOST VERSION***************************/ // This is the main trace function. It takes a ray as argument (defined by its origin // and direction). We test if this ray intersects any of the geometry in the scene. // If the ray intersects an object, we compute the intersection point, the normal // at the intersection point, and shade this point using this information. // Shading depends on the surface property (is it transparent, reflective, diffuse). // The function returns a color for the ray. If the ray intersects an object that // is the color of the object at the intersection point, otherwise it returns // the background color. __host__ __device__ Vec3f trace( const Vec3f &rayorig, const Vec3f &raydir, //const std::vector<Sphere> &spheres, const Sphere* spheres, const int &depth) { //if (raydir.length() != 1) std::cerr << "Error " << raydir << std::endl; float tnear = INFINITY; const Sphere* sphere = NULL; int size = 6; // find intersection of this ray with the sphere in the scene for (unsigned i = 0; i < size; ++i) { float t0 = INFINITY, t1 = INFINITY; if (spheres[i].intersect(rayorig, raydir, t0, t1)) { if (t0 < 0) t0 = t1; if (t0 < tnear) { tnear = t0; sphere = &spheres[i]; } } } // if there's no intersection return black or background color if (!sphere) return Vec3f(2); Vec3f surfaceColor = 0; // color of the ray/surfaceof the object intersected by the ray Vec3f phit = rayorig + raydir * tnear; // point of intersection Vec3f nhit = phit - sphere->center; // normal at the intersection point nhit.normalize(); // normalize normal direction // If the normal and the view direction are not opposite to each other // reverse the normal direction. That also means we are inside the sphere so set // the inside bool to true. Finally reverse the sign of IdotN which we want // positive. 
float bias = 1e-4; // add some bias to the point from which we will be tracing bool inside = false; if (raydir.dot(nhit) > 0) nhit = -nhit, inside = true; //if ((sphere->transparency > 0 || sphere->reflection > 0) && depth < MAX_RAY_DEPTH) { float facingratio = -raydir.dot(nhit); // change the mix value to tweak the effect float fresneleffect = mix(pow(1 - facingratio, 3), 1, 0.1); // compute reflection direction (not need to normalize because all vectors // are already normalized) Vec3f refldir = raydir - nhit * 2 * raydir.dot(nhit); refldir.normalize(); Vec3f reflection = 0;// trace(phit + nhit * bias, refldir, spheres, depth + 1); Vec3f refraction = 0; // if the sphere is also transparent compute refraction ray (transmission) //if (sphere->transparency) { float ior = 1.1, eta = (inside) ? ior : 1 / ior; // are we inside or outside the surface? float cosi = -nhit.dot(raydir); float k = 1 - eta * eta * (1 - cosi * cosi); Vec3f refrdir = raydir * eta + nhit * (eta * cosi - sqrt(k)); refrdir.normalize(); //refraction = trace(phit - nhit * bias, refrdir, spheres, depth + 1); //} // the result is a mix of reflection and refraction (if the sphere is transparent) surfaceColor = ( reflection * fresneleffect + refraction * (1 - fresneleffect) * sphere->transparency) * sphere->surfaceColor; //} //else { // it's a diffuse object, no need to raytrace any further for (unsigned i = 0; i < size; ++i) { if (spheres[i].emissionColor.x > 0) { // this is a light Vec3f transmission = 1; Vec3f lightDirection = spheres[i].center - phit; lightDirection.normalize(); for (unsigned j = 0; j < size; ++j) { if (i != j) { float t0, t1; if (spheres[j].intersect(phit + nhit * bias, lightDirection, t0, t1)) { transmission = 0; break; } } } surfaceColor += sphere->surfaceColor * transmission * max(float(0), nhit.dot(lightDirection)) * spheres[i].emissionColor; } } //} return surfaceColor + sphere->emissionColor; } // Main rendering function. We compute a camera ray for each pixel of the image // trace it and return a color. If the ray hits a sphere, we return the color of the // sphere at the intersection point, else we return the background color. void render(Sphere* spheres) { unsigned width = 1920, height = 1080; Vec3f *image = new Vec3f[width * height], *pixel = image; float invWidth = 1 / float(width), invHeight = 1 / float(height); float fov = 30, aspectratio = width / float(height); float angle = tan(M_PI * 0.5 * fov / 180.); // Trace rays for (unsigned y = 0; y < height; ++y) { for (unsigned x = 0; x < width; ++x, ++pixel) { float xx = (2 * ((x + 0.5) * invWidth) - 1) * angle * aspectratio; float yy = (1 - 2 * ((y + 0.5) * invHeight)) * angle; Vec3f raydir(xx, yy, -1); raydir.normalize(); *pixel = trace(Vec3f(0), raydir, spheres, 0); } } // Save result to a PPM image (keep these flags if you compile under Windows) std::ofstream ofs("./cpu_untitled.ppm", std::ios::out | std::ios::binary); ofs << "P6\n" << width << " " << height << "\n255\n"; for (unsigned i = 0; i <width*height; i++) { ofs << (unsigned char)(::min(float(1), image[i].x) * 255) << (unsigned char)(::min(float(1), image[i].y) * 255) << (unsigned char)(::min(float(1), image[i].z) * 255); } ofs.close(); delete [] image; } /**************************************************************************************************************************************************/ /*********************************CUDA PART BEGINS********************************/ //CUDA Trace Function first calculates all of the 63 rays with a max depth of 5. 
//The information is stores are the ray origins and ray directions of each ray //and also the sphere that it intersected with __device__ Vec3f trace_cuda( const Vec3f &rayorig, const Vec3f &raydir, const Sphere *spheres, Vec3f* rays_orig, Vec3f* rays_dir, Vec3f* surfaceColors, const Sphere** inter_spheres, int k) { int sphere_size = 6; float bias = 1e-4; // add some bias to the point from which we will be tracing for(int i = 0; i < 31; i++){ float tnear = INFINITY; // find intersection of this ray with the sphere in the scene for (int j = 0; j < sphere_size; j++) { float t0 = INFINITY, t1 = INFINITY; if (spheres[j].intersect(rays_orig[i*k], rays_dir[i*k], t0, t1)) { if (t0 < 0) t0 = t1; if (t0 < tnear) { tnear = t0; inter_spheres[i*k] = &spheres[j]; } else inter_spheres[i*k] = NULL; } } // if there's no intersection then the resulting rays will be the same ray if (!inter_spheres[i*k]){ rays_orig[(i*2+1)*k] = rays_orig[i*k]; rays_dir[(i*2+1)*k] = rays_dir[i*k]; rays_orig[(i*2+2)*k] = rays_orig[i*k]; rays_dir[(i*2+2)*k] = rays_dir[i*k]; continue; } Vec3f phit = rays_orig[i*k] + rays_dir[i*k] * tnear; // point of intersection Vec3f nhit = phit - inter_spheres[i*k]->center; // normal at the intersection point nhit.normalize(); // normalize normal direction // If the normal and the view direction are not opposite to each other // reverse the normal direction. That also means we are inside the sphere so set // the inside bool to true. Finally reverse the sign of IdotN which we want // positive. bool inside = false; if (rays_dir[i*k].dot(nhit) > 0) nhit = -nhit, inside = true; if ((inter_spheres[i*k]->transparency > 0 || inter_spheres[i*k]->reflection > 0)){ Vec3f refldir = rays_dir[i*k] - nhit * 2 * rays_dir[i*k].dot(nhit); refldir.normalize(); rays_dir[(i*2+1)*k] = refldir; rays_orig[(i*2+1)*k] = phit + nhit * bias; if (inter_spheres[i*k]->transparency){ float ior = 1.1, eta = (inside) ? ior : 1 / ior; // are we inside or outside the surface? 
float cosi = -nhit.dot(rays_dir[i*k]); float k = 1 - eta * eta * (1 - cosi * cosi); Vec3f refrdir = rays_dir[i*k] * eta + nhit * (eta * cosi - sqrtf(k)); refrdir.normalize(); rays_dir[(i*2+2)*k] = refrdir; rays_orig[(i*2+2)*k] = phit - nhit * bias; } } } for(int i = 31; i < 63; i++){ float tnear = INFINITY; // find intersection of this ray with the sphere in the scene for (unsigned j = 0; j < sphere_size; ++j) { float t0 = INFINITY, t1 = INFINITY; if (spheres[j].intersect(rays_orig[i], rays_dir[i], t0, t1)) { if (t0 < 0) t0 = t1; if (t0 < tnear) { tnear = t0; inter_spheres[i*k] = &spheres[j]; } else inter_spheres[i*k] = NULL; } } } //Go Backwards to find the surface color and return final color int start, end, depth; depth = 5; end = 63; start = end - (int)pow(2,depth) - 1; for(int i = depth; i >= 0; i--){ for(int i = start; i < end; i--){ Vec3f surfaceColor = 0; if(!inter_spheres[i*k]) surfaceColors[i*k] = Vec3f(2); else{ // this is a light float tnear = INFINITY; float t0 = INFINITY, t1 = INFINITY; if (inter_spheres[i*k]->intersect(rays_orig[i*k], rays_dir[i*k], t0, t1)) { if (t0 < 0) t0 = t1; if (t0 < tnear) { tnear = t0; inter_spheres[i*k] = &spheres[j]; } else inter_spheres[i*k] = NULL; } Vec3f phit = rays_orig[i*k] + rays_dir[i*k] * tnear; // point of intersection Vec3f nhit = phit - inter_spheres[i*k]->center; // normal at the intersection point nhit.normalize(); // normalize normal direction Vec3f transmission = 1; Vec3f lightDirection = spheres[j].center - phit; lightDirection.normalize(); for (int k = 0; k < sphere_size; k++) { if (j != k) { float t0, t1; if (spheres[j].intersect(phit + nhit * bias, lightDirection, t0, t1)) { transmission = 0; break; } } } surfaceColor += inter_spheres[i]->surfaceColor * transmission * fmaxf(float(0), nhit.dot(lightDirection)) * spheres[i].emissionColor; } } surfaceColors[i] = surfaceColor; } } } for(int i = 30; i >= 0; i--){ Vec3f surfaceColor = 0; if(!inter_spheres[i]) surfaceColors[i] = Vec3f(2); else{ float tnear = INFINITY; float t0 = INFINITY, t1 = INFINITY; if (inter_spheres[i]->intersect(rays_orig[i], rays_dir[i], t0, t1)) { if (t0 < 0) t0 = t1; if (t0 < tnear) { tnear = t0; } } Vec3f phit = rays_orig[i] + rays_dir[i] * tnear; // point of intersection Vec3f nhit = phit - inter_spheres[i]->center; // normal at the intersection point nhit.normalize(); // normalize normal direction // If the normal and the view direction are not opposite to each other // reverse the normal direction. That also means we are inside the sphere so set // the inside bool to true. Finally reverse the sign of IdotN which we want // positive. 
if (rays_dir[i].dot(nhit) > 0) nhit = -nhit; float facingratio = -rays_dir[i].dot(nhit); // change the mix value to tweak the effect float fresneleffect = mix(powf((float)(1 - facingratio), 3.0), 1, 0.1); // the result is a mix of reflection and refraction (if the sphere is transparent) surfaceColors[i] = ( surfaceColors[i*2+1] * fresneleffect + surfaceColors[i*2+2] * (1 - fresneleffect) * inter_spheres[i]->transparency) * inter_spheres[i]->surfaceColor; } } return surfaceColors[0]; } //CUDA Rener Function __global__ void render_cuda(Sphere *d_spheres, int height, int width, Vec3f *pixel, Vec3f* d_rays_orig, Vec3f* d_rays_dir, Vec3f* d_surfaceColors, Sphere** d_inter_spheres) { float invWidth = 1 / float(width), invHeight = 1 / float(height); float fov = 30, aspectratio = width / float(height); float angle = tanf(M_PI * 0.5 * fov / 180.); int y = blockIdx.y * blockDim.y + threadIdx.y; int x = blockIdx.x * blockDim.x + threadIdx.x; int k = y*width+x; float xx = (2 * ((x + 0.5) * invWidth) - 1) * angle * aspectratio; float yy = (1 - 2 * ((y + 0.5) * invHeight)) * angle; Vec3f raydir(xx, yy, -1); raydir.normalize(); if(k < height*width) pixel[k] = trace_cuda(Vec3f(0), raydir, d_spheres, d_rays_orig, d_rays_dir, d_surfaceColors, d_inter_spheres, k); //for(int i = 0; i < 63; i++) //trace(Vec3f(0),raydir,d_spheres,0); } /**************************************************************************************************************************************************/ // In the main function, we will create the scene which is composed of 5 spheres // and 1 light (which is also a sphere). Then, once the scene description is complete // we render that scene, by calling the render() function. int main(int argc, char **argv) { // dimensions of image int width = 7680; int height = 4320; int num_pixels = width*height; //CPU timing variables struct timespec t1, t2; float cpu_time; // GPU Timing variables hipEvent_t start, stop; float elapsed_gpu; srand48(13); std::vector<Sphere> spheres; // position, radius, surface color, reflectivity, transparency, emission color spheres.push_back(Sphere(Vec3f( 0.0, -10004, -20), 10000, Vec3f(0.20, 0.20, 0.20), 0, 0.0)); spheres.push_back(Sphere(Vec3f( 0.0, 0, -20), 4, Vec3f(1.00, 0.32, 0.36), 1, 0.5)); spheres.push_back(Sphere(Vec3f( 5.0, -1, -15), 2, Vec3f(0.90, 0.76, 0.46), 1, 0.0)); spheres.push_back(Sphere(Vec3f( 5.0, 0, -25), 3, Vec3f(0.65, 0.77, 0.97), 1, 0.0)); spheres.push_back(Sphere(Vec3f(-5.5, 0, -15), 3, Vec3f(0.90, 0.90, 0.90), 1, 0.0)); // light spheres.push_back(Sphere(Vec3f( 0.0, 20, -30), 3, Vec3f(0.00, 0.00, 0.00), 0, 0.0, Vec3f(3))); //CPU num times trace is run /*int *num; int numSize = width*height*sizeof(int); num = (int*)malloc(numSize); memset(num,0,numSize);*/ //CPU answers /*clock_gettime(CLOCK_MONOTONIC,&t1); render(spheres); clock_gettime(CLOCK_MONOTONIC,&t2); cpu_time = timeInSeconds(&t2) - timeInSeconds(&t1); cout << "CPU time is: " << cpu_time << " (sec)" << endl;*/ //CPU Arrays Vec3f *image; Sphere *h_spheres; Vec3f *h_rays_orig; Vec3f *h_rays_dir; Sphere **h_inter_spheres; Vec3f *h_surfaceColors; //Image Array int flatArraySize = width * height * sizeof(Vec3f); image = (Vec3f*)malloc(flatArraySize); //Sphere Array int num_spheres = 6; int size = num_spheres * sizeof(Sphere); h_spheres = (Sphere*)malloc(size); h_spheres[0] = Sphere(Vec3f( 0.0, -10004, -20), 10000, Vec3f(0.20, 0.20, 0.20), 0, 0.0); h_spheres[1] = Sphere(Vec3f( 0.0, 0, -20), 4, Vec3f(1.00, 0.32, 0.36), 1, 0.5); h_spheres[2] = Sphere(Vec3f( 5.0, -1, -15), 2, Vec3f(0.90, 
0.76, 0.46), 1, 0.0); h_spheres[3] = Sphere(Vec3f( 5.0, 0, -25), 3, Vec3f(0.65, 0.77, 0.97), 1, 0.0); h_spheres[4] = Sphere(Vec3f(-5.5, 0, -15), 3, Vec3f(0.90, 0.90, 0.90), 1, 0.0); h_spheres[5] = Sphere(Vec3f( 0.0, 20, -30), 3, Vec3f(0.00, 0.00, 0.00), 0, 0.0, Vec3f(3)); int vec_store_size = sizeof(Vec3f)*63*num_pixels; int sphere_ptr_size = sizeof(Sphere*)*63*num_pixels; h_rays_orig = (Vec3f*)malloc(vec_store_size); h_rays_dir = (Vec3f*)malloc(vec_store_size); h_surfaceColors = (Vec3f*)malloc(vec_store_size); h_inter_spheres = (Sphere**)malloc(sphere_ptr_size); // arrays on the GPU Vec3f *pixel; Sphere *d_spheres; Vec3f *d_rays_orig; Vec3f *d_rays_dir; Sphere **d_inter_spheres; Vec3f *d_surfaceColors; // allocate GPU memory CUDA_SAFE_CALL(hipMalloc((void**)&d_spheres, size)); CUDA_SAFE_CALL(hipMalloc((void**)&pixel, flatArraySize)); CUDA_SAFE_CALL(hipMalloc((void**)&d_rays_orig, vec_store_size)); CUDA_SAFE_CALL(hipMalloc((void**)&d_rays_dir, vec_store_size)); CUDA_SAFE_CALL(hipMalloc((void**)&d_surfaceColors, vec_store_size)); CUDA_SAFE_CALL(hipMalloc((void**)&d_inter_spheres, sphere_ptr_size)); // Transfer the arrays to the GPU memory CUDA_SAFE_CALL(hipMemcpy(d_spheres, h_spheres, size, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(pixel, image, flatArraySize, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_rays_orig, h_rays_orig, vec_store_size, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_rays_dir, h_rays_dir, vec_store_size, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_surfaceColors, h_surfaceColors, vec_store_size, hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_inter_spheres, h_inter_spheres, sphere_ptr_size, hipMemcpyHostToDevice)); //call kernel function and choose dimension of the problem dim3 dimGrid(128,72); dim3 dimBlock(32, 32); //the rendering computation #if PRINT_TIME // Create the cuda events hipEventCreate(&start); hipEventCreate(&stop); // Record event on the default stream hipEventRecord(start, 0); #endif //Calculate the portion of image by layers and combine in the end //Automatically synced between each layer hipLaunchKernelGGL(( render_cuda), dim3(dimGrid), dim3(dimBlock), 0, 0, d_spheres, height, width, pixel, d_rays_orig, d_rays_dir, d_surfaceColors, d_inter_spheres); //render(spheres); #if PRINT_TIME // Stop and destroy the timer hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsed_gpu, start, stop); printf("\nGPU calculation time: %f (msec)\n", elapsed_gpu); hipEventDestroy(start); hipEventDestroy(stop); #endif // Transfer the results back to the host CUDA_SAFE_CALL(hipMemcpy(image, pixel, flatArraySize, hipMemcpyDeviceToHost)); //Free Memory // Save result to a PPM image (keep these flags if you compile under Windows) std::ofstream ofs("./cuda_untitled.ppm", std::ios::out | std::ios::binary); ofs << "P6\n" << width << " " << height << "\n255\n"; for (unsigned i = 0; i < width * height; ++i) { ofs << (unsigned char)(::min(float(1), image[i].x) * 255) << (unsigned char)(::min(float(1), image[i].y) * 255) << (unsigned char)(::min(float(1), image[i].z) * 255); } ofs.close(); return 0; }
10e651e9ec714c823b9a506109ecd5f130350911.cu
// A very basic raytracer example. // [compile] // nvcc -o raytracer_cuda raytracer_cuda.cu // [/compile] // [ignore] // Copyright (C) 2012 www.scratchapixel.com // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. // [/ignore] #include <cstdlib> #include <cstdio> #include "math.h" #include <fstream> #include <vector> #include <iostream> #include <cassert> #include <time.h> using namespace std; #define GIG 1000000000 #define CPG 2.9 // Cycles per GHz -- Adjust to your computer // Assertion to check for errors #define CUDA_SAFE_CALL(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"CUDA_SAFE_CALL: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } #if defined __linux__ || defined __APPLE__ // "Compiled for Linux #else // Windows doesn't define these values by default, Linux does #define M_PI 3.141592653589793 #define INFINITY 1e8 #endif #ifdef __CUDACC__ #define CUDA_CALLABLE_MEMBER __host__ __device__ #else #define CUDA_CALLABLE_MEMBER #endif #define PRINT_TIME 1 // This variable controls the maximum recursion depth #define MAX_RAY_DEPTH 5 template<typename T> class Vec3 { public: T x, y, z; CUDA_CALLABLE_MEMBER Vec3() : x(T(0)), y(T(0)), z(T(0)) {} CUDA_CALLABLE_MEMBER Vec3(T xx) : x(xx), y(xx), z(xx) {} CUDA_CALLABLE_MEMBER Vec3(T xx, T yy, T zz) : x(xx), y(yy), z(zz) {} CUDA_CALLABLE_MEMBER Vec3& normalize() { T nor2 = length2(); if (nor2 > 0) { T invNor = 1 / sqrt(nor2); x *= invNor, y *= invNor, z *= invNor; } return *this; } CUDA_CALLABLE_MEMBER Vec3<T> operator * (const T &f) const { return Vec3<T>(x * f, y * f, z * f); } CUDA_CALLABLE_MEMBER Vec3<T> operator * (const Vec3<T> &v) const { return Vec3<T>(x * v.x, y * v.y, z * v.z); } CUDA_CALLABLE_MEMBER T dot(const Vec3<T> &v) const { return x * v.x + y * v.y + z * v.z; } CUDA_CALLABLE_MEMBER Vec3<T> operator - (const Vec3<T> &v) const { return Vec3<T>(x - v.x, y - v.y, z - v.z); } CUDA_CALLABLE_MEMBER Vec3<T> operator + (const Vec3<T> &v) const { return Vec3<T>(x + v.x, y + v.y, z + v.z); } CUDA_CALLABLE_MEMBER Vec3<T>& operator += (const Vec3<T> &v) { x += v.x, y += v.y, z += v.z; return *this; } CUDA_CALLABLE_MEMBER Vec3<T>& operator *= (const Vec3<T> &v) { x *= v.x, y *= v.y, z *= v.z; return *this; } CUDA_CALLABLE_MEMBER Vec3<T> operator - () const { return Vec3<T>(-x, -y, -z); } CUDA_CALLABLE_MEMBER T length2() const { return x * x + y * y + z * z; } CUDA_CALLABLE_MEMBER T length() const { return sqrt(length2()); } CUDA_CALLABLE_MEMBER friend std::ostream & operator << (std::ostream &os, const Vec3<T> &v) { os << "[" << v.x << " " << v.y << " " << v.z << "]"; return os; } }; typedef Vec3<float> Vec3f; class Sphere { public: Vec3f center; /// position of the sphere float radius, radius2; /// sphere radius and radius^2 Vec3f surfaceColor, emissionColor; /// surface color and emission (light) float 
transparency, reflection; /// surface transparency and reflectivity CUDA_CALLABLE_MEMBER Sphere( const Vec3f &c, // consts here const float &r, const Vec3f &sc, const float &refl = 0, const float &transp = 0, const Vec3f &ec = 0) : center(c), radius(r), radius2(r * r), surfaceColor(sc), emissionColor(ec), transparency(transp), reflection(refl) { /* empty */ } // Compute a ray-sphere intersection using the geometric solution CUDA_CALLABLE_MEMBER bool intersect(const Vec3f &rayorig, const Vec3f &raydir, float &t0, float &t1) const { Vec3f l = center - rayorig; float tca = l.dot(raydir); if (tca < 0) return false; float d2 = l.dot(l) - tca * tca; if (d2 > radius2) return false; float thc = sqrt(radius2 - d2); t0 = tca - thc; t1 = tca + thc; return true; } }; //cpu time calculation struct timespec diff(struct timespec start, struct timespec end) { struct timespec temp; if ((end.tv_nsec-start.tv_nsec)<0) { temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec; } else { temp.tv_sec = end.tv_sec-start.tv_sec; temp.tv_nsec = end.tv_nsec-start.tv_nsec; } return temp; } double timeInSeconds(struct timespec* t) { return (t->tv_sec + 1.0e-9 * (t->tv_nsec)); } __host__ __device__ float mix(const float &a, const float &b, const float &mix) { return b * mix + a * (1 - mix); } /**************************************************************************************************************************************************/ /***************************THIS IS THE CPU HOST VERSION***************************/ // This is the main trace function. It takes a ray as argument (defined by its origin // and direction). We test if this ray intersects any of the geometry in the scene. // If the ray intersects an object, we compute the intersection point, the normal // at the intersection point, and shade this point using this information. // Shading depends on the surface property (is it transparent, reflective, diffuse). // The function returns a color for the ray. If the ray intersects an object that // is the color of the object at the intersection point, otherwise it returns // the background color. __host__ __device__ Vec3f trace( const Vec3f &rayorig, const Vec3f &raydir, //const std::vector<Sphere> &spheres, const Sphere* spheres, const int &depth) { //if (raydir.length() != 1) std::cerr << "Error " << raydir << std::endl; float tnear = INFINITY; const Sphere* sphere = NULL; int size = 6; // find intersection of this ray with the sphere in the scene for (unsigned i = 0; i < size; ++i) { float t0 = INFINITY, t1 = INFINITY; if (spheres[i].intersect(rayorig, raydir, t0, t1)) { if (t0 < 0) t0 = t1; if (t0 < tnear) { tnear = t0; sphere = &spheres[i]; } } } // if there's no intersection return black or background color if (!sphere) return Vec3f(2); Vec3f surfaceColor = 0; // color of the ray/surfaceof the object intersected by the ray Vec3f phit = rayorig + raydir * tnear; // point of intersection Vec3f nhit = phit - sphere->center; // normal at the intersection point nhit.normalize(); // normalize normal direction // If the normal and the view direction are not opposite to each other // reverse the normal direction. That also means we are inside the sphere so set // the inside bool to true. Finally reverse the sign of IdotN which we want // positive. 
float bias = 1e-4; // add some bias to the point from which we will be tracing bool inside = false; if (raydir.dot(nhit) > 0) nhit = -nhit, inside = true; //if ((sphere->transparency > 0 || sphere->reflection > 0) && depth < MAX_RAY_DEPTH) { float facingratio = -raydir.dot(nhit); // change the mix value to tweak the effect float fresneleffect = mix(pow(1 - facingratio, 3), 1, 0.1); // compute reflection direction (not need to normalize because all vectors // are already normalized) Vec3f refldir = raydir - nhit * 2 * raydir.dot(nhit); refldir.normalize(); Vec3f reflection = 0;// trace(phit + nhit * bias, refldir, spheres, depth + 1); Vec3f refraction = 0; // if the sphere is also transparent compute refraction ray (transmission) //if (sphere->transparency) { float ior = 1.1, eta = (inside) ? ior : 1 / ior; // are we inside or outside the surface? float cosi = -nhit.dot(raydir); float k = 1 - eta * eta * (1 - cosi * cosi); Vec3f refrdir = raydir * eta + nhit * (eta * cosi - sqrt(k)); refrdir.normalize(); //refraction = trace(phit - nhit * bias, refrdir, spheres, depth + 1); //} // the result is a mix of reflection and refraction (if the sphere is transparent) surfaceColor = ( reflection * fresneleffect + refraction * (1 - fresneleffect) * sphere->transparency) * sphere->surfaceColor; //} //else { // it's a diffuse object, no need to raytrace any further for (unsigned i = 0; i < size; ++i) { if (spheres[i].emissionColor.x > 0) { // this is a light Vec3f transmission = 1; Vec3f lightDirection = spheres[i].center - phit; lightDirection.normalize(); for (unsigned j = 0; j < size; ++j) { if (i != j) { float t0, t1; if (spheres[j].intersect(phit + nhit * bias, lightDirection, t0, t1)) { transmission = 0; break; } } } surfaceColor += sphere->surfaceColor * transmission * max(float(0), nhit.dot(lightDirection)) * spheres[i].emissionColor; } } //} return surfaceColor + sphere->emissionColor; } // Main rendering function. We compute a camera ray for each pixel of the image // trace it and return a color. If the ray hits a sphere, we return the color of the // sphere at the intersection point, else we return the background color. void render(Sphere* spheres) { unsigned width = 1920, height = 1080; Vec3f *image = new Vec3f[width * height], *pixel = image; float invWidth = 1 / float(width), invHeight = 1 / float(height); float fov = 30, aspectratio = width / float(height); float angle = tan(M_PI * 0.5 * fov / 180.); // Trace rays for (unsigned y = 0; y < height; ++y) { for (unsigned x = 0; x < width; ++x, ++pixel) { float xx = (2 * ((x + 0.5) * invWidth) - 1) * angle * aspectratio; float yy = (1 - 2 * ((y + 0.5) * invHeight)) * angle; Vec3f raydir(xx, yy, -1); raydir.normalize(); *pixel = trace(Vec3f(0), raydir, spheres, 0); } } // Save result to a PPM image (keep these flags if you compile under Windows) std::ofstream ofs("./cpu_untitled.ppm", std::ios::out | std::ios::binary); ofs << "P6\n" << width << " " << height << "\n255\n"; for (unsigned i = 0; i <width*height; i++) { ofs << (unsigned char)(std::min(float(1), image[i].x) * 255) << (unsigned char)(std::min(float(1), image[i].y) * 255) << (unsigned char)(std::min(float(1), image[i].z) * 255); } ofs.close(); delete [] image; } /**************************************************************************************************************************************************/ /*********************************CUDA PART BEGINS********************************/ //CUDA Trace Function first calculates all of the 63 rays with a max depth of 5. 
//The information is stores are the ray origins and ray directions of each ray //and also the sphere that it intersected with __device__ Vec3f trace_cuda( const Vec3f &rayorig, const Vec3f &raydir, const Sphere *spheres, Vec3f* rays_orig, Vec3f* rays_dir, Vec3f* surfaceColors, const Sphere** inter_spheres, int k) { int sphere_size = 6; float bias = 1e-4; // add some bias to the point from which we will be tracing for(int i = 0; i < 31; i++){ float tnear = INFINITY; // find intersection of this ray with the sphere in the scene for (int j = 0; j < sphere_size; j++) { float t0 = INFINITY, t1 = INFINITY; if (spheres[j].intersect(rays_orig[i*k], rays_dir[i*k], t0, t1)) { if (t0 < 0) t0 = t1; if (t0 < tnear) { tnear = t0; inter_spheres[i*k] = &spheres[j]; } else inter_spheres[i*k] = NULL; } } // if there's no intersection then the resulting rays will be the same ray if (!inter_spheres[i*k]){ rays_orig[(i*2+1)*k] = rays_orig[i*k]; rays_dir[(i*2+1)*k] = rays_dir[i*k]; rays_orig[(i*2+2)*k] = rays_orig[i*k]; rays_dir[(i*2+2)*k] = rays_dir[i*k]; continue; } Vec3f phit = rays_orig[i*k] + rays_dir[i*k] * tnear; // point of intersection Vec3f nhit = phit - inter_spheres[i*k]->center; // normal at the intersection point nhit.normalize(); // normalize normal direction // If the normal and the view direction are not opposite to each other // reverse the normal direction. That also means we are inside the sphere so set // the inside bool to true. Finally reverse the sign of IdotN which we want // positive. bool inside = false; if (rays_dir[i*k].dot(nhit) > 0) nhit = -nhit, inside = true; if ((inter_spheres[i*k]->transparency > 0 || inter_spheres[i*k]->reflection > 0)){ Vec3f refldir = rays_dir[i*k] - nhit * 2 * rays_dir[i*k].dot(nhit); refldir.normalize(); rays_dir[(i*2+1)*k] = refldir; rays_orig[(i*2+1)*k] = phit + nhit * bias; if (inter_spheres[i*k]->transparency){ float ior = 1.1, eta = (inside) ? ior : 1 / ior; // are we inside or outside the surface? 
				float cosi = -nhit.dot(rays_dir[i*k]);
				float k2 = 1 - eta * eta * (1 - cosi * cosi); // renamed from "k" so the pixel index k is not shadowed
				Vec3f refrdir = rays_dir[i*k] * eta + nhit * (eta * cosi - sqrtf(k2));
				refrdir.normalize();
				rays_dir[(i*2+2)*k] = refrdir;
				rays_orig[(i*2+2)*k] = phit - nhit * bias;
			}
		}
	}
	//find the intersections of the deepest (leaf) rays, 31 to 62
	for(int i = 31; i < 63; i++){
		float tnear = INFINITY;
		// find intersection of this ray with the spheres in the scene
		for (unsigned j = 0; j < sphere_size; ++j) {
			float t0 = INFINITY, t1 = INFINITY;
			if (spheres[j].intersect(rays_orig[i*k], rays_dir[i*k], t0, t1)) {
				if (t0 < 0) t0 = t1;
				if (t0 < tnear) {
					tnear = t0;
					inter_spheres[i*k] = &spheres[j];
				}
				else inter_spheres[i*k] = NULL;
			}
		}
	}
	//Go backwards to find the surface color and return the final color.
	//Leaf rays (31 to 62) have no child rays, so they only receive diffuse shading.
	for(int i = 62; i >= 31; i--){
		Vec3f surfaceColor = 0;
		if(!inter_spheres[i*k]) surfaceColors[i*k] = Vec3f(2);
		else{
			float tnear = INFINITY;
			float t0 = INFINITY, t1 = INFINITY;
			if (inter_spheres[i*k]->intersect(rays_orig[i*k], rays_dir[i*k], t0, t1)) {
				if (t0 < 0) t0 = t1;
				if (t0 < tnear) tnear = t0;
			}
			Vec3f phit = rays_orig[i*k] + rays_dir[i*k] * tnear; // point of intersection
			Vec3f nhit = phit - inter_spheres[i*k]->center; // normal at the intersection point
			nhit.normalize(); // normalize normal direction
			for (int l = 0; l < sphere_size; l++) {
				if (spheres[l].emissionColor.x > 0) {
					// this is a light
					Vec3f transmission = 1;
					Vec3f lightDirection = spheres[l].center - phit;
					lightDirection.normalize();
					for (int m = 0; m < sphere_size; m++) {
						if (l != m) {
							float s0, s1;
							if (spheres[m].intersect(phit + nhit * bias, lightDirection, s0, s1)) {
								transmission = 0;
								break;
							}
						}
					}
					surfaceColor += inter_spheres[i*k]->surfaceColor * transmission * fmaxf(float(0), nhit.dot(lightDirection)) * spheres[l].emissionColor;
				}
			}
			surfaceColors[i*k] = surfaceColor + inter_spheres[i*k]->emissionColor; // include emission, as in the CPU trace
		}
	}
	//Interior rays (30 down to 0) combine the colors of their two child rays.
	//note: this loop keeps the original [i] indexing (no *k), unlike the loops above
	for(int i = 30; i >= 0; i--){
		Vec3f surfaceColor = 0;
		if(!inter_spheres[i]) surfaceColors[i] = Vec3f(2);
		else{
			float tnear = INFINITY;
			float t0 = INFINITY, t1 = INFINITY;
			if (inter_spheres[i]->intersect(rays_orig[i], rays_dir[i], t0, t1)) {
				if (t0 < 0) t0 = t1;
				if (t0 < tnear) {
					tnear = t0;
				}
			}
			Vec3f phit = rays_orig[i] + rays_dir[i] * tnear; // point of intersection
			Vec3f nhit = phit - inter_spheres[i]->center; // normal at the intersection point
			nhit.normalize(); // normalize normal direction
			// If the normal and the view direction are not opposite to each other
			// reverse the normal direction. That also means we are inside the sphere so set
			// the inside bool to true. Finally reverse the sign of IdotN which we want
			// positive.
if (rays_dir[i].dot(nhit) > 0) nhit = -nhit; float facingratio = -rays_dir[i].dot(nhit); // change the mix value to tweak the effect float fresneleffect = mix(powf((float)(1 - facingratio), 3.0), 1, 0.1); // the result is a mix of reflection and refraction (if the sphere is transparent) surfaceColors[i] = ( surfaceColors[i*2+1] * fresneleffect + surfaceColors[i*2+2] * (1 - fresneleffect) * inter_spheres[i]->transparency) * inter_spheres[i]->surfaceColor; } } return surfaceColors[0]; } //CUDA Rener Function __global__ void render_cuda(Sphere *d_spheres, int height, int width, Vec3f *pixel, Vec3f* d_rays_orig, Vec3f* d_rays_dir, Vec3f* d_surfaceColors, Sphere** d_inter_spheres) { float invWidth = 1 / float(width), invHeight = 1 / float(height); float fov = 30, aspectratio = width / float(height); float angle = tanf(M_PI * 0.5 * fov / 180.); int y = blockIdx.y * blockDim.y + threadIdx.y; int x = blockIdx.x * blockDim.x + threadIdx.x; int k = y*width+x; float xx = (2 * ((x + 0.5) * invWidth) - 1) * angle * aspectratio; float yy = (1 - 2 * ((y + 0.5) * invHeight)) * angle; Vec3f raydir(xx, yy, -1); raydir.normalize(); if(k < height*width) pixel[k] = trace_cuda(Vec3f(0), raydir, d_spheres, d_rays_orig, d_rays_dir, d_surfaceColors, d_inter_spheres, k); //for(int i = 0; i < 63; i++) //trace(Vec3f(0),raydir,d_spheres,0); } /**************************************************************************************************************************************************/ // In the main function, we will create the scene which is composed of 5 spheres // and 1 light (which is also a sphere). Then, once the scene description is complete // we render that scene, by calling the render() function. int main(int argc, char **argv) { // dimensions of image int width = 7680; int height = 4320; int num_pixels = width*height; //CPU timing variables struct timespec t1, t2; float cpu_time; // GPU Timing variables cudaEvent_t start, stop; float elapsed_gpu; srand48(13); std::vector<Sphere> spheres; // position, radius, surface color, reflectivity, transparency, emission color spheres.push_back(Sphere(Vec3f( 0.0, -10004, -20), 10000, Vec3f(0.20, 0.20, 0.20), 0, 0.0)); spheres.push_back(Sphere(Vec3f( 0.0, 0, -20), 4, Vec3f(1.00, 0.32, 0.36), 1, 0.5)); spheres.push_back(Sphere(Vec3f( 5.0, -1, -15), 2, Vec3f(0.90, 0.76, 0.46), 1, 0.0)); spheres.push_back(Sphere(Vec3f( 5.0, 0, -25), 3, Vec3f(0.65, 0.77, 0.97), 1, 0.0)); spheres.push_back(Sphere(Vec3f(-5.5, 0, -15), 3, Vec3f(0.90, 0.90, 0.90), 1, 0.0)); // light spheres.push_back(Sphere(Vec3f( 0.0, 20, -30), 3, Vec3f(0.00, 0.00, 0.00), 0, 0.0, Vec3f(3))); //CPU num times trace is run /*int *num; int numSize = width*height*sizeof(int); num = (int*)malloc(numSize); memset(num,0,numSize);*/ //CPU answers /*clock_gettime(CLOCK_MONOTONIC,&t1); render(spheres); clock_gettime(CLOCK_MONOTONIC,&t2); cpu_time = timeInSeconds(&t2) - timeInSeconds(&t1); cout << "CPU time is: " << cpu_time << " (sec)" << endl;*/ //CPU Arrays Vec3f *image; Sphere *h_spheres; Vec3f *h_rays_orig; Vec3f *h_rays_dir; Sphere **h_inter_spheres; Vec3f *h_surfaceColors; //Image Array int flatArraySize = width * height * sizeof(Vec3f); image = (Vec3f*)malloc(flatArraySize); //Sphere Array int num_spheres = 6; int size = num_spheres * sizeof(Sphere); h_spheres = (Sphere*)malloc(size); h_spheres[0] = Sphere(Vec3f( 0.0, -10004, -20), 10000, Vec3f(0.20, 0.20, 0.20), 0, 0.0); h_spheres[1] = Sphere(Vec3f( 0.0, 0, -20), 4, Vec3f(1.00, 0.32, 0.36), 1, 0.5); h_spheres[2] = Sphere(Vec3f( 5.0, -1, -15), 2, Vec3f(0.90, 
0.76, 0.46), 1, 0.0); h_spheres[3] = Sphere(Vec3f( 5.0, 0, -25), 3, Vec3f(0.65, 0.77, 0.97), 1, 0.0); h_spheres[4] = Sphere(Vec3f(-5.5, 0, -15), 3, Vec3f(0.90, 0.90, 0.90), 1, 0.0); h_spheres[5] = Sphere(Vec3f( 0.0, 20, -30), 3, Vec3f(0.00, 0.00, 0.00), 0, 0.0, Vec3f(3)); int vec_store_size = sizeof(Vec3f)*63*num_pixels; int sphere_ptr_size = sizeof(Sphere*)*63*num_pixels; h_rays_orig = (Vec3f*)malloc(vec_store_size); h_rays_dir = (Vec3f*)malloc(vec_store_size); h_surfaceColors = (Vec3f*)malloc(vec_store_size); h_inter_spheres = (Sphere**)malloc(sphere_ptr_size); // arrays on the GPU Vec3f *pixel; Sphere *d_spheres; Vec3f *d_rays_orig; Vec3f *d_rays_dir; Sphere **d_inter_spheres; Vec3f *d_surfaceColors; // allocate GPU memory CUDA_SAFE_CALL(cudaMalloc((void**)&d_spheres, size)); CUDA_SAFE_CALL(cudaMalloc((void**)&pixel, flatArraySize)); CUDA_SAFE_CALL(cudaMalloc((void**)&d_rays_orig, vec_store_size)); CUDA_SAFE_CALL(cudaMalloc((void**)&d_rays_dir, vec_store_size)); CUDA_SAFE_CALL(cudaMalloc((void**)&d_surfaceColors, vec_store_size)); CUDA_SAFE_CALL(cudaMalloc((void**)&d_inter_spheres, sphere_ptr_size)); // Transfer the arrays to the GPU memory CUDA_SAFE_CALL(cudaMemcpy(d_spheres, h_spheres, size, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(pixel, image, flatArraySize, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_rays_orig, h_rays_orig, vec_store_size, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_rays_dir, h_rays_dir, vec_store_size, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_surfaceColors, h_surfaceColors, vec_store_size, cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_inter_spheres, h_inter_spheres, sphere_ptr_size, cudaMemcpyHostToDevice)); //call kernel function and choose dimension of the problem dim3 dimGrid(128,72); dim3 dimBlock(32, 32); //the rendering computation #if PRINT_TIME // Create the cuda events cudaEventCreate(&start); cudaEventCreate(&stop); // Record event on the default stream cudaEventRecord(start, 0); #endif //Calculate the portion of image by layers and combine in the end //Automatically synced between each layer render_cuda<<<dimGrid, dimBlock>>>(d_spheres, height, width, pixel, d_rays_orig, d_rays_dir, d_surfaceColors, d_inter_spheres); //render(spheres); #if PRINT_TIME // Stop and destroy the timer cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsed_gpu, start, stop); printf("\nGPU calculation time: %f (msec)\n", elapsed_gpu); cudaEventDestroy(start); cudaEventDestroy(stop); #endif // Transfer the results back to the host CUDA_SAFE_CALL(cudaMemcpy(image, pixel, flatArraySize, cudaMemcpyDeviceToHost)); //Free Memory // Save result to a PPM image (keep these flags if you compile under Windows) std::ofstream ofs("./cuda_untitled.ppm", std::ios::out | std::ios::binary); ofs << "P6\n" << width << " " << height << "\n255\n"; for (unsigned i = 0; i < width * height; ++i) { ofs << (unsigned char)(std::min(float(1), image[i].x) * 255) << (unsigned char)(std::min(float(1), image[i].y) * 255) << (unsigned char)(std::min(float(1), image[i].z) * 255); } ofs.close(); return 0; }
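// ---------------------------------------------------------------------------
// Editorial note (not part of the original file): Sphere::intersect above uses
// the geometric ray-sphere test. A minimal, self-contained sketch of the same
// test follows; the names raySphereHit and raySphere are hypothetical and are
// not used anywhere in this code base.
struct raySphereHit { bool found; float t0, t1; };

__host__ __device__ inline raySphereHit raySphere(
    float ox, float oy, float oz,   // ray origin
    float dx, float dy, float dz,   // normalized ray direction
    float cx, float cy, float cz,   // sphere center
    float radius2)                  // squared sphere radius
{
    // l = center - origin, tca = projection of l onto the ray direction
    float lx = cx - ox, ly = cy - oy, lz = cz - oz;
    float tca = lx * dx + ly * dy + lz * dz;
    if (tca < 0) return {false, 0.f, 0.f};                  // sphere lies behind the ray
    float d2 = (lx * lx + ly * ly + lz * lz) - tca * tca;   // squared distance from center to the ray
    if (d2 > radius2) return {false, 0.f, 0.f};             // ray passes outside the sphere
    float thc = sqrtf(radius2 - d2);                        // half the chord length
    return {true, tca - thc, tca + thc};                    // entry and exit distances along the ray
}
// For the radius-4 sphere at (0, 0, -20) in the scene above, a ray from the
// origin pointing down -z should report t0 = 16 and t1 = 24.
// ---------------------------------------------------------------------------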
c25c26cc4fdedd1d36705b4354da6f8e26c192a0.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "reference_calc.cpp"
#include "utils.h"

__global__
void gb(const unsigned char* const ic, unsigned char* const oc, int nr, int nc, const float* const filter, const int filterWidth)
{
	const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
	const int thread_1D_pos = thread_2D_pos.y * nc + thread_2D_pos.x;
	if (thread_2D_pos.x >= nc || thread_2D_pos.y >= nr)
		return;
	float result = 0.f;
	for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) {
		for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) {
			int image_r = min(max(thread_2D_pos.y + filter_r, 0), static_cast<int>(nr - 1));
			int image_c = min(max(thread_2D_pos.x + filter_c, 0), static_cast<int>(nc - 1));
			float image_value = static_cast<float>(ic[image_r * nc + image_c]);
			float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2];
			result += image_value * filter_value;
		}
	}
	oc[thread_1D_pos] = result;
}

__global__
void sc(const uchar4* const iprgb, int nr, int nc, unsigned char* const rc, unsigned char* const gc, unsigned char* const bc)
{
	const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
	const int thread_1D_pos = thread_2D_pos.y * nc + thread_2D_pos.x;
	if (thread_2D_pos.x >= nc || thread_2D_pos.y >= nr)
		return;
	rc[thread_1D_pos] = iprgb[thread_1D_pos].x;
	gc[thread_1D_pos] = iprgb[thread_1D_pos].y;
	bc[thread_1D_pos] = iprgb[thread_1D_pos].z;
}

__global__
void recombineChannels(const unsigned char* const rc, const unsigned char* const gc, const unsigned char* const bc, uchar4* const oprgb, int nr, int nc)
{
	const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x, blockIdx.y * blockDim.y + threadIdx.y);
	const int thread_1D_pos = thread_2D_pos.y * nc + thread_2D_pos.x;
	if (thread_2D_pos.x >= nc || thread_2D_pos.y >= nr)
		return;
	unsigned char red   = rc[thread_1D_pos];
	unsigned char green = gc[thread_1D_pos];
	unsigned char blue  = bc[thread_1D_pos];
	uchar4 outputPixel = make_uchar4(red, green, blue, 255);
	oprgb[thread_1D_pos] = outputPixel;
}

unsigned char *d_red, *d_green, *d_blue;
float *d_filter;

void allocateMemoryAndCopyToGPU(const size_t nrImage, const size_t ncImage, const float* const h_filter, const size_t filterWidth)
{
	checkCudaErrors(hipMalloc(&d_red,   sizeof(unsigned char) * nrImage * ncImage));
	checkCudaErrors(hipMalloc(&d_green, sizeof(unsigned char) * nrImage * ncImage));
	checkCudaErrors(hipMalloc(&d_blue,  sizeof(unsigned char) * nrImage * ncImage));
	int num_filter_bytes = sizeof(float) * filterWidth * filterWidth;
	checkCudaErrors(hipMalloc(&d_filter, num_filter_bytes));
	checkCudaErrors(hipMemcpy(d_filter, h_filter, num_filter_bytes, hipMemcpyHostToDevice));
}

void your_gb(const uchar4 * const h_iprgb, uchar4 * const d_iprgb, uchar4* const d_oprgb, const size_t nr, const size_t nc, unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred, const int filterWidth)
{
	const dim3 blockSize(1, 1, 1);
	const dim3 gridSize(nc, nr, 1);
	hipLaunchKernelGGL(( sc), dim3(gridSize), dim3(blockSize), 0, 0, d_iprgb, nr, nc, d_red, d_green, d_blue);
	hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
	hipLaunchKernelGGL(( gb), dim3(gridSize), dim3(blockSize), 0, 0, d_red,   d_redBlurred,   nr, nc, d_filter, filterWidth);
	hipLaunchKernelGGL(( gb), dim3(gridSize), dim3(blockSize), 0, 0, d_green, d_greenBlurred, nr, nc, d_filter, filterWidth);
	hipLaunchKernelGGL(( gb), dim3(gridSize), dim3(blockSize), 0, 0, d_blue,  d_blueBlurred,  nr, nc, d_filter, filterWidth);
	hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
	hipLaunchKernelGGL(( recombineChannels), dim3(gridSize), dim3(blockSize), 0, 0, d_redBlurred, d_greenBlurred, d_blueBlurred, d_oprgb, nr, nc);
	hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}

void cleanup()
{
	checkCudaErrors(hipFree(d_red));
	checkCudaErrors(hipFree(d_green));
	checkCudaErrors(hipFree(d_blue));
	checkCudaErrors(hipFree(d_filter));
}
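// ---------------------------------------------------------------------------
// Editorial note (not part of the original file): this entry and the .cu entry
// that follows are the same solution; the only differences are the ones hipify
// introduces. As an illustration (assumed, not quoted from either file), one of
// the launches above corresponds as follows:
//
//   // CUDA source:
//   gb<<<gridSize, blockSize>>>(d_red, d_redBlurred, nr, nc, d_filter, filterWidth);
//
//   // hipified form (kernel, grid, block, dynamic shared memory, stream, args...):
//   hipLaunchKernelGGL(gb, dim3(gridSize), dim3(blockSize), 0, 0,
//                      d_red, d_redBlurred, nr, nc, d_filter, filterWidth);
//
// The cuda* runtime calls (cudaMalloc, cudaMemcpy, cudaFree, ...) map one-to-one
// onto their hip* counterparts in the same way.
// ---------------------------------------------------------------------------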
c25c26cc4fdedd1d36705b4354da6f8e26c192a0.cu
#include "reference_calc.cpp" #include "utils.h" __global__ void gb(const unsigned char* const ic,unsigned char* const oc,int nr, int nc,const float* const filter, const int filterWidth) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * nc + thread_2D_pos.x; if (thread_2D_pos.x >= nc || thread_2D_pos.y >= nr) return; float result = 0.f; for (int filter_r = -filterWidth/2; filter_r <= filterWidth/2; ++filter_r) { for (int filter_c = -filterWidth/2; filter_c <= filterWidth/2; ++filter_c) { int image_r = min(max(thread_2D_pos.y + filter_r, 0), static_cast<int>(nr - 1)); int image_c = min(max(thread_2D_pos.x + filter_c, 0), static_cast<int>(nc - 1)); float image_value = static_cast<float>(ic[image_r * nc + image_c]); float filter_value = filter[(filter_r + filterWidth/2) * filterWidth + filter_c + filterWidth/2]; result += image_value * filter_value; } } oc[thread_1D_pos] = result; } __global__ void sc(const uchar4* const iprgb,int nr,int nc,unsigned char* const rc,unsigned char* const gc,unsigned char* const bc) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * nc + thread_2D_pos.x; if (thread_2D_pos.x >= nc || thread_2D_pos.y >= nr) return;rc[thread_1D_pos] = iprgb[thread_1D_pos].x;gc[thread_1D_pos] = iprgb[thread_1D_pos].y;bc[thread_1D_pos] = iprgb[thread_1D_pos].z; } __global__ void recombineChannels(const unsigned char* const rc,const unsigned char* const gc,const unsigned char* const bc,uchar4* const oprgb,int nr,int nc) { const int2 thread_2D_pos = make_int2( blockIdx.x * blockDim.x + threadIdx.x,blockIdx.y * blockDim.y + threadIdx.y); const int thread_1D_pos = thread_2D_pos.y * nc + thread_2D_pos.x; if (thread_2D_pos.x >= nc || thread_2D_pos.y >= nr) return; unsigned char red = rc[thread_1D_pos]; unsigned char green = gc[thread_1D_pos]; unsigned char blue = bc[thread_1D_pos]; uchar4 outputPixel = make_uchar4(red, green, blue, 255); oprgb[thread_1D_pos] = outputPixel; } unsigned char *d_red, *d_green, *d_blue; float *d_filter; void allocateMemoryAndCopyToGPU(const size_t nrImage, const size_t ncImage,const float* const h_filter, const size_t filterWidth) { checkCudaErrors(cudaMalloc(&d_red, sizeof(unsigned char) * nrImage * ncImage)); checkCudaErrors(cudaMalloc(&d_green, sizeof(unsigned char) * nrImage * ncImage)); checkCudaErrors(cudaMalloc(&d_blue, sizeof(unsigned char) * nrImage * ncImage)); int num_filter_bytes = sizeof(float) * filterWidth * filterWidth; checkCudaErrors(cudaMalloc(&d_filter, num_filter_bytes)); checkCudaErrors(cudaMemcpy(d_filter, h_filter, num_filter_bytes, cudaMemcpyHostToDevice)); } void your_gb(const uchar4 * const h_iprgb, uchar4 * const d_iprgb,uchar4* const d_oprgb, const size_t nr, const size_t nc,unsigned char *d_redBlurred, unsigned char *d_greenBlurred, unsigned char *d_blueBlurred,const int filterWidth) { const dim3 blockSize(1, 1, 1); const dim3 gridSize(nc, nr, 1); sc<<<gridSize, blockSize>>>(d_iprgb,nr,nc,d_red,d_green,d_blue); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); gb<<<gridSize, blockSize>>>(d_red,d_redBlurred,nr,nc,d_filter,filterWidth); gb<<<gridSize, blockSize>>>(d_green,d_greenBlurred,nr,nc,d_filter,filterWidth); gb<<<gridSize, blockSize>>>(d_blue,d_blueBlurred,nr,nc,d_filter,filterWidth); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); recombineChannels<<<gridSize, 
blockSize>>>(d_redBlurred,d_greenBlurred,d_blueBlurred,d_oprgb,nr,nc); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); } void cleanup() { checkCudaErrors(cudaFree(d_red)); checkCudaErrors(cudaFree(d_green)); checkCudaErrors(cudaFree(d_blue)); checkCudaErrors(cudaFree(d_filter)); }
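// ---------------------------------------------------------------------------
// Editorial note (not part of the original file): your_gb launches every kernel
// with blockSize(1, 1, 1) and gridSize(nc, nr, 1), i.e. one single-thread block
// per pixel. Since each kernel already guards against out-of-range coordinates,
// the same grid of pixels could be covered with multi-thread blocks. A sketch,
// assuming 16x16 tiles (an assumption, not taken from the original):
//
//   const dim3 blockSize(16, 16, 1);
//   const dim3 gridSize((nc + blockSize.x - 1) / blockSize.x,
//                       (nr + blockSize.y - 1) / blockSize.y, 1);
//   sc<<<gridSize, blockSize>>>(d_iprgb, nr, nc, d_red, d_green, d_blue);
//
// The kernels themselves need no change for this launch configuration.
// ---------------------------------------------------------------------------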
71fa80035bbb13b6ac11bef8692cb8d3c1298147.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /***************************************************************************//** * \file .cu * \author Christopher Minar ([email protected]) * \CPU Author, Anush Krishnan ([email protected]) * \brief Implementation of the methods of the class \c DirectForcingSolver to tag * points near the immersed boundary using a ray-tracing algorithm. */ #include "tagPoints.h" namespace kernels { __global__ void interpolateVelocityToGhostNodeX(double *u, bool set, int *ghostTagsUV, double *bx, double *by, double *uB, double *yu, double *xu, double *body_intercept_x, double *body_intercept_y, double *image_point_x, double *image_point_y, int *i_start, int *j_start, int width, int nx, int ny, int *index1, int *index2, int *index3, int *index4, double *q1coef, double *q2coef, double *q3coef, double *q4coef, double *x1, double *x2, double *x3, double *x4, double *y1, double *y2, double *y3, double *y4, double *q1, double *q2, double *q3, double *q4, double *image_point_u)//testing variables {//In the luo et al method they only move corners coincident to the GN to the boundary. We are moving all corners inside to the boundary int idx = threadIdx.x + blockDim.x * blockIdx.x, i = idx % (width), j = idx / (width), I = i_start[0] + i, J = j_start[0] + j, iu = J*(nx-1) + I, ii= I-5, jj = J-5; if (iu > J*(nx-1) + I) //return if we're out of bound return; if (ghostTagsUV[iu]<=0) //return if we're not at an interpolation point return; /* * (x3,y3)__________(x4,y4) * | | * | *(ip_x,ip_y) | * | | * | | * | | * (x1,y1)__________(x2,y2) */ //find x and y of nodes that bound the image point while (xu[ii] < image_point_x[iu]) ii++; while (yu[jj] <image_point_y[iu]) jj++; double x[4] = {xu[ii-1], xu[ii], xu[ii-1], xu[ii]}; double y[4] = {yu[jj-1], yu[jj-1], yu[jj], yu[jj]}; //find index at corners and the u value at the corners int index[4] = {(jj-1)*(nx-1)+ii-1, (jj-1)*(nx-1)+ii, jj*(nx-1)+ii-1, jj*(nx-1)+ii}; double q[4] = {u[index[0]], u[index[1]], u[index[2]], u[index[3]]}; //find the closest corner to the body intercept double min = 1.0; double s; int close_index; bool inflag = false; //a boolean that is true if there is a node inside the body for (int l=0;l<4;l++) { //find the closest node to the BI s = sqrt(pow(x[l]-body_intercept_x[iu],2) + pow(y[l]-body_intercept_y[iu],2)); if (s<min) { min = s; close_index = index[l]; } //check if any of the points are inside the body if (ghostTagsUV[index[l]]>0) inflag = true; } //if point is inside of the body //or if no points are inside the body and the node is the closest to the BI // then move them to the body intercept //point 1 for (int l=0;l<4;l++) { //if ( ghostTagsUV[index[l]] > 0)//this moves every node inside to the edge if ( ghostTagsUV[index[l]] == iu ) //this moves just the GN to the edge { x[l] = body_intercept_x[index[l]]; y[l] = body_intercept_y[index[l]]; q[l] = uB[0]; } else if ( index[l]==close_index && !inflag ) //uncomment this if you want to move the closest node outside of the body to the body { x[l] = body_intercept_x[iu]; y[l] = body_intercept_y[iu]; q[l] = uB[0]; } } x1[iu] = x[0]; x2[iu] = x[1]; x3[iu] = x[2]; x4[iu] = x[3]; y1[iu] = y[0]; y2[iu] = y[1]; y3[iu] = y[2]; y4[iu] = y[3]; q1[iu] = q[0]; q2[iu] = q[1]; q3[iu] = q[2]; q4[iu] = q[3]; index1[iu] = index[0]; index2[iu] = index[1]; index3[iu] = index[2]; index4[iu] = index[3]; //solve equation for bilinear interpolation of values to image point 
//http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche23.html //solve for a /* A a q * |1 x1 y1 x1y1| |a0| = |q1| * |1 x2 y2 x2y2| |a1| = |q2| * |1 x3 y3 x3y3| |a2| = |q3| * |1 x4 y4 x4y4| |a3| = |q4| * * A * |a11 a12 a13 a14| * |a21 a22 a23 a24| * |a31 a13 a33 a34| * |a41 a14 a43 a44| */ double a12 = x1[iu], a13 = y1[iu], a14 = x1[iu]*y1[iu]; double a22 = x2[iu], a23 = y2[iu], a24 = x2[iu]*y2[iu]; double a32 = x3[iu], a33 = y3[iu], a34 = x3[iu]*y3[iu]; double a42 = x4[iu], a43 = y4[iu], a44 = x4[iu]*y4[iu]; double detA = 1*a22*a33*a44 + 1*a23*a34*a42 + 1*a24*a32*a43 +a12*1*a34*a43 + a12*a23*1*a44 + a12*a24*a33*1 +a13*1*a32*a44 + a13*a22*a34*1 + a13*a24*1*a42 +a14*1*a33*a42 + a14*a22*1*a43 + a14*a23*a32*1 -1*a22*a34*a43 - 1*a23*a32*a44 - 1*a24*a33*a42 -a12*1*a33*a44 - a12*a23*a34*1 - a12*a24*1*a43 -a13*1*a34*a42 - a13*a22*1*a44 - a13*a24*a32*1 -a14*1*a32*a43 - a14*a22*a33*1 - a14*a23*1*a42; double b11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42; double b12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43; double b13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42; double b14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33; double b21 = 1*a34*a43 + a23*1*a44 + a24*a33*1 - 1*a33*a44 - a23*a34*1 - a24*1*a43; double b22 = 1*a33*a44 + a13*a34*1 + a14*1*a43 - 1*a34*a43 - a13*1*a44 - a14*a33*1; double b23 = 1*a24*a43 + a13*1*a44 + a14*a23*1 - 1*a23*a44 - a13*a24*1 - a14*1*a43; double b24 = 1*a23*a34 + a13*a24*1 + a14*1*a33 - 1*a24*a33 - a13*1*a34 - a14*a23*1; double b31 = 1*a32*a44 + a22*a34*1 + a24*1*a42 - 1*a34*a42 - a22*1*a44 - a24*a32*1; double b32 = 1*a34*a42 + a12*1*a44 + a14*a32*1 - 1*a32*a44 - a12*a34*1 - a14*1*a42; double b33 = 1*a22*a44 + a12*a24*1 + a14*1*a42 - 1*a24*a42 - a12*1*a44 - a14*a22*1; double b34 = 1*a24*a32 + a12*1*a34 + a14*a22*1 - 1*a22*a34 - a12*a24*1 - a14*1*a32; double b41 = 1*a33*a42 + a22*1*a43 + a23*a32*1 - 1*a32*a43 - a22*a33*1 - a23*1*a42; double b42 = 1*a32*a43 + a12*a33*1 + a13*1*a42 - 1*a33*a42 - a12*1*a43 - a13*a32*1; double b43 = 1*a23*a42 + a12*1*a43 + a13*a22*1 - 1*a22*a43 - a12*a23*1 - a13*1*a42; double b44 = 1*a22*a33 + a12*a23*1 + a13*1*a32 - 1*a23*a32 - a12*1*a33 - a13*a22*1; /* B * |b11 b12 b13 b14| * |b21 b22 b23 b24| * |b31 b32 b33 b34| * |b41 b42 b43 b44| * * Ainv = B/det(A) * a = Ainv*q'; * f= @(X,Y) a(1) + a(2)*X + a(3)*Y + a(4)*X*Y; */ double a0 = b11/detA*q1[iu] + b12/detA*q2[iu] + b13/detA*q3[iu] + b14/detA*q4[iu]; double a1 = b21/detA*q1[iu] + b22/detA*q2[iu] + b23/detA*q3[iu] + b24/detA*q4[iu]; double a2 = b31/detA*q1[iu] + b32/detA*q2[iu] + b33/detA*q3[iu] + b34/detA*q4[iu]; double a3 = b41/detA*q1[iu] + b42/detA*q2[iu] + b43/detA*q3[iu] + b44/detA*q4[iu]; q1coef[iu] = (b11+b21*image_point_x[iu]+b31*image_point_y[iu]+b41*image_point_x[iu]*image_point_y[iu])/detA; q2coef[iu] = (b12+b22*image_point_x[iu]+b32*image_point_y[iu]+b42*image_point_x[iu]*image_point_y[iu])/detA; q3coef[iu] = (b13+b23*image_point_x[iu]+b33*image_point_y[iu]+b43*image_point_x[iu]*image_point_y[iu])/detA; q4coef[iu] = (b14+b24*image_point_x[iu]+b34*image_point_y[iu]+b44*image_point_x[iu]*image_point_y[iu])/detA; image_point_u[iu] = a0 + a1*image_point_x[iu] + a2*image_point_y[iu] + a3*image_point_x[iu]*image_point_y[iu]; if (set) u[iu] = 2*uB[0] - image_point_u[iu]; //u_gn = 2*u_BI - u_IP //flag doesn't currently work with a rotating body because of uB[0], need to use the actual u at the body intercept } __global__ 
void interpolateVelocityToGhostNodeY(double *u, bool set, int *ghostTagsUV, double *bx, double *by, double *vB, double *yv, double *xv, double *body_intercept_x, double *body_intercept_y, double *image_point_x, double *image_point_y, int *i_start, int *j_start, int width, int nx, int ny, int *index1, int *index2, int *index3, int *index4, double *q1coef, double *q2coef, double *q3coef, double *q4coef, double *x1, double *x2, double *x3, double *x4, double *y1, double *y2, double *y3, double *y4, double *q1, double *q2, double *q3, double *q4, double *image_point_u)//testing variables { int idx = threadIdx.x + blockDim.x * blockIdx.x, i = idx % (width), j = idx / (width), I = i_start[0] + i, J = j_start[0] + j, iv = J*nx + I + (nx-1)*ny, ii= I-5, jj = J-5; if (J*nx + I > nx*(ny-1)) //return if we're out of bound return; if (ghostTagsUV[iv]<=0) //return if we're not at an interpolation point return; /* * (x1,y1)__________(x2,y2) * | | * | *(ip_x,ip_y) | * | | * | | * | | * (x3,y3)__________(x4,y4) */ //find x and y of nodes that bound the image point while (xv[ii] < image_point_x[iv]) ii++; while (yv[jj] <image_point_y[iv]) jj++; double x[4] = {xv[ii-1], xv[ii], xv[ii-1], xv[ii]}; double y[4] = {yv[jj-1], yv[jj-1], yv[jj], yv[jj]}; //find index at corners and the u value at the corners int index[4] = {(jj-1)*nx+ii-1 + (nx-1)*ny, (jj-1)*nx+ii + (nx-1)*ny, jj*nx+ii-1 + (nx-1)*ny, jj*nx+ii + (nx-1)*ny}; double q[4] = {u[index[0]], u[index[1]], u[index[2]], u[index[3]]}; //find the closest corner to the body intercept double min = 1.0; double s; int close_index; bool inflag = false; //a boolean that is true if there is a node inside the body for (int l=0;l<4;l++) { //find the closest node to the BI s = sqrt(pow(x[l]-body_intercept_x[iv],2) + pow(y[l]-body_intercept_y[iv],2)); if (s<min) { min = s; close_index = index[l]; } //check if any of the points are inside the body if (ghostTagsUV[index[l]]>0) inflag = true; } //if point is inside of the body //or if no points are inside the body and the node is the closest to the BI // then move them to the body intercept //point 1 for (int l=0;l<4;l++) { //if ( ghostTagsUV[index[l]] > 0) if ( ghostTagsUV[index[l]] == iv ) { x[l] = body_intercept_x[index[l]]; y[l] = body_intercept_y[index[l]]; q[l] = vB[0]; } else if ( index[l]==close_index && !inflag ) //uncomment this if you want to move the closest node outside of the body to the body { x[l] = body_intercept_x[iv]; y[l] = body_intercept_y[iv]; q[l] = vB[0]; } } x1[iv] = x[0]; x2[iv] = x[1]; x3[iv] = x[2]; x4[iv] = x[3]; y1[iv] = y[0]; y2[iv] = y[1]; y3[iv] = y[2]; y4[iv] = y[3]; q1[iv] = q[0]; q2[iv] = q[1]; q3[iv] = q[2]; q4[iv] = q[3]; index1[iv] = index[0]; index2[iv] = index[1]; index3[iv] = index[2]; index4[iv] = index[3]; //solve equation for bilinear interpolation of values to image point //http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche23.html //solve for a /* A a q * |1 x1 y1 x1y1| |a0| = |q1| * |1 x2 y2 x2y2| |a1| = |q2| * |1 x3 y3 x3y3| |a2| = |q3| * |1 x4 y4 x4y4| |a3| = |q4| * * A * |a11 a12 a13 a14| * |a21 a22 a23 a24| * |a31 a13 a33 a34| * |a41 a14 a43 a44| */ double a12 = x1[iv], a13 = y1[iv], a14 = x1[iv]*y1[iv]; double a22 = x2[iv], a23 = y2[iv], a24 = x2[iv]*y2[iv]; double a32 = x3[iv], a33 = y3[iv], a34 = x3[iv]*y3[iv]; double a42 = x4[iv], a43 = y4[iv], a44 = x4[iv]*y4[iv]; double detA = 1*a22*a33*a44 + 1*a23*a34*a42 + 1*a24*a32*a43 +a12*1*a34*a43 + a12*a23*1*a44 + a12*a24*a33*1 +a13*1*a32*a44 + a13*a22*a34*1 + a13*a24*1*a42 +a14*1*a33*a42 + a14*a22*1*a43 + 
a14*a23*a32*1 -1*a22*a34*a43 - 1*a23*a32*a44 - 1*a24*a33*a42 -a12*1*a33*a44 - a12*a23*a34*1 - a12*a24*1*a43 -a13*1*a34*a42 - a13*a22*1*a44 - a13*a24*a32*1 -a14*1*a32*a43 - a14*a22*a33*1 - a14*a23*1*a42; double b11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42; double b12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43; double b13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42; double b14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33; double b21 = 1*a34*a43 + a23*1*a44 + a24*a33*1 - 1*a33*a44 - a23*a34*1 - a24*1*a43; double b22 = 1*a33*a44 + a13*a34*1 + a14*1*a43 - 1*a34*a43 - a13*1*a44 - a14*a33*1; double b23 = 1*a24*a43 + a13*1*a44 + a14*a23*1 - 1*a23*a44 - a13*a24*1 - a14*1*a43; double b24 = 1*a23*a34 + a13*a24*1 + a14*1*a33 - 1*a24*a33 - a13*1*a34 - a14*a23*1; double b31 = 1*a32*a44 + a22*a34*1 + a24*1*a42 - 1*a34*a42 - a22*1*a44 - a24*a32*1; double b32 = 1*a34*a42 + a12*1*a44 + a14*a32*1 - 1*a32*a44 - a12*a34*1 - a14*1*a42; double b33 = 1*a22*a44 + a12*a24*1 + a14*1*a42 - 1*a24*a42 - a12*1*a44 - a14*a22*1; double b34 = 1*a24*a32 + a12*1*a34 + a14*a22*1 - 1*a22*a34 - a12*a24*1 - a14*1*a32; double b41 = 1*a33*a42 + a22*1*a43 + a23*a32*1 - 1*a32*a43 - a22*a33*1 - a23*1*a42; double b42 = 1*a32*a43 + a12*a33*1 + a13*1*a42 - 1*a33*a42 - a12*1*a43 - a13*a32*1; double b43 = 1*a23*a42 + a12*1*a43 + a13*a22*1 - 1*a22*a43 - a12*a23*1 - a13*1*a42; double b44 = 1*a22*a33 + a12*a23*1 + a13*1*a32 - 1*a23*a32 - a12*1*a33 - a13*a22*1; /* B * |b11 b12 b13 b14| * |b21 b22 b23 b24| * |b31 b32 b33 b34| * |b41 b42 b43 b44| * * Ainv = B/det(A) * a = Ainv*q'; * f= @(X,Y) a(1) + a(2)*X + a(3)*Y + a(4)*X*Y; */ double a0 = b11/detA*q1[iv] + b12/detA*q2[iv] + b13/detA*q3[iv] + b14/detA*q4[iv]; double a1 = b21/detA*q1[iv] + b22/detA*q2[iv] + b23/detA*q3[iv] + b24/detA*q4[iv]; double a2 = b31/detA*q1[iv] + b32/detA*q2[iv] + b33/detA*q3[iv] + b34/detA*q4[iv]; double a3 = b41/detA*q1[iv] + b42/detA*q2[iv] + b43/detA*q3[iv] + b44/detA*q4[iv]; q1coef[iv] = (b11+b21*image_point_x[iv]+b31*image_point_y[iv]+b41*image_point_x[iv]*image_point_y[iv])/detA; q2coef[iv] = (b12+b22*image_point_x[iv]+b32*image_point_y[iv]+b42*image_point_x[iv]*image_point_y[iv])/detA; q3coef[iv] = (b13+b23*image_point_x[iv]+b33*image_point_y[iv]+b43*image_point_x[iv]*image_point_y[iv])/detA; q4coef[iv] = (b14+b24*image_point_x[iv]+b34*image_point_y[iv]+b44*image_point_x[iv]*image_point_y[iv])/detA; image_point_u[iv] = a0 + a1*image_point_x[iv] + a2*image_point_y[iv] + a3*image_point_x[iv]*image_point_y[iv]; if (set) u[iv] = 2*vB[0] - image_point_u[iv]; //u_gn = 2*u_BI - u_IP //flag doesn't currently work with a rotating body because of uB[0], need to use the actual u at the body intercept } __global__ void interpolateVelocityToHybridNodeX(double *u, double *ustar, int *hybridTagsUV, double *bx, double *by, double *uB, double *yu, double *xu, double *body_intercept_x, double *body_intercept_y, double *image_point_x, double *image_point_y, int *i_start, int *j_start, int width, int nx, int ny, int *index1, int *index2, int *index3, int *index4, double *q1coef, double *q2coef, double *q3coef, double *q4coef, double *x1, double *x2, double *x3, double *x4, double *y1, double *y2, double *y3, double *y4, double *q1, double *q2, double *q3, double *q4, double *image_point_u) { int idx = threadIdx.x + blockDim.x * blockIdx.x, i = idx % (width), j = idx / (width), I = i_start[0] + i, J = j_start[0] + j, iu = 
J*(nx-1) + I, ii= I-5, jj = J-5; if (iu > J*(nx-1) + I) //return if we're out of bound return; if (hybridTagsUV[iu]<=0) //return if we're not at an interpolation point return; /* (x3,y3)__________(x4,y4) * | | * | | * | | * | *ip | * | | * (x1,y1)__________(x2,y2) * * *(BI_x,BI_y) */ //find x and y of nodes that bound the image point while (xu[ii] < image_point_x[iu]) ii++; x1[iu] = xu[ii-1]; x2[iu] = xu[ii]; x3[iu] = x1[iu]; x4[iu] = x2[iu]; while (yu[jj] <image_point_y[iu]) jj++; y1[iu] = yu[jj-1]; y2[iu] = y1[iu]; y3[iu] = yu[jj]; y4[iu] = y3[iu]; //find q1,q2,q3,q4 q1[iu] = u[(jj-1)*(nx-1)+ii-1]; q2[iu] = u[(jj-1)*(nx-1)+ii]; q3[iu] = u[jj*(nx-1)+ii-1]; q4[iu] = u[jj*(nx-1)+ii]; index1[iu] = (jj-1)*(nx-1)+ii-1; index2[iu] = (jj-1)*(nx-1)+ii; index3[iu] = jj*(nx-1)+ii-1; index4[iu] = jj*(nx-1)+ii; //check if any points are inside of the body, then move them to the body intercept //point 1 if (hybridTagsUV[(jj-1)*(nx-1)+ii-1] == iu) { x1[iu] = body_intercept_x[iu]; y1[iu] = body_intercept_y[iu]; q1[iu] = uB[0]; } if (hybridTagsUV[(jj-1)*(nx-1)+ii] == iu) { x2[iu] = body_intercept_x[iu]; y2[iu] = body_intercept_y[iu]; q2[iu] = uB[0]; } if (hybridTagsUV[jj*(nx-1)+ii-1] == iu) { x3[iu] = body_intercept_x[iu]; y3[iu] = body_intercept_y[iu]; q3[iu] = uB[0]; } if (hybridTagsUV[jj*(nx-1)+ii] == iu) { x4[iu] = body_intercept_x[iu]; y4[iu] = body_intercept_y[iu]; q4[iu] = uB[0]; } //solve equation for bilinear interpolation of values to image point //http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche23.html //solve for a /* A a q * |1 x1 y1 x1y1| |a0| = |q1| * |1 x2 y2 x2y2| |a1| = |q2| * |1 x3 y3 x3y3| |a2| = |q3| * |1 x4 y4 x4y4| |a3| = |q4| * * A * |a11 a12 a13 a14| * |a21 a22 a23 a24| * |a31 a13 a33 a34| * |a41 a14 a43 a44| */ double a12 = x1[iu], a13 = y1[iu], a14 = x1[iu]*y1[iu]; double a22 = x2[iu], a23 = y2[iu], a24 = x2[iu]*y2[iu]; double a32 = x3[iu], a33 = y3[iu], a34 = x3[iu]*y3[iu]; double a42 = x4[iu], a43 = y4[iu], a44 = x4[iu]*y4[iu]; double detA = 1*a22*a33*a44 + 1*a23*a34*a42 + 1*a24*a32*a43 +a12*1*a34*a43 + a12*a23*1*a44 + a12*a24*a33*1 +a13*1*a32*a44 + a13*a22*a34*1 + a13*a24*1*a42 +a14*1*a33*a42 + a14*a22*1*a43 + a14*a23*a32*1 -1*a22*a34*a43 - 1*a23*a32*a44 - 1*a24*a33*a42 -a12*1*a33*a44 - a12*a23*a34*1 - a12*a24*1*a43 -a13*1*a34*a42 - a13*a22*1*a44 - a13*a24*a32*1 -a14*1*a32*a43 - a14*a22*a33*1 - a14*a23*1*a42; double b11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42; double b12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43; double b13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42; double b14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33; double b21 = 1*a34*a43 + a23*1*a44 + a24*a33*1 - 1*a33*a44 - a23*a34*1 - a24*1*a43; double b22 = 1*a33*a44 + a13*a34*1 + a14*1*a43 - 1*a34*a43 - a13*1*a44 - a14*a33*1; double b23 = 1*a24*a43 + a13*1*a44 + a14*a23*1 - 1*a23*a44 - a13*a24*1 - a14*1*a43; double b24 = 1*a23*a34 + a13*a24*1 + a14*1*a33 - 1*a24*a33 - a13*1*a34 - a14*a23*1; double b31 = 1*a32*a44 + a22*a34*1 + a24*1*a42 - 1*a34*a42 - a22*1*a44 - a24*a32*1; double b32 = 1*a34*a42 + a12*1*a44 + a14*a32*1 - 1*a32*a44 - a12*a34*1 - a14*1*a42; double b33 = 1*a22*a44 + a12*a24*1 + a14*1*a42 - 1*a24*a42 - a12*1*a44 - a14*a22*1; double b34 = 1*a24*a32 + a12*1*a34 + a14*a22*1 - 1*a22*a34 - a12*a24*1 - a14*1*a32; double b41 = 1*a33*a42 + a22*1*a43 + a23*a32*1 - 1*a32*a43 - a22*a33*1 - a23*1*a42; double b42 = 1*a32*a43 + 
a12*a33*1 + a13*1*a42 - 1*a33*a42 - a12*1*a43 - a13*a32*1; double b43 = 1*a23*a42 + a12*1*a43 + a13*a22*1 - 1*a22*a43 - a12*a23*1 - a13*1*a42; double b44 = 1*a22*a33 + a12*a23*1 + a13*1*a32 - 1*a23*a32 - a12*1*a33 - a13*a22*1; /* B * |b11 b12 b13 b14| * |b21 b22 b23 b24| * |b31 b32 b33 b34| * |b41 b42 b43 b44| * * Ainv = B/det(A) * a = Ainv*q'; * f= @(X,Y) a(1) + a(2)*X + a(3)*Y + a(4)*X*Y; */ double a0 = (b11*q1[iu] + b12*q2[iu] + b13*q3[iu] + b14*q4[iu])/detA; double a1 = (b21*q1[iu] + b22*q2[iu] + b23*q3[iu] + b24*q4[iu])/detA; double a2 = (b31*q1[iu] + b32*q2[iu] + b33*q3[iu] + b34*q4[iu])/detA; double a3 = (b41*q1[iu] + b42*q2[iu] + b43*q3[iu] + b44*q4[iu])/detA; q1coef[iu] = (b11+b21*xu[I]+b31*yu[J]+b41*xu[I]*yu[J])/detA; q2coef[iu] = (b12+b22*xu[I]+b32*yu[J]+b42*xu[I]*yu[J])/detA; q3coef[iu] = (b13+b23*xu[I]+b33*yu[J]+b43*xu[I]*yu[J])/detA; q4coef[iu] = (b14+b24*xu[I]+b34*yu[J]+b44*xu[I]*yu[J])/detA; ustar[iu] = a0 + a1*xu[I] + a2*yu[J] + a3*yu[J]*xu[I]; //u[iu] = a0 + a1*xu[I] + a2*yu[J] + a3*yu[J]*xu[I]; image_point_u[iu] = a0 + a1*image_point_x[iu] + a2*image_point_y[iu] + a3*image_point_x[iu]*image_point_y[iu]; } __global__ void interpolateVelocityToHybridNodeY(double *u, double *ustar, int *hybridTagsUV, double *bx, double *by, double *vB, double *yv, double *xv, double *body_intercept_x, double *body_intercept_y, double *image_point_x, double *image_point_y, int *i_start, int *j_start, int width, int nx, int ny, int *index1, int *index2, int *index3, int *index4, double *q1coef, double *q2coef, double *q3coef, double *q4coef, double *x1, double *x2, double *x3, double *x4, double *y1, double *y2, double *y3, double *y4, double *q1, double *q2, double *q3, double *q4, double *image_point_u) { int idx = threadIdx.x + blockDim.x * blockIdx.x, i = idx % (width), j = idx / (width), I = i_start[0] + i, J = j_start[0] + j, iv = J*nx + I + (nx-1)*ny, ii= I-5, jj = J-5; if (J*nx + I > nx*(ny-1)) //return if we're out of bound return; if (hybridTagsUV[iv]<=0) //return if we're not at an interpolation point return; /* * (x3,y3)__________(x4,y4) * | | * | | * | | * | *ip | * | | * (x1,y1)__________(x2,y2) * * *(BI_x,BI_y) * * */ //find x and y of nodes that bound the image point while (xv[ii] < image_point_x[iv]) ii++; x1[iv] = xv[ii-1]; x2[iv] = xv[ii]; x3[iv] = x1[iv]; x4[iv] = x2[iv]; while (yv[jj] <image_point_y[iv]) jj++; y1[iv] = yv[jj-1]; y2[iv] = y1[iv]; y3[iv] = yv[jj]; y4[iv] = y3[iv]; //find q1,q2,q3,q4 q1[iv] = u[(jj-1)*nx+ii-1 + (nx-1)*ny]; q2[iv] = u[(jj-1)*nx+ii + (nx-1)*ny]; q3[iv] = u[jj*nx+ii-1 + (nx-1)*ny]; q4[iv] = u[jj*nx+ii + (nx-1)*ny]; index1[iv] = (jj-1)*nx+ii-1 + (nx-1)*ny; index2[iv] = (jj-1)*nx+ii + (nx-1)*ny; index3[iv] = jj*nx+ii-1 + (nx-1)*ny; index4[iv] = jj*nx+ii + (nx-1)*ny; //check if any points are inside of the body, then move them to the body intercept //point 1 if (hybridTagsUV[(jj-1)*nx+ii-1 + (nx-1)*ny] == iv) { x1[iv] = body_intercept_x[iv]; y1[iv] = body_intercept_y[iv]; q1[iv] = vB[0]; } if (hybridTagsUV[(jj-1)*nx+ii + (nx-1)*ny] == iv) { x2[iv] = body_intercept_x[iv]; y2[iv] = body_intercept_y[iv]; q2[iv] = vB[0]; } if (hybridTagsUV[jj*nx+ii-1 + (nx-1)*ny] == iv) { x3[iv] = body_intercept_x[iv]; y3[iv] = body_intercept_y[iv]; q3[iv] = vB[0]; } if (hybridTagsUV[jj*nx+ii + (nx-1)*ny] == iv) { x4[iv] = body_intercept_x[iv]; y4[iv] = body_intercept_y[iv]; q4[iv] = vB[0]; } //solve equation for bilinear interpolation of values to image point //http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche23.html //solve for a /* A a q * |1 x1 y1 
x1y1| |a0| = |q1| * |1 x2 y2 x2y2| |a1| = |q2| * |1 x3 y3 x3y3| |a2| = |q3| * |1 x4 y4 x4y4| |a3| = |q4| * * A * |a11 a12 a13 a14| * |a21 a22 a23 a24| * |a31 a13 a33 a34| * |a41 a14 a43 a44| */ double a12 = x1[iv], a13 = y1[iv], a14 = x1[iv]*y1[iv]; double a22 = x2[iv], a23 = y2[iv], a24 = x2[iv]*y2[iv]; double a32 = x3[iv], a33 = y3[iv], a34 = x3[iv]*y3[iv]; double a42 = x4[iv], a43 = y4[iv], a44 = x4[iv]*y4[iv]; double detA = 1*a22*a33*a44 + 1*a23*a34*a42 + 1*a24*a32*a43 +a12*1*a34*a43 + a12*a23*1*a44 + a12*a24*a33*1 +a13*1*a32*a44 + a13*a22*a34*1 + a13*a24*1*a42 +a14*1*a33*a42 + a14*a22*1*a43 + a14*a23*a32*1 -1*a22*a34*a43 - 1*a23*a32*a44 - 1*a24*a33*a42 -a12*1*a33*a44 - a12*a23*a34*1 - a12*a24*1*a43 -a13*1*a34*a42 - a13*a22*1*a44 - a13*a24*a32*1 -a14*1*a32*a43 - a14*a22*a33*1 - a14*a23*1*a42; double b11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42; double b12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43; double b13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42; double b14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33; double b21 = 1*a34*a43 + a23*1*a44 + a24*a33*1 - 1*a33*a44 - a23*a34*1 - a24*1*a43; double b22 = 1*a33*a44 + a13*a34*1 + a14*1*a43 - 1*a34*a43 - a13*1*a44 - a14*a33*1; double b23 = 1*a24*a43 + a13*1*a44 + a14*a23*1 - 1*a23*a44 - a13*a24*1 - a14*1*a43; double b24 = 1*a23*a34 + a13*a24*1 + a14*1*a33 - 1*a24*a33 - a13*1*a34 - a14*a23*1; double b31 = 1*a32*a44 + a22*a34*1 + a24*1*a42 - 1*a34*a42 - a22*1*a44 - a24*a32*1; double b32 = 1*a34*a42 + a12*1*a44 + a14*a32*1 - 1*a32*a44 - a12*a34*1 - a14*1*a42; double b33 = 1*a22*a44 + a12*a24*1 + a14*1*a42 - 1*a24*a42 - a12*1*a44 - a14*a22*1; double b34 = 1*a24*a32 + a12*1*a34 + a14*a22*1 - 1*a22*a34 - a12*a24*1 - a14*1*a32; double b41 = 1*a33*a42 + a22*1*a43 + a23*a32*1 - 1*a32*a43 - a22*a33*1 - a23*1*a42; double b42 = 1*a32*a43 + a12*a33*1 + a13*1*a42 - 1*a33*a42 - a12*1*a43 - a13*a32*1; double b43 = 1*a23*a42 + a12*1*a43 + a13*a22*1 - 1*a22*a43 - a12*a23*1 - a13*1*a42; double b44 = 1*a22*a33 + a12*a23*1 + a13*1*a32 - 1*a23*a32 - a12*1*a33 - a13*a22*1; /* B * |b11 b12 b13 b14| * |b21 b22 b23 b24| * |b31 b32 b33 b34| * |b41 b42 b43 b44| * * Ainv = B/det(A) * a = Ainv*q'; * f= @(X,Y) a(1) + a(2)*X + a(3)*Y + a(4)*X*Y; */ double a0 = (b11*q1[iv] + b12*q2[iv] + b13*q3[iv] + b14*q4[iv])/detA; double a1 = (b21*q1[iv] + b22*q2[iv] + b23*q3[iv] + b24*q4[iv])/detA; double a2 = (b31*q1[iv] + b32*q2[iv] + b33*q3[iv] + b34*q4[iv])/detA; double a3 = (b41*q1[iv] + b42*q2[iv] + b43*q3[iv] + b44*q4[iv])/detA; q1coef[iv] = (b11+b21*xv[I]+b31*yv[J]+b41*xv[I]*yv[J])/detA; q2coef[iv] = (b12+b22*xv[I]+b32*yv[J]+b42*xv[I]*yv[J])/detA; q3coef[iv] = (b13+b23*xv[I]+b33*yv[J]+b43*xv[I]*yv[J])/detA; q4coef[iv] = (b14+b24*xv[I]+b34*yv[J]+b44*xv[I]*yv[J])/detA; ustar[iv] = a0 + a1*xv[I] + a2*yv[J] + a3*yv[J]*xv[I]; //u[iv] = a0 + a1*xv[I] + a2*yv[J] + a3*yv[J]*xv[I]; image_point_u[iv] = a0 + a1*image_point_x[iv] + a2*image_point_y[iv] + a3*image_point_x[iv]*image_point_y[iv]; } __global__ void interpolatePressureToHybridNode(double *pressure, double *pressureStar, double *u, int *hybridTagsP, double *bx, double *by, double *uB, double *uB0, double *vB, double *vB0, double *yu, double *yv, double *xu, double *xv, //yv xu not used? 
double *body_intercept_p_x, double *body_intercept_p_y, double *image_point_p_x, double *image_point_p_y, int *i_start, int *j_start, int width, int nx, int ny, double dt, int *index1, int *index2, int *index3, int *index4, double *q1coef, double *q2coef, double *q3coef, double *q4coef, double *a0, double *a1, double *a2, double *a3, double *x1, double *x2, double *x3, double *x4, double *y1, double *y2, double *y3, double *y4, double *q1, double *q2, double *q3, double *q4) { int idx = threadIdx.x + blockDim.x * blockIdx.x, i = idx % (width), j = idx / (width), I = i_start[0] + i, J = j_start[0] + j, ip = J*nx + I, ii= I-5, jj = J-5; if (ip > J*nx + I) //return if we're out of bound return; if (hybridTagsP[ip]<=0) //return if we're not at an interpolation point return; double n_x, n_y, nl; /* * (x3,y3)__________(x4,y4) * | | * | | * | | * | *ip | * | | * (x1,y1)__________(x2,y2) * * *(BI_x,BI_y) * * */ //find x and y of nodes that bound the image point double x[4]; double y[4]; double q[4]; int index[4]; while (xv[ii] < image_point_p_x[ip]) ii++; x[0] = xv[ii-1]; x[1] = xv[ii]; x[2] = xv[ii-1]; x[3] = xv[ii]; while (yu[jj] <image_point_p_y[ip]) jj++; y[0] = yu[jj-1]; y[1] = yu[jj-1]; y[2] = yu[jj]; y[3] = yu[jj]; //find q1,q2,q3,q4 index[0] = (jj-1)*nx+ii-1; index[1] = (jj-1)*nx+ii; index[2] = jj*nx+ii-1; index[3] = jj*nx+ii; for (int l=0; l<4; l++) q[l] = pressure[index[l]]; double a[16] = {1, x[0], y[0], x[0]*y[0], 1, x[1], y[1], x[1]*y[1], 1, x[2], y[2], x[2]*y[2], 1, x[3], y[3], x[3]*y[3]}; //setup for neuman BC double dudt; double dvdt; for (int l = 0; l<4; l++) { //move the closes node to the body to the surface then calculate the neuman boundary condition for it if ( hybridTagsP[index[l]] == ip ) { dudt = (uB[0] - uB0[0])/dt; dvdt = (vB[0] - vB0[0])/dt; x[l] = body_intercept_p_x[ip]; y[l] = body_intercept_p_y[ip]; n_x = image_point_p_x[ip] - x[l]; n_y = image_point_p_y[ip] - y[l]; nl = sqrt(n_x*n_x + n_y*n_y); q[l] = - ( n_x / nl * dudt + n_y/nl * dvdt); a[l*4] = 0; a[l*4+1] = n_x/nl; a[l*4+2] = n_y/nl; a[l*4+3] = n_y/nl*x[l] + n_x/nl*y[l]; } } x1[ip] = x[0]; x2[ip] = x[1]; x3[ip] = x[2]; x4[ip] = x[3]; y1[ip] = y[0]; y2[ip] = y[1]; y3[ip] = y[2]; y4[ip] = y[3]; q1[ip] = q[0]; q2[ip] = q[1]; q3[ip] = q[2]; q4[ip] = q[3]; index1[ip] = index[0]; index2[ip] = index[1]; index3[ip] = index[2]; index4[ip] = index[3]; double a11 = a[0], a12 = a[1], a13 = a[2], a14 = a[3]; double a21 = a[4], a22 = a[5], a23 = a[6], a24 = a[7]; double a31 = a[8], a32 = a[9], a33 = a[10], a34 = a[11]; double a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15]; //solve equation for bilinear interpolation of values to image point //http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche23.html //for solving a 4x4 matrix exactly //https://www.physicsforums.com/threads/is-normal-derivative-a-definition.706458/ //for dealing with the normal at the boundary //solve for a /* A a q * |1 x1 y1 x1y1| |a0| = |q1| * |1 x2 y2 x2y2| |a1| = |q2| * |1 x3 y3 x3y3| |a2| = |q3| * |1 x4 y4 x4y4| |a3| = |q4| * * A * |a11 a12 a13 a14| * |a21 a22 a23 a24| * |a31 a13 a33 a34| * |a41 a14 a43 a44| */ double detA = a11*a22*a33*a44 + a11*a23*a34*a42 + a11*a24*a32*a43 +a12*a21*a34*a43 + a12*a23*a31*a44 + a12*a24*a33*a41 +a13*a21*a32*a44 + a13*a22*a34*a41 + a13*a24*a31*a42 +a14*a21*a33*a42 + a14*a22*a31*a43 + a14*a23*a32*a41 -a11*a22*a34*a43 - a11*a23*a32*a44 - a11*a24*a33*a42 -a12*a21*a33*a44 - a12*a23*a34*a41 - a12*a24*a31*a43 -a13*a21*a34*a42 - a13*a22*a31*a44 - a13*a24*a32*a41 -a14*a21*a32*a43 - a14*a22*a33*a41 - 
a14*a23*a31*a42; double b11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42; double b12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43; double b13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42; double b14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33; double b21 = a21*a34*a43 + a23*a31*a44 + a24*a33*a41 - a21*a33*a44 - a23*a34*a41 - a24*a31*a43; double b22 = a11*a33*a44 + a13*a34*a41 + a14*a31*a43 - a11*a34*a43 - a13*a31*a44 - a14*a33*a41; double b23 = a11*a24*a43 + a13*a21*a44 + a14*a23*a41 - a11*a23*a44 - a13*a24*a41 - a14*a21*a43; double b24 = a11*a23*a34 + a13*a24*a31 + a14*a21*a33 - a11*a24*a33 - a13*a21*a34 - a14*a23*a31; double b31 = a21*a32*a44 + a22*a34*a41 + a24*a31*a42 - a21*a34*a42 - a22*a31*a44 - a24*a32*a41; double b32 = a11*a34*a42 + a12*a31*a44 + a14*a32*a41 - a11*a32*a44 - a12*a34*a41 - a14*a31*a42; double b33 = a11*a22*a44 + a12*a24*a41 + a14*a21*a42 - a11*a24*a42 - a12*a21*a44 - a14*a22*a41; double b34 = a11*a24*a32 + a12*a21*a34 + a14*a22*a31 - a11*a22*a34 - a12*a24*a31 - a14*a21*a32; double b41 = a21*a33*a42 + a22*a31*a43 + a23*a32*a41 - a21*a32*a43 - a22*a33*a41 - a23*a31*a42; double b42 = a11*a32*a43 + a12*a33*a41 + a13*a31*a42 - a11*a33*a42 - a12*a31*a43 - a13*a32*a41; double b43 = a11*a23*a42 + a12*a21*a43 + a13*a22*a41 - a11*a22*a43 - a12*a23*a41 - a13*a21*a42; double b44 = a11*a22*a33 + a12*a23*a31 + a13*a21*a32 - a11*a23*a32 - a12*a21*a33 - a13*a22*a31; /* B * |b11 b12 b13 b14| * |b21 b22 b23 b24| * |b31 b32 b33 b34| * |b41 b42 b43 b44| * * Ainv = B/det(A) * a = Ainv*q'; * f= @(X,Y) a(1) + a(2)*X + a(3)*Y + a(4)*X*Y; */ a0[ip] = b11/detA*q1[ip] + b12/detA*q2[ip] + b13/detA*q3[ip] + b14/detA*q4[ip]; a1[ip] = b21/detA*q1[ip] + b22/detA*q2[ip] + b23/detA*q3[ip] + b24/detA*q4[ip]; a2[ip] = b31/detA*q1[ip] + b32/detA*q2[ip] + b33/detA*q3[ip] + b34/detA*q4[ip]; a3[ip] = b41/detA*q1[ip] + b42/detA*q2[ip] + b43/detA*q3[ip] + b44/detA*q4[ip]; q1coef[ip] = (b11 + b21*xv[I] + b31*yu[J] + b41*xv[I]*yu[J])/detA; q2coef[ip] = (b12 + b22*xv[I] + b32*yu[J] + b42*xv[I]*yu[J])/detA; q3coef[ip] = (b13 + b23*xv[I] + b33*yu[J] + b43*xv[I]*yu[J])/detA; q4coef[ip] = (b14 + b24*xv[I] + b34*yu[J] + b44*xv[I]*yu[J])/detA; pressureStar[ip] = a0[ip] + a1[ip]*xv[I] + a2[ip]*yu[J] + a3[ip]*xv[I]*yu[J]; } __global__ void interpolatePressureToGhostNode(double *pressure, bool set, double *u, int *ghostTagsP, double *bx, double *by, double *dpdn, double *uB, double *uB0, double *vB, double *vB0, double *yu, double *yv, double *xu, double *xv, double *body_intercept_p_x, double *body_intercept_p_y, double *image_point_p_x, double *image_point_p_y, double *body_intercept_p, int *i_start, int *j_start, int width, int nx, int ny, double dt, int *index1, int *index2, int *index3, int *index4, double *q1coef, double *q2coef, double *q3coef, double *q4coef, double *a0, double *a1, double *a2, double *a3, double *x1, double *x2, double *x3, double *x4, double *y1, double *y2, double *y3, double *y4, double *q1, double *q2, double *q3, double *q4)//test { int idx = threadIdx.x + blockDim.x * blockIdx.x, i = idx % (width), j = idx / (width), I = i_start[0] + i, J = j_start[0] + j, ip = J*nx + I, ii= I-5, jj = J-5; if (ip > J*nx + I) //return if we're out of bound return; if (ghostTagsP[ip]<=0) //return if we're not at an interpolation point return; double n_x, n_y, nl, matDClose; int close_index; /* * (x3,y3)__________(x4,y4) * | | * | | * | | * | *ip | * | | 
* (x1,y1)__________(x2,y2) * * *(BI_x,BI_y) */ //find x and y of nodes that bound the image point while (xv[ii] < image_point_p_x[ip]) ii++; while (yu[jj] < image_point_p_y[ip]) jj++; double x[4] = {xv[ii-1], xv[ii], xv[ii-1], xv[ii]}; double y[4] = {yu[jj-1], yu[jj-1], yu[jj], yu[jj]}; //find index at corners and the u value at the corners int index[4] = {(jj-1)*nx+ii-1, (jj-1)*nx+ii, jj*nx+ii-1, jj*nx+ii}; double q[4] = {pressure[index[0]], pressure[index[1]], pressure[index[2]], pressure[index[3]]}; double a[16] = {1, x[0], y[0], x[0]*y[0], 1, x[1], y[1], x[1]*y[1], 1, x[2], y[2], x[2]*y[2], 1, x[3], y[3], x[3]*y[3]}; //find the closest corner to the body intercept double min = 1.0; double s; for (int l=0;l<4;l++) { //find the closest node to the BI s = sqrt(pow(x[l]-body_intercept_p_x[ip],2) + pow(y[l]-body_intercept_p_y[ip],2)); if (s<min) { min = s; close_index = index[l]; } } //setup for neuman BC double dudt; double dvdt; for (int l=0; l<4; l++) { //set nodes inside the body to neuman bc if ( ghostTagsP[index[l]] > 0 ) { dudt = (uB[0] - uB0[0])/dt; dvdt = (vB[0] - vB0[0])/dt; x[l] = body_intercept_p_x[index[l]]; y[l] = body_intercept_p_y[index[l]]; n_x = image_point_p_x[index[l]] - x[l]; n_y = image_point_p_y[index[l]] - y[l]; nl = sqrt(n_x*n_x + n_y*n_y); q[l] = - ( n_x / nl * dudt + n_y/nl * dvdt); a[l*4] = 0; a[l*4+1] = n_x/nl; a[l*4+2] = n_y/nl; a[l*4+3] = n_y/nl*x[l] + n_x/nl*y[l]; } //if the node is the closest to the body, set the closeMatD if (index[l] == close_index) { dudt = (uB[0] - uB0[0])/dt; dvdt = (vB[0] - vB0[0])/dt; x[l] = body_intercept_p_x[ip]; y[l] = body_intercept_p_y[ip]; n_x = image_point_p_x[ip] - x[l]; n_y = image_point_p_y[ip] - y[l]; nl = sqrt(n_x*n_x + n_y*n_y); matDClose = ( n_x / nl * dudt + n_y/nl * dvdt); } } x1[ip] = x[0]; x2[ip] = x[1]; x3[ip] = x[2]; x4[ip] = x[3]; y1[ip] = y[0]; y2[ip] = y[1]; y3[ip] = y[2]; y4[ip] = y[3]; q1[ip] = q[0]; q2[ip] = q[1]; q3[ip] = q[2]; q4[ip] = q[3]; index1[ip] = index[0]; index2[ip] = index[1]; index3[ip] = index[2]; index4[ip] = index[3]; double a11 = a[0], a12 = a[1], a13 = a[2], a14 = a[3]; double a21 = a[4], a22 = a[5], a23 = a[6], a24 = a[7]; double a31 = a[8], a32 = a[9], a33 = a[10], a34 = a[11]; double a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15]; //solve equation for bilinear interpolation of values to image point //http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche23.html //for solving a 4x4 matrix exactly //https://www.physicsforums.com/threads/is-normal-derivative-a-definition.706458/ //for dealing with the normal at the boundary (how to calculate a normal deriviative) //df/dn = grad(f) dot n //f = a0 + a1x + a2y + a3xy //df/dn = ((a1+a3y)i + (a2+a3x)j) dot ((n_x/nl) i+ (n_y/nl)j) //where n_x, n_y and nl are the normal vector lengths in the x, y and magnitude respectively //solve for a /* A a q * |1 x1 y1 x1y1| |a0| = |q1| * |1 x2 y2 x2y2| |a1| = |q2| * |1 x3 y3 x3y3| |a2| = |q3| * |1 x4 y4 x4y4| |a3| = |q4| * * |0 n_x/nl n_y/nl (n_y*x+n_x*y)/nl| | | = |q | replace one row with this depending on which node is the closes to the body intercept<- * A * |a11 a12 a13 a14| * |a21 a22 a23 a24| * |a31 a13 a33 a34| * |a41 a14 a43 a44| */ double detA = a11*a22*a33*a44 + a11*a23*a34*a42 + a11*a24*a32*a43 +a12*a21*a34*a43 + a12*a23*a31*a44 + a12*a24*a33*a41 +a13*a21*a32*a44 + a13*a22*a34*a41 + a13*a24*a31*a42 +a14*a21*a33*a42 + a14*a22*a31*a43 + a14*a23*a32*a41 -a11*a22*a34*a43 - a11*a23*a32*a44 - a11*a24*a33*a42 -a12*a21*a33*a44 - a12*a23*a34*a41 - a12*a24*a31*a43 -a13*a21*a34*a42 - 
a13*a22*a31*a44 - a13*a24*a32*a41 -a14*a21*a32*a43 - a14*a22*a33*a41 - a14*a23*a31*a42; /* B * |b11 b12 b13 b14| * |b21 b22 b23 b24| * |b31 b32 b33 b34| * |b41 b42 b43 b44| */ double b11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42; double b12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43; double b13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42; double b14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33; double b21 = a21*a34*a43 + a23*a31*a44 + a24*a33*a41 - a21*a33*a44 - a23*a34*a41 - a24*a31*a43; double b22 = a11*a33*a44 + a13*a34*a41 + a14*a31*a43 - a11*a34*a43 - a13*a31*a44 - a14*a33*a41; double b23 = a11*a24*a43 + a13*a21*a44 + a14*a23*a41 - a11*a23*a44 - a13*a24*a41 - a14*a21*a43; double b24 = a11*a23*a34 + a13*a24*a31 + a14*a21*a33 - a11*a24*a33 - a13*a21*a34 - a14*a23*a31; double b31 = a21*a32*a44 + a22*a34*a41 + a24*a31*a42 - a21*a34*a42 - a22*a31*a44 - a24*a32*a41; double b32 = a11*a34*a42 + a12*a31*a44 + a14*a32*a41 - a11*a32*a44 - a12*a34*a41 - a14*a31*a42; double b33 = a11*a22*a44 + a12*a24*a41 + a14*a21*a42 - a11*a24*a42 - a12*a21*a44 - a14*a22*a41; double b34 = a11*a24*a32 + a12*a21*a34 + a14*a22*a31 - a11*a22*a34 - a12*a24*a31 - a14*a21*a32; double b41 = a21*a33*a42 + a22*a31*a43 + a23*a32*a41 - a21*a32*a43 - a22*a33*a41 - a23*a31*a42; double b42 = a11*a32*a43 + a12*a33*a41 + a13*a31*a42 - a11*a33*a42 - a12*a31*a43 - a13*a32*a41; double b43 = a11*a23*a42 + a12*a21*a43 + a13*a22*a41 - a11*a22*a43 - a12*a23*a41 - a13*a21*a42; double b44 = a11*a22*a33 + a12*a23*a31 + a13*a21*a32 - a11*a23*a32 - a12*a21*a33 - a13*a22*a31; /* Solve A*a = q for a * Ainv = B/det(A) * a = Ainv*q'; * interpolate for a value using the newly formed function * p= @(X,Y) a(1) + a(2)*X + a(3)*Y + a(4)*X*Y; */ a0[ip] = b11/detA*q[0] + b12/detA*q[1] + b13/detA*q[2] + b14/detA*q[3]; a1[ip] = b21/detA*q[0] + b22/detA*q[1] + b23/detA*q[2] + b24/detA*q[3]; a2[ip] = b31/detA*q[0] + b32/detA*q[1] + b33/detA*q[2] + b34/detA*q[3]; a3[ip] = b41/detA*q[0] + b42/detA*q[1] + b43/detA*q[2] + b44/detA*q[3]; q1coef[ip] = (b11 + b21*image_point_p_x[ip] + b31*image_point_p_y[ip] + b41*image_point_p_x[ip]*image_point_p_y[ip])/detA; q2coef[ip] = (b12 + b22*image_point_p_x[ip] + b32*image_point_p_y[ip] + b42*image_point_p_x[ip]*image_point_p_y[ip])/detA; q3coef[ip] = (b13 + b23*image_point_p_x[ip] + b33*image_point_p_y[ip] + b43*image_point_p_x[ip]*image_point_p_y[ip])/detA; q4coef[ip] = (b14 + b24*image_point_p_x[ip] + b34*image_point_p_y[ip] + b44*image_point_p_x[ip]*image_point_p_y[ip])/detA; //pressure at the image point double image_point_pressure = a0[ip] + a1[ip]*image_point_p_x[ip] + a2[ip]*image_point_p_y[ip] + a3[ip] * image_point_p_y[ip] *image_point_p_x[ip]; body_intercept_p[ip] = a0[ip] + a1[ip]*body_intercept_p_x[ip] + a2[ip]*body_intercept_p_y[ip] + a3[ip] * body_intercept_p_x[ip]*body_intercept_p_y[ip]; //used for force calc dpdn[ip] = sqrt(pow(image_point_p_x[ip]-xv[I],2)+pow(image_point_p_y[ip]-yu[J],2))*matDClose; //extrapolate pressure to the ghost node if (set) pressure[ip] = image_point_pressure + dpdn[ip]; } }
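// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original source file above): every
// interpolation kernel in this file solves the same small linear system
//     A*a = q,   with one row  |1  x_k  y_k  x_k*y_k|  per corner k = 1..4,
// and then evaluates f(X,Y) = a0 + a1*X + a2*Y + a3*X*Y at the image point.
// The device code expands inv(A) by cofactors (b11..b44 / detA).  The host
// helpers below do the same solve with a tiny Gaussian elimination instead,
// which is convenient for checking the cofactor expansion on the CPU.
// The names solve4x4 and interpolateAtImagePoint are made up for this sketch.
#include <cmath>

// Solves the 4x4 system A*a = q; returns false if A is (nearly) singular.
static bool solve4x4(double A[4][4], double q[4], double a[4])
{
    int piv[4] = {0, 1, 2, 3};
    for (int col = 0; col < 4; col++)
    {
        // partial pivoting: pick the largest remaining entry in this column
        int best = col;
        for (int r = col + 1; r < 4; r++)
            if (std::fabs(A[piv[r]][col]) > std::fabs(A[piv[best]][col])) best = r;
        int tmp = piv[col]; piv[col] = piv[best]; piv[best] = tmp;
        double d = A[piv[col]][col];
        if (std::fabs(d) < 1e-14) return false;
        // eliminate this column from the rows below
        for (int r = col + 1; r < 4; r++)
        {
            double m = A[piv[r]][col] / d;
            for (int c = col; c < 4; c++) A[piv[r]][c] -= m * A[piv[col]][c];
            q[piv[r]] -= m * q[piv[col]];
        }
    }
    // back substitution
    for (int row = 3; row >= 0; row--)
    {
        double s = q[piv[row]];
        for (int c = row + 1; c < 4; c++) s -= A[piv[row]][c] * a[c];
        a[row] = s / A[piv[row]][row];
    }
    return true;
}

// Bilinear interpolation of the four corner values qin[0..3] located at
// (x[k], y[k]), evaluated at the image point (X, Y) -- the same form the
// device kernels use once the coefficients a0..a3 are known.
static double interpolateAtImagePoint(const double x[4], const double y[4],
                                      const double qin[4], double X, double Y)
{
    double A[4][4], q[4], a[4];
    for (int k = 0; k < 4; k++)
    {
        A[k][0] = 1.0; A[k][1] = x[k]; A[k][2] = y[k]; A[k][3] = x[k]*y[k];
        q[k] = qin[k];
    }
    if (!solve4x4(A, q, a)) return 0.0;  // degenerate corner set
    return a[0] + a[1]*X + a[2]*Y + a[3]*X*Y;
}
// ---------------------------------------------------------------------------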
71fa80035bbb13b6ac11bef8692cb8d3c1298147.cu
/***************************************************************************//** * \file .cu * \author Christopher Minar ([email protected]) * \CPU Author, Anush Krishnan ([email protected]) * \brief Implementation of the methods of the class \c DirectForcingSolver to tag * points near the immersed boundary using a ray-tracing algorithm. */ #include "tagPoints.h" namespace kernels { __global__ void interpolateVelocityToGhostNodeX(double *u, bool set, int *ghostTagsUV, double *bx, double *by, double *uB, double *yu, double *xu, double *body_intercept_x, double *body_intercept_y, double *image_point_x, double *image_point_y, int *i_start, int *j_start, int width, int nx, int ny, int *index1, int *index2, int *index3, int *index4, double *q1coef, double *q2coef, double *q3coef, double *q4coef, double *x1, double *x2, double *x3, double *x4, double *y1, double *y2, double *y3, double *y4, double *q1, double *q2, double *q3, double *q4, double *image_point_u)//testing variables {//In the luo et al method they only move corners coincident to the GN to the boundary. We are moving all corners inside to the boundary int idx = threadIdx.x + blockDim.x * blockIdx.x, i = idx % (width), j = idx / (width), I = i_start[0] + i, J = j_start[0] + j, iu = J*(nx-1) + I, ii= I-5, jj = J-5; if (iu > J*(nx-1) + I) //return if we're out of bound return; if (ghostTagsUV[iu]<=0) //return if we're not at an interpolation point return; /* * (x3,y3)__________(x4,y4) * | | * | *(ip_x,ip_y) | * | | * | | * | | * (x1,y1)__________(x2,y2) */ //find x and y of nodes that bound the image point while (xu[ii] < image_point_x[iu]) ii++; while (yu[jj] <image_point_y[iu]) jj++; double x[4] = {xu[ii-1], xu[ii], xu[ii-1], xu[ii]}; double y[4] = {yu[jj-1], yu[jj-1], yu[jj], yu[jj]}; //find index at corners and the u value at the corners int index[4] = {(jj-1)*(nx-1)+ii-1, (jj-1)*(nx-1)+ii, jj*(nx-1)+ii-1, jj*(nx-1)+ii}; double q[4] = {u[index[0]], u[index[1]], u[index[2]], u[index[3]]}; //find the closest corner to the body intercept double min = 1.0; double s; int close_index; bool inflag = false; //a boolean that is true if there is a node inside the body for (int l=0;l<4;l++) { //find the closest node to the BI s = sqrt(pow(x[l]-body_intercept_x[iu],2) + pow(y[l]-body_intercept_y[iu],2)); if (s<min) { min = s; close_index = index[l]; } //check if any of the points are inside the body if (ghostTagsUV[index[l]]>0) inflag = true; } //if point is inside of the body //or if no points are inside the body and the node is the closest to the BI // then move them to the body intercept //point 1 for (int l=0;l<4;l++) { //if ( ghostTagsUV[index[l]] > 0)//this moves every node inside to the edge if ( ghostTagsUV[index[l]] == iu ) //this moves just the GN to the edge { x[l] = body_intercept_x[index[l]]; y[l] = body_intercept_y[index[l]]; q[l] = uB[0]; } else if ( index[l]==close_index && !inflag ) //uncomment this if you want to move the closest node outside of the body to the body { x[l] = body_intercept_x[iu]; y[l] = body_intercept_y[iu]; q[l] = uB[0]; } } x1[iu] = x[0]; x2[iu] = x[1]; x3[iu] = x[2]; x4[iu] = x[3]; y1[iu] = y[0]; y2[iu] = y[1]; y3[iu] = y[2]; y4[iu] = y[3]; q1[iu] = q[0]; q2[iu] = q[1]; q3[iu] = q[2]; q4[iu] = q[3]; index1[iu] = index[0]; index2[iu] = index[1]; index3[iu] = index[2]; index4[iu] = index[3]; //solve equation for bilinear interpolation of values to image point //http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche23.html //solve for a /* A a q * |1 x1 y1 x1y1| |a0| = |q1| * |1 x2 y2 x2y2| |a1| = 
|q2| * |1 x3 y3 x3y3| |a2| = |q3| * |1 x4 y4 x4y4| |a3| = |q4| * * A * |a11 a12 a13 a14| * |a21 a22 a23 a24| * |a31 a13 a33 a34| * |a41 a14 a43 a44| */ double a12 = x1[iu], a13 = y1[iu], a14 = x1[iu]*y1[iu]; double a22 = x2[iu], a23 = y2[iu], a24 = x2[iu]*y2[iu]; double a32 = x3[iu], a33 = y3[iu], a34 = x3[iu]*y3[iu]; double a42 = x4[iu], a43 = y4[iu], a44 = x4[iu]*y4[iu]; double detA = 1*a22*a33*a44 + 1*a23*a34*a42 + 1*a24*a32*a43 +a12*1*a34*a43 + a12*a23*1*a44 + a12*a24*a33*1 +a13*1*a32*a44 + a13*a22*a34*1 + a13*a24*1*a42 +a14*1*a33*a42 + a14*a22*1*a43 + a14*a23*a32*1 -1*a22*a34*a43 - 1*a23*a32*a44 - 1*a24*a33*a42 -a12*1*a33*a44 - a12*a23*a34*1 - a12*a24*1*a43 -a13*1*a34*a42 - a13*a22*1*a44 - a13*a24*a32*1 -a14*1*a32*a43 - a14*a22*a33*1 - a14*a23*1*a42; double b11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42; double b12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43; double b13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42; double b14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33; double b21 = 1*a34*a43 + a23*1*a44 + a24*a33*1 - 1*a33*a44 - a23*a34*1 - a24*1*a43; double b22 = 1*a33*a44 + a13*a34*1 + a14*1*a43 - 1*a34*a43 - a13*1*a44 - a14*a33*1; double b23 = 1*a24*a43 + a13*1*a44 + a14*a23*1 - 1*a23*a44 - a13*a24*1 - a14*1*a43; double b24 = 1*a23*a34 + a13*a24*1 + a14*1*a33 - 1*a24*a33 - a13*1*a34 - a14*a23*1; double b31 = 1*a32*a44 + a22*a34*1 + a24*1*a42 - 1*a34*a42 - a22*1*a44 - a24*a32*1; double b32 = 1*a34*a42 + a12*1*a44 + a14*a32*1 - 1*a32*a44 - a12*a34*1 - a14*1*a42; double b33 = 1*a22*a44 + a12*a24*1 + a14*1*a42 - 1*a24*a42 - a12*1*a44 - a14*a22*1; double b34 = 1*a24*a32 + a12*1*a34 + a14*a22*1 - 1*a22*a34 - a12*a24*1 - a14*1*a32; double b41 = 1*a33*a42 + a22*1*a43 + a23*a32*1 - 1*a32*a43 - a22*a33*1 - a23*1*a42; double b42 = 1*a32*a43 + a12*a33*1 + a13*1*a42 - 1*a33*a42 - a12*1*a43 - a13*a32*1; double b43 = 1*a23*a42 + a12*1*a43 + a13*a22*1 - 1*a22*a43 - a12*a23*1 - a13*1*a42; double b44 = 1*a22*a33 + a12*a23*1 + a13*1*a32 - 1*a23*a32 - a12*1*a33 - a13*a22*1; /* B * |b11 b12 b13 b14| * |b21 b22 b23 b24| * |b31 b32 b33 b34| * |b41 b42 b43 b44| * * Ainv = B/det(A) * a = Ainv*q'; * f= @(X,Y) a(1) + a(2)*X + a(3)*Y + a(4)*X*Y; */ double a0 = b11/detA*q1[iu] + b12/detA*q2[iu] + b13/detA*q3[iu] + b14/detA*q4[iu]; double a1 = b21/detA*q1[iu] + b22/detA*q2[iu] + b23/detA*q3[iu] + b24/detA*q4[iu]; double a2 = b31/detA*q1[iu] + b32/detA*q2[iu] + b33/detA*q3[iu] + b34/detA*q4[iu]; double a3 = b41/detA*q1[iu] + b42/detA*q2[iu] + b43/detA*q3[iu] + b44/detA*q4[iu]; q1coef[iu] = (b11+b21*image_point_x[iu]+b31*image_point_y[iu]+b41*image_point_x[iu]*image_point_y[iu])/detA; q2coef[iu] = (b12+b22*image_point_x[iu]+b32*image_point_y[iu]+b42*image_point_x[iu]*image_point_y[iu])/detA; q3coef[iu] = (b13+b23*image_point_x[iu]+b33*image_point_y[iu]+b43*image_point_x[iu]*image_point_y[iu])/detA; q4coef[iu] = (b14+b24*image_point_x[iu]+b34*image_point_y[iu]+b44*image_point_x[iu]*image_point_y[iu])/detA; image_point_u[iu] = a0 + a1*image_point_x[iu] + a2*image_point_y[iu] + a3*image_point_x[iu]*image_point_y[iu]; if (set) u[iu] = 2*uB[0] - image_point_u[iu]; //u_gn = 2*u_BI - u_IP //flag doesn't currently work with a rotating body because of uB[0], need to use the actual u at the body intercept } __global__ void interpolateVelocityToGhostNodeY(double *u, bool set, int *ghostTagsUV, double *bx, double *by, double *vB, double *yv, double *xv, double 
*body_intercept_x, double *body_intercept_y, double *image_point_x, double *image_point_y, int *i_start, int *j_start, int width, int nx, int ny, int *index1, int *index2, int *index3, int *index4, double *q1coef, double *q2coef, double *q3coef, double *q4coef, double *x1, double *x2, double *x3, double *x4, double *y1, double *y2, double *y3, double *y4, double *q1, double *q2, double *q3, double *q4, double *image_point_u)//testing variables { int idx = threadIdx.x + blockDim.x * blockIdx.x, i = idx % (width), j = idx / (width), I = i_start[0] + i, J = j_start[0] + j, iv = J*nx + I + (nx-1)*ny, ii= I-5, jj = J-5; if (J*nx + I > nx*(ny-1)) //return if we're out of bound return; if (ghostTagsUV[iv]<=0) //return if we're not at an interpolation point return; /* * (x1,y1)__________(x2,y2) * | | * | *(ip_x,ip_y) | * | | * | | * | | * (x3,y3)__________(x4,y4) */ //find x and y of nodes that bound the image point while (xv[ii] < image_point_x[iv]) ii++; while (yv[jj] <image_point_y[iv]) jj++; double x[4] = {xv[ii-1], xv[ii], xv[ii-1], xv[ii]}; double y[4] = {yv[jj-1], yv[jj-1], yv[jj], yv[jj]}; //find index at corners and the u value at the corners int index[4] = {(jj-1)*nx+ii-1 + (nx-1)*ny, (jj-1)*nx+ii + (nx-1)*ny, jj*nx+ii-1 + (nx-1)*ny, jj*nx+ii + (nx-1)*ny}; double q[4] = {u[index[0]], u[index[1]], u[index[2]], u[index[3]]}; //find the closest corner to the body intercept double min = 1.0; double s; int close_index; bool inflag = false; //a boolean that is true if there is a node inside the body for (int l=0;l<4;l++) { //find the closest node to the BI s = sqrt(pow(x[l]-body_intercept_x[iv],2) + pow(y[l]-body_intercept_y[iv],2)); if (s<min) { min = s; close_index = index[l]; } //check if any of the points are inside the body if (ghostTagsUV[index[l]]>0) inflag = true; } //if point is inside of the body //or if no points are inside the body and the node is the closest to the BI // then move them to the body intercept //point 1 for (int l=0;l<4;l++) { //if ( ghostTagsUV[index[l]] > 0) if ( ghostTagsUV[index[l]] == iv ) { x[l] = body_intercept_x[index[l]]; y[l] = body_intercept_y[index[l]]; q[l] = vB[0]; } else if ( index[l]==close_index && !inflag ) //uncomment this if you want to move the closest node outside of the body to the body { x[l] = body_intercept_x[iv]; y[l] = body_intercept_y[iv]; q[l] = vB[0]; } } x1[iv] = x[0]; x2[iv] = x[1]; x3[iv] = x[2]; x4[iv] = x[3]; y1[iv] = y[0]; y2[iv] = y[1]; y3[iv] = y[2]; y4[iv] = y[3]; q1[iv] = q[0]; q2[iv] = q[1]; q3[iv] = q[2]; q4[iv] = q[3]; index1[iv] = index[0]; index2[iv] = index[1]; index3[iv] = index[2]; index4[iv] = index[3]; //solve equation for bilinear interpolation of values to image point //http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche23.html //solve for a /* A a q * |1 x1 y1 x1y1| |a0| = |q1| * |1 x2 y2 x2y2| |a1| = |q2| * |1 x3 y3 x3y3| |a2| = |q3| * |1 x4 y4 x4y4| |a3| = |q4| * * A * |a11 a12 a13 a14| * |a21 a22 a23 a24| * |a31 a13 a33 a34| * |a41 a14 a43 a44| */ double a12 = x1[iv], a13 = y1[iv], a14 = x1[iv]*y1[iv]; double a22 = x2[iv], a23 = y2[iv], a24 = x2[iv]*y2[iv]; double a32 = x3[iv], a33 = y3[iv], a34 = x3[iv]*y3[iv]; double a42 = x4[iv], a43 = y4[iv], a44 = x4[iv]*y4[iv]; double detA = 1*a22*a33*a44 + 1*a23*a34*a42 + 1*a24*a32*a43 +a12*1*a34*a43 + a12*a23*1*a44 + a12*a24*a33*1 +a13*1*a32*a44 + a13*a22*a34*1 + a13*a24*1*a42 +a14*1*a33*a42 + a14*a22*1*a43 + a14*a23*a32*1 -1*a22*a34*a43 - 1*a23*a32*a44 - 1*a24*a33*a42 -a12*1*a33*a44 - a12*a23*a34*1 - a12*a24*1*a43 -a13*1*a34*a42 - a13*a22*1*a44 - 
a13*a24*a32*1 -a14*1*a32*a43 - a14*a22*a33*1 - a14*a23*1*a42; double b11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42; double b12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43; double b13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42; double b14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33; double b21 = 1*a34*a43 + a23*1*a44 + a24*a33*1 - 1*a33*a44 - a23*a34*1 - a24*1*a43; double b22 = 1*a33*a44 + a13*a34*1 + a14*1*a43 - 1*a34*a43 - a13*1*a44 - a14*a33*1; double b23 = 1*a24*a43 + a13*1*a44 + a14*a23*1 - 1*a23*a44 - a13*a24*1 - a14*1*a43; double b24 = 1*a23*a34 + a13*a24*1 + a14*1*a33 - 1*a24*a33 - a13*1*a34 - a14*a23*1; double b31 = 1*a32*a44 + a22*a34*1 + a24*1*a42 - 1*a34*a42 - a22*1*a44 - a24*a32*1; double b32 = 1*a34*a42 + a12*1*a44 + a14*a32*1 - 1*a32*a44 - a12*a34*1 - a14*1*a42; double b33 = 1*a22*a44 + a12*a24*1 + a14*1*a42 - 1*a24*a42 - a12*1*a44 - a14*a22*1; double b34 = 1*a24*a32 + a12*1*a34 + a14*a22*1 - 1*a22*a34 - a12*a24*1 - a14*1*a32; double b41 = 1*a33*a42 + a22*1*a43 + a23*a32*1 - 1*a32*a43 - a22*a33*1 - a23*1*a42; double b42 = 1*a32*a43 + a12*a33*1 + a13*1*a42 - 1*a33*a42 - a12*1*a43 - a13*a32*1; double b43 = 1*a23*a42 + a12*1*a43 + a13*a22*1 - 1*a22*a43 - a12*a23*1 - a13*1*a42; double b44 = 1*a22*a33 + a12*a23*1 + a13*1*a32 - 1*a23*a32 - a12*1*a33 - a13*a22*1; /* B * |b11 b12 b13 b14| * |b21 b22 b23 b24| * |b31 b32 b33 b34| * |b41 b42 b43 b44| * * Ainv = B/det(A) * a = Ainv*q'; * f= @(X,Y) a(1) + a(2)*X + a(3)*Y + a(4)*X*Y; */ double a0 = b11/detA*q1[iv] + b12/detA*q2[iv] + b13/detA*q3[iv] + b14/detA*q4[iv]; double a1 = b21/detA*q1[iv] + b22/detA*q2[iv] + b23/detA*q3[iv] + b24/detA*q4[iv]; double a2 = b31/detA*q1[iv] + b32/detA*q2[iv] + b33/detA*q3[iv] + b34/detA*q4[iv]; double a3 = b41/detA*q1[iv] + b42/detA*q2[iv] + b43/detA*q3[iv] + b44/detA*q4[iv]; q1coef[iv] = (b11+b21*image_point_x[iv]+b31*image_point_y[iv]+b41*image_point_x[iv]*image_point_y[iv])/detA; q2coef[iv] = (b12+b22*image_point_x[iv]+b32*image_point_y[iv]+b42*image_point_x[iv]*image_point_y[iv])/detA; q3coef[iv] = (b13+b23*image_point_x[iv]+b33*image_point_y[iv]+b43*image_point_x[iv]*image_point_y[iv])/detA; q4coef[iv] = (b14+b24*image_point_x[iv]+b34*image_point_y[iv]+b44*image_point_x[iv]*image_point_y[iv])/detA; image_point_u[iv] = a0 + a1*image_point_x[iv] + a2*image_point_y[iv] + a3*image_point_x[iv]*image_point_y[iv]; if (set) u[iv] = 2*vB[0] - image_point_u[iv]; //u_gn = 2*u_BI - u_IP //flag doesn't currently work with a rotating body because of uB[0], need to use the actual u at the body intercept } __global__ void interpolateVelocityToHybridNodeX(double *u, double *ustar, int *hybridTagsUV, double *bx, double *by, double *uB, double *yu, double *xu, double *body_intercept_x, double *body_intercept_y, double *image_point_x, double *image_point_y, int *i_start, int *j_start, int width, int nx, int ny, int *index1, int *index2, int *index3, int *index4, double *q1coef, double *q2coef, double *q3coef, double *q4coef, double *x1, double *x2, double *x3, double *x4, double *y1, double *y2, double *y3, double *y4, double *q1, double *q2, double *q3, double *q4, double *image_point_u) { int idx = threadIdx.x + blockDim.x * blockIdx.x, i = idx % (width), j = idx / (width), I = i_start[0] + i, J = j_start[0] + j, iu = J*(nx-1) + I, ii= I-5, jj = J-5; if (iu > J*(nx-1) + I) //return if we're out of bound return; if (hybridTagsUV[iu]<=0) //return if we're 
not at an interpolation point return; /* (x3,y3)__________(x4,y4) * | | * | | * | | * | *ip | * | | * (x1,y1)__________(x2,y2) * * *(BI_x,BI_y) */ //find x and y of nodes that bound the image point while (xu[ii] < image_point_x[iu]) ii++; x1[iu] = xu[ii-1]; x2[iu] = xu[ii]; x3[iu] = x1[iu]; x4[iu] = x2[iu]; while (yu[jj] <image_point_y[iu]) jj++; y1[iu] = yu[jj-1]; y2[iu] = y1[iu]; y3[iu] = yu[jj]; y4[iu] = y3[iu]; //find q1,q2,q3,q4 q1[iu] = u[(jj-1)*(nx-1)+ii-1]; q2[iu] = u[(jj-1)*(nx-1)+ii]; q3[iu] = u[jj*(nx-1)+ii-1]; q4[iu] = u[jj*(nx-1)+ii]; index1[iu] = (jj-1)*(nx-1)+ii-1; index2[iu] = (jj-1)*(nx-1)+ii; index3[iu] = jj*(nx-1)+ii-1; index4[iu] = jj*(nx-1)+ii; //check if any points are inside of the body, then move them to the body intercept //point 1 if (hybridTagsUV[(jj-1)*(nx-1)+ii-1] == iu) { x1[iu] = body_intercept_x[iu]; y1[iu] = body_intercept_y[iu]; q1[iu] = uB[0]; } if (hybridTagsUV[(jj-1)*(nx-1)+ii] == iu) { x2[iu] = body_intercept_x[iu]; y2[iu] = body_intercept_y[iu]; q2[iu] = uB[0]; } if (hybridTagsUV[jj*(nx-1)+ii-1] == iu) { x3[iu] = body_intercept_x[iu]; y3[iu] = body_intercept_y[iu]; q3[iu] = uB[0]; } if (hybridTagsUV[jj*(nx-1)+ii] == iu) { x4[iu] = body_intercept_x[iu]; y4[iu] = body_intercept_y[iu]; q4[iu] = uB[0]; } //solve equation for bilinear interpolation of values to image point //http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche23.html //solve for a /* A a q * |1 x1 y1 x1y1| |a0| = |q1| * |1 x2 y2 x2y2| |a1| = |q2| * |1 x3 y3 x3y3| |a2| = |q3| * |1 x4 y4 x4y4| |a3| = |q4| * * A * |a11 a12 a13 a14| * |a21 a22 a23 a24| * |a31 a13 a33 a34| * |a41 a14 a43 a44| */ double a12 = x1[iu], a13 = y1[iu], a14 = x1[iu]*y1[iu]; double a22 = x2[iu], a23 = y2[iu], a24 = x2[iu]*y2[iu]; double a32 = x3[iu], a33 = y3[iu], a34 = x3[iu]*y3[iu]; double a42 = x4[iu], a43 = y4[iu], a44 = x4[iu]*y4[iu]; double detA = 1*a22*a33*a44 + 1*a23*a34*a42 + 1*a24*a32*a43 +a12*1*a34*a43 + a12*a23*1*a44 + a12*a24*a33*1 +a13*1*a32*a44 + a13*a22*a34*1 + a13*a24*1*a42 +a14*1*a33*a42 + a14*a22*1*a43 + a14*a23*a32*1 -1*a22*a34*a43 - 1*a23*a32*a44 - 1*a24*a33*a42 -a12*1*a33*a44 - a12*a23*a34*1 - a12*a24*1*a43 -a13*1*a34*a42 - a13*a22*1*a44 - a13*a24*a32*1 -a14*1*a32*a43 - a14*a22*a33*1 - a14*a23*1*a42; double b11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42; double b12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43; double b13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42; double b14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33; double b21 = 1*a34*a43 + a23*1*a44 + a24*a33*1 - 1*a33*a44 - a23*a34*1 - a24*1*a43; double b22 = 1*a33*a44 + a13*a34*1 + a14*1*a43 - 1*a34*a43 - a13*1*a44 - a14*a33*1; double b23 = 1*a24*a43 + a13*1*a44 + a14*a23*1 - 1*a23*a44 - a13*a24*1 - a14*1*a43; double b24 = 1*a23*a34 + a13*a24*1 + a14*1*a33 - 1*a24*a33 - a13*1*a34 - a14*a23*1; double b31 = 1*a32*a44 + a22*a34*1 + a24*1*a42 - 1*a34*a42 - a22*1*a44 - a24*a32*1; double b32 = 1*a34*a42 + a12*1*a44 + a14*a32*1 - 1*a32*a44 - a12*a34*1 - a14*1*a42; double b33 = 1*a22*a44 + a12*a24*1 + a14*1*a42 - 1*a24*a42 - a12*1*a44 - a14*a22*1; double b34 = 1*a24*a32 + a12*1*a34 + a14*a22*1 - 1*a22*a34 - a12*a24*1 - a14*1*a32; double b41 = 1*a33*a42 + a22*1*a43 + a23*a32*1 - 1*a32*a43 - a22*a33*1 - a23*1*a42; double b42 = 1*a32*a43 + a12*a33*1 + a13*1*a42 - 1*a33*a42 - a12*1*a43 - a13*a32*1; double b43 = 1*a23*a42 + a12*1*a43 + a13*a22*1 - 1*a22*a43 - a12*a23*1 - a13*1*a42; 
double b44 = 1*a22*a33 + a12*a23*1 + a13*1*a32 - 1*a23*a32 - a12*1*a33 - a13*a22*1; /* B * |b11 b12 b13 b14| * |b21 b22 b23 b24| * |b31 b32 b33 b34| * |b41 b42 b43 b44| * * Ainv = B/det(A) * a = Ainv*q'; * f= @(X,Y) a(1) + a(2)*X + a(3)*Y + a(4)*X*Y; */ double a0 = (b11*q1[iu] + b12*q2[iu] + b13*q3[iu] + b14*q4[iu])/detA; double a1 = (b21*q1[iu] + b22*q2[iu] + b23*q3[iu] + b24*q4[iu])/detA; double a2 = (b31*q1[iu] + b32*q2[iu] + b33*q3[iu] + b34*q4[iu])/detA; double a3 = (b41*q1[iu] + b42*q2[iu] + b43*q3[iu] + b44*q4[iu])/detA; q1coef[iu] = (b11+b21*xu[I]+b31*yu[J]+b41*xu[I]*yu[J])/detA; q2coef[iu] = (b12+b22*xu[I]+b32*yu[J]+b42*xu[I]*yu[J])/detA; q3coef[iu] = (b13+b23*xu[I]+b33*yu[J]+b43*xu[I]*yu[J])/detA; q4coef[iu] = (b14+b24*xu[I]+b34*yu[J]+b44*xu[I]*yu[J])/detA; ustar[iu] = a0 + a1*xu[I] + a2*yu[J] + a3*yu[J]*xu[I]; //u[iu] = a0 + a1*xu[I] + a2*yu[J] + a3*yu[J]*xu[I]; image_point_u[iu] = a0 + a1*image_point_x[iu] + a2*image_point_y[iu] + a3*image_point_x[iu]*image_point_y[iu]; } __global__ void interpolateVelocityToHybridNodeY(double *u, double *ustar, int *hybridTagsUV, double *bx, double *by, double *vB, double *yv, double *xv, double *body_intercept_x, double *body_intercept_y, double *image_point_x, double *image_point_y, int *i_start, int *j_start, int width, int nx, int ny, int *index1, int *index2, int *index3, int *index4, double *q1coef, double *q2coef, double *q3coef, double *q4coef, double *x1, double *x2, double *x3, double *x4, double *y1, double *y2, double *y3, double *y4, double *q1, double *q2, double *q3, double *q4, double *image_point_u) { int idx = threadIdx.x + blockDim.x * blockIdx.x, i = idx % (width), j = idx / (width), I = i_start[0] + i, J = j_start[0] + j, iv = J*nx + I + (nx-1)*ny, ii= I-5, jj = J-5; if (J*nx + I > nx*(ny-1)) //return if we're out of bound return; if (hybridTagsUV[iv]<=0) //return if we're not at an interpolation point return; /* * (x3,y3)__________(x4,y4) * | | * | | * | | * | *ip | * | | * (x1,y1)__________(x2,y2) * * *(BI_x,BI_y) * * */ //find x and y of nodes that bound the image point while (xv[ii] < image_point_x[iv]) ii++; x1[iv] = xv[ii-1]; x2[iv] = xv[ii]; x3[iv] = x1[iv]; x4[iv] = x2[iv]; while (yv[jj] <image_point_y[iv]) jj++; y1[iv] = yv[jj-1]; y2[iv] = y1[iv]; y3[iv] = yv[jj]; y4[iv] = y3[iv]; //find q1,q2,q3,q4 q1[iv] = u[(jj-1)*nx+ii-1 + (nx-1)*ny]; q2[iv] = u[(jj-1)*nx+ii + (nx-1)*ny]; q3[iv] = u[jj*nx+ii-1 + (nx-1)*ny]; q4[iv] = u[jj*nx+ii + (nx-1)*ny]; index1[iv] = (jj-1)*nx+ii-1 + (nx-1)*ny; index2[iv] = (jj-1)*nx+ii + (nx-1)*ny; index3[iv] = jj*nx+ii-1 + (nx-1)*ny; index4[iv] = jj*nx+ii + (nx-1)*ny; //check if any points are inside of the body, then move them to the body intercept //point 1 if (hybridTagsUV[(jj-1)*nx+ii-1 + (nx-1)*ny] == iv) { x1[iv] = body_intercept_x[iv]; y1[iv] = body_intercept_y[iv]; q1[iv] = vB[0]; } if (hybridTagsUV[(jj-1)*nx+ii + (nx-1)*ny] == iv) { x2[iv] = body_intercept_x[iv]; y2[iv] = body_intercept_y[iv]; q2[iv] = vB[0]; } if (hybridTagsUV[jj*nx+ii-1 + (nx-1)*ny] == iv) { x3[iv] = body_intercept_x[iv]; y3[iv] = body_intercept_y[iv]; q3[iv] = vB[0]; } if (hybridTagsUV[jj*nx+ii + (nx-1)*ny] == iv) { x4[iv] = body_intercept_x[iv]; y4[iv] = body_intercept_y[iv]; q4[iv] = vB[0]; } //solve equation for bilinear interpolation of values to image point //http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche23.html //solve for a /* A a q * |1 x1 y1 x1y1| |a0| = |q1| * |1 x2 y2 x2y2| |a1| = |q2| * |1 x3 y3 x3y3| |a2| = |q3| * |1 x4 y4 x4y4| |a3| = |q4| * * A * |a11 a12 a13 a14| * |a21 a22 a23 
a24| * |a31 a13 a33 a34| * |a41 a14 a43 a44| */ double a12 = x1[iv], a13 = y1[iv], a14 = x1[iv]*y1[iv]; double a22 = x2[iv], a23 = y2[iv], a24 = x2[iv]*y2[iv]; double a32 = x3[iv], a33 = y3[iv], a34 = x3[iv]*y3[iv]; double a42 = x4[iv], a43 = y4[iv], a44 = x4[iv]*y4[iv]; double detA = 1*a22*a33*a44 + 1*a23*a34*a42 + 1*a24*a32*a43 +a12*1*a34*a43 + a12*a23*1*a44 + a12*a24*a33*1 +a13*1*a32*a44 + a13*a22*a34*1 + a13*a24*1*a42 +a14*1*a33*a42 + a14*a22*1*a43 + a14*a23*a32*1 -1*a22*a34*a43 - 1*a23*a32*a44 - 1*a24*a33*a42 -a12*1*a33*a44 - a12*a23*a34*1 - a12*a24*1*a43 -a13*1*a34*a42 - a13*a22*1*a44 - a13*a24*a32*1 -a14*1*a32*a43 - a14*a22*a33*1 - a14*a23*1*a42; double b11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42; double b12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43; double b13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42; double b14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33; double b21 = 1*a34*a43 + a23*1*a44 + a24*a33*1 - 1*a33*a44 - a23*a34*1 - a24*1*a43; double b22 = 1*a33*a44 + a13*a34*1 + a14*1*a43 - 1*a34*a43 - a13*1*a44 - a14*a33*1; double b23 = 1*a24*a43 + a13*1*a44 + a14*a23*1 - 1*a23*a44 - a13*a24*1 - a14*1*a43; double b24 = 1*a23*a34 + a13*a24*1 + a14*1*a33 - 1*a24*a33 - a13*1*a34 - a14*a23*1; double b31 = 1*a32*a44 + a22*a34*1 + a24*1*a42 - 1*a34*a42 - a22*1*a44 - a24*a32*1; double b32 = 1*a34*a42 + a12*1*a44 + a14*a32*1 - 1*a32*a44 - a12*a34*1 - a14*1*a42; double b33 = 1*a22*a44 + a12*a24*1 + a14*1*a42 - 1*a24*a42 - a12*1*a44 - a14*a22*1; double b34 = 1*a24*a32 + a12*1*a34 + a14*a22*1 - 1*a22*a34 - a12*a24*1 - a14*1*a32; double b41 = 1*a33*a42 + a22*1*a43 + a23*a32*1 - 1*a32*a43 - a22*a33*1 - a23*1*a42; double b42 = 1*a32*a43 + a12*a33*1 + a13*1*a42 - 1*a33*a42 - a12*1*a43 - a13*a32*1; double b43 = 1*a23*a42 + a12*1*a43 + a13*a22*1 - 1*a22*a43 - a12*a23*1 - a13*1*a42; double b44 = 1*a22*a33 + a12*a23*1 + a13*1*a32 - 1*a23*a32 - a12*1*a33 - a13*a22*1; /* B * |b11 b12 b13 b14| * |b21 b22 b23 b24| * |b31 b32 b33 b34| * |b41 b42 b43 b44| * * Ainv = B/det(A) * a = Ainv*q'; * f= @(X,Y) a(1) + a(2)*X + a(3)*Y + a(4)*X*Y; */ double a0 = (b11*q1[iv] + b12*q2[iv] + b13*q3[iv] + b14*q4[iv])/detA; double a1 = (b21*q1[iv] + b22*q2[iv] + b23*q3[iv] + b24*q4[iv])/detA; double a2 = (b31*q1[iv] + b32*q2[iv] + b33*q3[iv] + b34*q4[iv])/detA; double a3 = (b41*q1[iv] + b42*q2[iv] + b43*q3[iv] + b44*q4[iv])/detA; q1coef[iv] = (b11+b21*xv[I]+b31*yv[J]+b41*xv[I]*yv[J])/detA; q2coef[iv] = (b12+b22*xv[I]+b32*yv[J]+b42*xv[I]*yv[J])/detA; q3coef[iv] = (b13+b23*xv[I]+b33*yv[J]+b43*xv[I]*yv[J])/detA; q4coef[iv] = (b14+b24*xv[I]+b34*yv[J]+b44*xv[I]*yv[J])/detA; ustar[iv] = a0 + a1*xv[I] + a2*yv[J] + a3*yv[J]*xv[I]; //u[iv] = a0 + a1*xv[I] + a2*yv[J] + a3*yv[J]*xv[I]; image_point_u[iv] = a0 + a1*image_point_x[iv] + a2*image_point_y[iv] + a3*image_point_x[iv]*image_point_y[iv]; } __global__ void interpolatePressureToHybridNode(double *pressure, double *pressureStar, double *u, int *hybridTagsP, double *bx, double *by, double *uB, double *uB0, double *vB, double *vB0, double *yu, double *yv, double *xu, double *xv, //yv xu not used? 
double *body_intercept_p_x, double *body_intercept_p_y, double *image_point_p_x, double *image_point_p_y, int *i_start, int *j_start, int width, int nx, int ny, double dt, int *index1, int *index2, int *index3, int *index4, double *q1coef, double *q2coef, double *q3coef, double *q4coef, double *a0, double *a1, double *a2, double *a3, double *x1, double *x2, double *x3, double *x4, double *y1, double *y2, double *y3, double *y4, double *q1, double *q2, double *q3, double *q4) { int idx = threadIdx.x + blockDim.x * blockIdx.x, i = idx % (width), j = idx / (width), I = i_start[0] + i, J = j_start[0] + j, ip = J*nx + I, ii= I-5, jj = J-5; if (ip > J*nx + I) //return if we're out of bound return; if (hybridTagsP[ip]<=0) //return if we're not at an interpolation point return; double n_x, n_y, nl; /* * (x3,y3)__________(x4,y4) * | | * | | * | | * | *ip | * | | * (x1,y1)__________(x2,y2) * * *(BI_x,BI_y) * * */ //find x and y of nodes that bound the image point double x[4]; double y[4]; double q[4]; int index[4]; while (xv[ii] < image_point_p_x[ip]) ii++; x[0] = xv[ii-1]; x[1] = xv[ii]; x[2] = xv[ii-1]; x[3] = xv[ii]; while (yu[jj] <image_point_p_y[ip]) jj++; y[0] = yu[jj-1]; y[1] = yu[jj-1]; y[2] = yu[jj]; y[3] = yu[jj]; //find q1,q2,q3,q4 index[0] = (jj-1)*nx+ii-1; index[1] = (jj-1)*nx+ii; index[2] = jj*nx+ii-1; index[3] = jj*nx+ii; for (int l=0; l<4; l++) q[l] = pressure[index[l]]; double a[16] = {1, x[0], y[0], x[0]*y[0], 1, x[1], y[1], x[1]*y[1], 1, x[2], y[2], x[2]*y[2], 1, x[3], y[3], x[3]*y[3]}; //setup for neuman BC double dudt; double dvdt; for (int l = 0; l<4; l++) { //move the closes node to the body to the surface then calculate the neuman boundary condition for it if ( hybridTagsP[index[l]] == ip ) { dudt = (uB[0] - uB0[0])/dt; dvdt = (vB[0] - vB0[0])/dt; x[l] = body_intercept_p_x[ip]; y[l] = body_intercept_p_y[ip]; n_x = image_point_p_x[ip] - x[l]; n_y = image_point_p_y[ip] - y[l]; nl = sqrt(n_x*n_x + n_y*n_y); q[l] = - ( n_x / nl * dudt + n_y/nl * dvdt); a[l*4] = 0; a[l*4+1] = n_x/nl; a[l*4+2] = n_y/nl; a[l*4+3] = n_y/nl*x[l] + n_x/nl*y[l]; } } x1[ip] = x[0]; x2[ip] = x[1]; x3[ip] = x[2]; x4[ip] = x[3]; y1[ip] = y[0]; y2[ip] = y[1]; y3[ip] = y[2]; y4[ip] = y[3]; q1[ip] = q[0]; q2[ip] = q[1]; q3[ip] = q[2]; q4[ip] = q[3]; index1[ip] = index[0]; index2[ip] = index[1]; index3[ip] = index[2]; index4[ip] = index[3]; double a11 = a[0], a12 = a[1], a13 = a[2], a14 = a[3]; double a21 = a[4], a22 = a[5], a23 = a[6], a24 = a[7]; double a31 = a[8], a32 = a[9], a33 = a[10], a34 = a[11]; double a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15]; //solve equation for bilinear interpolation of values to image point //http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche23.html //for solving a 4x4 matrix exactly //https://www.physicsforums.com/threads/is-normal-derivative-a-definition.706458/ //for dealing with the normal at the boundary //solve for a /* A a q * |1 x1 y1 x1y1| |a0| = |q1| * |1 x2 y2 x2y2| |a1| = |q2| * |1 x3 y3 x3y3| |a2| = |q3| * |1 x4 y4 x4y4| |a3| = |q4| * * A * |a11 a12 a13 a14| * |a21 a22 a23 a24| * |a31 a13 a33 a34| * |a41 a14 a43 a44| */ double detA = a11*a22*a33*a44 + a11*a23*a34*a42 + a11*a24*a32*a43 +a12*a21*a34*a43 + a12*a23*a31*a44 + a12*a24*a33*a41 +a13*a21*a32*a44 + a13*a22*a34*a41 + a13*a24*a31*a42 +a14*a21*a33*a42 + a14*a22*a31*a43 + a14*a23*a32*a41 -a11*a22*a34*a43 - a11*a23*a32*a44 - a11*a24*a33*a42 -a12*a21*a33*a44 - a12*a23*a34*a41 - a12*a24*a31*a43 -a13*a21*a34*a42 - a13*a22*a31*a44 - a13*a24*a32*a41 -a14*a21*a32*a43 - a14*a22*a33*a41 - 
a14*a23*a31*a42; double b11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42; double b12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43; double b13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42; double b14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33; double b21 = a21*a34*a43 + a23*a31*a44 + a24*a33*a41 - a21*a33*a44 - a23*a34*a41 - a24*a31*a43; double b22 = a11*a33*a44 + a13*a34*a41 + a14*a31*a43 - a11*a34*a43 - a13*a31*a44 - a14*a33*a41; double b23 = a11*a24*a43 + a13*a21*a44 + a14*a23*a41 - a11*a23*a44 - a13*a24*a41 - a14*a21*a43; double b24 = a11*a23*a34 + a13*a24*a31 + a14*a21*a33 - a11*a24*a33 - a13*a21*a34 - a14*a23*a31; double b31 = a21*a32*a44 + a22*a34*a41 + a24*a31*a42 - a21*a34*a42 - a22*a31*a44 - a24*a32*a41; double b32 = a11*a34*a42 + a12*a31*a44 + a14*a32*a41 - a11*a32*a44 - a12*a34*a41 - a14*a31*a42; double b33 = a11*a22*a44 + a12*a24*a41 + a14*a21*a42 - a11*a24*a42 - a12*a21*a44 - a14*a22*a41; double b34 = a11*a24*a32 + a12*a21*a34 + a14*a22*a31 - a11*a22*a34 - a12*a24*a31 - a14*a21*a32; double b41 = a21*a33*a42 + a22*a31*a43 + a23*a32*a41 - a21*a32*a43 - a22*a33*a41 - a23*a31*a42; double b42 = a11*a32*a43 + a12*a33*a41 + a13*a31*a42 - a11*a33*a42 - a12*a31*a43 - a13*a32*a41; double b43 = a11*a23*a42 + a12*a21*a43 + a13*a22*a41 - a11*a22*a43 - a12*a23*a41 - a13*a21*a42; double b44 = a11*a22*a33 + a12*a23*a31 + a13*a21*a32 - a11*a23*a32 - a12*a21*a33 - a13*a22*a31; /* B * |b11 b12 b13 b14| * |b21 b22 b23 b24| * |b31 b32 b33 b34| * |b41 b42 b43 b44| * * Ainv = B/det(A) * a = Ainv*q'; * f= @(X,Y) a(1) + a(2)*X + a(3)*Y + a(4)*X*Y; */ a0[ip] = b11/detA*q1[ip] + b12/detA*q2[ip] + b13/detA*q3[ip] + b14/detA*q4[ip]; a1[ip] = b21/detA*q1[ip] + b22/detA*q2[ip] + b23/detA*q3[ip] + b24/detA*q4[ip]; a2[ip] = b31/detA*q1[ip] + b32/detA*q2[ip] + b33/detA*q3[ip] + b34/detA*q4[ip]; a3[ip] = b41/detA*q1[ip] + b42/detA*q2[ip] + b43/detA*q3[ip] + b44/detA*q4[ip]; q1coef[ip] = (b11 + b21*xv[I] + b31*yu[J] + b41*xv[I]*yu[J])/detA; q2coef[ip] = (b12 + b22*xv[I] + b32*yu[J] + b42*xv[I]*yu[J])/detA; q3coef[ip] = (b13 + b23*xv[I] + b33*yu[J] + b43*xv[I]*yu[J])/detA; q4coef[ip] = (b14 + b24*xv[I] + b34*yu[J] + b44*xv[I]*yu[J])/detA; pressureStar[ip] = a0[ip] + a1[ip]*xv[I] + a2[ip]*yu[J] + a3[ip]*xv[I]*yu[J]; } __global__ void interpolatePressureToGhostNode(double *pressure, bool set, double *u, int *ghostTagsP, double *bx, double *by, double *dpdn, double *uB, double *uB0, double *vB, double *vB0, double *yu, double *yv, double *xu, double *xv, double *body_intercept_p_x, double *body_intercept_p_y, double *image_point_p_x, double *image_point_p_y, double *body_intercept_p, int *i_start, int *j_start, int width, int nx, int ny, double dt, int *index1, int *index2, int *index3, int *index4, double *q1coef, double *q2coef, double *q3coef, double *q4coef, double *a0, double *a1, double *a2, double *a3, double *x1, double *x2, double *x3, double *x4, double *y1, double *y2, double *y3, double *y4, double *q1, double *q2, double *q3, double *q4)//test { int idx = threadIdx.x + blockDim.x * blockIdx.x, i = idx % (width), j = idx / (width), I = i_start[0] + i, J = j_start[0] + j, ip = J*nx + I, ii= I-5, jj = J-5; if (ip > J*nx + I) //return if we're out of bound return; if (ghostTagsP[ip]<=0) //return if we're not at an interpolation point return; double n_x, n_y, nl, matDClose; int close_index; /* * (x3,y3)__________(x4,y4) * | | * | | * | | * | *ip | * | | 
* (x1,y1)__________(x2,y2) * * *(BI_x,BI_y) */ //find x and y of nodes that bound the image point while (xv[ii] < image_point_p_x[ip]) ii++; while (yu[jj] < image_point_p_y[ip]) jj++; double x[4] = {xv[ii-1], xv[ii], xv[ii-1], xv[ii]}; double y[4] = {yu[jj-1], yu[jj-1], yu[jj], yu[jj]}; //find index at corners and the u value at the corners int index[4] = {(jj-1)*nx+ii-1, (jj-1)*nx+ii, jj*nx+ii-1, jj*nx+ii}; double q[4] = {pressure[index[0]], pressure[index[1]], pressure[index[2]], pressure[index[3]]}; double a[16] = {1, x[0], y[0], x[0]*y[0], 1, x[1], y[1], x[1]*y[1], 1, x[2], y[2], x[2]*y[2], 1, x[3], y[3], x[3]*y[3]}; //find the closest corner to the body intercept double min = 1.0; double s; for (int l=0;l<4;l++) { //find the closest node to the BI s = sqrt(pow(x[l]-body_intercept_p_x[ip],2) + pow(y[l]-body_intercept_p_y[ip],2)); if (s<min) { min = s; close_index = index[l]; } } //setup for neuman BC double dudt; double dvdt; for (int l=0; l<4; l++) { //set nodes inside the body to neuman bc if ( ghostTagsP[index[l]] > 0 ) { dudt = (uB[0] - uB0[0])/dt; dvdt = (vB[0] - vB0[0])/dt; x[l] = body_intercept_p_x[index[l]]; y[l] = body_intercept_p_y[index[l]]; n_x = image_point_p_x[index[l]] - x[l]; n_y = image_point_p_y[index[l]] - y[l]; nl = sqrt(n_x*n_x + n_y*n_y); q[l] = - ( n_x / nl * dudt + n_y/nl * dvdt); a[l*4] = 0; a[l*4+1] = n_x/nl; a[l*4+2] = n_y/nl; a[l*4+3] = n_y/nl*x[l] + n_x/nl*y[l]; } //if the node is the closest to the body, set the closeMatD if (index[l] == close_index) { dudt = (uB[0] - uB0[0])/dt; dvdt = (vB[0] - vB0[0])/dt; x[l] = body_intercept_p_x[ip]; y[l] = body_intercept_p_y[ip]; n_x = image_point_p_x[ip] - x[l]; n_y = image_point_p_y[ip] - y[l]; nl = sqrt(n_x*n_x + n_y*n_y); matDClose = ( n_x / nl * dudt + n_y/nl * dvdt); } } x1[ip] = x[0]; x2[ip] = x[1]; x3[ip] = x[2]; x4[ip] = x[3]; y1[ip] = y[0]; y2[ip] = y[1]; y3[ip] = y[2]; y4[ip] = y[3]; q1[ip] = q[0]; q2[ip] = q[1]; q3[ip] = q[2]; q4[ip] = q[3]; index1[ip] = index[0]; index2[ip] = index[1]; index3[ip] = index[2]; index4[ip] = index[3]; double a11 = a[0], a12 = a[1], a13 = a[2], a14 = a[3]; double a21 = a[4], a22 = a[5], a23 = a[6], a24 = a[7]; double a31 = a[8], a32 = a[9], a33 = a[10], a34 = a[11]; double a41 = a[12], a42 = a[13], a43 = a[14], a44 = a[15]; //solve equation for bilinear interpolation of values to image point //http://www.cg.info.hiroshima-cu.ac.jp/~miyazaki/knowledge/teche23.html //for solving a 4x4 matrix exactly //https://www.physicsforums.com/threads/is-normal-derivative-a-definition.706458/ //for dealing with the normal at the boundary (how to calculate a normal deriviative) //df/dn = grad(f) dot n //f = a0 + a1x + a2y + a3xy //df/dn = ((a1+a3y)i + (a2+a3x)j) dot ((n_x/nl) i+ (n_y/nl)j) //where n_x, n_y and nl are the normal vector lengths in the x, y and magnitude respectively //solve for a /* A a q * |1 x1 y1 x1y1| |a0| = |q1| * |1 x2 y2 x2y2| |a1| = |q2| * |1 x3 y3 x3y3| |a2| = |q3| * |1 x4 y4 x4y4| |a3| = |q4| * * |0 n_x/nl n_y/nl (n_y*x+n_x*y)/nl| | | = |q | replace one row with this depending on which node is the closes to the body intercept<- * A * |a11 a12 a13 a14| * |a21 a22 a23 a24| * |a31 a13 a33 a34| * |a41 a14 a43 a44| */ double detA = a11*a22*a33*a44 + a11*a23*a34*a42 + a11*a24*a32*a43 +a12*a21*a34*a43 + a12*a23*a31*a44 + a12*a24*a33*a41 +a13*a21*a32*a44 + a13*a22*a34*a41 + a13*a24*a31*a42 +a14*a21*a33*a42 + a14*a22*a31*a43 + a14*a23*a32*a41 -a11*a22*a34*a43 - a11*a23*a32*a44 - a11*a24*a33*a42 -a12*a21*a33*a44 - a12*a23*a34*a41 - a12*a24*a31*a43 -a13*a21*a34*a42 - 
a13*a22*a31*a44 - a13*a24*a32*a41 -a14*a21*a32*a43 - a14*a22*a33*a41 - a14*a23*a31*a42; /* B * |b11 b12 b13 b14| * |b21 b22 b23 b24| * |b31 b32 b33 b34| * |b41 b42 b43 b44| */ double b11 = a22*a33*a44 + a23*a34*a42 + a24*a32*a43 - a22*a34*a43 - a23*a32*a44 - a24*a33*a42; double b12 = a12*a34*a43 + a13*a32*a44 + a14*a33*a42 - a12*a33*a44 - a13*a34*a42 - a14*a32*a43; double b13 = a12*a23*a44 + a13*a24*a42 + a14*a22*a43 - a12*a24*a43 - a13*a22*a44 - a14*a23*a42; double b14 = a12*a24*a33 + a13*a22*a34 + a14*a23*a32 - a12*a23*a34 - a13*a24*a32 - a14*a22*a33; double b21 = a21*a34*a43 + a23*a31*a44 + a24*a33*a41 - a21*a33*a44 - a23*a34*a41 - a24*a31*a43; double b22 = a11*a33*a44 + a13*a34*a41 + a14*a31*a43 - a11*a34*a43 - a13*a31*a44 - a14*a33*a41; double b23 = a11*a24*a43 + a13*a21*a44 + a14*a23*a41 - a11*a23*a44 - a13*a24*a41 - a14*a21*a43; double b24 = a11*a23*a34 + a13*a24*a31 + a14*a21*a33 - a11*a24*a33 - a13*a21*a34 - a14*a23*a31; double b31 = a21*a32*a44 + a22*a34*a41 + a24*a31*a42 - a21*a34*a42 - a22*a31*a44 - a24*a32*a41; double b32 = a11*a34*a42 + a12*a31*a44 + a14*a32*a41 - a11*a32*a44 - a12*a34*a41 - a14*a31*a42; double b33 = a11*a22*a44 + a12*a24*a41 + a14*a21*a42 - a11*a24*a42 - a12*a21*a44 - a14*a22*a41; double b34 = a11*a24*a32 + a12*a21*a34 + a14*a22*a31 - a11*a22*a34 - a12*a24*a31 - a14*a21*a32; double b41 = a21*a33*a42 + a22*a31*a43 + a23*a32*a41 - a21*a32*a43 - a22*a33*a41 - a23*a31*a42; double b42 = a11*a32*a43 + a12*a33*a41 + a13*a31*a42 - a11*a33*a42 - a12*a31*a43 - a13*a32*a41; double b43 = a11*a23*a42 + a12*a21*a43 + a13*a22*a41 - a11*a22*a43 - a12*a23*a41 - a13*a21*a42; double b44 = a11*a22*a33 + a12*a23*a31 + a13*a21*a32 - a11*a23*a32 - a12*a21*a33 - a13*a22*a31; /* Solve A*a = q for a * Ainv = B/det(A) * a = Ainv*q'; * interpolate for a value using the newly formed function * p= @(X,Y) a(1) + a(2)*X + a(3)*Y + a(4)*X*Y; */ a0[ip] = b11/detA*q[0] + b12/detA*q[1] + b13/detA*q[2] + b14/detA*q[3]; a1[ip] = b21/detA*q[0] + b22/detA*q[1] + b23/detA*q[2] + b24/detA*q[3]; a2[ip] = b31/detA*q[0] + b32/detA*q[1] + b33/detA*q[2] + b34/detA*q[3]; a3[ip] = b41/detA*q[0] + b42/detA*q[1] + b43/detA*q[2] + b44/detA*q[3]; q1coef[ip] = (b11 + b21*image_point_p_x[ip] + b31*image_point_p_y[ip] + b41*image_point_p_x[ip]*image_point_p_y[ip])/detA; q2coef[ip] = (b12 + b22*image_point_p_x[ip] + b32*image_point_p_y[ip] + b42*image_point_p_x[ip]*image_point_p_y[ip])/detA; q3coef[ip] = (b13 + b23*image_point_p_x[ip] + b33*image_point_p_y[ip] + b43*image_point_p_x[ip]*image_point_p_y[ip])/detA; q4coef[ip] = (b14 + b24*image_point_p_x[ip] + b34*image_point_p_y[ip] + b44*image_point_p_x[ip]*image_point_p_y[ip])/detA; //pressure at the image point double image_point_pressure = a0[ip] + a1[ip]*image_point_p_x[ip] + a2[ip]*image_point_p_y[ip] + a3[ip] * image_point_p_y[ip] *image_point_p_x[ip]; body_intercept_p[ip] = a0[ip] + a1[ip]*body_intercept_p_x[ip] + a2[ip]*body_intercept_p_y[ip] + a3[ip] * body_intercept_p_x[ip]*body_intercept_p_y[ip]; //used for force calc dpdn[ip] = sqrt(pow(image_point_p_x[ip]-xv[I],2)+pow(image_point_p_y[ip]-yu[J],2))*matDClose; //extrapolate pressure to the ghost node if (set) pressure[ip] = image_point_pressure + dpdn[ip]; } }
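// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original source file above): how the
// Neumann boundary row used by interpolatePressureToHybridNode and
// interpolatePressureToGhostNode is formed.  With
//     p(x,y) = a0 + a1*x + a2*y + a3*x*y,
//     dp/dn  = grad(p) . n/|n| = ( (a1 + a3*y)*n_x + (a2 + a3*x)*n_y ) / nl,
// so replacing a corner's row of A with
//     [ 0,  n_x/nl,  n_y/nl,  (n_x*y + n_y*x)/nl ]
// and the matching entry of q with  -(n_x*du_B/dt + n_y*dv_B/dt)/nl
// enforces dp/dn = -n . d(u_B)/dt at the body intercept, exactly as the
// comments in those kernels describe.  NeumannRow and makeNeumannRow are
// hypothetical names introduced only for this example.
#include <cmath>

struct NeumannRow { double row[4]; double rhs; };

static NeumannRow makeNeumannRow(double x_bi, double y_bi,   // body intercept
                                 double x_ip, double y_ip,   // image point
                                 double dudt, double dvdt)   // body acceleration
{
    double n_x = x_ip - x_bi,
           n_y = y_ip - y_bi,
           nl  = std::sqrt(n_x*n_x + n_y*n_y);
    NeumannRow r;
    r.row[0] = 0.0;                           // a0 drops out of the derivative
    r.row[1] = n_x/nl;                        // multiplies a1
    r.row[2] = n_y/nl;                        // multiplies a2
    r.row[3] = (n_x*y_bi + n_y*x_bi)/nl;      // multiplies a3 (coefficient of x*y)
    r.rhs    = -(n_x/nl*dudt + n_y/nl*dvdt);  // dp/dn from the body acceleration
    return r;
}
// ---------------------------------------------------------------------------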
b43cc7a53c92123e1278b78fb8ee0b49a55905b1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include "../include/rbm_dp_matrix.h" #include "../include/rbm_baseline.h" #include "../include/utils.h" #include "../include/cuda_utils.h" #include "../include/write_weights_kernel.h" #include "rocblas.h" #include "../include/constants.h" #include <stdio.h> #include <climits> //Needed for INT_MAX #include <hiprand/hiprand_kernel.h> #include <vector> using namespace std; using namespace utils; //with weight matrix padding and matrix transpose, things are complicated //pitch and pitch2 are the pitches of dev_W and dev_W2. //dev_W_cublas is the non-pitched memory for cublas to use. //write_matrix_transpose_pitch is the transpose writer kernel for pitch matrices. // #define DEBUG #define DPMM_Y_BLOCK_SIZE 16 #define SAMPLING_KERNEL_BLOCK_SIZE 1024 #define ADD_BIAS_KERNEL_BLOCK_SIZE 1024 #define WRITE_BIAS_KERNEL_BLOCK_SIZE 64 //Future work: Check which weight matrix to send to cublas. namespace dp_matrix { //Need to declare cublas library object for device // __device__ hipblasHandle_t dev_cublas_handle; //Pre: All arrays/matrices are in row-major format. // A, B, C must be CUDA device pointers. // All other parameters are host data (including the handle). // A and B should contain data. // The data in C has no effect on the program result. // A is m rows and k columns // B is k rows and n columns // C is m rows and n columns // These dimensions are the dimensions AFTER the specified // transposition has been done. They are essentially the 3 // dimensions specifying the multiplication that will actually // happen. // transA is whether matrix A should be transposed. // transB is whether matrix B should be transposed. // handle is the initialized cublas handle. //Post: Sets C to the result of the matrix operation A.B, where A and // B have been transposed as specified by transA and transB. void gemm(DTYPE * A, DTYPE * B, DTYPE * C, int m, int n, int k, bool transA, bool transB, hipblasHandle_t & handle) { //This function has a lot of nit-picky complexity due to the column //major formatting required by cublas input. //Basically, A and B are being switched in order to get the //transposed output of C in row major format. This requires an extra //transpose, which cancels out the transpose that was needed to //convert from row major to column major. The leading dimensions //also conditionally change.... DTYPE alpha = 1.0; DTYPE beta = 0.0; int lda = transA ? m : k; int ldb = transB ? k : n; int ldc = n;//lol not mistake hipblasStatus_t status; // hipDeviceSynchronize(); // double t1 = get_wall_time(); //Need B.A in order to get transposed result in C. #ifdef USING_DOUBLES // cublas_gemm: 0.000730038 // cublas_gemm: 0.000989199 status = hipblasDgemm(handle, transB ? HIPBLAS_OP_T : HIPBLAS_OP_N, transA ? HIPBLAS_OP_T : HIPBLAS_OP_N, n, m, k, &alpha, B, ldb, A, lda, &beta, C, ldc); #else // cublas_gemm: 0.000174046 // cublas_gemm: 0.000134945 status = hipblasSgemm(handle, transB ? HIPBLAS_OP_T : HIPBLAS_OP_N, transA ? 
HIPBLAS_OP_T : HIPBLAS_OP_N, n, m, k, &alpha, B, ldb, A, lda, &beta, C, ldc); #endif // hipDeviceSynchronize(); // double t2 = get_wall_time(); // cout << "cublas_gemm: " << (t2-t1) << endl; if(status != HIPBLAS_STATUS_SUCCESS) { cerr << "gemm error\n"; if(status == HIPBLAS_STATUS_NOT_INITIALIZED) { cerr << "HIPBLAS_STATUS_NOT_INITIALIZED\n"; } else if(status == HIPBLAS_STATUS_ALLOC_FAILED) { cerr << "HIPBLAS_STATUS_ALLOC_FAILED\n"; } else if(status == HIPBLAS_STATUS_INVALID_VALUE) { cerr << "HIPBLAS_STATUS_INVALID_VALUE\n"; } else if(status == HIPBLAS_STATUS_ARCH_MISMATCH) { cerr << "HIPBLAS_STATUS_ARCH_MISMATCH\n"; } else if(status == HIPBLAS_STATUS_MAPPING_ERROR) { cerr << "HIPBLAS_STATUS_MAPPING_ERROR\n"; } else if(status == HIPBLAS_STATUS_EXECUTION_FAILED) { cerr << "HIPBLAS_STATUS_EXECUTION_FAILED\n"; } else if(status == HIPBLAS_STATUS_INTERNAL_ERROR) { cerr << "HIPBLAS_STATUS_INTERNAL_ERROR\n"; } else if(status == HIPBLAS_STATUS_NOT_SUPPORTED) { cerr << "HIPBLAS_STATUS_NOT_SUPPORTED\n"; } else if(status == CUBLAS_STATUS_LICENSE_ERROR) { cerr << "CUBLAS_STATUS_LICENSE_ERROR\n"; } else { cerr << "Unknown CUBLAS error\n"; } } } RBM_dp_matrix::RBM_dp_matrix(int size, int n_v, int n_h, int b_size, int k, DTYPE **w, DTYPE *hb, DTYPE *vb, int data_num_rows, int data_num_cols) : baseline::RBM(size, n_v, n_h, b_size, k, w, hb, vb, data_num_rows, data_num_cols) { // hipMalloc((void**) &dev_handle, sizeof(hipblasHandle_t)); // hipDeviceSetCacheConfig(hipFuncCachePreferShared); hipblasCreate(&host_handle); // init_cublas_handle<<<1,1>>>(dev_handle); // if(stream != NULL) { // init_cublas_handle_stream<<<1,1>>>(dev_handle, *stream); // } #ifdef MULTI_WEIGHT_MATRIX cout << "Using MULTI_WEIGHT_MATRIX" << endl; if(w == NULL) { WArray2 = new DTYPE[n_hidden * n_visible]; } #endif #ifndef BIT_CODING CUDA_CHECK(hipMalloc((void**)&hdirections, batch_size * n_hidden * sizeof(bool))); CUDA_CHECK(hipMalloc((void**)&vdirections, batch_size * n_visible * sizeof(bool))); #endif cout << "RBM_dp_matrix constructor" << endl; CUDA_CHECK(hipMalloc((void**)&dev_h_diffs, batch_size * (n_hidden + 1) * sizeof(int))); CUDA_CHECK(hipMalloc((void**)&dev_v_diffs, batch_size * (n_visible + 1) * sizeof(int))); // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); // hipDeviceSetSharedMemConfig(hipSharedMemBankSizeEightByte); } //========================================================= //Primary RBM calculation methods and kernels //========================================================= __global__ void finish_sampling_kernel_matrix(DTYPE * mean, DTYPE * dot_product, DTYPE * sample, DTYPE * bias, int length, hiprandState_t * curand_states) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < length * const_batch_size) { int hidden_idx = i % length; #ifdef USING_DOUBLES DTYPE mean_i = 1.0 / (1.0 + exp(-(dot_product[i] + bias[hidden_idx]))); #else DTYPE mean_i = 1.f / (1.f + exp(-(dot_product[i] + bias[hidden_idx]))); #endif mean[i] = mean_i; #ifdef USING_DOUBLES DTYPE r = hiprand_uniform_double(&curand_states[i]);//double mode #else DTYPE r = hiprand_uniform(&curand_states[i]);//float mode #endif sample[i] = r < mean_i; } } //Average time: 0.000766 //Matrix based version of sample_h_given_v. Basically does //sample_h_given_v for all batch elements. 
void RBM_dp_matrix::sample_h_given_v_matrix(DTYPE *dev_v0_sample, DTYPE *dev_mean, DTYPE *dev_sample) { // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); gemm(dev_v0_sample, #ifdef WEIGHT_MATRIX_PADDING dev_W_cublas, #else dev_W, #endif dev_h_dot_product_batch, batch_size, n_hidden, n_visible, false, true, host_handle); // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); int num_blocks = ((n_hidden * batch_size - 1) / SAMPLING_KERNEL_BLOCK_SIZE) + 1; hipLaunchKernelGGL(( finish_sampling_kernel_matrix), dim3(num_blocks), dim3(SAMPLING_KERNEL_BLOCK_SIZE), 0, 0, dev_mean, dev_h_dot_product_batch, dev_sample, dev_hbias, n_hidden, dev_curand_states); // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); } void RBM_dp_matrix::sample_v_given_h_matrix(DTYPE *h0_sample, DTYPE *v_mean, DTYPE *v_sample, DTYPE *prev_v_sample) { // hipDeviceSynchronize();//sync from sample_h_given_v_cublas // matrix_dot_vector(W, const_n_hidden, const_n_visible, h0_sample, // v_dot_product, true, handle); gemm(h0_sample, #ifdef WEIGHT_MATRIX_PADDING dev_W_cublas, #else dev_W, #endif dev_v_dot_product_batch, batch_size, n_visible, n_hidden, false, false, host_handle); add_bias(v_mean, dev_v_dot_product_batch, dev_vbias, n_visible); save_sample_changes(v_mean, v_sample, prev_v_sample, dev_v_diffs, n_visible #ifndef BIT_CODING , vdirections #endif ); } #ifdef AGG_WARP_FILT //BEGIN: Code from url: "https://devblogs.nvidia.com/parallelforall/cuda-pro-tip-optimized-filtering-warp-aggregated-atomics/" __device__ inline int lane_id(void) { return threadIdx.x % WARP_SIZE; } __device__ int warp_bcast(int v, int leader) { return __shfl(v, leader); } // warp-aggregated atomic increment __device__ int atomicAggInc(int *ctr) { int mask = __ballot(1); // select the leader int leader = __ffs(mask) - 1; // leader does the update int res; if(lane_id() == leader) res = atomicAdd(ctr, __popc(mask)); // broadcast result res = warp_bcast(res, leader); // each thread computes its own value return res + __popc(mask & ((1 << lane_id()) - 1)); } //END //length is the length of each dot product (probably either //n_visible or n_hidden), and padded_length is the length rounded up //to the nearest warp size. //At the end of this function we want the value at diffs[0] to //represent the number of diffs that follow in the array. So we can //just send the diffs ptr to atomicAggInc so that it modifies the //first element accordingly. However, it's still really //complicated, since there are separate aggregations for each diffs //array in a batch happening simultaneously... Warps are //indivisible, but we'd need them to start their relative count //within a subwarp. The easiest way to fix this would be to set the //block size to "length" with "batch_size" blocks. However, this //would not support any layer with more than 1024 elements since //that is the max threadperblock count in CUDA. So to make it //general we round up length to the nearest divisible warp size when //calling this kernel, then perform some extra index shifting and //checking inside the kernel. As it turns out, the easy way to do //this is to allocate a 2D set of threads. // //WARNING: The diffs stored in the array will be the modded (non //batch) index, and this is for use later on in the dpmm kernel. 
__global__ void save_sample_changes_kernel(DTYPE * mean, DTYPE * sample, DTYPE * prev_sample, int * diffs_batch, int length, hiprandState_t * curand_state_ptr #ifndef BIT_CODING , bool * directions_batch #endif ) { int i = blockDim.x * blockIdx.x + threadIdx.x; int batch_i = blockDim.y * blockIdx.y + threadIdx.y; //Note: i no longer corresponds to the correct index for mean, //sample, and prev_sample due to the padding. The actual thread //indices will be calculated later in the kernel. if((i < length) && (batch_i < const_batch_size)) { //Only activate threads that are within a batch's length //Still need a shift for diffs array (cause it has a different //length) int shifted_i = (batch_i * length) + i; //Now we begin actual calculations DTYPE prev_sample_i = prev_sample[shifted_i]; // DTYPE r = curand_uniform_DTYPE(&curand_state_ptr[shifted_i]); #ifdef USING_DOUBLES DTYPE r = hiprand_uniform_double(&curand_state_ptr[shifted_i]);//double mode #else DTYPE r = hiprand_uniform(&curand_state_ptr[shifted_i]);//float mode #endif DTYPE new_sample_i = r < mean[shifted_i] ? 1.f : 0.f;//Just to be sure it's 1 or 0 sample[shifted_i] = new_sample_i; if(new_sample_i != prev_sample_i) { #ifdef BIT_CODING //Construct even number to indicate default dot product increasing int my_diff_value = 2 * i; if(prev_sample_i != 0.f) { my_diff_value++; } #else int my_diff_value = i; #endif int diffs_shift = batch_i * (length+1); int * diffs = &diffs_batch[diffs_shift]; int my_idx = atomicAggInc(diffs); //+1 to avoid first index (stores size) diffs[my_idx + 1] = my_diff_value; #ifndef BIT_CODING bool * directions = &directions_batch[batch_i * length]; #ifdef USING_DOUBLES directions[my_idx] = (prev_sample_i == 0.0); #else directions[my_idx] = (prev_sample_i == 0.f); #endif #endif } } } #else __global__ void save_sample_changes_kernel(DTYPE * mean, DTYPE * sample, DTYPE * prev_sample, int * diffs_batch, int length, hiprandState_t * curand_state_ptr #ifndef BIT_CODING , bool * directions_batch #endif ) { int i = blockDim.x * blockIdx.x + threadIdx.x; int batch_i = blockDim.y * blockIdx.y + threadIdx.y; //Note: i no longer corresponds to the correct index for mean, //sample, and prev_sample due to the padding. The actual thread //indices will be calculated later in the kernel. if((i < length) && (batch_i < const_batch_size)) { //Only activate threads that are within a batch's length //Still need a shift for diffs array (cause it has a different //length) int shifted_i = (batch_i * length) + i; //Now we begin actual calculations DTYPE prev_sample_i = prev_sample[shifted_i]; // DTYPE r = curand_uniform_DTYPE(&curand_state_ptr[shifted_i]); #ifdef USING_DOUBLES DTYPE r = hiprand_uniform_double(&curand_state_ptr[shifted_i]);//double mode DTYPE new_sample_i = r < mean[shifted_i] ? 1.0 : 0.0;//Just to be sure it's 1 or 0 #else DTYPE r = hiprand_uniform(&curand_state_ptr[shifted_i]);//float mode DTYPE new_sample_i = r < mean[shifted_i] ? 
1.f : 0.f;//Just to be sure it's 1 or 0 #endif sample[shifted_i] = new_sample_i; if(new_sample_i != prev_sample_i) { //Construct even number to indicate default dot product increasing #ifdef BIT_CODING int my_diff_value = 2 * i;//instead of shifted_i if(prev_sample_i != 0.0) { //Then changing to negative, so dot product decreases, so //change to odd number (this encodes which direction the //dot product is going without extra memory usage) my_diff_value++; } #else int my_diff_value = i; #endif int diffs_shift = batch_i * (length+1); int * diffs = &diffs_batch[diffs_shift]; // int my_idx = atomicAggInc(diffs); int my_idx = atomicAdd(diffs, 1); //+1 to avoid first index (stores size) diffs[my_idx + 1] = my_diff_value; #ifndef BIT_CODING bool * directions = &directions_batch[batch_i * length]; #ifdef USING_DOUBLES directions[my_idx] = (prev_sample_i == 0.0); #else directions[my_idx] = (prev_sample_i == 0.f); #endif #endif } } } #endif __global__ void reset_diffs_counters(int * diffs, int length) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < const_batch_size) { diffs[i * (length+1)] = 0; } } //Note: sample and prev_sample could be the same pointer. void RBM_dp_matrix::save_sample_changes(DTYPE * mean, DTYPE * sample, DTYPE * prev_sample, int * diffs, int length #ifndef BIT_CODING , bool * directions #endif ) { // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); //Set initial diffs index counter to 0 for each batch hipLaunchKernelGGL(( reset_diffs_counters), dim3(((batch_size - 1) / 32) + 1), dim3(32), 0, 0, diffs, length); //Need to setup some special padding for this kernel // int padded_length = length + (length%WARP_SIZE); // int num_threads = batch_size * padded_length; // cout << "length: " << length << endl; // cout << "padded_length: " << padded_length << endl; // cout << "batch_size: " << batch_size << endl; // num_blocks = ((num_threads - 1) / SAVE_SAMPLE_CHANGES_KERNEL_BLOCK_SIZE) + 1; // cout << "num threads: " << num_threads << endl; // cout << "num_blocks: " << num_blocks << endl; dim3 num_blocks, num_threads; dims_to_num_threads_and_blocks(length, batch_size, num_blocks, num_threads); hipLaunchKernelGGL(( save_sample_changes_kernel), dim3(num_blocks), dim3(num_threads), 0, 0, mean, sample, prev_sample, diffs, length, dev_curand_states #ifndef BIT_CODING ,directions #endif ); // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); //Print the changes for testing... // int * host_diffs = new int[batch_size * (length+1)]; // hipMemcpy(host_diffs, diffs, batch_size * (length+1) * sizeof(int), // hipMemcpyDeviceToHost); // int indices[] = {0, 1}; // for(int idx = 0; idx < 2; idx++) { // int * my_diffs = &host_diffs[indices[idx] * (length+1)]; // cout << "idx: " << idx << endl; // for(int i = 0; i < my_diffs[0]+3; i++) { // cout << my_diffs[i] << endl; // } // cout << endl; // } // delete[] host_diffs; } //This function is the same as finish_sampling_kernel except that it //doesn't do the sampling step. //This is useful for the first sample_v_given_h call since cublas //can still be used (requiring the existence of this function to //finish the biasing EFFICIENTLY (global mem coalescence)), but we //want to save changes during the sampling step. //1: Basic vector addition to add bias to mean. 
//2: Set mean[i] = sigmoid(mean[i]) //PRE: len(mean) == len(sample) == len(bias) == length __global__ void add_bias_kernel(DTYPE * mean, DTYPE * dot_product, DTYPE * bias, int length) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < length * const_batch_size) { int length_i = i % length; #ifdef USING_DOUBLES mean[i] = 1.0 / (1.0 + exp(-(dot_product[i] + bias[length_i]))); #else mean[i] = 1.f / (1.f + exp(-(dot_product[i] + bias[length_i]))); #endif } } void RBM_dp_matrix::add_bias(DTYPE * mean, DTYPE * dot_product, DTYPE * bias, int length) { int num_blocks = (((length*batch_size) - 1) / ADD_BIAS_KERNEL_BLOCK_SIZE) + 1; hipLaunchKernelGGL(( add_bias_kernel), dim3(num_blocks), dim3(ADD_BIAS_KERNEL_BLOCK_SIZE), 0, 0, mean, dot_product, bias, length); // hipDeviceSynchronize(); } //len_diffs is the actual length of each array in diffs_batch __global__ void dpmm_kernel(DTYPE * W, int m, int n, int * diffs_batch, int len_diffs, DTYPE * dot_product_batch, int len_dot_product, bool transpose #ifndef BIT_CODING , bool * directions #endif ) { // extern __shared__ int diffs_batch_sh[]; // __shared__ int result_inc[1024];//TODO int i = blockDim.x * blockIdx.x + threadIdx.x; int batch_i = blockDim.y * blockIdx.y + threadIdx.y; //TODO: Implement a universal diffs transfer to more efficient //memory so that accessing diffs takes less time... if((i < len_dot_product) && (batch_i < const_batch_size)) { //Setup shifted pointers DTYPE * dot_product = &dot_product_batch[batch_i * len_dot_product]; DTYPE result_i_inc = 0.0f; int num_diffs = diffs_batch[batch_i * len_diffs]; // if(num_diffs > len_dot_product) printf("PROBLEM\n"); // if(i < num_diffs+1) {//WARNING: Need to fix potential missing threads // diffs_batch_sh[i] = diffs_batch[(batch_i * len_diffs) + i]; // } int my_base = (batch_i * len_diffs); int my_dir_base = (batch_i * (len_diffs-1)) - 1; for(int diffs_idx = 1; diffs_idx < num_diffs+1; diffs_idx++) { int curr_diff = diffs_batch[my_base + diffs_idx]; // int curr_diff = diffs_batch_sh[diffs_idx]; #ifdef BIT_CODING int diff_value = curr_diff >> 1; bool diff_direction = (curr_diff & 1) == 0; #else int diff_value = curr_diff; bool diff_direction = directions[my_dir_base + diffs_idx]; #endif // int curr_diff = diffs_sh[diffs_idx-1]; int idx; // //ASSERT: m = n_hidden, n = n_visible #ifdef MULTI_WEIGHT_MATRIX #ifdef WEIGHT_MATRIX_PADDING if(transpose) //Then W2 is being used idx = diff_value * (const_pitch2/sizeof(DTYPE)) + i; else idx = diff_value * (const_pitch/sizeof(DTYPE)) + i; #else if(transpose) idx = diff_value * m + i; else idx = diff_value * n + i; #endif #else #ifdef WEIGHT_MATRIX_PADDING if(transpose) idx = i * (const_pitch/sizeof(DTYPE)) + diff_value; else idx = diff_value * (const_pitch/sizeof(DTYPE)) + i; #else if(transpose) idx = i * n + diff_value; else idx = diff_value * n + i; #endif #endif // result_i_inc += W[idx] * (curr_diff % 2 == 0 ? 1.0 : -1.0); // if(diff_direction) // result_i_inc += W[idx]; // else // result_i_inc -= W[idx]; result_i_inc += W[idx] * (diff_direction ? 1 : -1); // result_inc[i] += W[idx] * (diff_direction ? 1 : -1); } dot_product[i] += result_i_inc; } } #ifndef USE_DPMM //num blocks in y dimension, must be >=3 for float to limit shared memory, >=6 for doubles (probably) #define DPVM_Y_NUM_BLOCKS 3 //This determines the number of blocks in the z dimension. //Increasing yields more parallelism, but less shared memory reuse. 
#define NUM_BATCH_BLOCKS 16 __global__ void dpvm_kernel(DTYPE * W, int m, int n, int * diffs_batch, int len_diffs, DTYPE * dot_product_batch, int len_dot_product, bool transpose #ifndef BIT_CODING , bool * directions_batch #endif ) { __shared__ float block_results[WARP_SIZE]; __shared__ DTYPE W_sh[WARP_SIZE * (784 / DPVM_Y_NUM_BLOCKS)];//diffs index the W matrix's rows (len_diffs-1), TODO: upgrade to dynamic to fix int W_col_idx = blockDim.x * blockIdx.x + threadIdx.x;//Used for weight matrix accessing //Transfer all W data to shared memory (can this be upgraded to conditional transfer if used?). int num_W_rows_per_block = (len_diffs - 1) / DPVM_Y_NUM_BLOCKS; for(int i_iter = 0; i_iter < num_W_rows_per_block; i_iter++) { int i = blockIdx.y * num_W_rows_per_block + i_iter;//i is the row if((i < len_diffs - 1) && (W_col_idx < len_dot_product)) { W_sh[i_iter * WARP_SIZE + threadIdx.x] = W[i * (transpose ? m:n) + W_col_idx]; } } // __syncthreads(); // int y = threadIdx.y; // int block_results_idx = threadIdx.y * blockDim.x + threadIdx.x;//Used later as well int block_results_idx = threadIdx.x;//Used later as well //setup ptrs for specific batch and iterate int num_batch_els = const_batch_size / NUM_BATCH_BLOCKS; //an iter (batch_iter) is the iteration index we're on, the index (batch_i) is the batch element! Important distinction here for(int batch_iter = 0; batch_iter < num_batch_els; batch_iter++) { int batch_i = blockIdx.z * num_batch_els + batch_iter; int * diffs = &diffs_batch[len_diffs * batch_i]; DTYPE * dot_product = &dot_product_batch[len_dot_product * batch_i]; bool * directions = &directions_batch[(len_diffs-1) * batch_i]; //Reset the results shared mem block_results[block_results_idx] = 0.f; //TODO: Implement a universal diffs transfer to more efficient //memory so that accessing diffs takes less time... if(W_col_idx < len_dot_product) {//TODO: consider moving up //Setup shifted pointers int num_diffs = diffs[0]; // int num_diffs_iters = ((num_diffs-1) / DPVM_Y_BLOCK_SIZE) + 1 + 1; //+1 for normal blocking calculation, +1 for diffs array shift for(int diffs_idx = 1; diffs_idx < num_diffs+1; diffs_idx++) { // int diffs_idx = diffs_base_idx + threadIdx.y; // if(diffs_idx - 1 < num_diffs) { int curr_diff = diffs[diffs_idx]; #ifdef BIT_CODING int diff_value = curr_diff >> 1; bool diff_direction = (curr_diff & 1) == 0; #else int diff_value = curr_diff; bool diff_direction = directions[diffs_idx-1]; #endif //Lots of unnecessary checks, but whatevs if((blockIdx.y * num_W_rows_per_block <= diff_value) && (diff_value < (blockIdx.y+1) * num_W_rows_per_block)) { // int curr_diff = diffs_sh[diffs_idx-1]; int idx; // //ASSERT: m = n_hidden, n = n_visible #ifdef MULTI_WEIGHT_MATRIX #ifdef WEIGHT_MATRIX_PADDING if(transpose) //Then W2 is being used idx = diff_value * (const_pitch2/sizeof(DTYPE)) + i; else idx = diff_value * (const_pitch/sizeof(DTYPE)) + i; #else // if(transpose) // idx = diff_value * m + i; // else // idx = diff_value * n + i; // if(transpose) idx = (diff_value - (blockIdx.y * num_W_rows_per_block)) * WARP_SIZE + threadIdx.x; // else // idx = diff_value * WARP_SIZE #endif #else #ifdef WEIGHT_MATRIX_PADDING if(transpose) idx = i * (const_pitch/sizeof(DTYPE)) + diff_value; else idx = diff_value * (const_pitch/sizeof(DTYPE)) + i; #else if(transpose) idx = i * n + diff_value; else idx = diff_value * n + i; #endif #endif // result_i_inc += W[idx] * (diff_direction ? 1 : -1); // block_results[block_results_idx] += W[idx] * (diff_direction ? 
1 : -1); block_results[block_results_idx] += W_sh[idx] * (diff_direction ? 1 : -1); // } } } atomicAdd(&dot_product[W_col_idx], block_results[block_results_idx]); // __syncthreads(); // if(threadIdx.y == 0) { /* for(int k = 0; k < DPVM_Y_BLOCK_SIZE; k++) { dot_product[i] += block_results[k * blockDim.x + threadIdx.x]; }//incrementing in shared memory isn't faster, I guess it's not a bottleneck. */ /* for(int k = 1; k < DPVM_Y_BLOCK_SIZE; k++) { block_results[threadIdx.x] += block_results[k * blockDim.x + threadIdx.x]; } dot_product[W_col_idx] += block_results[threadIdx.x]; */ // } // __syncthreads();//blocks could race ahead and reset block_results before being read } /* //Parallel reduce to first row of shared memory for the block int num_reducing = DPVM_Y_BLOCK_SIZE / 2; int access_shift = 1; while(num_reducing != 0) { if(threadIdx.y % (DPVM_Y_BLOCK_SIZE / num_reducing) == 0) block_results[block_results_idx] += block_results[block_results_idx + (access_shift * blockDim.x)]; num_reducing /= 2; access_shift *= 2; __syncthreads(); } //Send to global memory if(threadIdx.y == 0) dot_product[i] += block_results[threadIdx.x]; */ } } #endif void RBM_dp_matrix::dpmm(int m, int n, int * diffs, DTYPE * dot_product, bool transpose #ifndef BIT_CODING , bool * directions #endif ) { // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); //Figure out the correct length of the dot_product based on transpose: int len_dot_product = transpose ? m : n; int len_diffs = transpose ? n+1 : m+1;//diffs arrays are longer // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); #ifdef MULTI_WEIGHT_MATRIX DTYPE * corrected_W = transpose ? dev_W2 : dev_W; #else DTYPE * corrected_W = dev_W; #endif #ifdef USE_DPMM dim3 num_blocks(getNumBlocks(len_dot_product, WARP_SIZE), getNumBlocks(batch_size, DPMM_Y_BLOCK_SIZE)); dim3 num_threads(WARP_SIZE, DPMM_Y_BLOCK_SIZE); hipLaunchKernelGGL(( dpmm_kernel), dim3(num_blocks), dim3(num_threads), 0, 0, corrected_W, m, n, diffs, len_diffs, dot_product, len_dot_product, transpose #ifndef BIT_CODING , directions #endif ); #else // int num_blocks = getNumBlocks(len_dot_product, WARP_SIZE); // int num_threads = WARP_SIZE; dim3 num_blocks(getNumBlocks(len_dot_product, WARP_SIZE), DPVM_Y_NUM_BLOCKS, NUM_BATCH_BLOCKS); dim3 num_threads(WARP_SIZE, 1, 1);//only 1 z thd, since it will iterate hipLaunchKernelGGL(( dpvm_kernel), dim3(num_blocks), dim3(num_threads), 0, 0, corrected_W, m, n, diffs, len_diffs, dot_product, len_dot_product, transpose #ifndef BIT_CODING , directions #endif ); /* dim3 num_blocks(getNumBlocks(len_dot_product, WARP_SIZE), 1); dim3 num_threads(WARP_SIZE, DPVM_Y_BLOCK_SIZE); for(int i = 0; i < batch_size; i++) { hipLaunchKernelGGL(( dpvm_kernel), dim3(num_blocks), dim3(num_threads), 0, 0, corrected_W, m, n, diffs + (len_diffs * i), len_diffs, dot_product + (len_dot_product * i), len_dot_product, transpose #ifndef BIT_CODING , directions + (i * (len_diffs - 1)) #endif ); } */ #endif // hipDeviceSynchronize(); // double t2 = get_wall_time(); // cout << "dpmm microseconds: " << (t2-t1) * 1000000 << endl; //doubles // dpmm: 0.000123024 // dpmm: 0.000359058 //floats // dpmm: 0.00114083 <<< 10X slower? // dpmm: 0.000381947 // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); } //PRE: prev_sample should be the last sample array that was sent // into a sample_h_given_v* call. //TODO: This one is taking a long time (around 0.9-1.4 ms) // s_v_g_h_delta only takes 0.38-0.39ms // Room for improvement! 
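//
//What the delta pass amounts to (a sketch of the arithmetic, not new
//behaviour): instead of recomputing the full matrix product, dpmm
//refreshes the previous dot products using only the units that
//flipped, i.e. for every output unit i
//    dot_product[i] += sum over flipped units j of (+/- W(j, i))
//where W(j, i) is the weight connecting flipped unit j to output i,
//with + for a 0->1 flip and - for a 1->0 flip. If, say, only 30
//units of a 784-unit visible layer change between Gibbs steps, each
//hidden dot product costs 30 weight reads instead of 784, which is
//where the win over the plain matrix product comes from.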
void RBM_dp_matrix::sample_h_given_v_delta(DTYPE * v0_sample, DTYPE * h_mean, DTYPE * h_sample, DTYPE * prev_h_sample) { // cerr << "sample_h_given_v_delta\n"; // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); //This code performs a column analysis and prints if there's a column with no diffs. int * v_diffs = new int[(n_visible+1) * batch_size]; hipMemcpy(v_diffs, dev_v_diffs, (n_visible+1) * batch_size * sizeof(int), hipMemcpyDeviceToHost); bool * v_checked = new bool[n_visible]; for(int i = 0; i < n_visible; i++) { v_checked[i] = true; } for(int row = 0; row < batch_size; row++) { int num_diffs = v_diffs[row * (n_visible+1)]; // cout << "num_diffs = " << num_diffs << endl; for(int i = 1; i < num_diffs+1; i++) { int col = v_diffs[row * (n_visible+1) + i]; v_checked[col] = false; } } int num_cols_empty = 0; for(int i = 0; i < n_visible; i++) { if(v_checked[i]) { num_cols_empty++; // cout << i << " is EMPTY" << endl; } } // cout << "percent cols empty = " << (num_cols_empty / ((float) n_visible)) << endl; delete[] v_diffs; delete[] v_checked; // int num_diffs_first_v; // hipMemcpy(&num_diffs_first_v, dev_v_diffs, sizeof(int), hipMemcpyDeviceToHost); // cout << "v percent diff: " << (num_diffs_first_v/((float)n_visible)) << endl; // cout << ": " << (num_diffs_first_v/((float)n_visible)) << endl; dpmm(n_hidden, n_visible, dev_v_diffs, dev_h_dot_product_batch, true #ifndef BIT_CODING , vdirections #endif ); // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); add_bias(h_mean, dev_h_dot_product_batch, dev_hbias, n_hidden); // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); save_sample_changes(h_mean, h_sample, prev_h_sample, dev_h_diffs, n_hidden #ifndef BIT_CODING , hdirections #endif ); // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); } //PRE: prev_sample should be the last sample array that was sent // into a sample_v_given_h* call. // All paramters should be device allocated. 
void RBM_dp_matrix::sample_v_given_h_delta(DTYPE * h0_sample, DTYPE * v_mean, DTYPE * v_sample, DTYPE * prev_v_sample) { // cerr << "sample_v_given_h_delta\n"; // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); //TODO: Fix MULTI_WEIGHT_MATRIX // cerr << "n_hidden: " << n_hidden << endl; // cerr << "n_visible: " << n_visible << endl; // int num_diffs_first_h; // hipMemcpy(&num_diffs_first_h, dev_h_diffs, sizeof(int), hipMemcpyDeviceToHost); // cout << "h percent diff: " << (num_diffs_first_h/((float)n_hidden)) << endl; dpmm(n_hidden, n_visible, dev_h_diffs, dev_v_dot_product_batch, false #ifndef BIT_CODING , hdirections #endif ); // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); add_bias(v_mean, dev_v_dot_product_batch, dev_vbias, n_visible); // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); save_sample_changes(v_mean, v_sample, prev_v_sample, dev_v_diffs, n_visible #ifndef BIT_CODING , vdirections #endif ); // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); } // #ifdef MULTI_WEIGHT_MATRIX // __global__ // void write_matrix_transpose(DTYPE * W, DTYPE * W2) { // int x = blockIdx.x * blockDim.x + threadIdx.x; // int y = blockIdx.y * blockDim.y + threadIdx.y; // __shared__ DTYPE my_tile[MAX_THREAD_SQUARE_EDGE][MAX_THREAD_SQUARE_EDGE]; // if((x < const_n_visible) && (y < const_n_hidden)) { // my_tile[threadIdx.y][threadIdx.x] = W[y * const_n_visible + x]; // } // __syncthreads(); // if((y < const_n_visible) && (x < const_n_hidden)) { // } // } // __global__ void write_matrix_transpose(float *W, const float *W2) // { // __shared__ float tile[MAX_THREAD_SQUARE_EDGE][MAX_THREAD_SQUARE_EDGE+1]; // int x = blockIdx.x * MAX_THREAD_SQUARE_EDGE + threadIdx.x; // int y = blockIdx.y * MAX_THREAD_SQUARE_EDGE + threadIdx.y; // int width = gridDim.x * MAX_THREAD_SQUARE_EDGE; // for (int j = 0; j < MAX_THREAD_SQUARE_EDGE; j += BLOCK_ROWS) // tile[(threadIdx.y+j)*MAX_THREAD_SQUARE_EDGE + threadIdx.x] = W[(y+j)*width + x]; // __syncthreads(); // for (int j = 0; j < MAX_THREAD_SQUARE_EDGE; j += BLOCK_ROWS) // W2[(y+j)*width + x] = tile[(threadIdx.y+j)*MAX_THREAD_SQUARE_EDGE + threadIdx.x]; // } // #endif // #define TILE_DIM 32 //PRE: src has const_n_hidden rows and const_n_visible columns. // dest is allocated with const_n_hidden * const_n_visible elements //POST: Writes the tranpose of src to dest. 
// __global__ // void write_matrix_transpose(DTYPE * src, DTYPE * dest) { // __shared__ float tile[MAX_THREAD_SQUARE_EDGE][MAX_THREAD_SQUARE_EDGE]; // int x = blockIdx.x * blockDim.x + threadIdx.x; // int y = blockIdx.y * blockDim.y + threadIdx.y; // if((x < const_n_visible) && (y < const_n_hidden)) { // tile[threadIdx.y][threadIdx.x] = src[y*const_n_visible + x]; // } // __syncthreads(); // x = blockIdx.y * blockDim.y + threadIdx.x; // transpose block offset // y = blockIdx.x * blockDim.x + threadIdx.y; // if((x < const_n_hidden) && (y < const_n_visible)) { // dest[y*const_n_hidden + x] = tile[threadIdx.x][threadIdx.y]; // } // } void RBM_dp_matrix::gibbs_hvh_delta(DTYPE *h0_sample, DTYPE *nv_means, DTYPE *nv_samples, DTYPE *nh_means, DTYPE *nh_samples) { sample_v_given_h_delta(h0_sample , nv_means, nv_samples, nv_samples); sample_h_given_v_delta(nv_samples, nh_means, nh_samples, nh_samples); } DTYPE abs(DTYPE d) { if(d < 0) return -d; return d; } void compare_W_W2(DTYPE * dev_W, DTYPE * dev_W2, int n_visible, int n_hidden) { DTYPE * W = new DTYPE[n_visible * n_hidden]; DTYPE * W2 = new DTYPE[n_visible * n_hidden]; hipMemcpy(W, dev_W, sizeof(DTYPE) * n_visible * n_hidden, hipMemcpyDeviceToHost); hipMemcpy(W2, dev_W2, sizeof(DTYPE) * n_visible * n_hidden, hipMemcpyDeviceToHost); DTYPE total_diff = 0.0; for(int row = 0; row < n_hidden; row++) { for(int col = 0; col < n_visible; col++) { total_diff += abs(W[row * n_visible + col] - W2[col*n_hidden + row]); } } cout << "W diff: " << total_diff << endl; delete[] W; delete[] W2; } void RBM_dp_matrix::contrastive_divergence(int curr_i, DTYPE lr, DTYPE wc, DTYPE * dev_data) { // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); //TODO: Upgrade to use the multi-weight matrix functionality // reset_d_arrays(); DTYPE * dev_input = &dev_data[data_num_cols * (curr_i*batch_size)]; sample_h_given_v_matrix(dev_input , dev_ph_mean_batch , dev_ph_sample_batch); // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); sample_v_given_h_matrix(dev_ph_sample_batch, dev_nv_means_batch, dev_nv_samples_batch, dev_input);//include dev_input as the //previous sample // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); sample_h_given_v_delta(dev_nv_samples_batch, dev_nh_means_batch, dev_nh_samples_batch, dev_ph_sample_batch); for(int step = 1; step < k; step++) { // sample_v_given_h_delta(dev_nh_samples_batch, dev_nv_means_batch, dev_nv_samples_batch, // dev_nv_samples_batch); // cerr << "sample_h_given_v_delta\n"; // sample_h_given_v_delta(dev_nv_samples_batch, dev_nh_means_batch, dev_nh_samples_batch, // dev_nh_samples_batch); // cerr << "gibbs_hvh_delta\n"; gibbs_hvh_delta(dev_nh_samples_batch, dev_nv_means_batch, dev_nv_samples_batch, dev_nh_means_batch, dev_nh_samples_batch); } // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); int most_nodes = max(n_hidden, n_visible); dim3 num_blocks, num_threads; dims_to_num_threads_and_blocks(n_visible, n_hidden, num_blocks, num_threads); // #ifdef SIMULTANEOUS_EXECUTION // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); // GET_TIME(t1); //MULTI_WEIGHT_MATRIX only adds 0.00035 seconds extra time onto // what was originally 0.00195 seconds. 
(GTX 980 test) hipLaunchKernelGGL(( write_weights) , dim3(num_blocks), dim3(num_threads), 0, 0, dev_data, dev_W, #ifdef MULTI_WEIGHT_MATRIX dev_W2, #endif lr, wc, dev_ph_mean_batch, dev_nv_means_batch, dev_nh_means_batch, dev_ph_sample_batch, dev_nv_samples_batch, dev_nh_samples_batch, dev_hbias, dev_vbias, dev_dhbias, dev_dvbias, data_num_rows, data_num_cols, curr_i); // #ifdef MULTI_WEIGHT_MATRIX // write_transpose<<< num_blocks, num_threads>>> (dev_W, dev_W2); // #endif #ifdef MULTI_WEIGHT_MATRIX #ifdef EFFICIENT_TRANSPOSE dim3 dimGrid((n_visible-1)/MAX_THREAD_SQUARE_EDGE+1, (n_hidden-1)/MAX_THREAD_SQUARE_EDGE+1); dim3 dimBlock(MAX_THREAD_SQUARE_EDGE, BLOCK_ROWS); //Need to update W2 efficiently // dims_to_num_threads_and_blocks(most_nodes, most_nodes, num_blocks, num_threads); #ifdef WEIGHT_MATRIX_PADDING hipLaunchKernelGGL(( write_matrix_transpose_pitch), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_W, dev_W2); #else hipLaunchKernelGGL(( write_matrix_transpose), dim3(dimGrid), dim3(dimBlock), 0, 0, dev_W, dev_W2); #endif #endif #endif #ifdef WEIGHT_MATRIX_PADDING //Cant go device to device, so have to go hacky and copy it out //to main memory then back CUDA_CHECK(hipMemcpy2D(WArray, n_visible * sizeof(DTYPE), dev_W, pitch, n_visible * sizeof(DTYPE), n_hidden, hipMemcpyDeviceToHost)); CUDA_CHECK(hipMemcpy(dev_W_cublas, WArray, n_visible * n_hidden * sizeof(DTYPE), hipMemcpyHostToDevice)); // CUDA_CHECK(hipMemcpy2D(dev_W_cublas, n_visible * sizeof(DTYPE), dev_W, pitch, // n_visible * sizeof(DTYPE), n_hidden * sizeof(DTYPE), // hipMemcpyDeviceToDevice)); #endif // compare_W_W2(dev_W, dev_W2, n_visible, n_hidden); // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); // GET_TIME(t2); // cerr << "weight matrix update time: " << get_duration(t1, t2) << endl; int num_bias_blocks = 1 + ((most_nodes-1) / WRITE_BIAS_KERNEL_BLOCK_SIZE); int num_bias_threads = WRITE_BIAS_KERNEL_BLOCK_SIZE; hipLaunchKernelGGL(( write_bias_results_to_memory) , dim3(num_bias_blocks), dim3(num_bias_threads), 0, 0, dev_data, lr, wc, dev_ph_mean_batch, dev_nv_means_batch, dev_nh_means_batch, dev_ph_sample_batch, dev_nv_samples_batch, dev_nh_samples_batch, dev_hbias, dev_vbias, dev_dhbias, dev_dvbias, data_num_rows, data_num_cols, curr_i); // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); // GET_TIME(k2_t2); // cerr << "k2 time: " << get_duration(k2_t1, k2_t2) << endl; } void RBM_dp_matrix::reset_d_arrays() { //Since dW_pitch is the width of the dev_dW array rows, we //multiply by the number of rows (n_hidden) to get the number of //bytes to reset: // CUDA_CHECK(hipMemset(dev_dhbias, 0, n_hidden * sizeof(DTYPE))); // CUDA_CHECK(hipMemset(dev_dvbias, 0, n_visible * sizeof(DTYPE))); // CUDA_CHECK(hipMemset(dev_ph_mean_batch , 0, sizeof(DTYPE) * n_hidden * batch_size)); // CUDA_CHECK(hipMemset(dev_nv_means_batch , 0, sizeof(DTYPE) * n_visible * batch_size)); // CUDA_CHECK(hipMemset(dev_nh_means_batch , 0, sizeof(DTYPE) * n_hidden * batch_size)); // CUDA_CHECK(hipMemset(dev_ph_sample_batch , 0, sizeof(DTYPE) * n_hidden * batch_size)); // CUDA_CHECK(hipMemset(dev_nv_samples_batch, 0, sizeof(DTYPE) * n_visible * batch_size)); // CUDA_CHECK(hipMemset(dev_nh_samples_batch, 0, sizeof(DTYPE) * n_hidden * batch_size)); } void RBM_dp_matrix::allocate_special_memory() { // data = new DTYPE[data_num_rows * data_num_cols]; // for(int i = 0; i < data_num_rows * data_num_cols; i++) { // data[i] = (DTYPE) int_data[i]; // } // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); 
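//A note on the pitched layout set up below (a sketch of the
//indexing, matching how const_pitch is consumed in dpmm_kernel):
//hipMallocPitch pads every row of dev_W out to `pitch` bytes, so
//element (row, col) lives at
//    ((DTYPE*)((char*)dev_W + row * pitch))[col]
//which, since the pitch is an aligned multiple of sizeof(DTYPE), is
//the same as dev_W[row * (pitch/sizeof(DTYPE)) + col]. The cublas
//path keeps its own contiguous dev_W_cublas copy instead of reading
//the pitched buffer, which is why the extra copies show up in
//contrastive_divergence when WEIGHT_MATRIX_PADDING is enabled.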
CUDA_CHECK(hipMalloc((void**)&dev_ph_sample_batch , sizeof(DTYPE) * n_hidden * batch_size)); CUDA_CHECK(hipMalloc((void**)&dev_nv_samples_batch, sizeof(DTYPE) * n_visible * batch_size)); CUDA_CHECK(hipMalloc((void**)&dev_nh_samples_batch, sizeof(DTYPE) * n_hidden * batch_size)); #ifdef WEIGHT_MATRIX_PADDING CUDA_CHECK(hipMallocPitch((void**)&dev_W, &pitch, n_visible * sizeof(DTYPE), n_hidden)); //Copy pitch to const memory hipMemcpyToSymbol(const_pitch, &pitch , sizeof(size_t)); matrixToArray(W, WArray , n_hidden, n_visible); CUDA_CHECK(hipMemcpy2D(dev_W, pitch, WArray, n_visible * sizeof(DTYPE), n_visible * sizeof(DTYPE), n_hidden, hipMemcpyHostToDevice)); CUDA_CHECK(hipMalloc((void**)&dev_W_cublas, n_hidden * n_visible * sizeof(DTYPE))); // CUDA_CHECK(hipMemcpy2D(dev_W_cublas, n_visible * sizeof(DTYPE), dev_W, pitch, // n_visible * sizeof(DTYPE), n_hidden * sizeof(DTYPE), // hipMemcpyDeviceToDevice)); CUDA_CHECK(hipMemcpy(dev_W_cublas, WArray, n_hidden * n_visible * sizeof(DTYPE), hipMemcpyHostToDevice)); #else CUDA_CHECK(hipMalloc((void**)&dev_W , n_hidden * n_visible * sizeof(DTYPE))); matrixToArray (W, WArray , n_hidden, n_visible); CUDA_CHECK(hipMemcpy(dev_W, WArray, n_hidden * n_visible * sizeof(DTYPE), hipMemcpyHostToDevice)); #endif //Allocate transpose(s) #ifdef MULTI_WEIGHT_MATRIX #ifdef WEIGHT_MATRIX_PADDING CUDA_CHECK(hipMallocPitch((void**)&dev_W2, &pitch2, n_hidden * sizeof(DTYPE), n_visible)); hipMemcpyToSymbol(const_pitch2, &pitch2 , sizeof(size_t)); matrixToArrayTrans(W, WArray2, n_hidden, n_visible); CUDA_CHECK(hipMemcpy2D(dev_W2, pitch2, WArray2, n_hidden * sizeof(DTYPE), n_hidden * sizeof(DTYPE), n_visible, hipMemcpyHostToDevice)); #else CUDA_CHECK(hipMalloc((void**)&dev_W2, n_hidden * n_visible * sizeof(DTYPE))); matrixToArrayTrans(W, WArray2, n_hidden, n_visible); CUDA_CHECK(hipMemcpy(dev_W2, WArray2, n_hidden * n_visible * sizeof(DTYPE), hipMemcpyHostToDevice)); #endif #endif // CUDA_CHECK(hipMalloc((void**)&dev_data, // data_num_rows * data_num_cols * sizeof(DTYPE))); // CUDA_CHECK(hipMemcpy(dev_data, data, data_num_rows * data_num_cols * sizeof(DTYPE), // hipMemcpyHostToDevice)); CUDA_CHECK(hipMalloc((void**)&dev_h_dot_product_batch, sizeof(DTYPE) * n_hidden * batch_size)); CUDA_CHECK(hipMalloc((void**)&dev_v_dot_product_batch, sizeof(DTYPE) * n_visible * batch_size)); // hipDeviceSynchronize(); // CUDA_CHECK(hipGetLastError()); } void RBM_dp_matrix::copy_matrices_to_host() { #ifdef WEIGHT_MATRIX_PADDING CUDA_CHECK(hipMemcpy2D(WArray, n_visible * sizeof(DTYPE), dev_W, pitch, n_visible * sizeof(DTYPE), n_hidden, hipMemcpyDeviceToHost)); #else CUDA_CHECK(hipMemcpy(WArray, dev_W, n_hidden * n_visible * sizeof(DTYPE), hipMemcpyDeviceToHost)); #endif arrayToMatrix(WArray, W, n_hidden, n_visible); CUDA_CHECK(hipMemcpy(vbias, dev_vbias, n_visible * sizeof(DTYPE), hipMemcpyDeviceToHost)); CUDA_CHECK(hipMemcpy(hbias, dev_hbias, n_hidden * sizeof(DTYPE), hipMemcpyDeviceToHost)); #ifdef DEBUG DTYPE * nh_samples = new DTYPE[n_hidden * batch_size]; DTYPE * nv_samples = new DTYPE[n_visible * batch_size]; DTYPE * nh_means = new DTYPE[n_hidden * batch_size]; DTYPE * nv_means = new DTYPE[n_visible * batch_size]; DTYPE * ph_means = new DTYPE[n_hidden * batch_size]; DTYPE * ph_samples = new DTYPE[n_visible * batch_size]; hipMemcpy(nh_samples, dev_nh_samples_batch, n_hidden * batch_size * sizeof(DTYPE), hipMemcpyDeviceToHost); hipMemcpy(nv_samples, dev_nv_samples_batch, n_visible * batch_size * sizeof(DTYPE), hipMemcpyDeviceToHost); hipMemcpy(nh_means, dev_nh_means_batch, n_hidden * 
batch_size * sizeof(DTYPE), hipMemcpyDeviceToHost); hipMemcpy(nv_means, dev_nv_means_batch, n_visible * batch_size * sizeof(DTYPE), hipMemcpyDeviceToHost); hipMemcpy(ph_samples, dev_ph_sample_batch, n_hidden * batch_size * sizeof(DTYPE), hipMemcpyDeviceToHost); hipMemcpy(ph_means , dev_ph_mean_batch, n_hidden * batch_size * sizeof(DTYPE), hipMemcpyDeviceToHost); int count = 5; cout << "nh_samples:\n"; for(int i = 0; i < count; i++) { cout << nh_samples[i] << endl; } cout << "...\n"; cout << "nv_samples:\n"; for(int i = 0; i < count; i++) { cout << nv_samples[i] << endl; } cout << "...\n"; cout << "nh_means:\n"; for(int i = 0; i < count; i++) { cout << nh_means[i] << endl; } cout << "...\n"; cout << "nv_means:\n"; for(int i = 0; i < count; i++) { cout << nv_means[i] << endl; } cout << "...\n"; cout << "ph_samples:\n"; for(int i = 0; i < count; i++) { cout << ph_samples[i] << endl; } cout << "...\n"; cout << "ph_means:\n"; for(int i = 0; i < count; i++) { cout << ph_means[i] << endl; } cout << "...\n"; delete[] nh_samples; delete[] nv_samples; delete[] nh_means; delete[] nv_means; delete[] ph_means; delete[] ph_samples; #endif } RBM_dp_matrix::~RBM_dp_matrix() { hipblasDestroy(host_handle); #ifdef SAVE_WEIGHTS saveWeightMatrix(); #endif hipFree(dev_h_diffs); hipFree(dev_v_diffs); hipFree(dev_h_dot_product_batch); hipFree(dev_v_dot_product_batch); // destroy_cublas_handle<<<1,1>>>(dev_handle); // hipFree(dev_handle); #ifdef MULTI_WEIGHT_MATRIX delete[] WArray2; hipFree(dev_W2); #endif #ifdef WEIGHT_MATRIX_PADDING hipFree(dev_W_cublas); #endif #ifndef BIT_CODING hipFree(hdirections); hipFree(vdirections); #endif } }
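//A hypothetical host-side driver for this class (a sketch only; the
//epoch count, learning rate and weight cost are made-up
//placeholders, and it assumes the surrounding training code has
//already pushed the data set to dev_train_data and initialised the
//base-class device state):
//
//  dp_matrix::RBM_dp_matrix rbm(train_rows, n_visible, n_hidden,
//                               batch_size, /*k=*/1,
//                               NULL, NULL, NULL,
//                               train_rows, n_visible);
//  rbm.allocate_special_memory();
//  for (int epoch = 0; epoch < num_epochs; epoch++)
//    for (int batch = 0; batch < train_rows / batch_size; batch++)
//      rbm.contrastive_divergence(batch, 0.1, 0.0002, dev_train_data);
//  rbm.copy_matrices_to_host();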
b43cc7a53c92123e1278b78fb8ee0b49a55905b1.cu
#include <iostream> #include "../include/rbm_dp_matrix.h" #include "../include/rbm_baseline.h" #include "../include/utils.h" #include "../include/cuda_utils.h" #include "../include/write_weights_kernel.h" #include "cublas_v2.h" #include "../include/constants.h" #include <stdio.h> #include <climits> //Needed for INT_MAX #include <curand_kernel.h> #include <vector> using namespace std; using namespace utils; //with weight matrix padding and matrix transpose, things are complicated //pitch and pitch2 are the pitches of dev_W and dev_W2. //dev_W_cublas is the non-pitched memory for cublas to use. //write_matrix_transpose_pitch is the transpose writer kernel for pitch matrices. // #define DEBUG #define DPMM_Y_BLOCK_SIZE 16 #define SAMPLING_KERNEL_BLOCK_SIZE 1024 #define ADD_BIAS_KERNEL_BLOCK_SIZE 1024 #define WRITE_BIAS_KERNEL_BLOCK_SIZE 64 //Future work: Check which weight matrix to send to cublas. namespace dp_matrix { //Need to declare cublas library object for device // __device__ cublasHandle_t dev_cublas_handle; //Pre: All arrays/matrices are in row-major format. // A, B, C must be CUDA device pointers. // All other parameters are host data (including the handle). // A and B should contain data. // The data in C has no effect on the program result. // A is m rows and k columns // B is k rows and n columns // C is m rows and n columns // These dimensions are the dimensions AFTER the specified // transposition has been done. They are essentially the 3 // dimensions specifying the multiplication that will actually // happen. // transA is whether matrix A should be transposed. // transB is whether matrix B should be transposed. // handle is the initialized cublas handle. //Post: Sets C to the result of the matrix operation A.B, where A and // B have been transposed as specified by transA and transB. void gemm(DTYPE * A, DTYPE * B, DTYPE * C, int m, int n, int k, bool transA, bool transB, cublasHandle_t & handle) { //This function has a lot of nit-picky complexity due to the column //major formatting required by cublas input. //Basically, A and B are being switched in order to get the //transposed output of C in row major format. This requires an extra //transpose, which cancels out the transpose that was needed to //convert from row major to column major. The leading dimensions //also conditionally change.... DTYPE alpha = 1.0; DTYPE beta = 0.0; int lda = transA ? m : k; int ldb = transB ? k : n; int ldc = n;//lol not mistake cublasStatus_t status; // cudaDeviceSynchronize(); // double t1 = get_wall_time(); //Need B.A in order to get transposed result in C. #ifdef USING_DOUBLES // cublas_gemm: 0.000730038 // cublas_gemm: 0.000989199 status = cublasDgemm(handle, transB ? CUBLAS_OP_T : CUBLAS_OP_N, transA ? CUBLAS_OP_T : CUBLAS_OP_N, n, m, k, &alpha, B, ldb, A, lda, &beta, C, ldc); #else // cublas_gemm: 0.000174046 // cublas_gemm: 0.000134945 status = cublasSgemm(handle, transB ? CUBLAS_OP_T : CUBLAS_OP_N, transA ? 
CUBLAS_OP_T : CUBLAS_OP_N, n, m, k, &alpha, B, ldb, A, lda, &beta, C, ldc); #endif // cudaDeviceSynchronize(); // double t2 = get_wall_time(); // cout << "cublas_gemm: " << (t2-t1) << endl; if(status != CUBLAS_STATUS_SUCCESS) { cerr << "gemm error\n"; if(status == CUBLAS_STATUS_NOT_INITIALIZED) { cerr << "CUBLAS_STATUS_NOT_INITIALIZED\n"; } else if(status == CUBLAS_STATUS_ALLOC_FAILED) { cerr << "CUBLAS_STATUS_ALLOC_FAILED\n"; } else if(status == CUBLAS_STATUS_INVALID_VALUE) { cerr << "CUBLAS_STATUS_INVALID_VALUE\n"; } else if(status == CUBLAS_STATUS_ARCH_MISMATCH) { cerr << "CUBLAS_STATUS_ARCH_MISMATCH\n"; } else if(status == CUBLAS_STATUS_MAPPING_ERROR) { cerr << "CUBLAS_STATUS_MAPPING_ERROR\n"; } else if(status == CUBLAS_STATUS_EXECUTION_FAILED) { cerr << "CUBLAS_STATUS_EXECUTION_FAILED\n"; } else if(status == CUBLAS_STATUS_INTERNAL_ERROR) { cerr << "CUBLAS_STATUS_INTERNAL_ERROR\n"; } else if(status == CUBLAS_STATUS_NOT_SUPPORTED) { cerr << "CUBLAS_STATUS_NOT_SUPPORTED\n"; } else if(status == CUBLAS_STATUS_LICENSE_ERROR) { cerr << "CUBLAS_STATUS_LICENSE_ERROR\n"; } else { cerr << "Unknown CUBLAS error\n"; } } } RBM_dp_matrix::RBM_dp_matrix(int size, int n_v, int n_h, int b_size, int k, DTYPE **w, DTYPE *hb, DTYPE *vb, int data_num_rows, int data_num_cols) : baseline::RBM(size, n_v, n_h, b_size, k, w, hb, vb, data_num_rows, data_num_cols) { // cudaMalloc((void**) &dev_handle, sizeof(cublasHandle_t)); // cudaDeviceSetCacheConfig(cudaFuncCachePreferShared); cublasCreate(&host_handle); // init_cublas_handle<<<1,1>>>(dev_handle); // if(stream != NULL) { // init_cublas_handle_stream<<<1,1>>>(dev_handle, *stream); // } #ifdef MULTI_WEIGHT_MATRIX cout << "Using MULTI_WEIGHT_MATRIX" << endl; if(w == NULL) { WArray2 = new DTYPE[n_hidden * n_visible]; } #endif #ifndef BIT_CODING CUDA_CHECK(cudaMalloc((void**)&hdirections, batch_size * n_hidden * sizeof(bool))); CUDA_CHECK(cudaMalloc((void**)&vdirections, batch_size * n_visible * sizeof(bool))); #endif cout << "RBM_dp_matrix constructor" << endl; CUDA_CHECK(cudaMalloc((void**)&dev_h_diffs, batch_size * (n_hidden + 1) * sizeof(int))); CUDA_CHECK(cudaMalloc((void**)&dev_v_diffs, batch_size * (n_visible + 1) * sizeof(int))); // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); // cudaDeviceSetSharedMemConfig(cudaSharedMemBankSizeEightByte); } //========================================================= //Primary RBM calculation methods and kernels //========================================================= __global__ void finish_sampling_kernel_matrix(DTYPE * mean, DTYPE * dot_product, DTYPE * sample, DTYPE * bias, int length, curandState_t * curand_states) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < length * const_batch_size) { int hidden_idx = i % length; #ifdef USING_DOUBLES DTYPE mean_i = 1.0 / (1.0 + exp(-(dot_product[i] + bias[hidden_idx]))); #else DTYPE mean_i = 1.f / (1.f + exp(-(dot_product[i] + bias[hidden_idx]))); #endif mean[i] = mean_i; #ifdef USING_DOUBLES DTYPE r = curand_uniform_double(&curand_states[i]);//double mode #else DTYPE r = curand_uniform(&curand_states[i]);//float mode #endif sample[i] = r < mean_i; } } //Average time: 0.000766 //Matrix based version of sample_h_given_v. Basically does //sample_h_given_v for all batch elements. 
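//
//A sketch of the shapes involved (everything here is row-major, as
//in the rest of the file): dev_v0_sample holds batch_size rows of
//n_visible values and the weight matrix holds n_hidden rows of
//n_visible values, so the gemm call below computes
//    h_dot_product = v0_sample . W^T    (batch_size x n_hidden)
//i.e. one row of hidden-unit dot products per batch element, which
//finish_sampling_kernel_matrix then biases, squashes through the
//sigmoid and samples.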
void RBM_dp_matrix::sample_h_given_v_matrix(DTYPE *dev_v0_sample, DTYPE *dev_mean, DTYPE *dev_sample) { // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); gemm(dev_v0_sample, #ifdef WEIGHT_MATRIX_PADDING dev_W_cublas, #else dev_W, #endif dev_h_dot_product_batch, batch_size, n_hidden, n_visible, false, true, host_handle); // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); int num_blocks = ((n_hidden * batch_size - 1) / SAMPLING_KERNEL_BLOCK_SIZE) + 1; finish_sampling_kernel_matrix<<<num_blocks, SAMPLING_KERNEL_BLOCK_SIZE>>> (dev_mean, dev_h_dot_product_batch, dev_sample, dev_hbias, n_hidden, dev_curand_states); // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); } void RBM_dp_matrix::sample_v_given_h_matrix(DTYPE *h0_sample, DTYPE *v_mean, DTYPE *v_sample, DTYPE *prev_v_sample) { // cudaDeviceSynchronize();//sync from sample_h_given_v_cublas // matrix_dot_vector(W, const_n_hidden, const_n_visible, h0_sample, // v_dot_product, true, handle); gemm(h0_sample, #ifdef WEIGHT_MATRIX_PADDING dev_W_cublas, #else dev_W, #endif dev_v_dot_product_batch, batch_size, n_visible, n_hidden, false, false, host_handle); add_bias(v_mean, dev_v_dot_product_batch, dev_vbias, n_visible); save_sample_changes(v_mean, v_sample, prev_v_sample, dev_v_diffs, n_visible #ifndef BIT_CODING , vdirections #endif ); } #ifdef AGG_WARP_FILT //BEGIN: Code from url: "https://devblogs.nvidia.com/parallelforall/cuda-pro-tip-optimized-filtering-warp-aggregated-atomics/" __device__ inline int lane_id(void) { return threadIdx.x % WARP_SIZE; } __device__ int warp_bcast(int v, int leader) { return __shfl(v, leader); } // warp-aggregated atomic increment __device__ int atomicAggInc(int *ctr) { int mask = __ballot(1); // select the leader int leader = __ffs(mask) - 1; // leader does the update int res; if(lane_id() == leader) res = atomicAdd(ctr, __popc(mask)); // broadcast result res = warp_bcast(res, leader); // each thread computes its own value return res + __popc(mask & ((1 << lane_id()) - 1)); } //END //length is the length of each dot product (probably either //n_visible or n_hidden), and padded_length is the length rounded up //to the nearest warp size. //At the end of this function we want the value at diffs[0] to //represent the number of diffs that follow in the array. So we can //just send the diffs ptr to atomicAggInc so that it modifies the //first element accordingly. However, it's still really //complicated, since there are separate aggregations for each diffs //array in a batch happening simultaneously... Warps are //indivisible, but we'd need them to start their relative count //within a subwarp. The easiest way to fix this would be to set the //block size to "length" with "batch_size" blocks. However, this //would not support any layer with more than 1024 elements since //that is the max threadperblock count in CUDA. So to make it //general we round up length to the nearest divisible warp size when //calling this kernel, then perform some extra index shifting and //checking inside the kernel. As it turns out, the easy way to do //this is to allocate a 2D set of threads. // //WARNING: The diffs stored in the array will be the modded (non //batch) index, and this is for use later on in the dpmm kernel. 
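//
//A worked example of what atomicAggInc gives each caller (numbers
//are only illustrative): if five threads of one warp reach the call
//while *ctr == 10, the leader performs a single atomicAdd(ctr, 5),
//the old value 10 is broadcast to the group, and the five threads
//return 10, 11, 12, 13 and 14 in lane order, leaving *ctr == 15.
//That is the property the kernel below relies on to hand every
//flipped unit its own slot in the diffs array while diffs[0] ends up
//holding the total count.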
__global__ void save_sample_changes_kernel(DTYPE * mean, DTYPE * sample, DTYPE * prev_sample, int * diffs_batch, int length, curandState_t * curand_state_ptr #ifndef BIT_CODING , bool * directions_batch #endif ) { int i = blockDim.x * blockIdx.x + threadIdx.x; int batch_i = blockDim.y * blockIdx.y + threadIdx.y; //Note: i no longer corresponds to the correct index for mean, //sample, and prev_sample due to the padding. The actual thread //indices will be calculated later in the kernel. if((i < length) && (batch_i < const_batch_size)) { //Only activate threads that are within a batch's length //Still need a shift for diffs array (cause it has a different //length) int shifted_i = (batch_i * length) + i; //Now we begin actual calculations DTYPE prev_sample_i = prev_sample[shifted_i]; // DTYPE r = curand_uniform_DTYPE(&curand_state_ptr[shifted_i]); #ifdef USING_DOUBLES DTYPE r = curand_uniform_double(&curand_state_ptr[shifted_i]);//double mode #else DTYPE r = curand_uniform(&curand_state_ptr[shifted_i]);//float mode #endif DTYPE new_sample_i = r < mean[shifted_i] ? 1.f : 0.f;//Just to be sure it's 1 or 0 sample[shifted_i] = new_sample_i; if(new_sample_i != prev_sample_i) { #ifdef BIT_CODING //Construct even number to indicate default dot product increasing int my_diff_value = 2 * i; if(prev_sample_i != 0.f) { my_diff_value++; } #else int my_diff_value = i; #endif int diffs_shift = batch_i * (length+1); int * diffs = &diffs_batch[diffs_shift]; int my_idx = atomicAggInc(diffs); //+1 to avoid first index (stores size) diffs[my_idx + 1] = my_diff_value; #ifndef BIT_CODING bool * directions = &directions_batch[batch_i * length]; #ifdef USING_DOUBLES directions[my_idx] = (prev_sample_i == 0.0); #else directions[my_idx] = (prev_sample_i == 0.f); #endif #endif } } } #else __global__ void save_sample_changes_kernel(DTYPE * mean, DTYPE * sample, DTYPE * prev_sample, int * diffs_batch, int length, curandState_t * curand_state_ptr #ifndef BIT_CODING , bool * directions_batch #endif ) { int i = blockDim.x * blockIdx.x + threadIdx.x; int batch_i = blockDim.y * blockIdx.y + threadIdx.y; //Note: i no longer corresponds to the correct index for mean, //sample, and prev_sample due to the padding. The actual thread //indices will be calculated later in the kernel. if((i < length) && (batch_i < const_batch_size)) { //Only activate threads that are within a batch's length //Still need a shift for diffs array (cause it has a different //length) int shifted_i = (batch_i * length) + i; //Now we begin actual calculations DTYPE prev_sample_i = prev_sample[shifted_i]; // DTYPE r = curand_uniform_DTYPE(&curand_state_ptr[shifted_i]); #ifdef USING_DOUBLES DTYPE r = curand_uniform_double(&curand_state_ptr[shifted_i]);//double mode DTYPE new_sample_i = r < mean[shifted_i] ? 1.0 : 0.0;//Just to be sure it's 1 or 0 #else DTYPE r = curand_uniform(&curand_state_ptr[shifted_i]);//float mode DTYPE new_sample_i = r < mean[shifted_i] ? 
1.f : 0.f;//Just to be sure it's 1 or 0 #endif sample[shifted_i] = new_sample_i; if(new_sample_i != prev_sample_i) { //Construct even number to indicate default dot product increasing #ifdef BIT_CODING int my_diff_value = 2 * i;//instead of shifted_i if(prev_sample_i != 0.0) { //Then changing to negative, so dot product decreases, so //change to odd number (this encodes which direction the //dot product is going without extra memory usage) my_diff_value++; } #else int my_diff_value = i; #endif int diffs_shift = batch_i * (length+1); int * diffs = &diffs_batch[diffs_shift]; // int my_idx = atomicAggInc(diffs); int my_idx = atomicAdd(diffs, 1); //+1 to avoid first index (stores size) diffs[my_idx + 1] = my_diff_value; #ifndef BIT_CODING bool * directions = &directions_batch[batch_i * length]; #ifdef USING_DOUBLES directions[my_idx] = (prev_sample_i == 0.0); #else directions[my_idx] = (prev_sample_i == 0.f); #endif #endif } } } #endif __global__ void reset_diffs_counters(int * diffs, int length) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < const_batch_size) { diffs[i * (length+1)] = 0; } } //Note: sample and prev_sample could be the same pointer. void RBM_dp_matrix::save_sample_changes(DTYPE * mean, DTYPE * sample, DTYPE * prev_sample, int * diffs, int length #ifndef BIT_CODING , bool * directions #endif ) { // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); //Set initial diffs index counter to 0 for each batch reset_diffs_counters<<<((batch_size - 1) / 32) + 1, 32>>>(diffs, length); //Need to setup some special padding for this kernel // int padded_length = length + (length%WARP_SIZE); // int num_threads = batch_size * padded_length; // cout << "length: " << length << endl; // cout << "padded_length: " << padded_length << endl; // cout << "batch_size: " << batch_size << endl; // num_blocks = ((num_threads - 1) / SAVE_SAMPLE_CHANGES_KERNEL_BLOCK_SIZE) + 1; // cout << "num threads: " << num_threads << endl; // cout << "num_blocks: " << num_blocks << endl; dim3 num_blocks, num_threads; dims_to_num_threads_and_blocks(length, batch_size, num_blocks, num_threads); save_sample_changes_kernel<<<num_blocks, num_threads>>> (mean, sample, prev_sample, diffs, length, dev_curand_states #ifndef BIT_CODING ,directions #endif ); // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); //Print the changes for testing... // int * host_diffs = new int[batch_size * (length+1)]; // cudaMemcpy(host_diffs, diffs, batch_size * (length+1) * sizeof(int), // cudaMemcpyDeviceToHost); // int indices[] = {0, 1}; // for(int idx = 0; idx < 2; idx++) { // int * my_diffs = &host_diffs[indices[idx] * (length+1)]; // cout << "idx: " << idx << endl; // for(int i = 0; i < my_diffs[0]+3; i++) { // cout << my_diffs[i] << endl; // } // cout << endl; // } // delete[] host_diffs; } //This function is the same as finish_sampling_kernel except that it //doesn't do the sampling step. //This is useful for the first sample_v_given_h call since cublas //can still be used (requiring the existence of this function to //finish the biasing EFFICIENTLY (global mem coalescence)), but we //want to save changes during the sampling step. //1: Basic vector addition to add bias to mean. 
//2: Set mean[i] = sigmoid(mean[i]) //PRE: len(mean) == len(sample) == len(bias) == length __global__ void add_bias_kernel(DTYPE * mean, DTYPE * dot_product, DTYPE * bias, int length) { int i = blockDim.x * blockIdx.x + threadIdx.x; if(i < length * const_batch_size) { int length_i = i % length; #ifdef USING_DOUBLES mean[i] = 1.0 / (1.0 + exp(-(dot_product[i] + bias[length_i]))); #else mean[i] = 1.f / (1.f + exp(-(dot_product[i] + bias[length_i]))); #endif } } void RBM_dp_matrix::add_bias(DTYPE * mean, DTYPE * dot_product, DTYPE * bias, int length) { int num_blocks = (((length*batch_size) - 1) / ADD_BIAS_KERNEL_BLOCK_SIZE) + 1; add_bias_kernel<<<num_blocks, ADD_BIAS_KERNEL_BLOCK_SIZE>>> (mean, dot_product, bias, length); // cudaDeviceSynchronize(); } //len_diffs is the actual length of each array in diffs_batch __global__ void dpmm_kernel(DTYPE * W, int m, int n, int * diffs_batch, int len_diffs, DTYPE * dot_product_batch, int len_dot_product, bool transpose #ifndef BIT_CODING , bool * directions #endif ) { // extern __shared__ int diffs_batch_sh[]; // __shared__ int result_inc[1024];//TODO int i = blockDim.x * blockIdx.x + threadIdx.x; int batch_i = blockDim.y * blockIdx.y + threadIdx.y; //TODO: Implement a universal diffs transfer to more efficient //memory so that accessing diffs takes less time... if((i < len_dot_product) && (batch_i < const_batch_size)) { //Setup shifted pointers DTYPE * dot_product = &dot_product_batch[batch_i * len_dot_product]; DTYPE result_i_inc = 0.0f; int num_diffs = diffs_batch[batch_i * len_diffs]; // if(num_diffs > len_dot_product) printf("PROBLEM\n"); // if(i < num_diffs+1) {//WARNING: Need to fix potential missing threads // diffs_batch_sh[i] = diffs_batch[(batch_i * len_diffs) + i]; // } int my_base = (batch_i * len_diffs); int my_dir_base = (batch_i * (len_diffs-1)) - 1; for(int diffs_idx = 1; diffs_idx < num_diffs+1; diffs_idx++) { int curr_diff = diffs_batch[my_base + diffs_idx]; // int curr_diff = diffs_batch_sh[diffs_idx]; #ifdef BIT_CODING int diff_value = curr_diff >> 1; bool diff_direction = (curr_diff & 1) == 0; #else int diff_value = curr_diff; bool diff_direction = directions[my_dir_base + diffs_idx]; #endif // int curr_diff = diffs_sh[diffs_idx-1]; int idx; // //ASSERT: m = n_hidden, n = n_visible #ifdef MULTI_WEIGHT_MATRIX #ifdef WEIGHT_MATRIX_PADDING if(transpose) //Then W2 is being used idx = diff_value * (const_pitch2/sizeof(DTYPE)) + i; else idx = diff_value * (const_pitch/sizeof(DTYPE)) + i; #else if(transpose) idx = diff_value * m + i; else idx = diff_value * n + i; #endif #else #ifdef WEIGHT_MATRIX_PADDING if(transpose) idx = i * (const_pitch/sizeof(DTYPE)) + diff_value; else idx = diff_value * (const_pitch/sizeof(DTYPE)) + i; #else if(transpose) idx = i * n + diff_value; else idx = diff_value * n + i; #endif #endif // result_i_inc += W[idx] * (curr_diff % 2 == 0 ? 1.0 : -1.0); // if(diff_direction) // result_i_inc += W[idx]; // else // result_i_inc -= W[idx]; result_i_inc += W[idx] * (diff_direction ? 1 : -1); // result_inc[i] += W[idx] * (diff_direction ? 1 : -1); } dot_product[i] += result_i_inc; } } #ifndef USE_DPMM //num blocks in y dimension, must be >=3 for float to limit shared memory, >=6 for doubles (probably) #define DPVM_Y_NUM_BLOCKS 3 //This determines the number of blocks in the z dimension. //Increasing yields more parallelism, but less shared memory reuse. 
#define NUM_BATCH_BLOCKS 16 __global__ void dpvm_kernel(DTYPE * W, int m, int n, int * diffs_batch, int len_diffs, DTYPE * dot_product_batch, int len_dot_product, bool transpose #ifndef BIT_CODING , bool * directions_batch #endif ) { __shared__ float block_results[WARP_SIZE]; __shared__ DTYPE W_sh[WARP_SIZE * (784 / DPVM_Y_NUM_BLOCKS)];//diffs index the W matrix's rows (len_diffs-1), TODO: upgrade to dynamic to fix int W_col_idx = blockDim.x * blockIdx.x + threadIdx.x;//Used for weight matrix accessing //Transfer all W data to shared memory (can this be upgraded to conditional transfer if used?). int num_W_rows_per_block = (len_diffs - 1) / DPVM_Y_NUM_BLOCKS; for(int i_iter = 0; i_iter < num_W_rows_per_block; i_iter++) { int i = blockIdx.y * num_W_rows_per_block + i_iter;//i is the row if((i < len_diffs - 1) && (W_col_idx < len_dot_product)) { W_sh[i_iter * WARP_SIZE + threadIdx.x] = W[i * (transpose ? m:n) + W_col_idx]; } } // __syncthreads(); // int y = threadIdx.y; // int block_results_idx = threadIdx.y * blockDim.x + threadIdx.x;//Used later as well int block_results_idx = threadIdx.x;//Used later as well //setup ptrs for specific batch and iterate int num_batch_els = const_batch_size / NUM_BATCH_BLOCKS; //an iter (batch_iter) is the iteration index we're on, the index (batch_i) is the batch element! Important distinction here for(int batch_iter = 0; batch_iter < num_batch_els; batch_iter++) { int batch_i = blockIdx.z * num_batch_els + batch_iter; int * diffs = &diffs_batch[len_diffs * batch_i]; DTYPE * dot_product = &dot_product_batch[len_dot_product * batch_i]; bool * directions = &directions_batch[(len_diffs-1) * batch_i]; //Reset the results shared mem block_results[block_results_idx] = 0.f; //TODO: Implement a universal diffs transfer to more efficient //memory so that accessing diffs takes less time... if(W_col_idx < len_dot_product) {//TODO: consider moving up //Setup shifted pointers int num_diffs = diffs[0]; // int num_diffs_iters = ((num_diffs-1) / DPVM_Y_BLOCK_SIZE) + 1 + 1; //+1 for normal blocking calculation, +1 for diffs array shift for(int diffs_idx = 1; diffs_idx < num_diffs+1; diffs_idx++) { // int diffs_idx = diffs_base_idx + threadIdx.y; // if(diffs_idx - 1 < num_diffs) { int curr_diff = diffs[diffs_idx]; #ifdef BIT_CODING int diff_value = curr_diff >> 1; bool diff_direction = (curr_diff & 1) == 0; #else int diff_value = curr_diff; bool diff_direction = directions[diffs_idx-1]; #endif //Lots of unnecessary checks, but whatevs if((blockIdx.y * num_W_rows_per_block <= diff_value) && (diff_value < (blockIdx.y+1) * num_W_rows_per_block)) { // int curr_diff = diffs_sh[diffs_idx-1]; int idx; // //ASSERT: m = n_hidden, n = n_visible #ifdef MULTI_WEIGHT_MATRIX #ifdef WEIGHT_MATRIX_PADDING if(transpose) //Then W2 is being used idx = diff_value * (const_pitch2/sizeof(DTYPE)) + i; else idx = diff_value * (const_pitch/sizeof(DTYPE)) + i; #else // if(transpose) // idx = diff_value * m + i; // else // idx = diff_value * n + i; // if(transpose) idx = (diff_value - (blockIdx.y * num_W_rows_per_block)) * WARP_SIZE + threadIdx.x; // else // idx = diff_value * WARP_SIZE #endif #else #ifdef WEIGHT_MATRIX_PADDING if(transpose) idx = i * (const_pitch/sizeof(DTYPE)) + diff_value; else idx = diff_value * (const_pitch/sizeof(DTYPE)) + i; #else if(transpose) idx = i * n + diff_value; else idx = diff_value * n + i; #endif #endif // result_i_inc += W[idx] * (diff_direction ? 1 : -1); // block_results[block_results_idx] += W[idx] * (diff_direction ? 
1 : -1); block_results[block_results_idx] += W_sh[idx] * (diff_direction ? 1 : -1); // } } } atomicAdd(&dot_product[W_col_idx], block_results[block_results_idx]); // __syncthreads(); // if(threadIdx.y == 0) { /* for(int k = 0; k < DPVM_Y_BLOCK_SIZE; k++) { dot_product[i] += block_results[k * blockDim.x + threadIdx.x]; }//incrementing in shared memory isn't faster, I guess it's not a bottleneck. */ /* for(int k = 1; k < DPVM_Y_BLOCK_SIZE; k++) { block_results[threadIdx.x] += block_results[k * blockDim.x + threadIdx.x]; } dot_product[W_col_idx] += block_results[threadIdx.x]; */ // } // __syncthreads();//blocks could race ahead and reset block_results before being read } /* //Parallel reduce to first row of shared memory for the block int num_reducing = DPVM_Y_BLOCK_SIZE / 2; int access_shift = 1; while(num_reducing != 0) { if(threadIdx.y % (DPVM_Y_BLOCK_SIZE / num_reducing) == 0) block_results[block_results_idx] += block_results[block_results_idx + (access_shift * blockDim.x)]; num_reducing /= 2; access_shift *= 2; __syncthreads(); } //Send to global memory if(threadIdx.y == 0) dot_product[i] += block_results[threadIdx.x]; */ } } #endif void RBM_dp_matrix::dpmm(int m, int n, int * diffs, DTYPE * dot_product, bool transpose #ifndef BIT_CODING , bool * directions #endif ) { // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); //Figure out the correct length of the dot_product based on transpose: int len_dot_product = transpose ? m : n; int len_diffs = transpose ? n+1 : m+1;//diffs arrays are longer // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); #ifdef MULTI_WEIGHT_MATRIX DTYPE * corrected_W = transpose ? dev_W2 : dev_W; #else DTYPE * corrected_W = dev_W; #endif #ifdef USE_DPMM dim3 num_blocks(getNumBlocks(len_dot_product, WARP_SIZE), getNumBlocks(batch_size, DPMM_Y_BLOCK_SIZE)); dim3 num_threads(WARP_SIZE, DPMM_Y_BLOCK_SIZE); dpmm_kernel<<<num_blocks, num_threads>>> (corrected_W, m, n, diffs, len_diffs, dot_product, len_dot_product, transpose #ifndef BIT_CODING , directions #endif ); #else // int num_blocks = getNumBlocks(len_dot_product, WARP_SIZE); // int num_threads = WARP_SIZE; dim3 num_blocks(getNumBlocks(len_dot_product, WARP_SIZE), DPVM_Y_NUM_BLOCKS, NUM_BATCH_BLOCKS); dim3 num_threads(WARP_SIZE, 1, 1);//only 1 z thd, since it will iterate dpvm_kernel<<<num_blocks, num_threads>>> (corrected_W, m, n, diffs, len_diffs, dot_product, len_dot_product, transpose #ifndef BIT_CODING , directions #endif ); /* dim3 num_blocks(getNumBlocks(len_dot_product, WARP_SIZE), 1); dim3 num_threads(WARP_SIZE, DPVM_Y_BLOCK_SIZE); for(int i = 0; i < batch_size; i++) { dpvm_kernel<<<num_blocks, num_threads>>> (corrected_W, m, n, diffs + (len_diffs * i), len_diffs, dot_product + (len_dot_product * i), len_dot_product, transpose #ifndef BIT_CODING , directions + (i * (len_diffs - 1)) #endif ); } */ #endif // cudaDeviceSynchronize(); // double t2 = get_wall_time(); // cout << "dpmm microseconds: " << (t2-t1) * 1000000 << endl; //doubles // dpmm: 0.000123024 // dpmm: 0.000359058 //floats // dpmm: 0.00114083 <<< 10X slower? // dpmm: 0.000381947 // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); } //PRE: prev_sample should be the last sample array that was sent // into a sample_h_given_v* call. //TODO: This one is taking a long time (around 0.9-1.4 ms) // s_v_g_h_delta only takes 0.38-0.39ms // Room for improvement! 
void RBM_dp_matrix::sample_h_given_v_delta(DTYPE * v0_sample, DTYPE * h_mean, DTYPE * h_sample, DTYPE * prev_h_sample) { // cerr << "sample_h_given_v_delta\n"; // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); //This code performs a column analysis and prints if there's a column with no diffs. int * v_diffs = new int[(n_visible+1) * batch_size]; cudaMemcpy(v_diffs, dev_v_diffs, (n_visible+1) * batch_size * sizeof(int), cudaMemcpyDeviceToHost); bool * v_checked = new bool[n_visible]; for(int i = 0; i < n_visible; i++) { v_checked[i] = true; } for(int row = 0; row < batch_size; row++) { int num_diffs = v_diffs[row * (n_visible+1)]; // cout << "num_diffs = " << num_diffs << endl; for(int i = 1; i < num_diffs+1; i++) { int col = v_diffs[row * (n_visible+1) + i]; v_checked[col] = false; } } int num_cols_empty = 0; for(int i = 0; i < n_visible; i++) { if(v_checked[i]) { num_cols_empty++; // cout << i << " is EMPTY" << endl; } } // cout << "percent cols empty = " << (num_cols_empty / ((float) n_visible)) << endl; delete[] v_diffs; delete[] v_checked; // int num_diffs_first_v; // cudaMemcpy(&num_diffs_first_v, dev_v_diffs, sizeof(int), cudaMemcpyDeviceToHost); // cout << "v percent diff: " << (num_diffs_first_v/((float)n_visible)) << endl; // cout << ": " << (num_diffs_first_v/((float)n_visible)) << endl; dpmm(n_hidden, n_visible, dev_v_diffs, dev_h_dot_product_batch, true #ifndef BIT_CODING , vdirections #endif ); // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); add_bias(h_mean, dev_h_dot_product_batch, dev_hbias, n_hidden); // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); save_sample_changes(h_mean, h_sample, prev_h_sample, dev_h_diffs, n_hidden #ifndef BIT_CODING , hdirections #endif ); // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); } //PRE: prev_sample should be the last sample array that was sent // into a sample_v_given_h* call. // All paramters should be device allocated. 
void RBM_dp_matrix::sample_v_given_h_delta(DTYPE * h0_sample, DTYPE * v_mean, DTYPE * v_sample, DTYPE * prev_v_sample) { // cerr << "sample_v_given_h_delta\n"; // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); //TODO: Fix MULTI_WEIGHT_MATRIX // cerr << "n_hidden: " << n_hidden << endl; // cerr << "n_visible: " << n_visible << endl; // int num_diffs_first_h; // cudaMemcpy(&num_diffs_first_h, dev_h_diffs, sizeof(int), cudaMemcpyDeviceToHost); // cout << "h percent diff: " << (num_diffs_first_h/((float)n_hidden)) << endl; dpmm(n_hidden, n_visible, dev_h_diffs, dev_v_dot_product_batch, false #ifndef BIT_CODING , hdirections #endif ); // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); add_bias(v_mean, dev_v_dot_product_batch, dev_vbias, n_visible); // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); save_sample_changes(v_mean, v_sample, prev_v_sample, dev_v_diffs, n_visible #ifndef BIT_CODING , vdirections #endif ); // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); } // #ifdef MULTI_WEIGHT_MATRIX // __global__ // void write_matrix_transpose(DTYPE * W, DTYPE * W2) { // int x = blockIdx.x * blockDim.x + threadIdx.x; // int y = blockIdx.y * blockDim.y + threadIdx.y; // __shared__ DTYPE my_tile[MAX_THREAD_SQUARE_EDGE][MAX_THREAD_SQUARE_EDGE]; // if((x < const_n_visible) && (y < const_n_hidden)) { // my_tile[threadIdx.y][threadIdx.x] = W[y * const_n_visible + x]; // } // __syncthreads(); // if((y < const_n_visible) && (x < const_n_hidden)) { // } // } // __global__ void write_matrix_transpose(float *W, const float *W2) // { // __shared__ float tile[MAX_THREAD_SQUARE_EDGE][MAX_THREAD_SQUARE_EDGE+1]; // int x = blockIdx.x * MAX_THREAD_SQUARE_EDGE + threadIdx.x; // int y = blockIdx.y * MAX_THREAD_SQUARE_EDGE + threadIdx.y; // int width = gridDim.x * MAX_THREAD_SQUARE_EDGE; // for (int j = 0; j < MAX_THREAD_SQUARE_EDGE; j += BLOCK_ROWS) // tile[(threadIdx.y+j)*MAX_THREAD_SQUARE_EDGE + threadIdx.x] = W[(y+j)*width + x]; // __syncthreads(); // for (int j = 0; j < MAX_THREAD_SQUARE_EDGE; j += BLOCK_ROWS) // W2[(y+j)*width + x] = tile[(threadIdx.y+j)*MAX_THREAD_SQUARE_EDGE + threadIdx.x]; // } // #endif // #define TILE_DIM 32 //PRE: src has const_n_hidden rows and const_n_visible columns. // dest is allocated with const_n_hidden * const_n_visible elements //POST: Writes the tranpose of src to dest. 
// __global__ // void write_matrix_transpose(DTYPE * src, DTYPE * dest) { // __shared__ float tile[MAX_THREAD_SQUARE_EDGE][MAX_THREAD_SQUARE_EDGE]; // int x = blockIdx.x * blockDim.x + threadIdx.x; // int y = blockIdx.y * blockDim.y + threadIdx.y; // if((x < const_n_visible) && (y < const_n_hidden)) { // tile[threadIdx.y][threadIdx.x] = src[y*const_n_visible + x]; // } // __syncthreads(); // x = blockIdx.y * blockDim.y + threadIdx.x; // transpose block offset // y = blockIdx.x * blockDim.x + threadIdx.y; // if((x < const_n_hidden) && (y < const_n_visible)) { // dest[y*const_n_hidden + x] = tile[threadIdx.x][threadIdx.y]; // } // } void RBM_dp_matrix::gibbs_hvh_delta(DTYPE *h0_sample, DTYPE *nv_means, DTYPE *nv_samples, DTYPE *nh_means, DTYPE *nh_samples) { sample_v_given_h_delta(h0_sample , nv_means, nv_samples, nv_samples); sample_h_given_v_delta(nv_samples, nh_means, nh_samples, nh_samples); } DTYPE abs(DTYPE d) { if(d < 0) return -d; return d; } void compare_W_W2(DTYPE * dev_W, DTYPE * dev_W2, int n_visible, int n_hidden) { DTYPE * W = new DTYPE[n_visible * n_hidden]; DTYPE * W2 = new DTYPE[n_visible * n_hidden]; cudaMemcpy(W, dev_W, sizeof(DTYPE) * n_visible * n_hidden, cudaMemcpyDeviceToHost); cudaMemcpy(W2, dev_W2, sizeof(DTYPE) * n_visible * n_hidden, cudaMemcpyDeviceToHost); DTYPE total_diff = 0.0; for(int row = 0; row < n_hidden; row++) { for(int col = 0; col < n_visible; col++) { total_diff += abs(W[row * n_visible + col] - W2[col*n_hidden + row]); } } cout << "W diff: " << total_diff << endl; delete[] W; delete[] W2; } void RBM_dp_matrix::contrastive_divergence(int curr_i, DTYPE lr, DTYPE wc, DTYPE * dev_data) { // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); //TODO: Upgrade to use the multi-weight matrix functionality // reset_d_arrays(); DTYPE * dev_input = &dev_data[data_num_cols * (curr_i*batch_size)]; sample_h_given_v_matrix(dev_input , dev_ph_mean_batch , dev_ph_sample_batch); // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); sample_v_given_h_matrix(dev_ph_sample_batch, dev_nv_means_batch, dev_nv_samples_batch, dev_input);//include dev_input as the //previous sample // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); sample_h_given_v_delta(dev_nv_samples_batch, dev_nh_means_batch, dev_nh_samples_batch, dev_ph_sample_batch); for(int step = 1; step < k; step++) { // sample_v_given_h_delta(dev_nh_samples_batch, dev_nv_means_batch, dev_nv_samples_batch, // dev_nv_samples_batch); // cerr << "sample_h_given_v_delta\n"; // sample_h_given_v_delta(dev_nv_samples_batch, dev_nh_means_batch, dev_nh_samples_batch, // dev_nh_samples_batch); // cerr << "gibbs_hvh_delta\n"; gibbs_hvh_delta(dev_nh_samples_batch, dev_nv_means_batch, dev_nv_samples_batch, dev_nh_means_batch, dev_nh_samples_batch); } // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); int most_nodes = max(n_hidden, n_visible); dim3 num_blocks, num_threads; dims_to_num_threads_and_blocks(n_visible, n_hidden, num_blocks, num_threads); // #ifdef SIMULTANEOUS_EXECUTION // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); // GET_TIME(t1); //MULTI_WEIGHT_MATRIX only adds 0.00035 seconds extra time onto // what was originally 0.00195 seconds. 
(GTX 980 test) write_weights <<< num_blocks, num_threads>>> (dev_data, dev_W, #ifdef MULTI_WEIGHT_MATRIX dev_W2, #endif lr, wc, dev_ph_mean_batch, dev_nv_means_batch, dev_nh_means_batch, dev_ph_sample_batch, dev_nv_samples_batch, dev_nh_samples_batch, dev_hbias, dev_vbias, dev_dhbias, dev_dvbias, data_num_rows, data_num_cols, curr_i); // #ifdef MULTI_WEIGHT_MATRIX // write_transpose<<< num_blocks, num_threads>>> (dev_W, dev_W2); // #endif #ifdef MULTI_WEIGHT_MATRIX #ifdef EFFICIENT_TRANSPOSE dim3 dimGrid((n_visible-1)/MAX_THREAD_SQUARE_EDGE+1, (n_hidden-1)/MAX_THREAD_SQUARE_EDGE+1); dim3 dimBlock(MAX_THREAD_SQUARE_EDGE, BLOCK_ROWS); //Need to update W2 efficiently // dims_to_num_threads_and_blocks(most_nodes, most_nodes, num_blocks, num_threads); #ifdef WEIGHT_MATRIX_PADDING write_matrix_transpose_pitch<<<dimGrid, dimBlock>>>(dev_W, dev_W2); #else write_matrix_transpose<<<dimGrid, dimBlock>>>(dev_W, dev_W2); #endif #endif #endif #ifdef WEIGHT_MATRIX_PADDING //Cant go device to device, so have to go hacky and copy it out //to main memory then back CUDA_CHECK(cudaMemcpy2D(WArray, n_visible * sizeof(DTYPE), dev_W, pitch, n_visible * sizeof(DTYPE), n_hidden, cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaMemcpy(dev_W_cublas, WArray, n_visible * n_hidden * sizeof(DTYPE), cudaMemcpyHostToDevice)); // CUDA_CHECK(cudaMemcpy2D(dev_W_cublas, n_visible * sizeof(DTYPE), dev_W, pitch, // n_visible * sizeof(DTYPE), n_hidden * sizeof(DTYPE), // cudaMemcpyDeviceToDevice)); #endif // compare_W_W2(dev_W, dev_W2, n_visible, n_hidden); // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); // GET_TIME(t2); // cerr << "weight matrix update time: " << get_duration(t1, t2) << endl; int num_bias_blocks = 1 + ((most_nodes-1) / WRITE_BIAS_KERNEL_BLOCK_SIZE); int num_bias_threads = WRITE_BIAS_KERNEL_BLOCK_SIZE; write_bias_results_to_memory <<< num_bias_blocks, num_bias_threads>>> (dev_data, lr, wc, dev_ph_mean_batch, dev_nv_means_batch, dev_nh_means_batch, dev_ph_sample_batch, dev_nv_samples_batch, dev_nh_samples_batch, dev_hbias, dev_vbias, dev_dhbias, dev_dvbias, data_num_rows, data_num_cols, curr_i); // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); // GET_TIME(k2_t2); // cerr << "k2 time: " << get_duration(k2_t1, k2_t2) << endl; } void RBM_dp_matrix::reset_d_arrays() { //Since dW_pitch is the width of the dev_dW array rows, we //multiply by the number of rows (n_hidden) to get the number of //bytes to reset: // CUDA_CHECK(cudaMemset(dev_dhbias, 0, n_hidden * sizeof(DTYPE))); // CUDA_CHECK(cudaMemset(dev_dvbias, 0, n_visible * sizeof(DTYPE))); // CUDA_CHECK(cudaMemset(dev_ph_mean_batch , 0, sizeof(DTYPE) * n_hidden * batch_size)); // CUDA_CHECK(cudaMemset(dev_nv_means_batch , 0, sizeof(DTYPE) * n_visible * batch_size)); // CUDA_CHECK(cudaMemset(dev_nh_means_batch , 0, sizeof(DTYPE) * n_hidden * batch_size)); // CUDA_CHECK(cudaMemset(dev_ph_sample_batch , 0, sizeof(DTYPE) * n_hidden * batch_size)); // CUDA_CHECK(cudaMemset(dev_nv_samples_batch, 0, sizeof(DTYPE) * n_visible * batch_size)); // CUDA_CHECK(cudaMemset(dev_nh_samples_batch, 0, sizeof(DTYPE) * n_hidden * batch_size)); } void RBM_dp_matrix::allocate_special_memory() { // data = new DTYPE[data_num_rows * data_num_cols]; // for(int i = 0; i < data_num_rows * data_num_cols; i++) { // data[i] = (DTYPE) int_data[i]; // } // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); CUDA_CHECK(cudaMalloc((void**)&dev_ph_sample_batch , sizeof(DTYPE) * n_hidden * batch_size)); CUDA_CHECK(cudaMalloc((void**)&dev_nv_samples_batch, sizeof(DTYPE) * 
n_visible * batch_size)); CUDA_CHECK(cudaMalloc((void**)&dev_nh_samples_batch, sizeof(DTYPE) * n_hidden * batch_size)); #ifdef WEIGHT_MATRIX_PADDING CUDA_CHECK(cudaMallocPitch((void**)&dev_W, &pitch, n_visible * sizeof(DTYPE), n_hidden)); //Copy pitch to const memory cudaMemcpyToSymbol(const_pitch, &pitch , sizeof(size_t)); matrixToArray(W, WArray , n_hidden, n_visible); CUDA_CHECK(cudaMemcpy2D(dev_W, pitch, WArray, n_visible * sizeof(DTYPE), n_visible * sizeof(DTYPE), n_hidden, cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMalloc((void**)&dev_W_cublas, n_hidden * n_visible * sizeof(DTYPE))); // CUDA_CHECK(cudaMemcpy2D(dev_W_cublas, n_visible * sizeof(DTYPE), dev_W, pitch, // n_visible * sizeof(DTYPE), n_hidden * sizeof(DTYPE), // cudaMemcpyDeviceToDevice)); CUDA_CHECK(cudaMemcpy(dev_W_cublas, WArray, n_hidden * n_visible * sizeof(DTYPE), cudaMemcpyHostToDevice)); #else CUDA_CHECK(cudaMalloc((void**)&dev_W , n_hidden * n_visible * sizeof(DTYPE))); matrixToArray (W, WArray , n_hidden, n_visible); CUDA_CHECK(cudaMemcpy(dev_W, WArray, n_hidden * n_visible * sizeof(DTYPE), cudaMemcpyHostToDevice)); #endif //Allocate transpose(s) #ifdef MULTI_WEIGHT_MATRIX #ifdef WEIGHT_MATRIX_PADDING CUDA_CHECK(cudaMallocPitch((void**)&dev_W2, &pitch2, n_hidden * sizeof(DTYPE), n_visible)); cudaMemcpyToSymbol(const_pitch2, &pitch2 , sizeof(size_t)); matrixToArrayTrans(W, WArray2, n_hidden, n_visible); CUDA_CHECK(cudaMemcpy2D(dev_W2, pitch2, WArray2, n_hidden * sizeof(DTYPE), n_hidden * sizeof(DTYPE), n_visible, cudaMemcpyHostToDevice)); #else CUDA_CHECK(cudaMalloc((void**)&dev_W2, n_hidden * n_visible * sizeof(DTYPE))); matrixToArrayTrans(W, WArray2, n_hidden, n_visible); CUDA_CHECK(cudaMemcpy(dev_W2, WArray2, n_hidden * n_visible * sizeof(DTYPE), cudaMemcpyHostToDevice)); #endif #endif // CUDA_CHECK(cudaMalloc((void**)&dev_data, // data_num_rows * data_num_cols * sizeof(DTYPE))); // CUDA_CHECK(cudaMemcpy(dev_data, data, data_num_rows * data_num_cols * sizeof(DTYPE), // cudaMemcpyHostToDevice)); CUDA_CHECK(cudaMalloc((void**)&dev_h_dot_product_batch, sizeof(DTYPE) * n_hidden * batch_size)); CUDA_CHECK(cudaMalloc((void**)&dev_v_dot_product_batch, sizeof(DTYPE) * n_visible * batch_size)); // cudaDeviceSynchronize(); // CUDA_CHECK(cudaGetLastError()); } void RBM_dp_matrix::copy_matrices_to_host() { #ifdef WEIGHT_MATRIX_PADDING CUDA_CHECK(cudaMemcpy2D(WArray, n_visible * sizeof(DTYPE), dev_W, pitch, n_visible * sizeof(DTYPE), n_hidden, cudaMemcpyDeviceToHost)); #else CUDA_CHECK(cudaMemcpy(WArray, dev_W, n_hidden * n_visible * sizeof(DTYPE), cudaMemcpyDeviceToHost)); #endif arrayToMatrix(WArray, W, n_hidden, n_visible); CUDA_CHECK(cudaMemcpy(vbias, dev_vbias, n_visible * sizeof(DTYPE), cudaMemcpyDeviceToHost)); CUDA_CHECK(cudaMemcpy(hbias, dev_hbias, n_hidden * sizeof(DTYPE), cudaMemcpyDeviceToHost)); #ifdef DEBUG DTYPE * nh_samples = new DTYPE[n_hidden * batch_size]; DTYPE * nv_samples = new DTYPE[n_visible * batch_size]; DTYPE * nh_means = new DTYPE[n_hidden * batch_size]; DTYPE * nv_means = new DTYPE[n_visible * batch_size]; DTYPE * ph_means = new DTYPE[n_hidden * batch_size]; DTYPE * ph_samples = new DTYPE[n_visible * batch_size]; cudaMemcpy(nh_samples, dev_nh_samples_batch, n_hidden * batch_size * sizeof(DTYPE), cudaMemcpyDeviceToHost); cudaMemcpy(nv_samples, dev_nv_samples_batch, n_visible * batch_size * sizeof(DTYPE), cudaMemcpyDeviceToHost); cudaMemcpy(nh_means, dev_nh_means_batch, n_hidden * batch_size * sizeof(DTYPE), cudaMemcpyDeviceToHost); cudaMemcpy(nv_means, dev_nv_means_batch, n_visible * batch_size * 
sizeof(DTYPE), cudaMemcpyDeviceToHost); cudaMemcpy(ph_samples, dev_ph_sample_batch, n_hidden * batch_size * sizeof(DTYPE), cudaMemcpyDeviceToHost); cudaMemcpy(ph_means , dev_ph_mean_batch, n_hidden * batch_size * sizeof(DTYPE), cudaMemcpyDeviceToHost); int count = 5; cout << "nh_samples:\n"; for(int i = 0; i < count; i++) { cout << nh_samples[i] << endl; } cout << "...\n"; cout << "nv_samples:\n"; for(int i = 0; i < count; i++) { cout << nv_samples[i] << endl; } cout << "...\n"; cout << "nh_means:\n"; for(int i = 0; i < count; i++) { cout << nh_means[i] << endl; } cout << "...\n"; cout << "nv_means:\n"; for(int i = 0; i < count; i++) { cout << nv_means[i] << endl; } cout << "...\n"; cout << "ph_samples:\n"; for(int i = 0; i < count; i++) { cout << ph_samples[i] << endl; } cout << "...\n"; cout << "ph_means:\n"; for(int i = 0; i < count; i++) { cout << ph_means[i] << endl; } cout << "...\n"; delete[] nh_samples; delete[] nv_samples; delete[] nh_means; delete[] nv_means; delete[] ph_means; delete[] ph_samples; #endif } RBM_dp_matrix::~RBM_dp_matrix() { cublasDestroy(host_handle); #ifdef SAVE_WEIGHTS saveWeightMatrix(); #endif cudaFree(dev_h_diffs); cudaFree(dev_v_diffs); cudaFree(dev_h_dot_product_batch); cudaFree(dev_v_dot_product_batch); // destroy_cublas_handle<<<1,1>>>(dev_handle); // cudaFree(dev_handle); #ifdef MULTI_WEIGHT_MATRIX delete[] WArray2; cudaFree(dev_W2); #endif #ifdef WEIGHT_MATRIX_PADDING cudaFree(dev_W_cublas); #endif #ifndef BIT_CODING cudaFree(hdirections); cudaFree(vdirections); #endif } }
997281c41ba1a52b3d07f823d668367adf3cf79a.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <cusparse_v2.h> #include <nsparse.hpp> #include <CSR.hpp> #include <SpGEMM.hpp> #include <HashSpGEMM_volta.hpp> typedef int IT; #ifdef FLOAT typedef float VT; #else typedef double VT; #endif template <class idType, class valType> void spgemm_hash(CSR<idType, valType> a, CSR<idType, valType> b, CSR<idType, valType> &c) { idType i; long long int flop_count; hipEvent_t event[2]; float msec, ave_msec, flops; for (i = 0; i < 2; i++) { hipEventCreate(&(event[i])); } /* Memcpy A and B from Host to Device */ a.memcpyHtD(); b.memcpyHtD(); /* Count flop of SpGEMM computation */ get_spgemm_flop(a, b, flop_count); /* Execution of SpGEMM on Device */ ave_msec = 0; for (i = 0; i < SpGEMM_TRI_NUM; i++) { if (i > 0) { c.release_csr(); } hipEventRecord(event[0], 0); SpGEMM_Hash(a, b, c); hipEventRecord(event[1], 0); hipDeviceSynchronize(); hipEventElapsedTime(&msec, event[0], event[1]); if (i > 0) { ave_msec += msec; } } ave_msec /= SpGEMM_TRI_NUM - 1; flops = (float)(flop_count) / 1000 / 1000 / ave_msec; printf("SpGEMM using CSR format (Hash): %f[GFLOPS], %f[ms]\n", flops, ave_msec); /* Numeric Only */ ave_msec = 0; for (i = 0; i < SpGEMM_TRI_NUM; i++) { hipEventRecord(event[0], 0); SpGEMM_Hash_Numeric(a, b, c); hipEventRecord(event[1], 0); hipDeviceSynchronize(); hipEventElapsedTime(&msec, event[0], event[1]); if (i > 0) { ave_msec += msec; } } ave_msec /= SpGEMM_TRI_NUM - 1; flops = (float)(flop_count) / 1000 / 1000 / ave_msec; printf("SpGEMM using CSR format (Hash, only numeric phase): %f[GFLOPS], %f[ms]\n", flops, ave_msec); c.memcpyDtH(); c.release_csr(); #ifdef sfDEBUG CSR<IT, VT> cusparse_c; SpGEMM_cuSPARSE(a, b, cusparse_c); if (c == cusparse_c) { std::cout << "HashSpGEMM is correctly executed" << std::endl; } std::cout << "Nnz of A: " << a.nnz << std::endl; std::cout << "Number of intermediate products: " << flop_count / 2 << std::endl; std::cout << "Nnz of C: " << c.nnz << std::endl; cusparse_c.release_cpu_csr(); #endif a.release_csr(); b.release_csr(); for (i = 0; i < 2; i++) { hipEventDestroy(event[i]); } } /*Main Function*/ int main(int argc, char *argv[]) { CSR<IT, VT> a, b, c; /* Set CSR reding from MM file or generating random matrix */ std::cout << "Initialize Matrix A" << std::endl; std::cout << "Read matrix data from " << argv[1] << std::endl; a.init_data_from_mtx(argv[1]); std::cout << "Initialize Matrix B" << std::endl; std::cout << "Read matrix data from " << argv[1] << std::endl; b.init_data_from_mtx(argv[1]); /* Execution of SpGEMM on GPU */ spgemm_hash(a, b, c); a.release_cpu_csr(); b.release_cpu_csr(); c.release_cpu_csr(); return 0; }
997281c41ba1a52b3d07f823d668367adf3cf79a.cu
#include <stdio.h> #include <stdlib.h> #include <sys/time.h> #include <math.h> #include <cuda.h> #include <helper_cuda.h> #include <cusparse_v2.h> #include <nsparse.hpp> #include <CSR.hpp> #include <SpGEMM.hpp> #include <HashSpGEMM_volta.hpp> typedef int IT; #ifdef FLOAT typedef float VT; #else typedef double VT; #endif template <class idType, class valType> void spgemm_hash(CSR<idType, valType> a, CSR<idType, valType> b, CSR<idType, valType> &c) { idType i; long long int flop_count; cudaEvent_t event[2]; float msec, ave_msec, flops; for (i = 0; i < 2; i++) { cudaEventCreate(&(event[i])); } /* Memcpy A and B from Host to Device */ a.memcpyHtD(); b.memcpyHtD(); /* Count flop of SpGEMM computation */ get_spgemm_flop(a, b, flop_count); /* Execution of SpGEMM on Device */ ave_msec = 0; for (i = 0; i < SpGEMM_TRI_NUM; i++) { if (i > 0) { c.release_csr(); } cudaEventRecord(event[0], 0); SpGEMM_Hash(a, b, c); cudaEventRecord(event[1], 0); cudaDeviceSynchronize(); cudaEventElapsedTime(&msec, event[0], event[1]); if (i > 0) { ave_msec += msec; } } ave_msec /= SpGEMM_TRI_NUM - 1; flops = (float)(flop_count) / 1000 / 1000 / ave_msec; printf("SpGEMM using CSR format (Hash): %f[GFLOPS], %f[ms]\n", flops, ave_msec); /* Numeric Only */ ave_msec = 0; for (i = 0; i < SpGEMM_TRI_NUM; i++) { cudaEventRecord(event[0], 0); SpGEMM_Hash_Numeric(a, b, c); cudaEventRecord(event[1], 0); cudaDeviceSynchronize(); cudaEventElapsedTime(&msec, event[0], event[1]); if (i > 0) { ave_msec += msec; } } ave_msec /= SpGEMM_TRI_NUM - 1; flops = (float)(flop_count) / 1000 / 1000 / ave_msec; printf("SpGEMM using CSR format (Hash, only numeric phase): %f[GFLOPS], %f[ms]\n", flops, ave_msec); c.memcpyDtH(); c.release_csr(); #ifdef sfDEBUG CSR<IT, VT> cusparse_c; SpGEMM_cuSPARSE(a, b, cusparse_c); if (c == cusparse_c) { std::cout << "HashSpGEMM is correctly executed" << std::endl; } std::cout << "Nnz of A: " << a.nnz << std::endl; std::cout << "Number of intermediate products: " << flop_count / 2 << std::endl; std::cout << "Nnz of C: " << c.nnz << std::endl; cusparse_c.release_cpu_csr(); #endif a.release_csr(); b.release_csr(); for (i = 0; i < 2; i++) { cudaEventDestroy(event[i]); } } /*Main Function*/ int main(int argc, char *argv[]) { CSR<IT, VT> a, b, c; /* Set CSR reding from MM file or generating random matrix */ std::cout << "Initialize Matrix A" << std::endl; std::cout << "Read matrix data from " << argv[1] << std::endl; a.init_data_from_mtx(argv[1]); std::cout << "Initialize Matrix B" << std::endl; std::cout << "Read matrix data from " << argv[1] << std::endl; b.init_data_from_mtx(argv[1]); /* Execution of SpGEMM on GPU */ spgemm_hash(a, b, c); a.release_cpu_csr(); b.release_cpu_csr(); c.release_cpu_csr(); return 0; }
9cd00dffd1d402cb433abee7633d823eee08da93.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /////////////////////////////////////////////////////////////////////////////// // // The MIT License // // Copyright (c) 2006 Scientific Computing and Imaging Institute, // University of Utah (USA) // // License for the specific language governing rights and limitations under // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. // /////////////////////////////////////////////////////////////////////////////// #ifndef ELVIS_CORE_ISOSURFACE_MODULE_CUDA_CU #define ELVIS_CORE_ISOSURFACE_MODULE_CUDA_CU #include <ElVis/Core/Jacobi.hpp> #include <ElVis/Core/matrix.cu> #include <ElVis/Core/Cuda.h> #include <ElVis/Core/Interval.hpp> class OrthogonalLegendreBasis { public: __device__ static ElVisFloat Eval(unsigned int i, const ElVisFloat& x) { return Sqrtf((MAKE_FLOAT(2.0) * i + MAKE_FLOAT(1.0)) / MAKE_FLOAT(2.0)) * ElVis::OrthoPoly::P(i, 0, 0, x); } }; template <typename FuncType> __device__ void GenerateLeastSquaresPolynomialProjection( unsigned int order, const ElVisFloat* __restrict__ allNodes, const ElVisFloat* __restrict__ allWeights, const FuncType& f, ElVisFloat* workspace, ElVisFloat* coeffs) { // Nodes and weights start with two point rules unsigned int index = (order - 1) * (order); index = index >> 1; index += order - 1; // ELVIS_PRINTF("Index %d\n", index); const ElVisFloat* nodes = &allNodes[index]; const ElVisFloat* weights = &allWeights[index]; for (unsigned int j = 0; j <= order; ++j) { workspace[j] = f(nodes[j]); } for (unsigned int c_index = 0; c_index <= order; ++c_index) { coeffs[c_index] = MAKE_FLOAT(0.0); for (unsigned int k = 0; k <= order; ++k) { // ELVIS_PRINTF("K %d, node %2.15f, weight %2.15f, sample // %2.15f, basis %2.15f\n", // k, nodes[k], weights[k], workspace[k], // OrthogonalLegendreBasis::Eval(c_index, nodes[k])); coeffs[c_index] += workspace[k] * OrthogonalLegendreBasis::Eval(c_index, nodes[k]) * weights[k]; } } } template <typename FuncType> __device__ void GenerateLeastSquaresPolynomialProjectionParallel( unsigned int order, const ElVisFloat* __restrict__ allNodes, const ElVisFloat* __restrict__ allWeights, const FuncType& f, ElVisFloat* workspace, ElVisFloat* coeffs) { } __device__ ElVisFloat& AccessArray(ElVisFloat* a, int i, int j, int n) { return a[i * n + j]; } template <typename T1, typename T2> __device__ T1 SIGN(const T1& a, const T2& b) { return b >= 0 ? (a >= 0 ? a : -a) : (a >= 0 ? 
-a : a); } __device__ void balance(SquareMatrix& a) { int n = a.GetSize(); const ElVisFloat RADIX = 2; bool done = false; ElVisFloat sqrdx = RADIX * RADIX; while (!done) { done = true; for (int i = 0; i < n; i++) { ElVisFloat r = 0.0, c = 0.0; for (int j = 0; j < n; j++) { if (j != i) { c += abs(a(j, i)); r += abs(a(i, j)); } } if (c != 0.0 && r != 0.0) { ElVisFloat g = r / RADIX; ElVisFloat f = 1.0; ElVisFloat s = c + r; while (c < g) { f *= RADIX; c *= sqrdx; } g = r * RADIX; while (c > g) { f /= RADIX; c /= sqrdx; } if ((c + r) / f < 0.95 * s) { done = false; g = 1.0 / f; // scale[i] *= f; for (int j = 0; j < n; j++) a(i, j) *= g; for (int j = 0; j < n; j++) a(j, i) *= f; } } } } } // returns roots in wri. Since we don't care about complex roots, they are just // set to -1.0 __device__ void hqr(SquareMatrix& a, int n, ElVisFloat* wri) { int nn, m, l, k, j, its, i, mmin; ElVisFloat z, y, x, w, v, u, t, s, r, q, p, anorm = MAKE_FLOAT(0.0); const ElVisFloat EPS = MAKE_FLOAT(1e-8); for (i = 0; i < n; i++) { for (j = max(i - 1, 0); j < n; j++) { anorm += abs(a(i, j)); } } nn = n - 1; t = 0.0; while (nn >= 0) { its = 0; do { for (l = nn; l > 0; l--) { s = abs(a(l - 1, l - 1)) + abs(a(l, l)); if (s == 0.0) s = anorm; if (abs(a(l, l - 1)) <= EPS * s) { a(l, l - 1) = 0.0; break; } } x = a(nn, nn); if (l == nn) { wri[nn--] = x + t; } else { y = a(nn - 1, nn - 1); w = a(nn, nn - 1) * a(nn - 1, nn); if (l == nn - 1) { p = 0.5 * (y - x); q = p * p + w; z = sqrt(abs(q)); x += t; if (q >= 0.0) { z = p + SIGN(z, p); wri[nn - 1] = wri[nn] = x + z; if (z != 0.0) wri[nn] = x - w / z; } else { // wri[nn]=Complex(x+p,-z); // wri[nn-1]=conj(wri[nn]); wri[nn] = MAKE_FLOAT(-10.0); wri[nn - 1] = MAKE_FLOAT(-10.0); } nn -= 2; } else { if (its == 30) return; if (its == 10 || its == 20) { t += x; for (i = 0; i < nn + 1; i++) a(i, i) -= x; s = abs(a(nn, nn - 1)) + abs(a(nn - 1, nn - 2)); y = x = 0.75 * s; w = -0.4375 * s * s; } ++its; for (m = nn - 2; m >= l; m--) { z = a(m, m); r = x - z; s = y - z; p = (r * s - w) / a(m + 1, m) + a(m, m + 1); q = a(m + 1, m + 1) - z - r - s; r = a(m + 2, m + 1); s = abs(p) + abs(q) + abs(r); p /= s; q /= s; r /= s; if (m == l) break; u = abs(a(m, m - 1)) * (abs(q) + abs(r)); v = abs(p) * (abs(a(m - 1, m - 1)) + abs(z) + abs(a(m + 1, m + 1))); if (u <= EPS * v) break; } for (i = m; i < nn - 1; i++) { a(i + 2, i) = 0.0; if (i != m) a(i + 2, i - 1) = 0.0; } for (k = m; k < nn; k++) { if (k != m) { p = a(k, k - 1); q = a(k + 1, k - 1); r = 0.0; if (k + 1 != nn) r = a(k + 2, k - 1); if ((x = abs(p) + abs(q) + abs(r)) != 0.0) { p /= x; q /= x; r /= x; } } if ((s = SIGN(sqrt(p * p + q * q + r * r), p)) != 0.0) { if (k == m) { if (l != m) a(k, k - 1) = -a(k, k - 1); } else { a(k, k - 1) = -s * x; } p += s; x = p / s; y = q / s; z = r / s; q /= p; r /= p; for (j = k; j < nn + 1; j++) { p = a(k, j) + q * a(k + 1, j); if (k + 1 != nn) { p += r * a(k + 2, j); a(k + 2, j) -= p * z; } a(k + 1, j) -= p * y; a(k, j) -= p * x; } mmin = nn < k + 3 ? 
nn : k + 3; for (i = l; i < mmin + 1; i++) { p = x * a(i, k) + y * a(i, k + 1); if (k + 1 != nn) { p += z * a(i, k + 2); a(i, k + 2) -= p * r; } a(i, k + 1) -= p * q; a(i, k) -= p; } } } } } } while (l + 1 < nn); } } struct IsosurfaceFieldEvaluator { public: ELVIS_DEVICE IsosurfaceFieldEvaluator() : Origin(), Direction(), A(), B(), ElementId(0), ElementType(0), ReferencePointType(ElVis::eReferencePointIsInvalid), InitialGuess() { } __device__ ElVisFloat operator()(const ElVisFloat& t) const { // Incoming t is [-1..1], we need to scale to [A,B] ElVisFloat scaledT = (t + MAKE_FLOAT(1.0)) / MAKE_FLOAT(2.0) * (B - A) + A; ElVisFloat3 p = Origin + scaledT * Direction; ElVisFloat s = EvaluateFieldCuda( ElementId, ElementType, FieldId, p, ReferencePointType, InitialGuess); ReferencePointType = ElVis::eReferencePointIsInitialGuess; return s; } ElVisFloat3 Origin; ElVisFloat3 Direction; ElVisFloat A; ElVisFloat B; unsigned int ElementId; unsigned int ElementType; int FieldId; mutable ElVis::ReferencePointParameterType ReferencePointType; mutable ElVisFloat3 InitialGuess; private: IsosurfaceFieldEvaluator(const IsosurfaceFieldEvaluator& rhs); IsosurfaceFieldEvaluator& operator=(const IsosurfaceFieldEvaluator& rhs); }; __device__ void GenerateRowMajorHessenbergMatrix( const ElVisFloat* monomialCoefficients, int n, SquareMatrix& h) { // First row for (int column = 0; column < n - 1; ++column) { h(0, column) = MAKE_FLOAT(0.0); } for (int row = 1; row < n; ++row) { for (int column = 0; column < n - 1; ++column) { if (row == column + 1) { h(row, column) = MAKE_FLOAT(1.0); } else { h(row, column) = MAKE_FLOAT(0.0); } } } ElVisFloat inverse = MAKE_FLOAT(-1.0) / monomialCoefficients[n]; for (int row = 0; row < n; ++row) { h(row, n - 1) = monomialCoefficients[row] * inverse; } } __device__ void ConvertToMonomial(unsigned int order, ElVisFloat* monomialConversionBuffer, const ElVisFloat* legendreCoeffs, ElVisFloat* monomialCoeffs) { int tableIndex = 0; for (int i = 2; i <= order; ++i) { tableIndex += i * i; } // ELVIS_PRINTF("Table Index %d\n", tableIndex); SquareMatrix m(&monomialConversionBuffer[tableIndex], order + 1); // Now that we have the coefficient table we can convert. 
for (unsigned int coeffIndex = 0; coeffIndex <= order; ++coeffIndex) { monomialCoeffs[coeffIndex] = MAKE_FLOAT(0.0); for (unsigned int legCoeffIndex = 0; legCoeffIndex <= order; ++legCoeffIndex) { // ElVisFloat multiplier = // AccessArray(buffer,legCoeffIndex,coeffIndex,order+1); // ELVIS_PRINTF("Legendre Coeff %2.15f, multiplier %2.15f\n", // legendreCoeffs[legCoeffIndex], multiplier); monomialCoeffs[coeffIndex] += legendreCoeffs[legCoeffIndex] * m(legCoeffIndex, coeffIndex); } } } __device__ void PrintMatrix(SquareMatrix& m) { // for(unsigned int row = 0; row < m.GetSize(); ++row) // { // for(unsigned int column = 0; column < m.GetSize(); ++column) // { // ELVIS_PRINTF("%2.15f, ", m(row, column)); // } // ELVIS_PRINTF("\n"); // } } extern "C" __global__ void CopyToElementId( const int* __restrict__ elementIdBuffer, const int* __restrict__ elementTypeBuffer, ElVis::ElementId* out, bool enableTrace, int tracex, int tracey, int bufferSize) { int index = threadIdx.x + blockDim.x * blockIdx.x; if (index >= bufferSize) return; out[index].Id = elementIdBuffer[index]; out[index].Type = elementTypeBuffer[index]; } extern "C" __global__ void FindIsosurfaceInSegment( ElVisFloat3 origin, const int* __restrict__ segmentElementId, const int* __restrict__ segmentElementType, const int* __restrict__ segmentdIdBuffer, const ElVisFloat3* __restrict__ segmentDirection, const ElVisFloat* __restrict__ segmentStart, const ElVisFloat* __restrict__ segmentEnd, int fieldId, int numIsosurfaces, const ElVisFloat* __restrict__ isovalues, bool enableTrace, int tracex, int tracey, int screen_x, int screen_y, const ElVisFloat* __restrict__ gaussNodes, const ElVisFloat* __restrict__ gaussWeights, ElVisFloat* __restrict__ monomialConversionTable, ElVisFloat* __restrict__ SampleBuffer, ElVisFloat3* __restrict__ intersection_buffer) { if (numIsosurfaces == 0) return; int2 trace = make_int2(tracex, tracey); uint2 pixel; pixel.x = blockIdx.x * blockDim.x + threadIdx.x; pixel.y = blockIdx.y * blockDim.y + threadIdx.y; bool traceEnabled = (pixel.x == trace.x && pixel.y == trace.y && enableTrace); if (traceEnabled) { // ELVIS_PRINTF("FindIsosurfaceInSegment: Find Isosurface .\n"); } uint2 screen; screen.x = screen_x; screen.y = screen_y; // screen.x = gridDim.x * blockDim.x; // screen.y = gridDim.y * blockDim.y; if (pixel.x >= screen.x || pixel.y >= screen.y) { return; } int pixelIndex = pixel.x + screen.x * pixel.y; int segmentIndex = segmentdIdBuffer[pixelIndex]; if (traceEnabled) { // ELVIS_PRINTF("FindIsosurfaceInSegment: Segment index %d, pixel index // %d\n", segmentIndex, pixelIndex); } if (segmentEnd[segmentIndex] < MAKE_FLOAT(0.0)) { if (traceEnabled) { // ELVIS_PRINTF("FindIsosurfaceInSegment: Exiting because ray has left // volume based on segment end\n", segmentIndex); } return; } int elementId = segmentElementId[segmentIndex]; if (traceEnabled) { // ELVIS_PRINTF("FindIsosurfaceInSegment: Element id %d\n", elementId); } if (elementId == -1) { if (traceEnabled) { // ELVIS_PRINTF("FindIsosurfaceInSegment: Exiting because element id is // 0\n", segmentIndex); } return; } int elementTypeId = segmentElementType[segmentIndex]; ElVisFloat a = segmentStart[segmentIndex]; ElVisFloat b = segmentEnd[segmentIndex]; ElVisFloat3 rayDirection = segmentDirection[segmentIndex]; ElVisFloat d = (b - a); if (traceEnabled) { // ELVIS_PRINTF("FindIsosurfaceInSegment: Ray Direction (%2.10f, %2.10f, // %2.10f), segment distance %2.10f and endopints [%2.10f, %2.10f]\n", // rayDirection.x, rayDirection.y, rayDirection.z, d, a, b); } if (d 
== MAKE_FLOAT(0.0)) { if (traceEnabled) { // ELVIS_PRINTF("FindIsosurfaceInSegment: Exiting because d is 0\n", // rayDirection.x, rayDirection.y, rayDirection.z, d); } return; } ElVisFloat bestDepth = depth_buffer[segmentIndex]; if (traceEnabled) { // ELVIS_PRINTF("FindIsosurfaceInSegment: Best Depth %2.10f and a %2.10f\n", // bestDepth, a); } // if( bestDepth <= a ) // { // if( traceEnabled ) // { // ELVIS_PRINTF("Exiting because existing depth value %2.10f exists // before segment start %2.10f\n", bestDepth, a); // } // return; // } ElVisFloat3 p0 = origin + a * rayDirection; ElVisFloat3 p1 = origin + b * rayDirection; ElVis::Interval<ElVisFloat> range; EstimateRangeCuda(elementId, elementTypeId, fieldId, p0, p1, range); if (traceEnabled) { // ELVIS_PRINTF("Range of scalar field is (%2.10f, %2.10f)\n", // range.GetLow(), range.GetHigh()); // ELVIS_PRINTF("Origin (%f, %f, %f)\n", origin.x, origin.y, origin.z); // ELVIS_PRINTF("Direction (%f, %f, %f)\n", rayDirection.x, rayDirection.y, // rayDirection.z); // ELVIS_PRINTF("Integration domain [%f, %f]\n", a, b); } for (int isosurfaceId = 0; isosurfaceId < numIsosurfaces; ++isosurfaceId) { if (!range.IsEmpty() && !range.Contains(isovalues[isosurfaceId])) { continue; } if (traceEnabled) { // ELVIS_PRINTF("Searching for isovalue %f\n", isovalues[isosurfaceId]); } // Project onto a polynomial along the ray. // Generate an nth order polynomial projection. // First pass, create an mth element local array to store the value, and // exit out if the required order is // too large. ElVisFloat polynomialCoefficients[32]; ElVisFloat monomialCoefficients[32]; ElVisFloat workspace[32]; ElVisFloat h_data[10 * 10]; int requiredOrder = 8; for (int i = 0; i < 32; ++i) { polynomialCoefficients[i] = -73.45; workspace[i] = -73.45; monomialCoefficients[i] = -73.45; } IsosurfaceFieldEvaluator f; f.Origin = origin; f.Direction = rayDirection; f.ElementId = elementId; f.ElementType = elementTypeId; f.A = a; f.B = b; f.FieldId = fieldId; GenerateLeastSquaresPolynomialProjection(requiredOrder, gaussNodes, gaussWeights, f, workspace, polynomialCoefficients); if (traceEnabled) { // ELVIS_PRINTF("Legendre %2.15f, %2.15f, %2.15f, %2.15f, // %2.15f, %2.15f, %2.15f\n", // polynomialCoefficients[0], // polynomialCoefficients[1], // polynomialCoefficients[2], // polynomialCoefficients[3], // polynomialCoefficients[4], // polynomialCoefficients[5], // polynomialCoefficients[6], // polynomialCoefficients[7], // polynomialCoefficients[8]); } // Fix up the polynomial order if we requested higher than necessary. 
int reducedOrder = requiredOrder; ElVisFloat epsilon = MAKE_FLOAT(1e-8); for (int i = requiredOrder; i >= 1; --i) { if (Fabsf(polynomialCoefficients[i]) > epsilon) { reducedOrder = i; break; } } if (traceEnabled) { // ELVIS_PRINTF("Reduced order %d\n", reducedOrder ); } ConvertToMonomial(reducedOrder, monomialConversionTable, polynomialCoefficients, monomialCoefficients); if (traceEnabled) { // ELVIS_PRINTF("Monomial %2.15f, %2.15f, %2.15f, %2.15f, // %2.15f, %2.15f, %2.15f\n", // monomialCoefficients[0], // monomialCoefficients[1], // monomialCoefficients[2], // monomialCoefficients[3], // monomialCoefficients[4], // monomialCoefficients[5], // monomialCoefficients[6], // monomialCoefficients[7], // monomialCoefficients[8]); } monomialCoefficients[0] -= isovalues[isosurfaceId]; SquareMatrix h(h_data, reducedOrder); GenerateRowMajorHessenbergMatrix(monomialCoefficients, reducedOrder, h); // if( traceEnabled ) // { // ELVIS_PRINTF("Before balancing.\n"); // PrintMatrix(h); // } balance(h); // if( traceEnabled ) // { // ELVIS_PRINTF("After balancing.\n"); // PrintMatrix(h); // } ElVisFloat roots[8]; for (int i = 0; i < 8; ++i) { roots[i] = -4582.23; } hqr(h, reducedOrder, roots); // if( traceEnabled ) // { // ELVIS_PRINTF("Roots %2.15f, %2.15f, %2.15f, %2.15f, %2.15f, // %2.15f\n", // roots[0], // roots[1], // roots[2], // roots[3], // roots[4], // roots[5]); // } ElVisFloat foundRoot = ELVIS_FLOAT_MAX; for (int i = 0; i < reducedOrder; ++i) { ElVisFloat root = roots[i]; if (root >= MAKE_FLOAT(-1.0) && root <= MAKE_FLOAT(1.0) && root <= foundRoot) { ElVisFloat foundT = (root + MAKE_FLOAT(1.0)) / MAKE_FLOAT(2.0) * (f.B - f.A) + f.A; if (foundT < bestDepth) { foundRoot = root; } } } if (foundRoot != ELVIS_FLOAT_MAX) { ElVisFloat foundT = (foundRoot + MAKE_FLOAT(1.0)) / MAKE_FLOAT(2.0) * (f.B - f.A) + f.A; ElVisFloat3 foundIntersectionPoint = origin + foundT * rayDirection; intersection_buffer[segmentIndex] = foundIntersectionPoint; SampleBuffer[segmentIndex] = EvaluateFieldCuda( elementId, elementTypeId, fieldId, foundIntersectionPoint); if (traceEnabled) { // ELVIS_PRINTF("FindIsosurfaceInSegment: ######################## Found // root %2.15f, in world %2.15f with value %f \n", foundRoot, foundT, // SampleBuffer[segmentIndex]); } EvaluateNormalCuda(elementId, elementTypeId, fieldId, foundIntersectionPoint, normal_buffer[segmentIndex]); depth_buffer[segmentIndex] = foundT; bestDepth = foundT; // // This depth buffer is wrong, need accumulated. // depth_buffer[launch_index] = (far+near)/(far-near) - 2.0f/foundT // * far*near/(far-near); // depth_buffer[launch_index] = // (depth_buffer[launch_index]+1.0)/2.0; } } } #endif // ELVIS_CORE_ISOSURFACE_MODULE_CUDA_CU
9cd00dffd1d402cb433abee7633d823eee08da93.cu
/////////////////////////////////////////////////////////////////////////////// // // The MIT License // // Copyright (c) 2006 Scientific Computing and Imaging Institute, // University of Utah (USA) // // License for the specific language governing rights and limitations under // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included // in all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL // THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. // /////////////////////////////////////////////////////////////////////////////// #ifndef ELVIS_CORE_ISOSURFACE_MODULE_CUDA_CU #define ELVIS_CORE_ISOSURFACE_MODULE_CUDA_CU #include <ElVis/Core/Jacobi.hpp> #include <ElVis/Core/matrix.cu> #include <ElVis/Core/Cuda.h> #include <ElVis/Core/Interval.hpp> class OrthogonalLegendreBasis { public: __device__ static ElVisFloat Eval(unsigned int i, const ElVisFloat& x) { return Sqrtf((MAKE_FLOAT(2.0) * i + MAKE_FLOAT(1.0)) / MAKE_FLOAT(2.0)) * ElVis::OrthoPoly::P(i, 0, 0, x); } }; template <typename FuncType> __device__ void GenerateLeastSquaresPolynomialProjection( unsigned int order, const ElVisFloat* __restrict__ allNodes, const ElVisFloat* __restrict__ allWeights, const FuncType& f, ElVisFloat* workspace, ElVisFloat* coeffs) { // Nodes and weights start with two point rules unsigned int index = (order - 1) * (order); index = index >> 1; index += order - 1; // ELVIS_PRINTF("Index %d\n", index); const ElVisFloat* nodes = &allNodes[index]; const ElVisFloat* weights = &allWeights[index]; for (unsigned int j = 0; j <= order; ++j) { workspace[j] = f(nodes[j]); } for (unsigned int c_index = 0; c_index <= order; ++c_index) { coeffs[c_index] = MAKE_FLOAT(0.0); for (unsigned int k = 0; k <= order; ++k) { // ELVIS_PRINTF("K %d, node %2.15f, weight %2.15f, sample // %2.15f, basis %2.15f\n", // k, nodes[k], weights[k], workspace[k], // OrthogonalLegendreBasis::Eval(c_index, nodes[k])); coeffs[c_index] += workspace[k] * OrthogonalLegendreBasis::Eval(c_index, nodes[k]) * weights[k]; } } } template <typename FuncType> __device__ void GenerateLeastSquaresPolynomialProjectionParallel( unsigned int order, const ElVisFloat* __restrict__ allNodes, const ElVisFloat* __restrict__ allWeights, const FuncType& f, ElVisFloat* workspace, ElVisFloat* coeffs) { } __device__ ElVisFloat& AccessArray(ElVisFloat* a, int i, int j, int n) { return a[i * n + j]; } template <typename T1, typename T2> __device__ T1 SIGN(const T1& a, const T2& b) { return b >= 0 ? (a >= 0 ? a : -a) : (a >= 0 ? 
-a : a); } __device__ void balance(SquareMatrix& a) { int n = a.GetSize(); const ElVisFloat RADIX = 2; bool done = false; ElVisFloat sqrdx = RADIX * RADIX; while (!done) { done = true; for (int i = 0; i < n; i++) { ElVisFloat r = 0.0, c = 0.0; for (int j = 0; j < n; j++) { if (j != i) { c += abs(a(j, i)); r += abs(a(i, j)); } } if (c != 0.0 && r != 0.0) { ElVisFloat g = r / RADIX; ElVisFloat f = 1.0; ElVisFloat s = c + r; while (c < g) { f *= RADIX; c *= sqrdx; } g = r * RADIX; while (c > g) { f /= RADIX; c /= sqrdx; } if ((c + r) / f < 0.95 * s) { done = false; g = 1.0 / f; // scale[i] *= f; for (int j = 0; j < n; j++) a(i, j) *= g; for (int j = 0; j < n; j++) a(j, i) *= f; } } } } } // returns roots in wri. Since we don't care about complex roots, they are just // set to -1.0 __device__ void hqr(SquareMatrix& a, int n, ElVisFloat* wri) { int nn, m, l, k, j, its, i, mmin; ElVisFloat z, y, x, w, v, u, t, s, r, q, p, anorm = MAKE_FLOAT(0.0); const ElVisFloat EPS = MAKE_FLOAT(1e-8); for (i = 0; i < n; i++) { for (j = max(i - 1, 0); j < n; j++) { anorm += abs(a(i, j)); } } nn = n - 1; t = 0.0; while (nn >= 0) { its = 0; do { for (l = nn; l > 0; l--) { s = abs(a(l - 1, l - 1)) + abs(a(l, l)); if (s == 0.0) s = anorm; if (abs(a(l, l - 1)) <= EPS * s) { a(l, l - 1) = 0.0; break; } } x = a(nn, nn); if (l == nn) { wri[nn--] = x + t; } else { y = a(nn - 1, nn - 1); w = a(nn, nn - 1) * a(nn - 1, nn); if (l == nn - 1) { p = 0.5 * (y - x); q = p * p + w; z = sqrt(abs(q)); x += t; if (q >= 0.0) { z = p + SIGN(z, p); wri[nn - 1] = wri[nn] = x + z; if (z != 0.0) wri[nn] = x - w / z; } else { // wri[nn]=Complex(x+p,-z); // wri[nn-1]=conj(wri[nn]); wri[nn] = MAKE_FLOAT(-10.0); wri[nn - 1] = MAKE_FLOAT(-10.0); } nn -= 2; } else { if (its == 30) return; if (its == 10 || its == 20) { t += x; for (i = 0; i < nn + 1; i++) a(i, i) -= x; s = abs(a(nn, nn - 1)) + abs(a(nn - 1, nn - 2)); y = x = 0.75 * s; w = -0.4375 * s * s; } ++its; for (m = nn - 2; m >= l; m--) { z = a(m, m); r = x - z; s = y - z; p = (r * s - w) / a(m + 1, m) + a(m, m + 1); q = a(m + 1, m + 1) - z - r - s; r = a(m + 2, m + 1); s = abs(p) + abs(q) + abs(r); p /= s; q /= s; r /= s; if (m == l) break; u = abs(a(m, m - 1)) * (abs(q) + abs(r)); v = abs(p) * (abs(a(m - 1, m - 1)) + abs(z) + abs(a(m + 1, m + 1))); if (u <= EPS * v) break; } for (i = m; i < nn - 1; i++) { a(i + 2, i) = 0.0; if (i != m) a(i + 2, i - 1) = 0.0; } for (k = m; k < nn; k++) { if (k != m) { p = a(k, k - 1); q = a(k + 1, k - 1); r = 0.0; if (k + 1 != nn) r = a(k + 2, k - 1); if ((x = abs(p) + abs(q) + abs(r)) != 0.0) { p /= x; q /= x; r /= x; } } if ((s = SIGN(sqrt(p * p + q * q + r * r), p)) != 0.0) { if (k == m) { if (l != m) a(k, k - 1) = -a(k, k - 1); } else { a(k, k - 1) = -s * x; } p += s; x = p / s; y = q / s; z = r / s; q /= p; r /= p; for (j = k; j < nn + 1; j++) { p = a(k, j) + q * a(k + 1, j); if (k + 1 != nn) { p += r * a(k + 2, j); a(k + 2, j) -= p * z; } a(k + 1, j) -= p * y; a(k, j) -= p * x; } mmin = nn < k + 3 ? 
nn : k + 3; for (i = l; i < mmin + 1; i++) { p = x * a(i, k) + y * a(i, k + 1); if (k + 1 != nn) { p += z * a(i, k + 2); a(i, k + 2) -= p * r; } a(i, k + 1) -= p * q; a(i, k) -= p; } } } } } } while (l + 1 < nn); } } struct IsosurfaceFieldEvaluator { public: ELVIS_DEVICE IsosurfaceFieldEvaluator() : Origin(), Direction(), A(), B(), ElementId(0), ElementType(0), ReferencePointType(ElVis::eReferencePointIsInvalid), InitialGuess() { } __device__ ElVisFloat operator()(const ElVisFloat& t) const { // Incoming t is [-1..1], we need to scale to [A,B] ElVisFloat scaledT = (t + MAKE_FLOAT(1.0)) / MAKE_FLOAT(2.0) * (B - A) + A; ElVisFloat3 p = Origin + scaledT * Direction; ElVisFloat s = EvaluateFieldCuda( ElementId, ElementType, FieldId, p, ReferencePointType, InitialGuess); ReferencePointType = ElVis::eReferencePointIsInitialGuess; return s; } ElVisFloat3 Origin; ElVisFloat3 Direction; ElVisFloat A; ElVisFloat B; unsigned int ElementId; unsigned int ElementType; int FieldId; mutable ElVis::ReferencePointParameterType ReferencePointType; mutable ElVisFloat3 InitialGuess; private: IsosurfaceFieldEvaluator(const IsosurfaceFieldEvaluator& rhs); IsosurfaceFieldEvaluator& operator=(const IsosurfaceFieldEvaluator& rhs); }; __device__ void GenerateRowMajorHessenbergMatrix( const ElVisFloat* monomialCoefficients, int n, SquareMatrix& h) { // First row for (int column = 0; column < n - 1; ++column) { h(0, column) = MAKE_FLOAT(0.0); } for (int row = 1; row < n; ++row) { for (int column = 0; column < n - 1; ++column) { if (row == column + 1) { h(row, column) = MAKE_FLOAT(1.0); } else { h(row, column) = MAKE_FLOAT(0.0); } } } ElVisFloat inverse = MAKE_FLOAT(-1.0) / monomialCoefficients[n]; for (int row = 0; row < n; ++row) { h(row, n - 1) = monomialCoefficients[row] * inverse; } } __device__ void ConvertToMonomial(unsigned int order, ElVisFloat* monomialConversionBuffer, const ElVisFloat* legendreCoeffs, ElVisFloat* monomialCoeffs) { int tableIndex = 0; for (int i = 2; i <= order; ++i) { tableIndex += i * i; } // ELVIS_PRINTF("Table Index %d\n", tableIndex); SquareMatrix m(&monomialConversionBuffer[tableIndex], order + 1); // Now that we have the coefficient table we can convert. 
for (unsigned int coeffIndex = 0; coeffIndex <= order; ++coeffIndex) { monomialCoeffs[coeffIndex] = MAKE_FLOAT(0.0); for (unsigned int legCoeffIndex = 0; legCoeffIndex <= order; ++legCoeffIndex) { // ElVisFloat multiplier = // AccessArray(buffer,legCoeffIndex,coeffIndex,order+1); // ELVIS_PRINTF("Legendre Coeff %2.15f, multiplier %2.15f\n", // legendreCoeffs[legCoeffIndex], multiplier); monomialCoeffs[coeffIndex] += legendreCoeffs[legCoeffIndex] * m(legCoeffIndex, coeffIndex); } } } __device__ void PrintMatrix(SquareMatrix& m) { // for(unsigned int row = 0; row < m.GetSize(); ++row) // { // for(unsigned int column = 0; column < m.GetSize(); ++column) // { // ELVIS_PRINTF("%2.15f, ", m(row, column)); // } // ELVIS_PRINTF("\n"); // } } extern "C" __global__ void CopyToElementId( const int* __restrict__ elementIdBuffer, const int* __restrict__ elementTypeBuffer, ElVis::ElementId* out, bool enableTrace, int tracex, int tracey, int bufferSize) { int index = threadIdx.x + blockDim.x * blockIdx.x; if (index >= bufferSize) return; out[index].Id = elementIdBuffer[index]; out[index].Type = elementTypeBuffer[index]; } extern "C" __global__ void FindIsosurfaceInSegment( ElVisFloat3 origin, const int* __restrict__ segmentElementId, const int* __restrict__ segmentElementType, const int* __restrict__ segmentdIdBuffer, const ElVisFloat3* __restrict__ segmentDirection, const ElVisFloat* __restrict__ segmentStart, const ElVisFloat* __restrict__ segmentEnd, int fieldId, int numIsosurfaces, const ElVisFloat* __restrict__ isovalues, bool enableTrace, int tracex, int tracey, int screen_x, int screen_y, const ElVisFloat* __restrict__ gaussNodes, const ElVisFloat* __restrict__ gaussWeights, ElVisFloat* __restrict__ monomialConversionTable, ElVisFloat* __restrict__ SampleBuffer, ElVisFloat3* __restrict__ intersection_buffer) { if (numIsosurfaces == 0) return; int2 trace = make_int2(tracex, tracey); uint2 pixel; pixel.x = blockIdx.x * blockDim.x + threadIdx.x; pixel.y = blockIdx.y * blockDim.y + threadIdx.y; bool traceEnabled = (pixel.x == trace.x && pixel.y == trace.y && enableTrace); if (traceEnabled) { // ELVIS_PRINTF("FindIsosurfaceInSegment: Find Isosurface .\n"); } uint2 screen; screen.x = screen_x; screen.y = screen_y; // screen.x = gridDim.x * blockDim.x; // screen.y = gridDim.y * blockDim.y; if (pixel.x >= screen.x || pixel.y >= screen.y) { return; } int pixelIndex = pixel.x + screen.x * pixel.y; int segmentIndex = segmentdIdBuffer[pixelIndex]; if (traceEnabled) { // ELVIS_PRINTF("FindIsosurfaceInSegment: Segment index %d, pixel index // %d\n", segmentIndex, pixelIndex); } if (segmentEnd[segmentIndex] < MAKE_FLOAT(0.0)) { if (traceEnabled) { // ELVIS_PRINTF("FindIsosurfaceInSegment: Exiting because ray has left // volume based on segment end\n", segmentIndex); } return; } int elementId = segmentElementId[segmentIndex]; if (traceEnabled) { // ELVIS_PRINTF("FindIsosurfaceInSegment: Element id %d\n", elementId); } if (elementId == -1) { if (traceEnabled) { // ELVIS_PRINTF("FindIsosurfaceInSegment: Exiting because element id is // 0\n", segmentIndex); } return; } int elementTypeId = segmentElementType[segmentIndex]; ElVisFloat a = segmentStart[segmentIndex]; ElVisFloat b = segmentEnd[segmentIndex]; ElVisFloat3 rayDirection = segmentDirection[segmentIndex]; ElVisFloat d = (b - a); if (traceEnabled) { // ELVIS_PRINTF("FindIsosurfaceInSegment: Ray Direction (%2.10f, %2.10f, // %2.10f), segment distance %2.10f and endopints [%2.10f, %2.10f]\n", // rayDirection.x, rayDirection.y, rayDirection.z, d, a, b); } if (d 
== MAKE_FLOAT(0.0)) { if (traceEnabled) { // ELVIS_PRINTF("FindIsosurfaceInSegment: Exiting because d is 0\n", // rayDirection.x, rayDirection.y, rayDirection.z, d); } return; } ElVisFloat bestDepth = depth_buffer[segmentIndex]; if (traceEnabled) { // ELVIS_PRINTF("FindIsosurfaceInSegment: Best Depth %2.10f and a %2.10f\n", // bestDepth, a); } // if( bestDepth <= a ) // { // if( traceEnabled ) // { // ELVIS_PRINTF("Exiting because existing depth value %2.10f exists // before segment start %2.10f\n", bestDepth, a); // } // return; // } ElVisFloat3 p0 = origin + a * rayDirection; ElVisFloat3 p1 = origin + b * rayDirection; ElVis::Interval<ElVisFloat> range; EstimateRangeCuda(elementId, elementTypeId, fieldId, p0, p1, range); if (traceEnabled) { // ELVIS_PRINTF("Range of scalar field is (%2.10f, %2.10f)\n", // range.GetLow(), range.GetHigh()); // ELVIS_PRINTF("Origin (%f, %f, %f)\n", origin.x, origin.y, origin.z); // ELVIS_PRINTF("Direction (%f, %f, %f)\n", rayDirection.x, rayDirection.y, // rayDirection.z); // ELVIS_PRINTF("Integration domain [%f, %f]\n", a, b); } for (int isosurfaceId = 0; isosurfaceId < numIsosurfaces; ++isosurfaceId) { if (!range.IsEmpty() && !range.Contains(isovalues[isosurfaceId])) { continue; } if (traceEnabled) { // ELVIS_PRINTF("Searching for isovalue %f\n", isovalues[isosurfaceId]); } // Project onto a polynomial along the ray. // Generate an nth order polynomial projection. // First pass, create an mth element local array to store the value, and // exit out if the required order is // too large. ElVisFloat polynomialCoefficients[32]; ElVisFloat monomialCoefficients[32]; ElVisFloat workspace[32]; ElVisFloat h_data[10 * 10]; int requiredOrder = 8; for (int i = 0; i < 32; ++i) { polynomialCoefficients[i] = -73.45; workspace[i] = -73.45; monomialCoefficients[i] = -73.45; } IsosurfaceFieldEvaluator f; f.Origin = origin; f.Direction = rayDirection; f.ElementId = elementId; f.ElementType = elementTypeId; f.A = a; f.B = b; f.FieldId = fieldId; GenerateLeastSquaresPolynomialProjection(requiredOrder, gaussNodes, gaussWeights, f, workspace, polynomialCoefficients); if (traceEnabled) { // ELVIS_PRINTF("Legendre %2.15f, %2.15f, %2.15f, %2.15f, // %2.15f, %2.15f, %2.15f\n", // polynomialCoefficients[0], // polynomialCoefficients[1], // polynomialCoefficients[2], // polynomialCoefficients[3], // polynomialCoefficients[4], // polynomialCoefficients[5], // polynomialCoefficients[6], // polynomialCoefficients[7], // polynomialCoefficients[8]); } // Fix up the polynomial order if we requested higher than necessary. 
int reducedOrder = requiredOrder; ElVisFloat epsilon = MAKE_FLOAT(1e-8); for (int i = requiredOrder; i >= 1; --i) { if (Fabsf(polynomialCoefficients[i]) > epsilon) { reducedOrder = i; break; } } if (traceEnabled) { // ELVIS_PRINTF("Reduced order %d\n", reducedOrder ); } ConvertToMonomial(reducedOrder, monomialConversionTable, polynomialCoefficients, monomialCoefficients); if (traceEnabled) { // ELVIS_PRINTF("Monomial %2.15f, %2.15f, %2.15f, %2.15f, // %2.15f, %2.15f, %2.15f\n", // monomialCoefficients[0], // monomialCoefficients[1], // monomialCoefficients[2], // monomialCoefficients[3], // monomialCoefficients[4], // monomialCoefficients[5], // monomialCoefficients[6], // monomialCoefficients[7], // monomialCoefficients[8]); } monomialCoefficients[0] -= isovalues[isosurfaceId]; SquareMatrix h(h_data, reducedOrder); GenerateRowMajorHessenbergMatrix(monomialCoefficients, reducedOrder, h); // if( traceEnabled ) // { // ELVIS_PRINTF("Before balancing.\n"); // PrintMatrix(h); // } balance(h); // if( traceEnabled ) // { // ELVIS_PRINTF("After balancing.\n"); // PrintMatrix(h); // } ElVisFloat roots[8]; for (int i = 0; i < 8; ++i) { roots[i] = -4582.23; } hqr(h, reducedOrder, roots); // if( traceEnabled ) // { // ELVIS_PRINTF("Roots %2.15f, %2.15f, %2.15f, %2.15f, %2.15f, // %2.15f\n", // roots[0], // roots[1], // roots[2], // roots[3], // roots[4], // roots[5]); // } ElVisFloat foundRoot = ELVIS_FLOAT_MAX; for (int i = 0; i < reducedOrder; ++i) { ElVisFloat root = roots[i]; if (root >= MAKE_FLOAT(-1.0) && root <= MAKE_FLOAT(1.0) && root <= foundRoot) { ElVisFloat foundT = (root + MAKE_FLOAT(1.0)) / MAKE_FLOAT(2.0) * (f.B - f.A) + f.A; if (foundT < bestDepth) { foundRoot = root; } } } if (foundRoot != ELVIS_FLOAT_MAX) { ElVisFloat foundT = (foundRoot + MAKE_FLOAT(1.0)) / MAKE_FLOAT(2.0) * (f.B - f.A) + f.A; ElVisFloat3 foundIntersectionPoint = origin + foundT * rayDirection; intersection_buffer[segmentIndex] = foundIntersectionPoint; SampleBuffer[segmentIndex] = EvaluateFieldCuda( elementId, elementTypeId, fieldId, foundIntersectionPoint); if (traceEnabled) { // ELVIS_PRINTF("FindIsosurfaceInSegment: ######################## Found // root %2.15f, in world %2.15f with value %f \n", foundRoot, foundT, // SampleBuffer[segmentIndex]); } EvaluateNormalCuda(elementId, elementTypeId, fieldId, foundIntersectionPoint, normal_buffer[segmentIndex]); depth_buffer[segmentIndex] = foundT; bestDepth = foundT; // // This depth buffer is wrong, need accumulated. // depth_buffer[launch_index] = (far+near)/(far-near) - 2.0f/foundT // * far*near/(far-near); // depth_buffer[launch_index] = // (depth_buffer[launch_index]+1.0)/2.0; } } } #endif // ELVIS_CORE_ISOSURFACE_MODULE_CUDA_CU
b1a52294100495e8b0d702cdf035721d1f82999a.hip
// !!! This is a file automatically generated by hipify!!! #include "SamplingKernel.cuh" #define gpuErr(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } void SampleKernelD(WTD &argWTDen, WTAll &argWT, DTChunk &argDT, Document &argDoc, hiprandState_t* randState) { //unsigned int blockCounter = 0; unsigned int* deviceCounter; hipMalloc(&deviceCounter, sizeof(unsigned int)); hipMemset(deviceCounter, 0, sizeof(unsigned int)); // srand(time(NULL)); // hiprandState_t* randState; // hipMalloc(&randState, sizeof(hiprandState_t)*GridDim*BlockDim); // H_ERR(hipDeviceSynchronize()); // gpuErr(hipPeekAtLastError()); //initRandState << <GridDim, BlockDim >> >(randState); H_ERR(hipDeviceSynchronize()); // for (int i = 0; i < iterWT; i++) { //hipMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), hipMemcpyHostToDevice); LDAKernelTrainD << <GridDim, BlockDim >> > (alpha, beta, argDoc.deviceMapWord2Doc, argDoc.deviceTLTopic, argDT.deviceNZDTCount, argDT.deviceDTIndex, argDT.deviceDTValue, argDoc.deviceTLDocCount, argDoc.deviceTLDocOffset, argDT.deviceDTCount, argDT.deviceDTOffset, argWTDen.deviceWTDense, argWTDen.deviceWTDenseCopy, argDoc.deviceTLWordCount, argDoc.deviceTLWordOffset, argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, deviceCounter, argDoc.deviceMapDoc2Word, argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid, randState, argDoc.deviceWTHeadDense, argWTDen.numOfWordD, argDoc.tokenSegment); H_ERR(hipDeviceSynchronize()); } //(double alpha, double beta, int* d_Index, int* d_TopicIndex, int* d_SparseDTCount, int* d_SparseDTIndex, int* d_SparseDTValue, int* d_TokenCountDT, int* d_TokenOffsetDT, int* d_DocListCount, int* d_DocListOffset, int* d_WTDense, int* d_WTDenseCopy, int* d_TokenCount, int* d_TokenOffset, int* d_WordListCount, int* d_WordListOffset, int* d_WTRowSum, int* d_blockCounter, int*d_DocIndex, int D, int W, double* d_Perplexity, hiprandState_t *randState, double *WTHeadDense, int numOfWordD); void SampleKernel(WTAll &argWT, DTChunk &argDT, Document &argDoc, hiprandState_t* randState) { int numOfWordD = argWT.wordLength - argWT.numOfWordS; unsigned int* deviceCounter; hipMalloc(&deviceCounter, sizeof(unsigned int)); hipMemset(deviceCounter, 0, sizeof(unsigned int)); //initRandState << <GridDim, BlockDim >> >(randState); H_ERR(hipDeviceSynchronize()); LDAKernelTrain << <GridDim, BlockDim >> > (alpha, beta, argDoc.deviceMapWord2Doc, argDoc.deviceTLTopic, argDT.deviceNZDTCount, argDT.deviceDTIndex, argDT.deviceDTValue, argDoc.deviceTLDocCount, argDoc.deviceTLDocOffset, argDT.deviceDTCount, argDT.deviceDTOffset, argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, argDoc.deviceTLWordCount, argDoc.deviceTLWordOffset, argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, deviceCounter, argDoc.deviceMapDoc2Word, argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid, randState, argDoc.deviceWTHeadDense, numOfWordD, argWT.numOfWordS); H_ERR(hipDeviceSynchronize()); } void PerplexityKernel(Document &argDoc) { float* sumPerplexity; hipMalloc(&sumPerplexity, sizeof(float)); LDATrainPerplexityReduce << <1, BlockDim >> > (argDoc.devicePerplexityMid, argDoc.totalNumOfTokens, sumPerplexity); hipMemcpy(&argDoc.sumPerplexity, sumPerplexity, sizeof(float), hipMemcpyDeviceToHost); 
H_ERR(hipDeviceSynchronize()); } // //void SampleKernel(WTAll &argWT, DTChunk &argDT, Document &argDoc, hiprandState_t* randState) { // // int blockCounter = 0; // int iterWT = (argWT.numOfWordS - 1) / GridDim + 1; // float Perplexity = 0.0; // int numOfWordD = argWT.wordLength - argWT.numOfWordS; // // srand(time(NULL)); // // // hiprandState_t* randState; // // hipMalloc(&randState, sizeof(hiprandState_t)*GridDim*BlockDim); // // H_ERR(hipDeviceSynchronize()); // // gpuErr(hipPeekAtLastError()); // // initRandState << <GridDim, BlockDim >> >(randState); // H_ERR(hipDeviceSynchronize()); // // for (int i = 0; i < iterWT; i++) { // // hipMemcpy(argDoc.d_blockCounter, &blockCounter, sizeof(int), hipMemcpyHostToDevice); // // LDAKernelTrain << <GridDim, BlockDim >> > (alpha, beta, argDoc.deviceMapWord2Doc, argDoc.deviceTLTopic, argDT.deviceNZDTCount, argDT.deviceDTIndex, argDT.deviceDTValue, argDoc.deviceTLDocCount, argDoc.deviceTLDocOffset, argDT.deviceDTCount, argDT.deviceDTOffset, argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, argDoc.deviceTLWordCount, argDoc.deviceTLWordOffset, argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.d_blockCounter, argDoc.deviceMapDoc2Word, argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexity, randState, argDoc.deviceWTHeadDense, numOfWordD, argWT.numOfWordS); // H_ERR(hipDeviceSynchronize()); // blockCounter++; // // } // LDATrainPerplexityReduce1 << <GridDim, BlockDim >> > (argDoc.devicePerplexity, argDoc.devicePerplexityMid, argDoc.TLLengthVec[argDT.chunkId]); // // H_ERR(hipDeviceSynchronize()); // // //} // // //
b1a52294100495e8b0d702cdf035721d1f82999a.cu
#include "SamplingKernel.cuh" #define gpuErr(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } void SampleKernelD(WTD &argWTDen, WTAll &argWT, DTChunk &argDT, Document &argDoc, curandState* randState) { //unsigned int blockCounter = 0; unsigned int* deviceCounter; cudaMalloc(&deviceCounter, sizeof(unsigned int)); cudaMemset(deviceCounter, 0, sizeof(unsigned int)); // srand(time(NULL)); // curandState* randState; // cudaMalloc(&randState, sizeof(curandState)*GridDim*BlockDim); // H_ERR(cudaDeviceSynchronize()); // gpuErr(cudaPeekAtLastError()); //initRandState << <GridDim, BlockDim >> >(randState); H_ERR(cudaDeviceSynchronize()); // for (int i = 0; i < iterWT; i++) { //cudaMemcpy(deviceCounter, &blockCounter, sizeof(unsigned int), cudaMemcpyHostToDevice); LDAKernelTrainD << <GridDim, BlockDim >> > (alpha, beta, argDoc.deviceMapWord2Doc, argDoc.deviceTLTopic, argDT.deviceNZDTCount, argDT.deviceDTIndex, argDT.deviceDTValue, argDoc.deviceTLDocCount, argDoc.deviceTLDocOffset, argDT.deviceDTCount, argDT.deviceDTOffset, argWTDen.deviceWTDense, argWTDen.deviceWTDenseCopy, argDoc.deviceTLWordCount, argDoc.deviceTLWordOffset, argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, deviceCounter, argDoc.deviceMapDoc2Word, argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid, randState, argDoc.deviceWTHeadDense, argWTDen.numOfWordD, argDoc.tokenSegment); H_ERR(cudaDeviceSynchronize()); } //(double alpha, double beta, int* d_Index, int* d_TopicIndex, int* d_SparseDTCount, int* d_SparseDTIndex, int* d_SparseDTValue, int* d_TokenCountDT, int* d_TokenOffsetDT, int* d_DocListCount, int* d_DocListOffset, int* d_WTDense, int* d_WTDenseCopy, int* d_TokenCount, int* d_TokenOffset, int* d_WordListCount, int* d_WordListOffset, int* d_WTRowSum, int* d_blockCounter, int*d_DocIndex, int D, int W, double* d_Perplexity, curandState *randState, double *WTHeadDense, int numOfWordD); void SampleKernel(WTAll &argWT, DTChunk &argDT, Document &argDoc, curandState* randState) { int numOfWordD = argWT.wordLength - argWT.numOfWordS; unsigned int* deviceCounter; cudaMalloc(&deviceCounter, sizeof(unsigned int)); cudaMemset(deviceCounter, 0, sizeof(unsigned int)); //initRandState << <GridDim, BlockDim >> >(randState); H_ERR(cudaDeviceSynchronize()); LDAKernelTrain << <GridDim, BlockDim >> > (alpha, beta, argDoc.deviceMapWord2Doc, argDoc.deviceTLTopic, argDT.deviceNZDTCount, argDT.deviceDTIndex, argDT.deviceDTValue, argDoc.deviceTLDocCount, argDoc.deviceTLDocOffset, argDT.deviceDTCount, argDT.deviceDTOffset, argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, argDoc.deviceTLWordCount, argDoc.deviceTLWordOffset, argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, deviceCounter, argDoc.deviceMapDoc2Word, argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexityMid, randState, argDoc.deviceWTHeadDense, numOfWordD, argWT.numOfWordS); H_ERR(cudaDeviceSynchronize()); } void PerplexityKernel(Document &argDoc) { float* sumPerplexity; cudaMalloc(&sumPerplexity, sizeof(float)); LDATrainPerplexityReduce << <1, BlockDim >> > (argDoc.devicePerplexityMid, argDoc.totalNumOfTokens, sumPerplexity); cudaMemcpy(&argDoc.sumPerplexity, sumPerplexity, sizeof(float), cudaMemcpyDeviceToHost); H_ERR(cudaDeviceSynchronize()); } // //void SampleKernel(WTAll 
&argWT, DTChunk &argDT, Document &argDoc, curandState* randState) { // // int blockCounter = 0; // int iterWT = (argWT.numOfWordS - 1) / GridDim + 1; // float Perplexity = 0.0; // int numOfWordD = argWT.wordLength - argWT.numOfWordS; // // srand(time(NULL)); // // // curandState* randState; // // cudaMalloc(&randState, sizeof(curandState)*GridDim*BlockDim); // // H_ERR(cudaDeviceSynchronize()); // // gpuErr(cudaPeekAtLastError()); // // initRandState << <GridDim, BlockDim >> >(randState); // H_ERR(cudaDeviceSynchronize()); // // for (int i = 0; i < iterWT; i++) { // // cudaMemcpy(argDoc.d_blockCounter, &blockCounter, sizeof(int), cudaMemcpyHostToDevice); // // LDAKernelTrain << <GridDim, BlockDim >> > (alpha, beta, argDoc.deviceMapWord2Doc, argDoc.deviceTLTopic, argDT.deviceNZDTCount, argDT.deviceDTIndex, argDT.deviceDTValue, argDoc.deviceTLDocCount, argDoc.deviceTLDocOffset, argDT.deviceDTCount, argDT.deviceDTOffset, argWT.deviceNZWTCount, argWT.deviceWTIndex, argWT.deviceWTValue, argDoc.deviceTLWordCount, argDoc.deviceTLWordOffset, argWT.deviceWTCount, argWT.deviceWTOffset, argWT.deviceWTRowSum, argDoc.d_blockCounter, argDoc.deviceMapDoc2Word, argDoc.docLengthVec[argDT.chunkId], argWT.wordLength, argDoc.devicePerplexity, randState, argDoc.deviceWTHeadDense, numOfWordD, argWT.numOfWordS); // H_ERR(cudaDeviceSynchronize()); // blockCounter++; // // } // LDATrainPerplexityReduce1 << <GridDim, BlockDim >> > (argDoc.devicePerplexity, argDoc.devicePerplexityMid, argDoc.TLLengthVec[argDT.chunkId]); // // H_ERR(cudaDeviceSynchronize()); // // //} // // //
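In the pair above only host-side names change: cudaMalloc/cudaMemset/cudaMemcpy become hipMalloc/hipMemset/hipMemcpy and the cuRAND state type curandState becomes hiprandState_t, while the kernels and their launches are left alone. A minimal, self-contained CUDA sketch of the same scratch-allocation pattern (the helper name allocSamplerScratch is an illustrative assumption, not part of SamplingKernel.cuh):

#include <cuda_runtime.h>
#include <curand_kernel.h>

// Allocate per-thread RNG states plus the zero-initialised block counter that
// SampleKernelD expects; every cuda*/curand* name here is one of the calls the
// hipified version above renames to its hip*/hiprand* counterpart.
static void allocSamplerScratch(int grid, int block,
                                curandState** randState,
                                unsigned int** deviceCounter)
{
    cudaMalloc(randState, sizeof(curandState) * grid * block);
    cudaMalloc(deviceCounter, sizeof(unsigned int));
    cudaMemset(*deviceCounter, 0, sizeof(unsigned int));
}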
539efe5ad5f0c2a0daa5bd289123fd93f10985a1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <dcgn/dcgn.h> #include <dcgn/CUDAFunctions.h> #include <cstdlib> #include <cstdio> const int MIN_SIZE = 1; const int MAX_SIZE = 1048576; const int ITERS = 30; __shared__ clock_t timers[11]; __global__ void kernel(void * gmem, clock_t * clocks, const dcgn::GPUInitRequest libParam) { dcgn::gpu::init(libParam); int index = 0; for (int i = MIN_SIZE; i <= MAX_SIZE; i *= 2) { dcgn::gpu::barrier(0); timers[index] = clock(); for (int j = 0; j < ITERS; ++j) { dcgn::gpu::send(0, 0, gmem, i); } dcgn::gpu::barrier(0); timers[index] = (clock() - timers[index]) / ITERS; ++index; } for (int i = 0; i < index; ++i) { clocks[i] = timers[i]; } } __host__ void gpuKernel(void * info, const dcgn::GPUInitRequest libParam, const uint3 & gridSize, const uint3 & blockSize, const int sharedMemSize, hipStream_t * const stream) { void ** mem = (void ** )info; hipMalloc(mem, MAX_SIZE); hipLaunchKernelGGL(( kernel), dim3(gridSize), dim3(blockSize), sharedMemSize, *stream, *mem, (clock_t *)*mem, libParam); } __host__ void gpuDtor(void * info) { void ** ptr; clock_t clocks[11]; int deviceIndex = 0; hipDevice_t dev; hipDeviceProp_t prop; hipInit(0); hipDeviceGet(&dev, deviceIndex); hipGetDeviceProperties(&prop, dev); ptr = (void ** )info; hipMemcpy(clocks, *ptr, sizeof(clock_t) * 11, hipMemcpyDeviceToHost); for (int i = 0; i < 11; ++i) { printf("%20.10f\n", ((double)clocks[i] / (double)prop.clockRate) / 1000.0); } hipFree(*ptr); } void cpuKernel(void * info) { void * mem = (void * )malloc(MAX_SIZE); for (int i = MIN_SIZE; i <= MAX_SIZE; i *= 2) { dcgn::CommStatus stat; dcgn::barrier(); for (int j = 0; j < ITERS; ++j) { dcgn::recv(1, mem, i, &stat); } dcgn::barrier(); } free(mem); } int main(int argc, char ** argv) { void * gpuMem; int gpus[] = { 0, -1 }; uint3 gs = { 1, 1, 1 }, bs = { 1, 1, 1 }; dcgn::init(&argc, &argv); dcgn::initComm(-1); dcgn::initCPU(1); dcgn::initGPU(gpus, 1, 0); dcgn::start(); dcgn::launchCPUKernel(0, cpuKernel, 0); dcgn::launchGPUKernel(0, gpuKernel, gpuDtor, &gpuMem, gs, bs); dcgn::finalize(); return 0; }
539efe5ad5f0c2a0daa5bd289123fd93f10985a1.cu
#include <dcgn/dcgn.h> #include <dcgn/CUDAFunctions.h> #include <cstdlib> #include <cstdio> const int MIN_SIZE = 1; const int MAX_SIZE = 1048576; const int ITERS = 30; __shared__ clock_t timers[11]; __global__ void kernel(void * gmem, clock_t * clocks, const dcgn::GPUInitRequest libParam) { dcgn::gpu::init(libParam); int index = 0; for (int i = MIN_SIZE; i <= MAX_SIZE; i *= 2) { dcgn::gpu::barrier(0); timers[index] = clock(); for (int j = 0; j < ITERS; ++j) { dcgn::gpu::send(0, 0, gmem, i); } dcgn::gpu::barrier(0); timers[index] = (clock() - timers[index]) / ITERS; ++index; } for (int i = 0; i < index; ++i) { clocks[i] = timers[i]; } } __host__ void gpuKernel(void * info, const dcgn::GPUInitRequest libParam, const uint3 & gridSize, const uint3 & blockSize, const int sharedMemSize, cudaStream_t * const stream) { void ** mem = (void ** )info; cudaMalloc(mem, MAX_SIZE); kernel<<<gridSize, blockSize, sharedMemSize, *stream>>>(*mem, (clock_t *)*mem, libParam); } __host__ void gpuDtor(void * info) { void ** ptr; clock_t clocks[11]; int deviceIndex = 0; CUdevice dev; CUdevprop prop; cuInit(0); cuDeviceGet(&dev, deviceIndex); cuDeviceGetProperties(&prop, dev); ptr = (void ** )info; cudaMemcpy(clocks, *ptr, sizeof(clock_t) * 11, cudaMemcpyDeviceToHost); for (int i = 0; i < 11; ++i) { printf("%20.10f\n", ((double)clocks[i] / (double)prop.clockRate) / 1000.0); } cudaFree(*ptr); } void cpuKernel(void * info) { void * mem = (void * )malloc(MAX_SIZE); for (int i = MIN_SIZE; i <= MAX_SIZE; i *= 2) { dcgn::CommStatus stat; dcgn::barrier(); for (int j = 0; j < ITERS; ++j) { dcgn::recv(1, mem, i, &stat); } dcgn::barrier(); } free(mem); } int main(int argc, char ** argv) { void * gpuMem; int gpus[] = { 0, -1 }; uint3 gs = { 1, 1, 1 }, bs = { 1, 1, 1 }; dcgn::init(&argc, &argv); dcgn::initComm(-1); dcgn::initCPU(1); dcgn::initGPU(gpus, 1, 0); dcgn::start(); dcgn::launchCPUKernel(0, cpuKernel, 0); dcgn::launchGPUKernel(0, gpuKernel, gpuDtor, &gpuMem, gs, bs); dcgn::finalize(); return 0; }
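Two translations stand out in this pair: the triple-chevron launch kernel<<<gridSize, blockSize, sharedMemSize, *stream>>>(...) becomes the macro form hipLaunchKernelGGL((kernel), dim3(gridSize), dim3(blockSize), sharedMemSize, *stream, ...), and the driver-API property query (CUdevice, CUdevprop, cuDeviceGetProperties) is rewritten to the runtime-style hipDevice_t/hipDeviceProp_t/hipGetDeviceProperties. A small CUDA sketch of the clock-rate arithmetic gpuDtor relies on, using the runtime API and a hypothetical helper name:

#include <cuda_runtime.h>

// Convert a device clock() tick count to milliseconds; clockRate is reported
// in kHz, so ticks / clockRate gives milliseconds (the original divides once
// more by 1000.0 before printing).
static double ticksToMs(long long ticks, int device)
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, device);
    return (double)ticks / (double)prop.clockRate;
}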
246cca9a681ec06b625e03cc5b5420212bb52de9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* .cuda.cu - Copyright 2019 Utrecht University Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include ".cuda.h" namespace lh2core { // path tracing buffers and global variables __constant__ CoreInstanceDesc* instanceDescriptors; __constant__ CoreMaterial* materials; __constant__ CoreLightTri* areaLights; __constant__ CorePointLight* pointLights; __constant__ CoreSpotLight* spotLights; __constant__ CoreDirectionalLight* directionalLights; __constant__ int4 lightCounts; // area, point, spot, directional __constant__ uint* argb32; __constant__ float4* argb128; __constant__ uint* nrm32; __constant__ float3* skyPixels; __constant__ int skywidth; __constant__ int skyheight; __constant__ PathState* pathStates; __constant__ float4* debugData; // path tracer settings __constant__ __device__ float geometryEpsilon; __constant__ __device__ float clampValue; // access __host__ void SetInstanceDescriptors( CoreInstanceDesc* p ) { hipMemcpyToSymbol( instanceDescriptors, &p, sizeof( void* ) ); } __host__ void SetMaterialList( CoreMaterial* p ) { hipMemcpyToSymbol( materials, &p, sizeof( void* ) ); } __host__ void SetAreaLights( CoreLightTri* p ) { hipMemcpyToSymbol( areaLights, &p, sizeof( void* ) ); } __host__ void SetPointLights( CorePointLight* p ) { hipMemcpyToSymbol( pointLights, &p, sizeof( void* ) ); } __host__ void SetSpotLights( CoreSpotLight* p ) { hipMemcpyToSymbol( spotLights, &p, sizeof( void* ) ); } __host__ void SetDirectionalLights( CoreDirectionalLight* p ) { hipMemcpyToSymbol( directionalLights, &p, sizeof( void* ) ); } __host__ void SetLightCounts( int area, int point, int spot, int directional ) { const int4 counts = make_int4( area, point, spot, directional ); hipMemcpyToSymbol( lightCounts, &counts, sizeof( int4 ) ); } __host__ void SetARGB32Pixels( uint* p ) { hipMemcpyToSymbol( argb32, &p, sizeof( void* ) ); } __host__ void SetARGB128Pixels( float4* p ) { hipMemcpyToSymbol( argb128, &p, sizeof( void* ) ); } __host__ void SetNRM32Pixels( uint* p ) { hipMemcpyToSymbol( nrm32, &p, sizeof( void* ) ); } __host__ void SetSkyPixels( float3* p ) { hipMemcpyToSymbol( skyPixels, &p, sizeof( void* ) ); } __host__ void SetSkySize( int w, int h ) { hipMemcpyToSymbol( skywidth, &w, sizeof( int ) ); hipMemcpyToSymbol( skyheight, &h, sizeof( int ) ); } __host__ void SetPathStates( PathState* p ) { hipMemcpyToSymbol( pathStates, &p, sizeof( void* ) ); } __host__ void SetDebugData( float4* p ) { hipMemcpyToSymbol( debugData, &p, sizeof( void* ) ); } // access __host__ void SetGeometryEpsilon( float e ) { hipMemcpyToSymbol( geometryEpsilon, &e, sizeof( float ) ); } __host__ void SetClampValue( float c ) { hipMemcpyToSymbol( clampValue, &c, sizeof( float ) ); } // counters for persistent threads static __device__ Counters* counters; __global__ void InitCountersForExtend_Kernel( int pathCount ) { if (threadIdx.x != 0) return; counters->activePaths = pathCount; // remaining active paths counters->shaded = 0; // persistent thread atomic for shade 
kernel counters->generated = 0; // persistent thread atomic for generate in .optix.cu counters->extensionRays = 0; // compaction counter for extension rays counters->shadowRays = 0; // compaction counter for connections counters->connected = 0; counters->totalExtensionRays = pathCount; counters->totalShadowRays = 0; } __host__ void InitCountersForExtend( int pathCount ) { InitCountersForExtend_Kernel << <1, 32 >> > (pathCount); } __global__ void InitCountersSubsequent_Kernel() { if (threadIdx.x != 0) return; counters->totalExtensionRays += counters->extensionRays; counters->activePaths = counters->extensionRays; // remaining active paths counters->extended = 0; // persistent thread atomic for genSecond in .optix.cu counters->shaded = 0; // persistent thread atomic for shade kernel counters->extensionRays = 0; // compaction counter for extension rays } __host__ void InitCountersSubsequent() { InitCountersSubsequent_Kernel << <1, 32 >> > (); } __host__ void SetCounters( Counters* p ) { hipMemcpyToSymbol( counters, &p, sizeof( void* ) ); } // functional blocks #include "tools_shared.h" #include "sampling_shared.h" #define FILTERINGCORE // in material_shared.h this ensures that albedo exceeds 0; needed for filtering. #include "material_shared.h" #include "lights_shared.h" #include "bsdf.h" #include "pathtracer.h" #include "finalize_shared.h" } // namespace lh2core // EOF
246cca9a681ec06b625e03cc5b5420212bb52de9.cu
/* .cuda.cu - Copyright 2019 Utrecht University Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include ".cuda.h" namespace lh2core { // path tracing buffers and global variables __constant__ CoreInstanceDesc* instanceDescriptors; __constant__ CoreMaterial* materials; __constant__ CoreLightTri* areaLights; __constant__ CorePointLight* pointLights; __constant__ CoreSpotLight* spotLights; __constant__ CoreDirectionalLight* directionalLights; __constant__ int4 lightCounts; // area, point, spot, directional __constant__ uint* argb32; __constant__ float4* argb128; __constant__ uint* nrm32; __constant__ float3* skyPixels; __constant__ int skywidth; __constant__ int skyheight; __constant__ PathState* pathStates; __constant__ float4* debugData; // path tracer settings __constant__ __device__ float geometryEpsilon; __constant__ __device__ float clampValue; // access __host__ void SetInstanceDescriptors( CoreInstanceDesc* p ) { cudaMemcpyToSymbol( instanceDescriptors, &p, sizeof( void* ) ); } __host__ void SetMaterialList( CoreMaterial* p ) { cudaMemcpyToSymbol( materials, &p, sizeof( void* ) ); } __host__ void SetAreaLights( CoreLightTri* p ) { cudaMemcpyToSymbol( areaLights, &p, sizeof( void* ) ); } __host__ void SetPointLights( CorePointLight* p ) { cudaMemcpyToSymbol( pointLights, &p, sizeof( void* ) ); } __host__ void SetSpotLights( CoreSpotLight* p ) { cudaMemcpyToSymbol( spotLights, &p, sizeof( void* ) ); } __host__ void SetDirectionalLights( CoreDirectionalLight* p ) { cudaMemcpyToSymbol( directionalLights, &p, sizeof( void* ) ); } __host__ void SetLightCounts( int area, int point, int spot, int directional ) { const int4 counts = make_int4( area, point, spot, directional ); cudaMemcpyToSymbol( lightCounts, &counts, sizeof( int4 ) ); } __host__ void SetARGB32Pixels( uint* p ) { cudaMemcpyToSymbol( argb32, &p, sizeof( void* ) ); } __host__ void SetARGB128Pixels( float4* p ) { cudaMemcpyToSymbol( argb128, &p, sizeof( void* ) ); } __host__ void SetNRM32Pixels( uint* p ) { cudaMemcpyToSymbol( nrm32, &p, sizeof( void* ) ); } __host__ void SetSkyPixels( float3* p ) { cudaMemcpyToSymbol( skyPixels, &p, sizeof( void* ) ); } __host__ void SetSkySize( int w, int h ) { cudaMemcpyToSymbol( skywidth, &w, sizeof( int ) ); cudaMemcpyToSymbol( skyheight, &h, sizeof( int ) ); } __host__ void SetPathStates( PathState* p ) { cudaMemcpyToSymbol( pathStates, &p, sizeof( void* ) ); } __host__ void SetDebugData( float4* p ) { cudaMemcpyToSymbol( debugData, &p, sizeof( void* ) ); } // access __host__ void SetGeometryEpsilon( float e ) { cudaMemcpyToSymbol( geometryEpsilon, &e, sizeof( float ) ); } __host__ void SetClampValue( float c ) { cudaMemcpyToSymbol( clampValue, &c, sizeof( float ) ); } // counters for persistent threads static __device__ Counters* counters; __global__ void InitCountersForExtend_Kernel( int pathCount ) { if (threadIdx.x != 0) return; counters->activePaths = pathCount; // remaining active paths counters->shaded = 0; // persistent thread atomic for shade kernel counters->generated = 0; // persistent thread atomic for generate in 
.optix.cu counters->extensionRays = 0; // compaction counter for extension rays counters->shadowRays = 0; // compaction counter for connections counters->connected = 0; counters->totalExtensionRays = pathCount; counters->totalShadowRays = 0; } __host__ void InitCountersForExtend( int pathCount ) { InitCountersForExtend_Kernel << <1, 32 >> > (pathCount); } __global__ void InitCountersSubsequent_Kernel() { if (threadIdx.x != 0) return; counters->totalExtensionRays += counters->extensionRays; counters->activePaths = counters->extensionRays; // remaining active paths counters->extended = 0; // persistent thread atomic for genSecond in .optix.cu counters->shaded = 0; // persistent thread atomic for shade kernel counters->extensionRays = 0; // compaction counter for extension rays } __host__ void InitCountersSubsequent() { InitCountersSubsequent_Kernel << <1, 32 >> > (); } __host__ void SetCounters( Counters* p ) { cudaMemcpyToSymbol( counters, &p, sizeof( void* ) ); } // functional blocks #include "tools_shared.h" #include "sampling_shared.h" #define FILTERINGCORE // in material_shared.h this ensures that albedo exceeds 0; needed for filtering. #include "material_shared.h" #include "lights_shared.h" #include "bsdf.h" #include "pathtracer.h" #include "finalize_shared.h" } // namespace lh2core // EOF
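Apart from the injected #include "hip/hip_runtime.h", the only work hipify does in this pair is renaming cudaMemcpyToSymbol to hipMemcpyToSymbol; the __constant__ declarations themselves are untouched. All of the Set* helpers follow the same pattern of copying a device pointer value (not the buffer it points to) into a __constant__ symbol, which the following self-contained CUDA sketch makes explicit (gDebugData and SetDebugBuffer are illustrative names, not identifiers from the file):

#include <cuda_runtime.h>

// Device-side handle published to all kernels through constant memory.
__constant__ float4* gDebugData;

__global__ void writeDebug(float4 v)
{
    gDebugData[threadIdx.x] = v;  // kernels read the pointer straight from the symbol
}

void SetDebugBuffer(float4* devPtr)
{
    // Copy the pointer value (sizeof(void*) bytes) into the symbol; the
    // pointed-to buffer must already live in device memory.
    cudaMemcpyToSymbol(gDebugData, &devPtr, sizeof(void*));
}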
7cc87fa38dd99e825144ee64d2fb79c28d5ead1e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // Created by Niu Chuang on 17-9-30. // #include <cfloat> #include <vector> #include "caffe/layers/eltwise_td_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SumBackward(const int nthreads, const Dtype* bottom_data, const Dtype* bottom_data_a, const Dtype* out_data, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { top_data[index] = out_data[index] == 0 ? Dtype(0):bottom_data[index]*bottom_data_a[index]/out_data[index]; } } template <typename Dtype> void EltwiseTDLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // int* mask = NULL; // const int count = top[0]->count(); // Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* out_data = bottom[1]->gpu_data(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: NOT_IMPLEMENTED; break; case EltwiseParameter_EltwiseOp_SUM: // caffe_gpu_set(count, Dtype(0.), top_data); // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? // for (int i = 0; i < bottom.size(); ++i) { // caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data); // } for (int i = 0; i < top.size(); ++i){ const int count = top[i]->count(); Dtype* top_data = top[i]->mutable_gpu_data(); const Dtype* bottom_data_a = bottom[i+2]->gpu_data(); hipLaunchKernelGGL(( SumBackward<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom_data, bottom_data_a, out_data, top_data); // for (int j = 0; j < count; ++j){ // top_data[j] = out_data[j] == 0 ? Dtype(0):bottom_data_a[j]*bottom_data[j]/out_data[j]; // } } break; case EltwiseParameter_EltwiseOp_MAX: NOT_IMPLEMENTED; break; default: LOG(FATAL) << "Unknown elementwise operation."; } } template <typename Dtype> __global__ void Backward_all(const int nthreads, const Dtype* top_diff_a, const Dtype* top_diff_b, const Dtype* activation_data_a, const Dtype* activation_data_b, const Dtype* bottom_data, const Dtype* out_data, Dtype* bottom_diff, Dtype* activation_diff_a, Dtype* activation_diff_b) { CUDA_KERNEL_LOOP(index, nthreads) { bottom_diff[index] = out_data[index] == 0 ? Dtype(0):(top_diff_a[index]*activation_data_a[index] + top_diff_b[index]*activation_data_b[index])/out_data[index]; activation_diff_a[index] = out_data[index] == 0 ? Dtype(0):(top_diff_a[index] - top_diff_b[index]) * bottom_data[index] * activation_data_b[index] / (out_data[index] * out_data[index]); activation_diff_b[index] = out_data[index] == 0 ? 
Dtype(0):(top_diff_b[index] - top_diff_a[index]) * bottom_data[index] * activation_data_a[index] / (out_data[index] * out_data[index]); } } template <typename Dtype> void EltwiseTDLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!(propagate_down[0] || propagate_down[2] || propagate_down[3])) { return; } const Dtype* top_diff_a = top[0]->gpu_diff(); const Dtype* top_diff_b = top[1]->gpu_diff(); const Dtype* out_data = bottom[1]->gpu_data(); const Dtype* activation_data_a = bottom[2]->gpu_data(); const Dtype* activation_data_b = bottom[3]->gpu_data(); Dtype* activation_diff_a = bottom[2]->mutable_gpu_diff(); Dtype* activation_diff_b = bottom[3]->mutable_gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: NOT_IMPLEMENTED; break; case EltwiseParameter_EltwiseOp_SUM: hipLaunchKernelGGL(( Backward_all<Dtype>) , dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_diff_a, top_diff_b, activation_data_a, activation_data_b, bottom_data, out_data, bottom_diff, activation_diff_a, activation_diff_b); break; case EltwiseParameter_EltwiseOp_MAX: NOT_IMPLEMENTED; break; default: LOG(FATAL)<< "Unknown elementwise operation."; } } INSTANTIATE_LAYER_GPU_FUNCS(EltwiseTDLayer); } // namespace caffe
7cc87fa38dd99e825144ee64d2fb79c28d5ead1e.cu
// // Created by Niu Chuang on 17-9-30. // #include <cfloat> #include <vector> #include "caffe/layers/eltwise_td_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SumBackward(const int nthreads, const Dtype* bottom_data, const Dtype* bottom_data_a, const Dtype* out_data, Dtype* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { top_data[index] = out_data[index] == 0 ? Dtype(0):bottom_data[index]*bottom_data_a[index]/out_data[index]; } } template <typename Dtype> void EltwiseTDLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // int* mask = NULL; // const int count = top[0]->count(); // Dtype* top_data = top[0]->mutable_gpu_data(); const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* out_data = bottom[1]->gpu_data(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: NOT_IMPLEMENTED; break; case EltwiseParameter_EltwiseOp_SUM: // caffe_gpu_set(count, Dtype(0.), top_data); // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? // for (int i = 0; i < bottom.size(); ++i) { // caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data); // } for (int i = 0; i < top.size(); ++i){ const int count = top[i]->count(); Dtype* top_data = top[i]->mutable_gpu_data(); const Dtype* bottom_data_a = bottom[i+2]->gpu_data(); SumBackward<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom_data, bottom_data_a, out_data, top_data); // for (int j = 0; j < count; ++j){ // top_data[j] = out_data[j] == 0 ? Dtype(0):bottom_data_a[j]*bottom_data[j]/out_data[j]; // } } break; case EltwiseParameter_EltwiseOp_MAX: NOT_IMPLEMENTED; break; default: LOG(FATAL) << "Unknown elementwise operation."; } } template <typename Dtype> __global__ void Backward_all(const int nthreads, const Dtype* top_diff_a, const Dtype* top_diff_b, const Dtype* activation_data_a, const Dtype* activation_data_b, const Dtype* bottom_data, const Dtype* out_data, Dtype* bottom_diff, Dtype* activation_diff_a, Dtype* activation_diff_b) { CUDA_KERNEL_LOOP(index, nthreads) { bottom_diff[index] = out_data[index] == 0 ? Dtype(0):(top_diff_a[index]*activation_data_a[index] + top_diff_b[index]*activation_data_b[index])/out_data[index]; activation_diff_a[index] = out_data[index] == 0 ? Dtype(0):(top_diff_a[index] - top_diff_b[index]) * bottom_data[index] * activation_data_b[index] / (out_data[index] * out_data[index]); activation_diff_b[index] = out_data[index] == 0 ? 
Dtype(0):(top_diff_b[index] - top_diff_a[index]) * bottom_data[index] * activation_data_a[index] / (out_data[index] * out_data[index]); } } template <typename Dtype> void EltwiseTDLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (!(propagate_down[0] || propagate_down[2] || propagate_down[3])) { return; } const Dtype* top_diff_a = top[0]->gpu_diff(); const Dtype* top_diff_b = top[1]->gpu_diff(); const Dtype* out_data = bottom[1]->gpu_data(); const Dtype* activation_data_a = bottom[2]->gpu_data(); const Dtype* activation_data_b = bottom[3]->gpu_data(); Dtype* activation_diff_a = bottom[2]->mutable_gpu_diff(); Dtype* activation_diff_b = bottom[3]->mutable_gpu_diff(); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: NOT_IMPLEMENTED; break; case EltwiseParameter_EltwiseOp_SUM: Backward_all<Dtype> <<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_diff_a, top_diff_b, activation_data_a, activation_data_b, bottom_data, out_data, bottom_diff, activation_diff_a, activation_diff_b); break; case EltwiseParameter_EltwiseOp_MAX: NOT_IMPLEMENTED; break; default: LOG(FATAL)<< "Unknown elementwise operation."; } } INSTANTIATE_LAYER_GPU_FUNCS(EltwiseTDLayer); } // namespace caffe
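This pair shows how hipify handles templated launches: SumBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(...) becomes hipLaunchKernelGGL((SumBackward<Dtype>), dim3(...), dim3(...), 0, 0, ...), with the template instantiation wrapped in an extra pair of parentheses so that the comma inside the angle brackets is not treated as a macro argument separator. A self-contained stand-in for the same launch pattern (scale_kernel, scale_gpu, and the 512-thread block size are illustrative, not Caffe's definitions):

#include <cuda_runtime.h>

// Grid-stride elementwise kernel in the style of CUDA_KERNEL_LOOP.
template <typename T>
__global__ void scale_kernel(int n, T alpha, const T* x, T* y)
{
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x)
        y[i] = alpha * x[i];
}

template <typename T>
void scale_gpu(int n, T alpha, const T* x, T* y)
{
    const int threads = 512;
    const int blocks = (n + threads - 1) / threads;
    scale_kernel<T><<<blocks, threads>>>(n, alpha, x, y);
}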
59400923dfde14b8a60b91da264c9ce72d7633da.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void fix_nan_and_inf_kernel(float *input, size_t size) { const int index = blockIdx.x*blockDim.x + threadIdx.x; if (index < size) { float val = input[index]; if (isnan(val) || isinf(val)) { input[index] = 1.0f / (fabs((float)index) + 1); // pseudo random value } } }
59400923dfde14b8a60b91da264c9ce72d7633da.cu
#include "includes.h" __global__ void fix_nan_and_inf_kernel(float *input, size_t size) { const int index = blockIdx.x*blockDim.x + threadIdx.x; if (index < size) { float val = input[index]; if (isnan(val) || isinf(val)) { input[index] = 1.0f / (fabs((float)index) + 1); // pseudo random value } } }
7700628bc1383c686938cf1eb5c535bbed56816f.hip
// !!! This is a file automatically generated by hipify!!! #include "SDL.h" #include <stdio.h> #include <time.h> #include <stdint.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <complex> #include <thrust/complex.h> #define LEN 1024 #define LENSHIFT 10 #define ITERMAX 1024 #define getindex(i, j) (((i)<<LENSHIFT)+(j)) #define NCOLOR 64 #define NCOLORMASK 63 SDL_Window *screen; SDL_Renderer *ren; SDL_Texture *tex; SDL_Surface *mysurf; uint32_t iterscpu[LEN*LEN]; uint32_t colors[NCOLOR+1]; uint32_t* iters; /* ---- Kernel definition ---- This method does exactly the same as the CPU method This method is used to determine whether a value is in or out of the Mandelbrot set. We run through the formula Zn + 1 = Zn ^ 2 + C up to itermax (this is the depth of the previous image) and at each iteration, we check if the real and imaginary part of Z is greater than 2; if this happens we return the last iteration (which will give the color of the corresponding pixel). So we'll return the iteration in which it went out of bounds, and then interpret this number as a color. If it completes the loop without going out of bounds, we will give it the color black */ __device__ uint32_t compute_iteration_gpu(double ci, double cj, uint32_t itermax) { // We cannot use the complexe number of std, so we programming it manually double zRe = 0; // real number double zIm = 0; // imaginary number double zReRes = 0; double zImRes = 0; uint32_t iter = 0; // iteration for (iter = 0; iter < itermax; iter++){ // Compute the new imaginary and real part of Z zIm = zRe * zIm; zIm += zIm + cj; zRe = zReRes - zImRes + ci; zReRes = zRe * zRe; zImRes = zIm * zIm; if (zReRes + zImRes >= 4.0) break; // greater than 2, so the value is out of the Mandelbrot set } return iter; } /* OLD METHOD : The compute_iteration_gpu method is similar to the compute_iteration_cpu method. using thrust api ! */ /*__device__ uint32_t compute_iteration_gpu(double ci, double cj, uint32_t itermax) { thrust::complex<double> z(0); thrust::complex<double> c(ci, cj); uint32_t iter = 0; for (iter = 0; iter < itermax; iter++) { z = (z * z) + c; if(abs(z) >= 2) break; } return iter; } */ /* CPU Version This method is used to determine whether a value is in or out of the Mandelbrot set. We run through the formula Zn + 1 = Zn ^ 2 + C up to itermax (this is the depth of the previous image) and at each iteration, we check if the real and imaginary part of Z is greater than 2; if this happens we return the last iteration (which will give the color of the corresponding pixel). So we'll return the iteration in which it went out of bounds, and then interpret this number as a color. If it completes the loop without going out of bounds, we will give it the color black */ int compute_iteration_cpu(double ci, double cj, uint32_t itermax) { // use complex type for complexe number from std std::complex<double> z(0); std::complex<double> c(ci, cj); uint32_t iter = 0; for (iter = 0; iter < itermax; iter++) { // Compute the new imaginary and real part of Z z = (z * z) + c; if(abs(z) >= 2) break; // greater than 2, so the value is out of the Mandelbrot set } return iter; } /* Mandelbrot using CPU This function compute all iteration of complexe number, for each pixel i and j and stock the result into the arr. X and Y parameter corresponds to the starting point of the iteration, i.e. at the top left of the screen (coordinate (0, 0)) The parameter delta is used to know the next pixel to compute the itration. 
The variable itermax (1024) is there to give the maximum depth to calculate for the image */ void iterate_cpu(uint32_t *arr, double x, double y, double delta, uint32_t itermax) { for (int i = 0; i < LEN * LEN; i++) { int xi = i % LEN; // index i, 0 to 1023, avoid to used two loop int yj = i / LEN; // index j, 0 to 1023, avoid to used two loop double ci = x + (yj * delta); double cj = y - (xi * delta); arr[getindex(xi, yj)] = compute_iteration_cpu(ci, cj, itermax); // compute itration } return; } /* ---- Kernel definition ---- Mandelbrot using GPU Unlike the CPU method, we do not need to do a loop, because it is handled by the threads of each grid, one thread will take care of the calculation for each iteration. X and Y parameter corresponds to the starting point of the iteration, i.e. at the top left of the screen (coordinate (0, 0)) The parameter delta is used to know the next pixel to compute the itration. The variable itermax (1024) is there to give the maximum depth to calculate for the image */ __global__ void iterate_gpu(uint32_t* arr, double x, double y, double delta, uint32_t itermax){ int tId = blockDim.x * blockIdx.x + threadIdx.x ; int xi = tId % LEN; // index i, 0 to 1023 int yj = tId / LEN; // index j, 0 to 1023 double ci = x + (yj * delta); double cj = y - (xi * delta); arr[getindex(xi, yj)] = compute_iteration_gpu(ci, cj, itermax); // compute the iteration return; } /* This function call the kernel method (using __global___) */ void kernel_call(uint32_t* arr, double x, double y, double delta, uint32_t itermax){ uint32_t thread_max = 1024; // number max of thread per grid int number_of_blocks = ceil((LEN*LEN)/thread_max); // calculate ideal number grid based on screen size hipLaunchKernelGGL(( iterate_gpu), dim3(number_of_blocks), dim3(thread_max), 0, 0, arr, x, y, delta, itermax); // Kernel invocation with 1024 threads hipDeviceSynchronize(); return; } void generate_colors(const SDL_PixelFormat* format){ double h = 0.0; for(int i=0; i<NCOLOR; i++){ int ph = h / 60; float f = (h/60.0 - ph); int v = 255; int p = 64; int q = (int)(255*(1 - f*0.75f)); int t = (int)(255*(0.25f + f*0.75f)); switch(ph){ case 0: colors[i] = SDL_MapRGB(format, v, t, p); break; case 1: colors[i] = SDL_MapRGB(format, q, v, p); break; case 2: colors[i] = SDL_MapRGB(format, p, v, t); break; case 3: colors[i] = SDL_MapRGB(format, p, q, v); break; case 4: colors[i] = SDL_MapRGB(format, t, p, v); break; case 5: colors[i] = SDL_MapRGB(format, v, p, q); break; default: break; } h += 360.0/NCOLOR; } colors[NCOLOR] = SDL_MapRGB(format, 0, 0, 0); return; } int main(int argc, char** argv){ SDL_Event e; bool usegpu = false; if(argc > 1){ usegpu = (strcmp(argv[1], "gpu") == 0); } uint32_t* gpuarray; uint32_t* hostarray; // Initialize SDL if( SDL_Init(SDL_INIT_VIDEO) < 0 ) { fprintf(stderr, "Couldn't initialize SDL: %s\n", SDL_GetError()); exit(1); } atexit(SDL_Quit); // Create window screen = SDL_CreateWindow("Mandelbrot", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, LEN, LEN, SDL_WINDOW_SHOWN); if ( screen == NULL ) { fprintf(stderr, "Couldn't set up window: %s\n", SDL_GetError()); exit(1); } // Initialize CUDA if(usegpu){ hipSetDeviceFlags(hipDeviceScheduleBlockingSync); hipMalloc((void**)&gpuarray, LEN*LEN*sizeof(uint32_t)); hipHostMalloc((void**)&hostarray, LEN*LEN*sizeof(uint32_t), hipHostMallocDefault); } // Create renderer and texture SDL_PixelFormat* fmt = SDL_AllocFormat(SDL_PIXELFORMAT_RGBA32); generate_colors(fmt); ren = SDL_CreateRenderer(screen, -1, SDL_RENDERER_ACCELERATED | 
SDL_RENDERER_PRESENTVSYNC); tex = SDL_CreateTexture(ren, fmt->format, SDL_TEXTUREACCESS_STREAMING, LEN, LEN); // Timing float totaltime = 0.0f; uint32_t frames = 0; // Window for Mandelbrot double targetx = -0.743643887037158704752191506114774; double targety = 0.131825904205311970493132056385139; double centerx = 0.0; double centery = 0.0; double delta = 4.0/LEN; const double scale = 0.94; uint32_t itermax = 32; const uint32_t iterstep = 8; while(true){ bool flag = false; while(SDL_PollEvent(&e)){ if(e.type==SDL_QUIT){ flag = true; } } if(flag) break; clock_t t; float tsec; t = clock(); // renderer if(!usegpu){ iterate_cpu(iterscpu, centerx - delta*LEN/2, centery + delta*LEN/2, delta, itermax); iters = iterscpu; }else{ kernel_call(gpuarray, centerx - delta*LEN/2, centery + delta*LEN/2, delta, itermax); hipMemcpyAsync(hostarray, gpuarray, LEN * LEN * sizeof(uint32_t), hipMemcpyDeviceToHost); hipDeviceSynchronize(); iters = hostarray; } int len = LEN; uint32_t* surf = NULL; SDL_LockTexture(tex, NULL, (void**)(&surf), &len); for(uint32_t i=0; i<LEN*LEN; i++){ if (iters[i] < itermax){ surf[i] = colors[iters[i]&NCOLORMASK]; }else{ surf[i] = colors[NCOLOR]; } } SDL_UnlockTexture(tex); SDL_RenderClear(ren); SDL_RenderCopy(ren, tex, NULL, NULL); SDL_RenderPresent(ren); centerx = targetx + (centerx - targetx)*scale; centery = targety + (centery - targety)*scale; delta *= scale; itermax += iterstep; t = clock() - t; tsec = ((float)t)/CLOCKS_PER_SEC; totaltime += tsec; tsec = 1.0f/60 - tsec; if(tsec > 0) SDL_Delay((uint32_t)(tsec*1000)); frames++; if(frames>=530) break; } char s[100]; sprintf(s, "Average FPS: %.1f\nFrame count: %u", frames/totaltime, frames); SDL_ShowSimpleMessageBox(SDL_MESSAGEBOX_INFORMATION, "Benchmark", s, screen); SDL_FreeFormat(fmt); SDL_DestroyTexture(tex); SDL_DestroyRenderer(ren); SDL_DestroyWindow(screen); if(usegpu){ hipFree(gpuarray); hipHostFree(hostarray); } exit(0); }
7700628bc1383c686938cf1eb5c535bbed56816f.cu
#include "SDL.h" #include <stdio.h> #include <time.h> #include <stdint.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <iostream> #include <complex> #include <thrust/complex.h> #define LEN 1024 #define LENSHIFT 10 #define ITERMAX 1024 #define getindex(i, j) (((i)<<LENSHIFT)+(j)) #define NCOLOR 64 #define NCOLORMASK 63 SDL_Window *screen; SDL_Renderer *ren; SDL_Texture *tex; SDL_Surface *mysurf; uint32_t iterscpu[LEN*LEN]; uint32_t colors[NCOLOR+1]; uint32_t* iters; /* ---- Kernel definition ---- This method does exactly the same as the CPU method This method is used to determine whether a value is in or out of the Mandelbrot set. We run through the formula Zn + 1 = Zn ^ 2 + C up to itermax (this is the depth of the previous image) and at each iteration, we check if the real and imaginary part of Z is greater than 2; if this happens we return the last iteration (which will give the color of the corresponding pixel). So we'll return the iteration in which it went out of bounds, and then interpret this number as a color. If it completes the loop without going out of bounds, we will give it the color black */ __device__ uint32_t compute_iteration_gpu(double ci, double cj, uint32_t itermax) { // We cannot use the complexe number of std, so we programming it manually double zRe = 0; // real number double zIm = 0; // imaginary number double zReRes = 0; double zImRes = 0; uint32_t iter = 0; // iteration for (iter = 0; iter < itermax; iter++){ // Compute the new imaginary and real part of Z zIm = zRe * zIm; zIm += zIm + cj; zRe = zReRes - zImRes + ci; zReRes = zRe * zRe; zImRes = zIm * zIm; if (zReRes + zImRes >= 4.0) break; // greater than 2, so the value is out of the Mandelbrot set } return iter; } /* OLD METHOD : The compute_iteration_gpu method is similar to the compute_iteration_cpu method. using thrust api ! */ /*__device__ uint32_t compute_iteration_gpu(double ci, double cj, uint32_t itermax) { thrust::complex<double> z(0); thrust::complex<double> c(ci, cj); uint32_t iter = 0; for (iter = 0; iter < itermax; iter++) { z = (z * z) + c; if(abs(z) >= 2) break; } return iter; } */ /* CPU Version This method is used to determine whether a value is in or out of the Mandelbrot set. We run through the formula Zn + 1 = Zn ^ 2 + C up to itermax (this is the depth of the previous image) and at each iteration, we check if the real and imaginary part of Z is greater than 2; if this happens we return the last iteration (which will give the color of the corresponding pixel). So we'll return the iteration in which it went out of bounds, and then interpret this number as a color. If it completes the loop without going out of bounds, we will give it the color black */ int compute_iteration_cpu(double ci, double cj, uint32_t itermax) { // use complex type for complexe number from std std::complex<double> z(0); std::complex<double> c(ci, cj); uint32_t iter = 0; for (iter = 0; iter < itermax; iter++) { // Compute the new imaginary and real part of Z z = (z * z) + c; if(abs(z) >= 2) break; // greater than 2, so the value is out of the Mandelbrot set } return iter; } /* Mandelbrot using CPU This function compute all iteration of complexe number, for each pixel i and j and stock the result into the arr. X and Y parameter corresponds to the starting point of the iteration, i.e. at the top left of the screen (coordinate (0, 0)) The parameter delta is used to know the next pixel to compute the itération. 
The variable itermax (1024) is there to give the maximum depth to calculate for the image */ void iterate_cpu(uint32_t *arr, double x, double y, double delta, uint32_t itermax) { for (int i = 0; i < LEN * LEN; i++) { int xi = i % LEN; // index i, 0 to 1023, avoid to used two loop int yj = i / LEN; // index j, 0 to 1023, avoid to used two loop double ci = x + (yj * delta); double cj = y - (xi * delta); arr[getindex(xi, yj)] = compute_iteration_cpu(ci, cj, itermax); // compute itération } return; } /* ---- Kernel definition ---- Mandelbrot using GPU Unlike the CPU method, we do not need to do a loop, because it is handled by the threads of each grid, one thread will take care of the calculation for each iteration. X and Y parameter corresponds to the starting point of the iteration, i.e. at the top left of the screen (coordinate (0, 0)) The parameter delta is used to know the next pixel to compute the itération. The variable itermax (1024) is there to give the maximum depth to calculate for the image */ __global__ void iterate_gpu(uint32_t* arr, double x, double y, double delta, uint32_t itermax){ int tId = blockDim.x * blockIdx.x + threadIdx.x ; int xi = tId % LEN; // index i, 0 to 1023 int yj = tId / LEN; // index j, 0 to 1023 double ci = x + (yj * delta); double cj = y - (xi * delta); arr[getindex(xi, yj)] = compute_iteration_gpu(ci, cj, itermax); // compute the iteration return; } /* This function call the kernel method (using __global___) */ void kernel_call(uint32_t* arr, double x, double y, double delta, uint32_t itermax){ uint32_t thread_max = 1024; // number max of thread per grid int number_of_blocks = ceil((LEN*LEN)/thread_max); // calculate ideal number grid based on screen size iterate_gpu<<<number_of_blocks, thread_max, 0>>>(arr, x, y, delta, itermax); // Kernel invocation with 1024 threads cudaDeviceSynchronize(); return; } void generate_colors(const SDL_PixelFormat* format){ double h = 0.0; for(int i=0; i<NCOLOR; i++){ int ph = h / 60; float f = (h/60.0 - ph); int v = 255; int p = 64; int q = (int)(255*(1 - f*0.75f)); int t = (int)(255*(0.25f + f*0.75f)); switch(ph){ case 0: colors[i] = SDL_MapRGB(format, v, t, p); break; case 1: colors[i] = SDL_MapRGB(format, q, v, p); break; case 2: colors[i] = SDL_MapRGB(format, p, v, t); break; case 3: colors[i] = SDL_MapRGB(format, p, q, v); break; case 4: colors[i] = SDL_MapRGB(format, t, p, v); break; case 5: colors[i] = SDL_MapRGB(format, v, p, q); break; default: break; } h += 360.0/NCOLOR; } colors[NCOLOR] = SDL_MapRGB(format, 0, 0, 0); return; } int main(int argc, char** argv){ SDL_Event e; bool usegpu = false; if(argc > 1){ usegpu = (strcmp(argv[1], "gpu") == 0); } uint32_t* gpuarray; uint32_t* hostarray; // Initialize SDL if( SDL_Init(SDL_INIT_VIDEO) < 0 ) { fprintf(stderr, "Couldn't initialize SDL: %s\n", SDL_GetError()); exit(1); } atexit(SDL_Quit); // Create window screen = SDL_CreateWindow("Mandelbrot", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED, LEN, LEN, SDL_WINDOW_SHOWN); if ( screen == NULL ) { fprintf(stderr, "Couldn't set up window: %s\n", SDL_GetError()); exit(1); } // Initialize CUDA if(usegpu){ cudaSetDeviceFlags(cudaDeviceScheduleBlockingSync); cudaMalloc((void**)&gpuarray, LEN*LEN*sizeof(uint32_t)); cudaHostAlloc((void**)&hostarray, LEN*LEN*sizeof(uint32_t), cudaHostAllocDefault); } // Create renderer and texture SDL_PixelFormat* fmt = SDL_AllocFormat(SDL_PIXELFORMAT_RGBA32); generate_colors(fmt); ren = SDL_CreateRenderer(screen, -1, SDL_RENDERER_ACCELERATED | SDL_RENDERER_PRESENTVSYNC); tex = 
SDL_CreateTexture(ren, fmt->format, SDL_TEXTUREACCESS_STREAMING, LEN, LEN); // Timing float totaltime = 0.0f; uint32_t frames = 0; // Window for Mandelbrot double targetx = -0.743643887037158704752191506114774; double targety = 0.131825904205311970493132056385139; double centerx = 0.0; double centery = 0.0; double delta = 4.0/LEN; const double scale = 0.94; uint32_t itermax = 32; const uint32_t iterstep = 8; while(true){ bool flag = false; while(SDL_PollEvent(&e)){ if(e.type==SDL_QUIT){ flag = true; } } if(flag) break; clock_t t; float tsec; t = clock(); // renderer if(!usegpu){ iterate_cpu(iterscpu, centerx - delta*LEN/2, centery + delta*LEN/2, delta, itermax); iters = iterscpu; }else{ kernel_call(gpuarray, centerx - delta*LEN/2, centery + delta*LEN/2, delta, itermax); cudaMemcpyAsync(hostarray, gpuarray, LEN * LEN * sizeof(uint32_t), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); iters = hostarray; } int len = LEN; uint32_t* surf = NULL; SDL_LockTexture(tex, NULL, (void**)(&surf), &len); for(uint32_t i=0; i<LEN*LEN; i++){ if (iters[i] < itermax){ surf[i] = colors[iters[i]&NCOLORMASK]; }else{ surf[i] = colors[NCOLOR]; } } SDL_UnlockTexture(tex); SDL_RenderClear(ren); SDL_RenderCopy(ren, tex, NULL, NULL); SDL_RenderPresent(ren); centerx = targetx + (centerx - targetx)*scale; centery = targety + (centery - targety)*scale; delta *= scale; itermax += iterstep; t = clock() - t; tsec = ((float)t)/CLOCKS_PER_SEC; totaltime += tsec; tsec = 1.0f/60 - tsec; if(tsec > 0) SDL_Delay((uint32_t)(tsec*1000)); frames++; if(frames>=530) break; } char s[100]; sprintf(s, "Average FPS: %.1f\nFrame count: %u", frames/totaltime, frames); SDL_ShowSimpleMessageBox(SDL_MESSAGEBOX_INFORMATION, "Benchmark", s, screen); SDL_FreeFormat(fmt); SDL_DestroyTexture(tex); SDL_DestroyRenderer(ren); SDL_DestroyWindow(screen); if(usegpu){ cudaFree(gpuarray); cudaFreeHost(hostarray); } exit(0); }
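Beyond the launch rewrite, the Mandelbrot pair exercises the pinned-memory API: cudaHostAlloc/cudaHostAllocDefault/cudaFreeHost map to hipHostMalloc/hipHostMallocDefault/hipHostFree, while cudaMemcpyAsync and cudaSetDeviceFlags keep their names apart from the prefix. A short CUDA sketch of that round trip (helper names are hypothetical; the default stream is assumed, as in main() above):

#include <cstdint>
#include <cuda_runtime.h>

// Page-locked host buffer so the device-to-host copy can run asynchronously.
static uint32_t* makePinnedIterBuffer(size_t count)
{
    uint32_t* host = nullptr;
    cudaHostAlloc((void**)&host, count * sizeof(uint32_t), cudaHostAllocDefault);
    return host;
}

static void fetchIterations(uint32_t* host, const uint32_t* dev, size_t count)
{
    cudaMemcpyAsync(host, dev, count * sizeof(uint32_t), cudaMemcpyDeviceToHost);
    cudaDeviceSynchronize();  // copy was issued on the default stream, so wait before reading
}

One side note on the launch configuration in kernel_call: ceil((LEN*LEN)/thread_max) performs integer division before the ceil, which only yields full coverage because LEN*LEN happens to be an exact multiple of 1024; the usual robust form is (LEN*LEN + thread_max - 1) / thread_max.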
b629280e3ea38f296664e2b61e9a55807dbed778.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> #include <time.h> __global__ void add_vectors(float *a, float *b, float *c, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { c[idx] = a[idx] + b[idx]; } } int main(void) { float *a_h, *a_d, *b_h, *b_d, *c_h, *c_d; const int N = 10; size_t size = N * sizeof(float); a_h = (float *)malloc(size); b_h = (float *)malloc(size); c_h = (float *)malloc(size); srand(time(NULL)); hipMalloc((void **) &a_d, size); hipMalloc((void **) &b_d, size); hipMalloc((void **) &c_d, size); for (int i=0; i<N; i++) { a_h[i] = rand() / (float)RAND_MAX; b_h[i] = rand() / (float)RAND_MAX; } hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice); hipMemcpy(b_d, b_h, size, hipMemcpyHostToDevice); int block_size = 4; int n_blocks = N/block_size + (N % block_size == 0 ? 0 : 1); hipLaunchKernelGGL(( add_vectors), dim3(n_blocks), dim3(block_size), 0, 0, a_d, b_d, c_d, N); hipMemcpy(c_h, c_d, sizeof(float)*N, hipMemcpyDeviceToHost); for (int i=0; i<N; i++) { printf("%d\t%f\t%f\t= %f\n", i, a_h[i], b_h[i], c_h[i]); } free(a_h); free(b_h); free(c_h); hipFree(a_d); hipFree(b_d); hipFree(c_d); }
b629280e3ea38f296664e2b61e9a55807dbed778.cu
#include <stdio.h> #include <cuda.h> #include <time.h> __global__ void add_vectors(float *a, float *b, float *c, int N) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx < N) { c[idx] = a[idx] + b[idx]; } } int main(void) { float *a_h, *a_d, *b_h, *b_d, *c_h, *c_d; const int N = 10; size_t size = N * sizeof(float); a_h = (float *)malloc(size); b_h = (float *)malloc(size); c_h = (float *)malloc(size); srand(time(NULL)); cudaMalloc((void **) &a_d, size); cudaMalloc((void **) &b_d, size); cudaMalloc((void **) &c_d, size); for (int i=0; i<N; i++) { a_h[i] = rand() / (float)RAND_MAX; b_h[i] = rand() / (float)RAND_MAX; } cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice); cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice); int block_size = 4; int n_blocks = N/block_size + (N % block_size == 0 ? 0 : 1); add_vectors<<<n_blocks, block_size>>> (a_d, b_d, c_d, N); cudaMemcpy(c_h, c_d, sizeof(float)*N, cudaMemcpyDeviceToHost); for (int i=0; i<N; i++) { printf("%d\t%f\t%f\t= %f\n", i, a_h[i], b_h[i], c_h[i]); } free(a_h); free(b_h); free(c_h); cudaFree(a_d); cudaFree(b_d); cudaFree(c_d); }
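Neither side of this vector-add pair checks any return codes, and the translation itself is purely mechanical (cuda.h to hip/hip_runtime.h, cudaMalloc/cudaMemcpy/cudaFree to their hip equivalents, the triple-chevron launch to hipLaunchKernelGGL). A small checking macro in the spirit of the gpuErr/H_ERR helpers used by the first file in this section, illustrative rather than taken from the original, would catch allocation and copy failures early:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                              \
    do {                                                              \
        cudaError_t err_ = (call);                                    \
        if (err_ != cudaSuccess) {                                    \
            fprintf(stderr, "CUDA error %s at %s:%d\n",               \
                    cudaGetErrorString(err_), __FILE__, __LINE__);    \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

// Usage: CUDA_CHECK(cudaMalloc((void **)&a_d, size));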
e0331026f828fdcc22efce2e23b88c740b3b9897.hip
// !!! This is a file automatically generated by hipify!!! /* * Triangle counter with workload balancing * * @author: Manish Jain * @author: Vashishtha Adtani */ #include <iostream> #include <string> #include <sstream> #include <algorithm> #include <stdlib.h> #include <math.h> #include <stdio.h> #include <vector> #include <thrust/scan.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <fstream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <driver_functions.h> #include "cudaTriangleCounter.h" #define BLOCK_SIZE 32 struct GlobalConstants { int *NodeList; int *ListLen; int numNodes; int numEdges; }; __constant__ GlobalConstants cuConstCounterParams; void CudaTriangleCounter::setup() { int deviceCount = 0; std::string name; hipError_t err = hipGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Initializing CUDA for CountingTriangles\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { hipDeviceProp_t deviceProps; hipGetDeviceProperties(&deviceProps, i); name = deviceProps.name; printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } // printf("---------------------------------------------------------\n"); // By this time the graph should be loaded. Copying graph to // data structures into device memory so that it is accessible to // CUDA kernels // hipMalloc(&cudaDeviceListLen, sizeof(int ) * numNodes); hipMemcpy(cudaDeviceListLen, list_len, sizeof(int) * numNodes, hipMemcpyHostToDevice); hipMalloc((void **)&cudaDeviceNodeList, node_list_size * sizeof(int)); hipMemcpy(cudaDeviceNodeList, node_list, sizeof(int) * node_list_size, hipMemcpyHostToDevice); GlobalConstants params; params.ListLen = cudaDeviceListLen; params.NodeList = cudaDeviceNodeList; params.numNodes = numNodes; params.numEdges = numEdges; hipMemcpyToSymbol(cuConstCounterParams, &params, sizeof(GlobalConstants)); } CudaTriangleCounter::CudaTriangleCounter(char *fileName) { clock_t start, diff, malloc_diff; int node, edge_id, temp = 0; int total_nodes = 0; int total_edges = 0; int msec; std::string line; std::ifstream myfile; myfile.open(fileName); std::string token; if (strstr(fileName,"new_orkut") != NULL) { printf("This is the NEW_ORKUT FILE **\n"); total_nodes = 3072600; total_edges = 117185083 + 1; } else { std::getline(myfile,line); std::stringstream lineStream(line); while (lineStream >> token) { if (temp == 0) { total_nodes = std::stoi(token, NULL, 10) + 1; } else if (temp == 1) { total_edges = std::stoi(token, NULL, 10) + 1; } else { printf("!!!!!!!!!!!! 
TEMP IS %d\n ", temp); break; } temp++; } } start = clock(); numNodes = total_nodes; node_list_size = total_edges * 2; numEdges = total_edges; printf("total_nodes %d\n", total_nodes); printf("node_list_size %d\n", node_list_size); printf("numEdges %d\n", numEdges); list_len = (int *)calloc(total_nodes, sizeof(int)); start_addr = (int *)calloc(total_nodes, sizeof(int)); node_list = (int *)calloc(node_list_size, sizeof(int)); malloc_diff = clock() - start; msec = malloc_diff * 1000 / CLOCKS_PER_SEC; printf("memory allocated ......\n"); node = 1; temp = 1; int neighbors; while(std::getline(myfile, line)) { neighbors = 0; std::stringstream lineStream(line); std::string token; while(lineStream >> token) { edge_id = std::stoi(token, NULL, 10); if (edge_id > node) { node_list[temp++] = edge_id; neighbors++; } } list_len[node] = neighbors; node++; } printf("graph created......\n"); diff = clock() - start; msec = diff * 1000 / CLOCKS_PER_SEC; printf("time taken %d seconds %d milliseconds\n", msec/1000, msec%1000); myfile.close(); } CudaTriangleCounter::~CudaTriangleCounter() { free(node_list); free(list_len); } /* * Kernel to count number of triangles formed by a single edge. And store the count * in an array on which we will run reduction later to find total number of triangles * in the given graph. */ __global__ void countTriangleKernel(int *countArray, edge_tuple_t *compressed_list, int *start_addr, int num) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i >= num) { return; } if (i == 0) { countArray[i] = 0; return; } int j = 0, k = 0, count=0; int *node_list = cuConstCounterParams.NodeList; int *list_len = cuConstCounterParams.ListLen; edge_tuple_t *edgeList = compressed_list; int u = edgeList[i].u; int v = edgeList[i].v; /* Fetching neigbour vertices from the node list */ int *list1 = node_list + start_addr[u-1] + 1; int len1 = list_len[u]; int *list2 = node_list + start_addr[v-1] + 1; int len2 = list_len[v]; /* * Traversing both lists to find the common nodes. 
Each common node * will be counted as a triangle */ while ( j < len1 && k < len2) { if (list1[j] == list2[k]) { count++; j++; k++; } else if (list1[j] < list2[k]) { j++; } else { k++; } } countArray[i] = count; } /* * Creating data structure which stores all the edges */ __global__ void createEdgeList(edge_tuple_t *edge_list, int *start_addr) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i >= cuConstCounterParams.numNodes) { return; } if (i == 0) { return; } int *node_list = cuConstCounterParams.NodeList; int *list_len = cuConstCounterParams.ListLen; int start_index = start_addr[i-1] + 1; int *list = node_list + start_addr[i-1] + 1; int len = list_len[i]; for (int j=0; j<len; j++) { edge_list[start_index].u = i; edge_list[start_index].v = list[j]; start_index++; } } #define THRESHOLD 50000 __global__ void segregateList(edge_tuple_t *edge_list, int *small_edge, int *large_edge) { int i = blockIdx.x * blockDim.x + threadIdx.x; int *list_len = cuConstCounterParams.ListLen; if ( i >= cuConstCounterParams.numEdges) { return; } if (i == 0) { large_edge[i] = 0; small_edge[i] = 0; return; } int u = edge_list[i].u; int v = edge_list[i].v; if ((list_len[u] > THRESHOLD) || (list_len[v] > THRESHOLD)) { large_edge[i] = 1; small_edge[i] = 0; } else { large_edge[i] = 0; small_edge[i] = 1; } } __global__ void createSmallList(edge_tuple_t *edge_list, edge_tuple_t *small_edge_list, int *small_edge) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i >= (cuConstCounterParams.numEdges)) { return; } if (small_edge[i] != small_edge[i+1]) { int index = small_edge[i]; small_edge_list[index] = edge_list[i]; } } __global__ void createLargeList(edge_tuple_t *edge_list, edge_tuple_t *large_edge_list, int *large_edge) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i >= (cuConstCounterParams.numEdges-1)) { return; } if (large_edge[i] != large_edge[i+1]) { int index = large_edge[i]; large_edge_list[index] = edge_list[i]; } } /* * Counts the number of triangles in the given graph. We first find out the * starting address of each list where list stores the neighbours of particular * node. We then create the list of all edges from the given nodes and their * neighbours. 
*/ void CudaTriangleCounter::countTriangles() { dim3 blockdim = BLOCK_SIZE; dim3 griddim = (numEdges + BLOCK_SIZE)/BLOCK_SIZE; dim3 griddim1 = (numNodes + BLOCK_SIZE)/BLOCK_SIZE; int count; edge_tuple_t *edge_list, *small_edge_list, *large_edge_list; int *small_edge, *large_edge; int num_small_edges, num_large_edges; int *temp; /* Calculating start address of each neighbour list */ hipMalloc(&cudaDeviceStartAddr, sizeof(int ) * numNodes); thrust::device_ptr<int> dev_ptr1(cudaDeviceListLen); thrust::device_ptr<int> output_ptr(cudaDeviceStartAddr); thrust::inclusive_scan(dev_ptr1, dev_ptr1 + numNodes, output_ptr); /* Create a list of all edges present in the graph */ hipMalloc((void **)&edge_list, numEdges * sizeof(edge_tuple_t)); hipLaunchKernelGGL(( createEdgeList), dim3(griddim1), dim3(blockdim), 0, 0, edge_list, cudaDeviceStartAddr); hipDeviceSynchronize(); hipMalloc(&small_edge, sizeof(int ) * numEdges); hipMalloc(&large_edge, sizeof(int ) * numEdges); hipLaunchKernelGGL(( segregateList), dim3(griddim), dim3(blockdim), 0, 0, edge_list, small_edge, large_edge); hipDeviceSynchronize(); thrust::device_ptr<int> small_ptr(small_edge); thrust::inclusive_scan(small_ptr, small_ptr + numEdges, small_ptr); thrust::device_ptr<int> large_ptr(large_edge); thrust::inclusive_scan(large_ptr, large_ptr + numEdges, large_ptr); temp = (int *) malloc (numEdges * sizeof(int)); hipMemcpy(temp, small_edge, sizeof(int) * numEdges, hipMemcpyDeviceToHost); hipMemcpy(&num_small_edges, &small_edge[numEdges-1], sizeof(int), hipMemcpyDeviceToHost); hipMemcpy(&num_large_edges, &large_edge[numEdges-1], sizeof(int), hipMemcpyDeviceToHost); hipMalloc((void **)&small_edge_list, ( 1 +num_small_edges) * sizeof(edge_tuple_t)); hipMalloc((void **)&large_edge_list, ( 1 + num_large_edges) * sizeof(edge_tuple_t)); hipLaunchKernelGGL(( createSmallList), dim3(griddim), dim3(blockdim), 0, 0, edge_list, small_edge_list, small_edge); hipDeviceSynchronize(); hipLaunchKernelGGL(( createLargeList), dim3(griddim), dim3(blockdim), 0, 0, edge_list, large_edge_list, large_edge); hipDeviceSynchronize(); int *countArraySmall, *countArrayLarge; hipMalloc((void **)&countArraySmall, (2 + num_small_edges) * sizeof(int)); hipMalloc((void **)&countArrayLarge, (2 + num_large_edges) * sizeof(int)); dim3 griddim2 = (num_small_edges + 1 + BLOCK_SIZE)/BLOCK_SIZE; /* Applying intersection rule on all small edges to find number of triangles */ hipLaunchKernelGGL(( countTriangleKernel), dim3(griddim2), dim3(blockdim), 0, 0, countArraySmall, small_edge_list, cudaDeviceStartAddr, num_small_edges+1); hipDeviceSynchronize(); thrust::device_ptr<int> dev_ptr2(countArraySmall); thrust::inclusive_scan(dev_ptr2, dev_ptr2 + num_small_edges+1, dev_ptr2); int count1, count2; hipMemcpy(&count1, &countArraySmall[num_small_edges], sizeof(int), hipMemcpyDeviceToHost); dim3 griddim3 = (num_large_edges + 1 + BLOCK_SIZE)/BLOCK_SIZE; /* Applying intersection rule on all large edges to find number of triangles */ hipLaunchKernelGGL(( countTriangleKernel), dim3(griddim3), dim3(blockdim), 0, 0, countArrayLarge, large_edge_list, cudaDeviceStartAddr, num_large_edges+1); hipDeviceSynchronize(); thrust::device_ptr<int> dev_ptr3(countArrayLarge); thrust::inclusive_scan(dev_ptr3, dev_ptr3 + num_large_edges + 1, dev_ptr3); hipMemcpy(&count2, &countArrayLarge[num_large_edges], sizeof(int), hipMemcpyDeviceToHost); count = count1 + count2; printf("count %d\n", count); }
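/*
 * A minimal editorial sketch, not part of the original file pair: the
 * countTriangleKernel above counts triangles by walking the two sorted
 * neighbour lists of an edge (u, v) with a two-pointer merge, so every
 * common neighbour w closes one triangle (u, v, w). The standalone host
 * version below shows the same rule on a toy graph; the name count_common
 * and the sample lists are illustrative.
 */
#include <cstdio>

// Counts vertices present in both sorted adjacency lists.
int count_common(const int *a, int len_a, const int *b, int len_b) {
    int i = 0, j = 0, count = 0;
    while (i < len_a && j < len_b) {
        if (a[i] == b[j])     { count++; i++; j++; }
        else if (a[i] < b[j]) { i++; }
        else                  { j++; }
    }
    return count;
}

int main() {
    // Toy neighbour lists, sorted ascending as the graph loader above keeps them.
    int nu[] = {3, 5, 7, 9};
    int nv[] = {5, 6, 7, 10};
    printf("triangles on this edge: %d\n", count_common(nu, 4, nv, 4)); // prints 2
    return 0;
}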
e0331026f828fdcc22efce2e23b88c740b3b9897.cu
/* * Triangle counter with workload balancing * * @author: Manish Jain * @author: Vashishtha Adtani */ #include <iostream> #include <string> #include <sstream> #include <algorithm> #include <stdlib.h> #include <math.h> #include <stdio.h> #include <vector> #include <thrust/scan.h> #include <thrust/device_ptr.h> #include <thrust/execution_policy.h> #include <fstream> #include <cuda.h> #include <cuda_runtime.h> #include <driver_functions.h> #include "cudaTriangleCounter.h" #define BLOCK_SIZE 32 struct GlobalConstants { int *NodeList; int *ListLen; int numNodes; int numEdges; }; __constant__ GlobalConstants cuConstCounterParams; void CudaTriangleCounter::setup() { int deviceCount = 0; std::string name; cudaError_t err = cudaGetDeviceCount(&deviceCount); printf("---------------------------------------------------------\n"); printf("Initializing CUDA for CountingTriangles\n"); printf("Found %d CUDA devices\n", deviceCount); for (int i=0; i<deviceCount; i++) { cudaDeviceProp deviceProps; cudaGetDeviceProperties(&deviceProps, i); name = deviceProps.name; printf("Device %d: %s\n", i, deviceProps.name); printf(" SMs: %d\n", deviceProps.multiProcessorCount); printf(" Global mem: %.0f MB\n", static_cast<float>(deviceProps.totalGlobalMem) / (1024 * 1024)); printf(" CUDA Cap: %d.%d\n", deviceProps.major, deviceProps.minor); } // printf("---------------------------------------------------------\n"); // By this time the graph should be loaded. Copying graph to // data structures into device memory so that it is accessible to // CUDA kernels // cudaMalloc(&cudaDeviceListLen, sizeof(int ) * numNodes); cudaMemcpy(cudaDeviceListLen, list_len, sizeof(int) * numNodes, cudaMemcpyHostToDevice); cudaMalloc((void **)&cudaDeviceNodeList, node_list_size * sizeof(int)); cudaMemcpy(cudaDeviceNodeList, node_list, sizeof(int) * node_list_size, cudaMemcpyHostToDevice); GlobalConstants params; params.ListLen = cudaDeviceListLen; params.NodeList = cudaDeviceNodeList; params.numNodes = numNodes; params.numEdges = numEdges; cudaMemcpyToSymbol(cuConstCounterParams, &params, sizeof(GlobalConstants)); } CudaTriangleCounter::CudaTriangleCounter(char *fileName) { clock_t start, diff, malloc_diff; int node, edge_id, temp = 0; int total_nodes = 0; int total_edges = 0; int msec; std::string line; std::ifstream myfile; myfile.open(fileName); std::string token; if (strstr(fileName,"new_orkut") != NULL) { printf("This is the NEW_ORKUT FILE **\n"); total_nodes = 3072600; total_edges = 117185083 + 1; } else { std::getline(myfile,line); std::stringstream lineStream(line); while (lineStream >> token) { if (temp == 0) { total_nodes = std::stoi(token, NULL, 10) + 1; } else if (temp == 1) { total_edges = std::stoi(token, NULL, 10) + 1; } else { printf("!!!!!!!!!!!! 
TEMP IS %d\n ", temp); break; } temp++; } } start = clock(); numNodes = total_nodes; node_list_size = total_edges * 2; numEdges = total_edges; printf("total_nodes %d\n", total_nodes); printf("node_list_size %d\n", node_list_size); printf("numEdges %d\n", numEdges); list_len = (int *)calloc(total_nodes, sizeof(int)); start_addr = (int *)calloc(total_nodes, sizeof(int)); node_list = (int *)calloc(node_list_size, sizeof(int)); malloc_diff = clock() - start; msec = malloc_diff * 1000 / CLOCKS_PER_SEC; printf("memory allocated ......\n"); node = 1; temp = 1; int neighbors; while(std::getline(myfile, line)) { neighbors = 0; std::stringstream lineStream(line); std::string token; while(lineStream >> token) { edge_id = std::stoi(token, NULL, 10); if (edge_id > node) { node_list[temp++] = edge_id; neighbors++; } } list_len[node] = neighbors; node++; } printf("graph created......\n"); diff = clock() - start; msec = diff * 1000 / CLOCKS_PER_SEC; printf("time taken %d seconds %d milliseconds\n", msec/1000, msec%1000); myfile.close(); } CudaTriangleCounter::~CudaTriangleCounter() { free(node_list); free(list_len); } /* * Kernel to count number of triangles formed by a single edge. And store the count * in an array on which we will run reduction later to find total number of triangles * in the given graph. */ __global__ void countTriangleKernel(int *countArray, edge_tuple_t *compressed_list, int *start_addr, int num) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i >= num) { return; } if (i == 0) { countArray[i] = 0; return; } int j = 0, k = 0, count=0; int *node_list = cuConstCounterParams.NodeList; int *list_len = cuConstCounterParams.ListLen; edge_tuple_t *edgeList = compressed_list; int u = edgeList[i].u; int v = edgeList[i].v; /* Fetching neigbour vertices from the node list */ int *list1 = node_list + start_addr[u-1] + 1; int len1 = list_len[u]; int *list2 = node_list + start_addr[v-1] + 1; int len2 = list_len[v]; /* * Traversing both lists to find the common nodes. 
Each common node * will be counted as a triangle */ while ( j < len1 && k < len2) { if (list1[j] == list2[k]) { count++; j++; k++; } else if (list1[j] < list2[k]) { j++; } else { k++; } } countArray[i] = count; } /* * Creating data structure which stores all the edges */ __global__ void createEdgeList(edge_tuple_t *edge_list, int *start_addr) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i >= cuConstCounterParams.numNodes) { return; } if (i == 0) { return; } int *node_list = cuConstCounterParams.NodeList; int *list_len = cuConstCounterParams.ListLen; int start_index = start_addr[i-1] + 1; int *list = node_list + start_addr[i-1] + 1; int len = list_len[i]; for (int j=0; j<len; j++) { edge_list[start_index].u = i; edge_list[start_index].v = list[j]; start_index++; } } #define THRESHOLD 50000 __global__ void segregateList(edge_tuple_t *edge_list, int *small_edge, int *large_edge) { int i = blockIdx.x * blockDim.x + threadIdx.x; int *list_len = cuConstCounterParams.ListLen; if ( i >= cuConstCounterParams.numEdges) { return; } if (i == 0) { large_edge[i] = 0; small_edge[i] = 0; return; } int u = edge_list[i].u; int v = edge_list[i].v; if ((list_len[u] > THRESHOLD) || (list_len[v] > THRESHOLD)) { large_edge[i] = 1; small_edge[i] = 0; } else { large_edge[i] = 0; small_edge[i] = 1; } } __global__ void createSmallList(edge_tuple_t *edge_list, edge_tuple_t *small_edge_list, int *small_edge) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i >= (cuConstCounterParams.numEdges)) { return; } if (small_edge[i] != small_edge[i+1]) { int index = small_edge[i]; small_edge_list[index] = edge_list[i]; } } __global__ void createLargeList(edge_tuple_t *edge_list, edge_tuple_t *large_edge_list, int *large_edge) { int i = blockIdx.x * blockDim.x + threadIdx.x; if ( i >= (cuConstCounterParams.numEdges-1)) { return; } if (large_edge[i] != large_edge[i+1]) { int index = large_edge[i]; large_edge_list[index] = edge_list[i]; } } /* * Counts the number of triangles in the given graph. We first find out the * starting address of each list where list stores the neighbours of particular * node. We then create the list of all edges from the given nodes and their * neighbours. 
*/ void CudaTriangleCounter::countTriangles() { dim3 blockdim = BLOCK_SIZE; dim3 griddim = (numEdges + BLOCK_SIZE)/BLOCK_SIZE; dim3 griddim1 = (numNodes + BLOCK_SIZE)/BLOCK_SIZE; int count; edge_tuple_t *edge_list, *small_edge_list, *large_edge_list; int *small_edge, *large_edge; int num_small_edges, num_large_edges; int *temp; /* Calculating start address of each neighbour list */ cudaMalloc(&cudaDeviceStartAddr, sizeof(int ) * numNodes); thrust::device_ptr<int> dev_ptr1(cudaDeviceListLen); thrust::device_ptr<int> output_ptr(cudaDeviceStartAddr); thrust::inclusive_scan(dev_ptr1, dev_ptr1 + numNodes, output_ptr); /* Create a list of all edges present in the graph */ cudaMalloc((void **)&edge_list, numEdges * sizeof(edge_tuple_t)); createEdgeList<<<griddim1, blockdim>>>(edge_list, cudaDeviceStartAddr); cudaDeviceSynchronize(); cudaMalloc(&small_edge, sizeof(int ) * numEdges); cudaMalloc(&large_edge, sizeof(int ) * numEdges); segregateList<<<griddim, blockdim>>>(edge_list, small_edge, large_edge); cudaDeviceSynchronize(); thrust::device_ptr<int> small_ptr(small_edge); thrust::inclusive_scan(small_ptr, small_ptr + numEdges, small_ptr); thrust::device_ptr<int> large_ptr(large_edge); thrust::inclusive_scan(large_ptr, large_ptr + numEdges, large_ptr); temp = (int *) malloc (numEdges * sizeof(int)); cudaMemcpy(temp, small_edge, sizeof(int) * numEdges, cudaMemcpyDeviceToHost); cudaMemcpy(&num_small_edges, &small_edge[numEdges-1], sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpy(&num_large_edges, &large_edge[numEdges-1], sizeof(int), cudaMemcpyDeviceToHost); cudaMalloc((void **)&small_edge_list, ( 1 +num_small_edges) * sizeof(edge_tuple_t)); cudaMalloc((void **)&large_edge_list, ( 1 + num_large_edges) * sizeof(edge_tuple_t)); createSmallList<<<griddim, blockdim>>>(edge_list, small_edge_list, small_edge); cudaDeviceSynchronize(); createLargeList<<<griddim, blockdim>>>(edge_list, large_edge_list, large_edge); cudaDeviceSynchronize(); int *countArraySmall, *countArrayLarge; cudaMalloc((void **)&countArraySmall, (2 + num_small_edges) * sizeof(int)); cudaMalloc((void **)&countArrayLarge, (2 + num_large_edges) * sizeof(int)); dim3 griddim2 = (num_small_edges + 1 + BLOCK_SIZE)/BLOCK_SIZE; /* Applying intersection rule on all small edges to find number of triangles */ countTriangleKernel<<<griddim2, blockdim>>>(countArraySmall, small_edge_list, cudaDeviceStartAddr, num_small_edges+1); cudaDeviceSynchronize(); thrust::device_ptr<int> dev_ptr2(countArraySmall); thrust::inclusive_scan(dev_ptr2, dev_ptr2 + num_small_edges+1, dev_ptr2); int count1, count2; cudaMemcpy(&count1, &countArraySmall[num_small_edges], sizeof(int), cudaMemcpyDeviceToHost); dim3 griddim3 = (num_large_edges + 1 + BLOCK_SIZE)/BLOCK_SIZE; /* Applying intersection rule on all large edges to find number of triangles */ countTriangleKernel<<<griddim3, blockdim>>>(countArrayLarge, large_edge_list, cudaDeviceStartAddr, num_large_edges+1); cudaDeviceSynchronize(); thrust::device_ptr<int> dev_ptr3(countArrayLarge); thrust::inclusive_scan(dev_ptr3, dev_ptr3 + num_large_edges + 1, dev_ptr3); cudaMemcpy(&count2, &countArrayLarge[num_large_edges], sizeof(int), cudaMemcpyDeviceToHost); count = count1 + count2; printf("count %d\n", count); }
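/*
 * A minimal editorial sketch, not part of the original file pair: the
 * countTriangles driver above separates edges into "small" and "large"
 * lists by writing a 0/1 flag per edge and running thrust::inclusive_scan
 * over the flags, so the scanned value doubles as the output slot for
 * stream compaction. The example below only shows what the scan produces;
 * the flag values are made up.
 */
#include <cstdio>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>

int main() {
    // 1 marks an element that survives compaction, 0 marks one that does not.
    int h_flags[8] = {0, 1, 1, 0, 1, 0, 0, 1};
    thrust::device_vector<int> flags(h_flags, h_flags + 8);

    // After the inclusive scan, a position where the value differs from its
    // neighbour identifies a kept element, and the scanned value tells the
    // compaction kernels (createSmallList / createLargeList above) where to write.
    thrust::inclusive_scan(flags.begin(), flags.end(), flags.begin());

    thrust::host_vector<int> out = flags;
    for (int i = 0; i < 8; i++) printf("%d ", out[i]);  // prints: 0 1 2 2 3 3 3 4
    printf("\n");
    return 0;
}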
eb12d96e7738a62fef5cd9e7f17e21811341fa65.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <assert.h> #include <sys/time.h> #include <iostream> #include <string> #include <iostream> #include <fstream> #include <vector> #include "utility.h" #include "param.h" #include "kernel_hip.cuh" #include "data.h" // #define layer_timing using namespace std; int main() { int dev = 0; hipSetDevice(dev); const unsigned batch = 8; const unsigned output_size = 1000; const unsigned image_height = 224; const unsigned image_width = 224; const unsigned image_channel = 3; //=============== Get Input and Label ================= float* images = (float*)malloc(batch*image_height*image_width*image_channel*sizeof(float)); unsigned* image_labels = (unsigned*)malloc(batch*sizeof(unsigned)); uin32* lowBit_image_gpu = images_quantization(images, batch, image_height, image_width, image_channel); //================ Get Weight ================= FILE* config_file = fopen("./resnet_imagenet.csv","r"); //================ Set Network ================= //Layer-0 Conv128LayerParam* bconv1 = new Conv128LayerParam("Conv1", image_height, image_width, 7, 7, 3, 64, batch,4,4,true,1,1,true); //save residual Conv128LayerParam* bconv1_gpu = bconv1->initialize(config_file, lowBit_image_gpu); //Layer-1, basic-block-1, conv1 Conv128LayerParam* l1b1c1 = new Conv128LayerParam("L1B1C1", bconv1->output_height, bconv1->output_width, 3, 3, 64, 64, batch); Conv128LayerParam* l1b1c1_gpu = l1b1c1->initialize(config_file, bconv1->get_output_gpu()); //Layer-1, basic-block-1, conv2 Conv128LayerParam* l1b1c2 = new Conv128LayerParam("L1B1C2", l1b1c1->output_height, l1b1c1->output_width, 3, 3, 64, 64, batch,1,1,true,1,1,false,true,true,64); Conv128LayerParam* l1b1c2_gpu = l1b1c2->initialize(config_file, l1b1c1->get_output_gpu(), bconv1->get_output_residual_gpu()); //Layer-1, basic-block-2, conv1 Conv128LayerParam* l1b2c1 = new Conv128LayerParam("L1B2C1", l1b1c2->output_height, l1b1c2->output_width, 3, 3, 64, 64, batch); Conv128LayerParam* l1b2c1_gpu = l1b2c1->initialize(config_file, l1b1c2->get_output_gpu()); //Layer-1, basic-block-2, conv2 Conv128LayerParam* l1b2c2 = new Conv128LayerParam("L1B2C2", l1b2c1->output_height, l1b2c1->output_width, 3, 3, 64, 64, batch,1,1,true,1,1,false,true,true,128); Conv128LayerParam* l1b2c2_gpu = l1b2c2->initialize(config_file, l1b2c1->get_output_gpu(), l1b1c2->get_output_residual_gpu()); //============= //Layer-2, basic-block-1, conv1 Conv128LayerParam* l2b1c1 = new Conv128LayerParam("L2B1C1", l1b2c2->output_height, l1b2c2->output_width, 3, 3, 64, 128, batch, 2, 2); Conv128LayerParam* l2b1c1_gpu = l2b1c1->initialize(config_file, l1b2c2->get_output_gpu()); //Layer-2, basic-block-1, conv2 Conv128LayerParam* l2b1c2 = new Conv128LayerParam("L2B1C2", l2b1c1->output_height, l2b1c1->output_width, 3, 3, 128, 128, batch,1,1,true,1,1,false,true,true,128,true); Conv128LayerParam* l2b1c2_gpu = l2b1c2->initialize(config_file, l2b1c1->get_output_gpu(), l1b2c2->get_output_residual_gpu()); //Layer-2, basic-block-2, conv1 Conv128LayerParam* l2b2c1 = new Conv128LayerParam("L2B2C1", l2b1c2->output_height, l2b1c2->output_width, 3, 3, 128, 128, batch, 1, 1); Conv128LayerParam* l2b2c1_gpu = l2b2c1->initialize(config_file, l2b1c2->get_output_gpu()); //Layer-2, basic-block-2, conv2 Conv128LayerParam* l2b2c2 = new Conv128LayerParam("L2B2C2", l2b2c1->output_height, l2b2c1->output_width, 3, 3, 128, 128, batch,1,1,true,1,1,false,true,true,128); Conv128LayerParam* l2b2c2_gpu = l2b2c2->initialize(config_file, 
l2b2c1->get_output_gpu(), l2b1c2->get_output_residual_gpu()); //============= //Layer-3, basic-block-1, conv1 Conv128LayerParam* l3b1c1 = new Conv128LayerParam("L3B1C1", l2b2c2->output_height, l2b2c2->output_width, 3, 3, 128, 256, batch, 2, 2); Conv128LayerParam* l3b1c1_gpu = l3b1c1->initialize(config_file, l2b2c2->get_output_gpu()); //Layer-3, basic-block-1, conv2 Conv128LayerParam* l3b1c2 = new Conv128LayerParam("L3B1C2", l3b1c1->output_height, l3b1c1->output_width, 3, 3, 256, 256, batch,1,1,true,1,1,false,true,true,128,true); Conv128LayerParam* l3b1c2_gpu = l3b1c2->initialize(config_file, l3b1c1->get_output_gpu(), l2b2c2->get_output_residual_gpu()); //Layer-3, basic-block-2, conv1 Conv128LayerParam* l3b2c1 = new Conv128LayerParam("L3B2C1", l3b1c2->output_height, l3b1c2->output_width, 3, 3, 256, 256, batch, 1, 1); Conv128LayerParam* l3b2c1_gpu = l3b2c1->initialize(config_file, l3b1c2->get_output_gpu()); //Layer-3, basic-block-2, conv2 Conv128LayerParam* l3b2c2 = new Conv128LayerParam("L3B2C2", l3b2c1->output_height, l3b2c1->output_width, 3, 3, 256, 256, batch,1,1,true,1,1,false,true,true,256); Conv128LayerParam* l3b2c2_gpu = l3b2c2->initialize(config_file, l3b2c1->get_output_gpu(), l3b1c2->get_output_residual_gpu()); //============= //Layer-4, basic-block-1, conv1 Conv128LayerParam* l4b1c1 = new Conv128LayerParam("L4B1C1", l3b2c2->output_height, l3b2c2->output_width, 3, 3, 256, 512, batch, 2, 2); Conv128LayerParam* l4b1c1_gpu = l4b1c1->initialize(config_file, l3b2c2->get_output_gpu()); //Layer-4, basic-block-1, conv2 Conv128LayerParam* l4b1c2 = new Conv128LayerParam("L4B1C2", l4b1c1->output_height, l4b1c1->output_width, 3, 3, 512, 512, batch,1,1,true,1,1,false,true,true,256,true); Conv128LayerParam* l4b1c2_gpu = l4b1c2->initialize(config_file, l4b1c1->get_output_gpu(), l3b2c2->get_output_residual_gpu()); //Layer-4, basic-block-2, conv1 Conv128LayerParam* l4b2c1 = new Conv128LayerParam("L4B2C1", l4b1c2->output_height, l4b1c2->output_width, 3, 3, 512, 512, batch, 1, 1); Conv128LayerParam* l4b2c1_gpu = l4b2c1->initialize(config_file, l4b1c2->get_output_gpu()); //Layer-4, basic-block-2, conv2 Conv128LayerParam* l4b2c2 = new Conv128LayerParam("L4B2C2", l4b2c1->output_height, l4b2c1->output_width, 3, 3, 512, 512, batch,1,1,true,1,1,true,false,true,512); Conv128LayerParam* l4b2c2_gpu = l4b2c2->initialize(config_file, l4b2c1->get_output_gpu(), l4b1c2->get_output_residual_gpu()); //============= //Layer-5 Fc128LayerParam* bfc1 = new Fc128LayerParam("Fc1", batch, (l4b2c2->output_height) *(l4b2c2->output_width)*512, 512); Fc128LayerParam* bfc1_gpu = bfc1->initialize(config_file, l4b2c2->get_output_gpu()); //Out Layer Out128LayerParam* bout = new Out128LayerParam("Fout", batch, 512, output_size); Out128LayerParam* bout_gpu = bout->initialize(config_file, bfc1->get_output_gpu()); //================ Setup Kernel ================= hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); int numThreads = 512; int numBlocks = deviceProp.multiProcessorCount;// 12; int shared_memory = 65536; hipFuncSetAttribute(Conv_new_global, hipFuncAttributeMaxDynamicSharedMemorySize, shared_memory); hipFuncSetAttribute(FC_new_global, hipFuncAttributeMaxDynamicSharedMemorySize, shared_memory); hipFuncSetAttribute(Output_new_global, hipFuncAttributeMaxDynamicSharedMemorySize, shared_memory); std::clock_t c_start = std::clock(); #ifdef layer_timing std::vector<std::clock_t> layer_time; std::clock_t t1 = std::clock(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, 
bconv1_gpu); hipDeviceSynchronize(); std::clock_t t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l1b1c1_gpu); hipDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l1b1c2_gpu); hipDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l1b2c1_gpu); hipDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l1b2c2_gpu); hipDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l2b1c1_gpu); hipDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l2b1c2_gpu); hipDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l2b2c1_gpu); hipDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l2b2c2_gpu); hipDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l3b1c1_gpu); hipDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l3b1c2_gpu); hipDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l3b2c1_gpu); hipDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l3b2c2_gpu); hipDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l4b1c1_gpu); hipDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l4b1c2_gpu); hipDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l4b2c1_gpu); hipDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l4b2c2_gpu); hipDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); hipLaunchKernelGGL(( FC_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, bfc1_gpu); hipDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); hipLaunchKernelGGL(( Output_new_global), dim3(numBlocks), 
dim3(numThreads), shared_memory, 0, bout_gpu); hipDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); hipError_t err = hipGetLastError(); #else hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, bconv1_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l1b1c1_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l1b1c2_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l1b2c1_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l1b2c2_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l2b1c1_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l2b1c2_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l2b2c1_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l2b2c2_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l3b1c1_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l3b1c2_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l3b2c1_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l3b2c2_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l4b1c1_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l4b1c2_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l4b2c1_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( Conv_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, l4b2c2_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( FC_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, bfc1_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( Output_new_global), dim3(numBlocks), dim3(numThreads), shared_memory, 0, bout_gpu); hipDeviceSynchronize(); hipError_t err = hipGetLastError(); #endif std::clock_t c_end = std::clock(); float time_elapsed_ms = 1000.0f * (c_end-c_start) / CLOCKS_PER_SEC; printf("\n==============\nResNet (ms): %.3f\n", time_elapsed_ms); #ifdef layer_timing for (int idx = 0; idx < layer_time.size(); idx++) { float time_elapsed_ms = 1000.0f * layer_time[idx] / CLOCKS_PER_SEC; printf("ResNet18 Layer-%d (ms): %.3f\n", idx, time_elapsed_ms); } #endif delete bconv1; delete l1b1c1; delete l1b1c2; delete l1b2c1; delete l1b2c2; delete l2b1c1; delete l2b1c2; delete l2b2c1; delete l2b2c2; delete l3b1c1; delete l3b1c2; delete l3b2c1; delete l3b2c2; delete l4b1c1; delete l4b1c2; delete l4b2c1; delete l4b2c2; delete bfc1; delete bout; return 0; }
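/*
 * A minimal editorial sketch, not part of the original file pair: the ResNet
 * driver above times each layer with std::clock() around a device
 * synchronize. A common alternative is event-based timing on the device
 * timeline; the plain-CUDA snippet below shows that pattern on a placeholder
 * kernel (dummy_kernel is a made-up name, not from the original source).
 */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_kernel() {}

int main() {
    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    cudaEventRecord(start);          // enqueue start marker on the stream
    dummy_kernel<<<1, 1>>>();        // the launch being measured
    cudaEventRecord(stop);           // enqueue stop marker
    cudaEventSynchronize(stop);      // wait until the stop marker completes

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("kernel time: %.3f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    return 0;
}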
eb12d96e7738a62fef5cd9e7f17e21811341fa65.cu
#include <stdio.h> #include <assert.h> #include <sys/time.h> #include <iostream> #include <string> #include <iostream> #include <fstream> #include <vector> #include "utility.h" #include "param.h" #include "kernel.cuh" #include "data.h" // #define layer_timing using namespace std; int main() { int dev = 0; cudaSetDevice(dev); const unsigned batch = 8; const unsigned output_size = 1000; const unsigned image_height = 224; const unsigned image_width = 224; const unsigned image_channel = 3; //=============== Get Input and Label ================= float* images = (float*)malloc(batch*image_height*image_width*image_channel*sizeof(float)); unsigned* image_labels = (unsigned*)malloc(batch*sizeof(unsigned)); uin32* lowBit_image_gpu = images_quantization(images, batch, image_height, image_width, image_channel); //================ Get Weight ================= FILE* config_file = fopen("./resnet_imagenet.csv","r"); //================ Set Network ================= //Layer-0 Conv128LayerParam* bconv1 = new Conv128LayerParam("Conv1", image_height, image_width, 7, 7, 3, 64, batch,4,4,true,1,1,true); //save residual Conv128LayerParam* bconv1_gpu = bconv1->initialize(config_file, lowBit_image_gpu); //Layer-1, basic-block-1, conv1 Conv128LayerParam* l1b1c1 = new Conv128LayerParam("L1B1C1", bconv1->output_height, bconv1->output_width, 3, 3, 64, 64, batch); Conv128LayerParam* l1b1c1_gpu = l1b1c1->initialize(config_file, bconv1->get_output_gpu()); //Layer-1, basic-block-1, conv2 Conv128LayerParam* l1b1c2 = new Conv128LayerParam("L1B1C2", l1b1c1->output_height, l1b1c1->output_width, 3, 3, 64, 64, batch,1,1,true,1,1,false,true,true,64); Conv128LayerParam* l1b1c2_gpu = l1b1c2->initialize(config_file, l1b1c1->get_output_gpu(), bconv1->get_output_residual_gpu()); //Layer-1, basic-block-2, conv1 Conv128LayerParam* l1b2c1 = new Conv128LayerParam("L1B2C1", l1b1c2->output_height, l1b1c2->output_width, 3, 3, 64, 64, batch); Conv128LayerParam* l1b2c1_gpu = l1b2c1->initialize(config_file, l1b1c2->get_output_gpu()); //Layer-1, basic-block-2, conv2 Conv128LayerParam* l1b2c2 = new Conv128LayerParam("L1B2C2", l1b2c1->output_height, l1b2c1->output_width, 3, 3, 64, 64, batch,1,1,true,1,1,false,true,true,128); Conv128LayerParam* l1b2c2_gpu = l1b2c2->initialize(config_file, l1b2c1->get_output_gpu(), l1b1c2->get_output_residual_gpu()); //============= //Layer-2, basic-block-1, conv1 Conv128LayerParam* l2b1c1 = new Conv128LayerParam("L2B1C1", l1b2c2->output_height, l1b2c2->output_width, 3, 3, 64, 128, batch, 2, 2); Conv128LayerParam* l2b1c1_gpu = l2b1c1->initialize(config_file, l1b2c2->get_output_gpu()); //Layer-2, basic-block-1, conv2 Conv128LayerParam* l2b1c2 = new Conv128LayerParam("L2B1C2", l2b1c1->output_height, l2b1c1->output_width, 3, 3, 128, 128, batch,1,1,true,1,1,false,true,true,128,true); Conv128LayerParam* l2b1c2_gpu = l2b1c2->initialize(config_file, l2b1c1->get_output_gpu(), l1b2c2->get_output_residual_gpu()); //Layer-2, basic-block-2, conv1 Conv128LayerParam* l2b2c1 = new Conv128LayerParam("L2B2C1", l2b1c2->output_height, l2b1c2->output_width, 3, 3, 128, 128, batch, 1, 1); Conv128LayerParam* l2b2c1_gpu = l2b2c1->initialize(config_file, l2b1c2->get_output_gpu()); //Layer-2, basic-block-2, conv2 Conv128LayerParam* l2b2c2 = new Conv128LayerParam("L2B2C2", l2b2c1->output_height, l2b2c1->output_width, 3, 3, 128, 128, batch,1,1,true,1,1,false,true,true,128); Conv128LayerParam* l2b2c2_gpu = l2b2c2->initialize(config_file, l2b2c1->get_output_gpu(), l2b1c2->get_output_residual_gpu()); //============= //Layer-3, basic-block-1, 
conv1 Conv128LayerParam* l3b1c1 = new Conv128LayerParam("L3B1C1", l2b2c2->output_height, l2b2c2->output_width, 3, 3, 128, 256, batch, 2, 2); Conv128LayerParam* l3b1c1_gpu = l3b1c1->initialize(config_file, l2b2c2->get_output_gpu()); //Layer-3, basic-block-1, conv2 Conv128LayerParam* l3b1c2 = new Conv128LayerParam("L3B1C2", l3b1c1->output_height, l3b1c1->output_width, 3, 3, 256, 256, batch,1,1,true,1,1,false,true,true,128,true); Conv128LayerParam* l3b1c2_gpu = l3b1c2->initialize(config_file, l3b1c1->get_output_gpu(), l2b2c2->get_output_residual_gpu()); //Layer-3, basic-block-2, conv1 Conv128LayerParam* l3b2c1 = new Conv128LayerParam("L3B2C1", l3b1c2->output_height, l3b1c2->output_width, 3, 3, 256, 256, batch, 1, 1); Conv128LayerParam* l3b2c1_gpu = l3b2c1->initialize(config_file, l3b1c2->get_output_gpu()); //Layer-3, basic-block-2, conv2 Conv128LayerParam* l3b2c2 = new Conv128LayerParam("L3B2C2", l3b2c1->output_height, l3b2c1->output_width, 3, 3, 256, 256, batch,1,1,true,1,1,false,true,true,256); Conv128LayerParam* l3b2c2_gpu = l3b2c2->initialize(config_file, l3b2c1->get_output_gpu(), l3b1c2->get_output_residual_gpu()); //============= //Layer-4, basic-block-1, conv1 Conv128LayerParam* l4b1c1 = new Conv128LayerParam("L4B1C1", l3b2c2->output_height, l3b2c2->output_width, 3, 3, 256, 512, batch, 2, 2); Conv128LayerParam* l4b1c1_gpu = l4b1c1->initialize(config_file, l3b2c2->get_output_gpu()); //Layer-4, basic-block-1, conv2 Conv128LayerParam* l4b1c2 = new Conv128LayerParam("L4B1C2", l4b1c1->output_height, l4b1c1->output_width, 3, 3, 512, 512, batch,1,1,true,1,1,false,true,true,256,true); Conv128LayerParam* l4b1c2_gpu = l4b1c2->initialize(config_file, l4b1c1->get_output_gpu(), l3b2c2->get_output_residual_gpu()); //Layer-4, basic-block-2, conv1 Conv128LayerParam* l4b2c1 = new Conv128LayerParam("L4B2C1", l4b1c2->output_height, l4b1c2->output_width, 3, 3, 512, 512, batch, 1, 1); Conv128LayerParam* l4b2c1_gpu = l4b2c1->initialize(config_file, l4b1c2->get_output_gpu()); //Layer-4, basic-block-2, conv2 Conv128LayerParam* l4b2c2 = new Conv128LayerParam("L4B2C2", l4b2c1->output_height, l4b2c1->output_width, 3, 3, 512, 512, batch,1,1,true,1,1,true,false,true,512); Conv128LayerParam* l4b2c2_gpu = l4b2c2->initialize(config_file, l4b2c1->get_output_gpu(), l4b1c2->get_output_residual_gpu()); //============= //Layer-5 Fc128LayerParam* bfc1 = new Fc128LayerParam("Fc1", batch, (l4b2c2->output_height) *(l4b2c2->output_width)*512, 512); Fc128LayerParam* bfc1_gpu = bfc1->initialize(config_file, l4b2c2->get_output_gpu()); //Out Layer Out128LayerParam* bout = new Out128LayerParam("Fout", batch, 512, output_size); Out128LayerParam* bout_gpu = bout->initialize(config_file, bfc1->get_output_gpu()); //================ Setup Kernel ================= cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); int numThreads = 512; int numBlocks = deviceProp.multiProcessorCount;// 12; int shared_memory = 65536; cudaFuncSetAttribute(Conv_new_global, cudaFuncAttributeMaxDynamicSharedMemorySize, shared_memory); cudaFuncSetAttribute(FC_new_global, cudaFuncAttributeMaxDynamicSharedMemorySize, shared_memory); cudaFuncSetAttribute(Output_new_global, cudaFuncAttributeMaxDynamicSharedMemorySize, shared_memory); std::clock_t c_start = std::clock(); #ifdef layer_timing std::vector<std::clock_t> layer_time; std::clock_t t1 = std::clock(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(bconv1_gpu); cudaDeviceSynchronize(); std::clock_t t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); 
Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l1b1c1_gpu); cudaDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l1b1c2_gpu); cudaDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l1b2c1_gpu); cudaDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l1b2c2_gpu); cudaDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l2b1c1_gpu); cudaDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l2b1c2_gpu); cudaDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l2b2c1_gpu); cudaDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l2b2c2_gpu); cudaDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l3b1c1_gpu); cudaDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l3b1c2_gpu); cudaDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l3b2c1_gpu); cudaDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l3b2c2_gpu); cudaDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l4b1c1_gpu); cudaDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l4b1c2_gpu); cudaDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l4b2c1_gpu); cudaDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l4b2c2_gpu); cudaDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); FC_new_global<<<numBlocks, numThreads, shared_memory>>>(bfc1_gpu); cudaDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); t1 = std::clock(); Output_new_global<<<numBlocks, numThreads, shared_memory>>>(bout_gpu); cudaDeviceSynchronize(); t2 = std::clock(); layer_time.push_back(t2 - t1); cudaError_t err = cudaGetLastError(); #else Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(bconv1_gpu); cudaDeviceSynchronize(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l1b1c1_gpu); cudaDeviceSynchronize(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l1b1c2_gpu); cudaDeviceSynchronize(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l1b2c1_gpu); cudaDeviceSynchronize(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l1b2c2_gpu); cudaDeviceSynchronize(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l2b1c1_gpu); 
cudaDeviceSynchronize(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l2b1c2_gpu); cudaDeviceSynchronize(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l2b2c1_gpu); cudaDeviceSynchronize(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l2b2c2_gpu); cudaDeviceSynchronize(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l3b1c1_gpu); cudaDeviceSynchronize(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l3b1c2_gpu); cudaDeviceSynchronize(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l3b2c1_gpu); cudaDeviceSynchronize(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l3b2c2_gpu); cudaDeviceSynchronize(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l4b1c1_gpu); cudaDeviceSynchronize(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l4b1c2_gpu); cudaDeviceSynchronize(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l4b2c1_gpu); cudaDeviceSynchronize(); Conv_new_global<<<numBlocks, numThreads, shared_memory>>>(l4b2c2_gpu); cudaDeviceSynchronize(); FC_new_global<<<numBlocks, numThreads, shared_memory>>>(bfc1_gpu); cudaDeviceSynchronize(); Output_new_global<<<numBlocks, numThreads, shared_memory>>>(bout_gpu); cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError(); #endif std::clock_t c_end = std::clock(); float time_elapsed_ms = 1000.0f * (c_end-c_start) / CLOCKS_PER_SEC; printf("\n==============\nResNet (ms): %.3f\n", time_elapsed_ms); #ifdef layer_timing for (int idx = 0; idx < layer_time.size(); idx++) { float time_elapsed_ms = 1000.0f * layer_time[idx] / CLOCKS_PER_SEC; printf("ResNet18 Layer-%d (ms): %.3f\n", idx, time_elapsed_ms); } #endif delete bconv1; delete l1b1c1; delete l1b1c2; delete l1b2c1; delete l1b2c2; delete l2b1c1; delete l2b1c2; delete l2b2c1; delete l2b2c2; delete l3b1c1; delete l3b1c2; delete l3b2c1; delete l3b2c2; delete l4b1c1; delete l4b1c2; delete l4b2c1; delete l4b2c2; delete bfc1; delete bout; return 0; }
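/*
 * A minimal editorial sketch, not part of the original file pair: the setup
 * above calls cudaFuncSetAttribute(..., cudaFuncAttributeMaxDynamicSharedMemorySize,
 * 65536) so each kernel may be launched with 64 KB of dynamic shared memory,
 * which exceeds the 48 KB default and requires this opt-in (compute
 * capability 7.0+). The self-contained example below shows the same pattern
 * on a trivial kernel; smem_kernel is a made-up name.
 */
#include <cstdio>
#include <cuda_runtime.h>

__global__ void smem_kernel(float *out) {
    extern __shared__ float buf[];           // dynamic shared memory
    buf[threadIdx.x] = (float)threadIdx.x;
    __syncthreads();
    if (threadIdx.x == 0) out[0] = buf[blockDim.x - 1];
}

int main() {
    const int shared_bytes = 64 * 1024;
    // Opt in to the larger dynamic shared-memory carve-out before launching.
    cudaFuncSetAttribute(smem_kernel,
                         cudaFuncAttributeMaxDynamicSharedMemorySize,
                         shared_bytes);

    float *d_out;
    cudaMalloc(&d_out, sizeof(float));
    smem_kernel<<<1, 256, shared_bytes>>>(d_out);
    cudaDeviceSynchronize();

    float h_out;
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("last element written: %.1f\n", h_out);   // 255.0
    cudaFree(d_out);
    return 0;
}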
c6cde57b9977cffebbe5a23c620d30f6a68c1038.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <stdio.h> #include <omp.h> #include <string> void vector_dot_product(double* dot_product_ptr, const double* a, const double* b, long N){ double sum = 0; #pragma omp parallel for schedule(static) reduction(+:sum) for (long i = 0; i < N; i++) sum += a[i] * b[i]; *dot_product_ptr = sum; } void Check_CUDA_Error(const char *message){ hipError_t error = hipGetLastError(); if(error!=hipSuccess) { fprintf(stderr,"ERROR: %s: %s\n", message, hipGetErrorString(error) ); exit(-1); } } #define BLOCK_SIZE 1024 // Warp divergence __global__ void reduction_kernel0(double* sum, const double* a, long N){ __shared__ double smem[BLOCK_SIZE]; int idx = (blockIdx.x) * blockDim.x + threadIdx.x; if (idx < N) smem[threadIdx.x] = a[idx]; else smem[threadIdx.x] = 0; __syncthreads(); if (threadIdx.x % 2 == 0) smem[threadIdx.x] += smem[threadIdx.x + 1]; __syncthreads(); if (threadIdx.x % 4 == 0) smem[threadIdx.x] += smem[threadIdx.x + 2]; __syncthreads(); if (threadIdx.x % 8 == 0) smem[threadIdx.x] += smem[threadIdx.x + 4]; __syncthreads(); if (threadIdx.x % 16 == 0) smem[threadIdx.x] += smem[threadIdx.x + 8]; __syncthreads(); if (threadIdx.x % 32 == 0) smem[threadIdx.x] += smem[threadIdx.x + 16]; __syncthreads(); if (threadIdx.x % 64 == 0) smem[threadIdx.x] += smem[threadIdx.x + 32]; __syncthreads(); if (threadIdx.x % 128 == 0) smem[threadIdx.x] += smem[threadIdx.x + 64]; __syncthreads(); if (threadIdx.x % 256 == 0) smem[threadIdx.x] += smem[threadIdx.x + 128]; __syncthreads(); if (threadIdx.x % 512 == 0) smem[threadIdx.x] += smem[threadIdx.x + 256]; __syncthreads(); if (threadIdx.x == 0) sum[blockIdx.x] = smem[threadIdx.x] + smem[threadIdx.x + 512]; } // Shared memory bank conflicts __global__ void reduction_kernel1(double* sum, const double* a, long N){ __shared__ double smem[BLOCK_SIZE]; int idx = (blockIdx.x) * blockDim.x + threadIdx.x; if (idx < N) smem[threadIdx.x] = a[idx]; else smem[threadIdx.x] = 0; __syncthreads(); if (threadIdx.x < 512) smem[threadIdx.x * 2] += smem[threadIdx.x * 2 + 1]; __syncthreads(); if (threadIdx.x < 256) smem[threadIdx.x * 4] += smem[threadIdx.x * 4 + 2]; __syncthreads(); if (threadIdx.x < 128) smem[threadIdx.x * 8] += smem[threadIdx.x * 8 + 4]; __syncthreads(); if (threadIdx.x < 64) smem[threadIdx.x * 16] += smem[threadIdx.x * 16 + 8]; __syncthreads(); if (threadIdx.x < 32) smem[threadIdx.x * 32] += smem[threadIdx.x * 32 + 16]; __syncwarp(); if (threadIdx.x < 16) smem[threadIdx.x * 64] += smem[threadIdx.x * 64 + 32]; __syncwarp(); if (threadIdx.x < 8) smem[threadIdx.x * 128] += smem[threadIdx.x * 128 + 64]; __syncwarp(); if (threadIdx.x < 4) smem[threadIdx.x * 256] += smem[threadIdx.x * 256 + 128]; __syncwarp(); if (threadIdx.x < 2) smem[threadIdx.x * 512] += smem[threadIdx.x * 512 + 256]; __syncwarp(); if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[512]; } __global__ void reduction_kernel2(double* sum, const double* a, long N){ __shared__ double smem[BLOCK_SIZE]; int idx = (blockIdx.x) * blockDim.x + threadIdx.x; if (idx < N) smem[threadIdx.x] = a[idx]; else smem[threadIdx.x] = 0; __syncthreads(); if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512]; __syncthreads(); if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256]; __syncthreads(); if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128]; __syncthreads(); if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64]; __syncthreads(); if (threadIdx.x < 32) { 
smem[threadIdx.x] += smem[threadIdx.x + 32]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 16]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 8]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 4]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 2]; __syncwarp(); if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1]; } } __global__ void vector_dot_product_kernel2(double* sum, const double* a, const double* b, long N){ __shared__ double smem[BLOCK_SIZE]; int idx = (blockIdx.x) * blockDim.x + threadIdx.x; if (idx < N) smem[threadIdx.x] = a[idx] * b[idx]; else smem[threadIdx.x] = 0; __syncthreads(); if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512]; __syncthreads(); if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256]; __syncthreads(); if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128]; __syncthreads(); if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64]; __syncthreads(); if (threadIdx.x < 32) { smem[threadIdx.x] += smem[threadIdx.x + 32]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 16]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 8]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 4]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 2]; __syncwarp(); if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1]; } } int main() { long N = (1UL<<25); double *x1; hipHostMalloc((void**)&x1, N * sizeof(double)); double *x2; hipHostMalloc((void**)&x2, N * sizeof(double)); #pragma omp parallel for schedule(static) for (long i = 0; i < N; i++) { x1[i] = 1.0/(i+1); x2[i] = 1.0/(i+1); } double dot_product_ref, sum; double tt = omp_get_wtime(); //reduction(&sum_ref, x, N); vector_dot_product(&dot_product_ref, x1, x2, N); printf("CPU Bandwidth = %f GB/s\n", 1*N*sizeof(double) / (omp_get_wtime()-tt)/1e9); double *x1_d, *x2_d, *y_d; hipMalloc(&x1_d, N*sizeof(double)); hipMalloc(&x2_d, N*sizeof(double)); long N_work = 1; for (long i = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); i > 1; i = (i+BLOCK_SIZE-1)/(BLOCK_SIZE)) N_work += i; hipMalloc(&y_d, N_work*sizeof(double)); // extra memory buffer for reduction across thread-blocks hipMemcpyAsync(x1_d, x1, N*sizeof(double), hipMemcpyHostToDevice); hipMemcpyAsync(x2_d, x2, N*sizeof(double), hipMemcpyHostToDevice); hipDeviceSynchronize(); tt = omp_get_wtime(); double* sum_d = y_d; long Nb = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); hipLaunchKernelGGL(( vector_dot_product_kernel2), dim3(Nb),dim3(BLOCK_SIZE), 0, 0, sum_d, x1_d, x2_d, N); while (Nb > 1) { long N = Nb; Nb = (Nb+BLOCK_SIZE-1)/(BLOCK_SIZE); hipLaunchKernelGGL(( reduction_kernel2), dim3(Nb),dim3(BLOCK_SIZE), 0, 0, sum_d + N, sum_d, N); sum_d += N; } hipMemcpyAsync(&sum, sum_d, 1*sizeof(double), hipMemcpyDeviceToHost); hipDeviceSynchronize(); printf("GPU Bandwidth = %f GB/s\n", 1*N*sizeof(double) / (omp_get_wtime()-tt)/1e9); printf("Error = %f\n", fabs(sum-dot_product_ref)); hipFree(x1_d); hipFree(x1_d); hipFree(y_d); hipHostFree(x1); hipHostFree(x2); return 0; }
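/*
 * A minimal editorial sketch, not part of the original file pair: the three
 * reduction kernels above contrast warp-divergent, bank-conflicting, and
 * sequential shared-memory addressing. A further variant, shown below in
 * plain CUDA, replaces the in-warp tail with shuffle intrinsics and one
 * atomic per warp. warp_reduce_sum and block_sum are made-up names, and
 * atomicAdd on double assumes compute capability 6.0+.
 */
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Sum across one warp without touching shared memory.
__device__ inline double warp_reduce_sum(double val) {
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffff, val, offset);
    return val;
}

__global__ void block_sum(const double *a, double *out, long n) {
    double v = 0.0;
    // Grid-stride accumulation so any grid size covers the input.
    for (long i = blockIdx.x * (long)blockDim.x + threadIdx.x; i < n;
         i += (long)gridDim.x * blockDim.x)
        v += a[i];
    v = warp_reduce_sum(v);
    // One atomic per warp replaces the full shared-memory tree.
    if ((threadIdx.x & 31) == 0) atomicAdd(out, v);
}

int main() {
    const long n = 1 << 20;
    double *h_a = (double *)malloc(n * sizeof(double));
    for (long i = 0; i < n; i++) h_a[i] = 1.0;   // expected sum is simply n

    double *d_a, *d_out;
    cudaMalloc(&d_a, n * sizeof(double));
    cudaMalloc(&d_out, sizeof(double));
    cudaMemcpy(d_a, h_a, n * sizeof(double), cudaMemcpyHostToDevice);
    cudaMemset(d_out, 0, sizeof(double));

    block_sum<<<256, 256>>>(d_a, d_out, n);
    double h_out = 0.0;
    cudaMemcpy(&h_out, d_out, sizeof(double), cudaMemcpyDeviceToHost);
    printf("sum = %.1f (expected %ld)\n", h_out, n);

    cudaFree(d_a); cudaFree(d_out); free(h_a);
    return 0;
}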
c6cde57b9977cffebbe5a23c620d30f6a68c1038.cu
#include <algorithm> #include <stdio.h> #include <omp.h> #include <string> void vector_dot_product(double* dot_product_ptr, const double* a, const double* b, long N){ double sum = 0; #pragma omp parallel for schedule(static) reduction(+:sum) for (long i = 0; i < N; i++) sum += a[i] * b[i]; *dot_product_ptr = sum; } void Check_CUDA_Error(const char *message){ cudaError_t error = cudaGetLastError(); if(error!=cudaSuccess) { fprintf(stderr,"ERROR: %s: %s\n", message, cudaGetErrorString(error) ); exit(-1); } } #define BLOCK_SIZE 1024 // Warp divergence __global__ void reduction_kernel0(double* sum, const double* a, long N){ __shared__ double smem[BLOCK_SIZE]; int idx = (blockIdx.x) * blockDim.x + threadIdx.x; if (idx < N) smem[threadIdx.x] = a[idx]; else smem[threadIdx.x] = 0; __syncthreads(); if (threadIdx.x % 2 == 0) smem[threadIdx.x] += smem[threadIdx.x + 1]; __syncthreads(); if (threadIdx.x % 4 == 0) smem[threadIdx.x] += smem[threadIdx.x + 2]; __syncthreads(); if (threadIdx.x % 8 == 0) smem[threadIdx.x] += smem[threadIdx.x + 4]; __syncthreads(); if (threadIdx.x % 16 == 0) smem[threadIdx.x] += smem[threadIdx.x + 8]; __syncthreads(); if (threadIdx.x % 32 == 0) smem[threadIdx.x] += smem[threadIdx.x + 16]; __syncthreads(); if (threadIdx.x % 64 == 0) smem[threadIdx.x] += smem[threadIdx.x + 32]; __syncthreads(); if (threadIdx.x % 128 == 0) smem[threadIdx.x] += smem[threadIdx.x + 64]; __syncthreads(); if (threadIdx.x % 256 == 0) smem[threadIdx.x] += smem[threadIdx.x + 128]; __syncthreads(); if (threadIdx.x % 512 == 0) smem[threadIdx.x] += smem[threadIdx.x + 256]; __syncthreads(); if (threadIdx.x == 0) sum[blockIdx.x] = smem[threadIdx.x] + smem[threadIdx.x + 512]; } // Shared memory bank conflicts __global__ void reduction_kernel1(double* sum, const double* a, long N){ __shared__ double smem[BLOCK_SIZE]; int idx = (blockIdx.x) * blockDim.x + threadIdx.x; if (idx < N) smem[threadIdx.x] = a[idx]; else smem[threadIdx.x] = 0; __syncthreads(); if (threadIdx.x < 512) smem[threadIdx.x * 2] += smem[threadIdx.x * 2 + 1]; __syncthreads(); if (threadIdx.x < 256) smem[threadIdx.x * 4] += smem[threadIdx.x * 4 + 2]; __syncthreads(); if (threadIdx.x < 128) smem[threadIdx.x * 8] += smem[threadIdx.x * 8 + 4]; __syncthreads(); if (threadIdx.x < 64) smem[threadIdx.x * 16] += smem[threadIdx.x * 16 + 8]; __syncthreads(); if (threadIdx.x < 32) smem[threadIdx.x * 32] += smem[threadIdx.x * 32 + 16]; __syncwarp(); if (threadIdx.x < 16) smem[threadIdx.x * 64] += smem[threadIdx.x * 64 + 32]; __syncwarp(); if (threadIdx.x < 8) smem[threadIdx.x * 128] += smem[threadIdx.x * 128 + 64]; __syncwarp(); if (threadIdx.x < 4) smem[threadIdx.x * 256] += smem[threadIdx.x * 256 + 128]; __syncwarp(); if (threadIdx.x < 2) smem[threadIdx.x * 512] += smem[threadIdx.x * 512 + 256]; __syncwarp(); if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[512]; } __global__ void reduction_kernel2(double* sum, const double* a, long N){ __shared__ double smem[BLOCK_SIZE]; int idx = (blockIdx.x) * blockDim.x + threadIdx.x; if (idx < N) smem[threadIdx.x] = a[idx]; else smem[threadIdx.x] = 0; __syncthreads(); if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512]; __syncthreads(); if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256]; __syncthreads(); if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128]; __syncthreads(); if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64]; __syncthreads(); if (threadIdx.x < 32) { smem[threadIdx.x] += smem[threadIdx.x + 32]; __syncwarp(); smem[threadIdx.x] += 
smem[threadIdx.x + 16]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 8]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 4]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 2]; __syncwarp(); if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1]; } } __global__ void vector_dot_product_kernel2(double* sum, const double* a, const double* b, long N){ __shared__ double smem[BLOCK_SIZE]; int idx = (blockIdx.x) * blockDim.x + threadIdx.x; if (idx < N) smem[threadIdx.x] = a[idx] * b[idx]; else smem[threadIdx.x] = 0; __syncthreads(); if (threadIdx.x < 512) smem[threadIdx.x] += smem[threadIdx.x + 512]; __syncthreads(); if (threadIdx.x < 256) smem[threadIdx.x] += smem[threadIdx.x + 256]; __syncthreads(); if (threadIdx.x < 128) smem[threadIdx.x] += smem[threadIdx.x + 128]; __syncthreads(); if (threadIdx.x < 64) smem[threadIdx.x] += smem[threadIdx.x + 64]; __syncthreads(); if (threadIdx.x < 32) { smem[threadIdx.x] += smem[threadIdx.x + 32]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 16]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 8]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 4]; __syncwarp(); smem[threadIdx.x] += smem[threadIdx.x + 2]; __syncwarp(); if (threadIdx.x == 0) sum[blockIdx.x] = smem[0] + smem[1]; } } int main() { long N = (1UL<<25); double *x1; cudaMallocHost((void**)&x1, N * sizeof(double)); double *x2; cudaMallocHost((void**)&x2, N * sizeof(double)); #pragma omp parallel for schedule(static) for (long i = 0; i < N; i++) { x1[i] = 1.0/(i+1); x2[i] = 1.0/(i+1); } double dot_product_ref, sum; double tt = omp_get_wtime(); //reduction(&sum_ref, x, N); vector_dot_product(&dot_product_ref, x1, x2, N); printf("CPU Bandwidth = %f GB/s\n", 1*N*sizeof(double) / (omp_get_wtime()-tt)/1e9); double *x1_d, *x2_d, *y_d; cudaMalloc(&x1_d, N*sizeof(double)); cudaMalloc(&x2_d, N*sizeof(double)); long N_work = 1; for (long i = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); i > 1; i = (i+BLOCK_SIZE-1)/(BLOCK_SIZE)) N_work += i; cudaMalloc(&y_d, N_work*sizeof(double)); // extra memory buffer for reduction across thread-blocks cudaMemcpyAsync(x1_d, x1, N*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpyAsync(x2_d, x2, N*sizeof(double), cudaMemcpyHostToDevice); cudaDeviceSynchronize(); tt = omp_get_wtime(); double* sum_d = y_d; long Nb = (N+BLOCK_SIZE-1)/(BLOCK_SIZE); vector_dot_product_kernel2<<<Nb,BLOCK_SIZE>>>(sum_d, x1_d, x2_d, N); while (Nb > 1) { long N = Nb; Nb = (Nb+BLOCK_SIZE-1)/(BLOCK_SIZE); reduction_kernel2<<<Nb,BLOCK_SIZE>>>(sum_d + N, sum_d, N); sum_d += N; } cudaMemcpyAsync(&sum, sum_d, 1*sizeof(double), cudaMemcpyDeviceToHost); cudaDeviceSynchronize(); printf("GPU Bandwidth = %f GB/s\n", 1*N*sizeof(double) / (omp_get_wtime()-tt)/1e9); printf("Error = %f\n", fabs(sum-dot_product_ref)); cudaFree(x1_d); cudaFree(x1_d); cudaFree(y_d); cudaFreeHost(x1); cudaFreeHost(x2); return 0; }
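/*
 * A minimal editorial sketch, not part of the original file pair: main above
 * reduces the dot product in several passes, each pass shrinking the number
 * of block partial sums by a factor of BLOCK_SIZE while bumping sum_d forward
 * through one scratch buffer. The standalone driver below shows the same
 * multi-pass idea with a simple power-of-two block reduction and a ping-pong
 * buffer; partial_sum and reduce_all are made-up names.
 */
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define TPB 256

// One partial sum per block; assumes blockDim.x is a power of two.
__global__ void partial_sum(const double *in, double *out, long n) {
    __shared__ double smem[TPB];
    long i = blockIdx.x * (long)blockDim.x + threadIdx.x;
    smem[threadIdx.x] = (i < n) ? in[i] : 0.0;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s) smem[threadIdx.x] += smem[threadIdx.x + s];
        __syncthreads();
    }
    if (threadIdx.x == 0) out[blockIdx.x] = smem[0];
}

// Launch passes until one value remains, as the while (Nb > 1) loop above does.
double reduce_all(const double *d_in, long n) {
    long nb = (n + TPB - 1) / TPB;
    double *d_buf;
    cudaMalloc(&d_buf, 2 * nb * sizeof(double));

    partial_sum<<<nb, TPB>>>(d_in, d_buf, n);
    double *src = d_buf, *dst = d_buf + nb;
    while (nb > 1) {
        long next = (nb + TPB - 1) / TPB;
        partial_sum<<<next, TPB>>>(src, dst, nb);
        double *t = src; src = dst; dst = t;   // ping-pong the scratch halves
        nb = next;
    }
    double result;
    cudaMemcpy(&result, src, sizeof(double), cudaMemcpyDeviceToHost);
    cudaFree(d_buf);
    return result;
}

int main() {
    const long n = 1 << 20;
    double *h = (double *)malloc(n * sizeof(double));
    for (long i = 0; i < n; i++) h[i] = 1.0;   // expected sum is simply n
    double *d_in;
    cudaMalloc(&d_in, n * sizeof(double));
    cudaMemcpy(d_in, h, n * sizeof(double), cudaMemcpyHostToDevice);
    printf("sum = %.1f (expected %ld)\n", reduce_all(d_in, n), n);
    cudaFree(d_in);
    free(h);
    return 0;
}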
a56c5fe6be02ddd20dd882733398d92e668faadb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2021 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "cuda_help.h" #include "proj.h" #include "rand.h" using namespace Legion; namespace legate { namespace numpy { __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_uniform_rand_1d(const AccessorWO<double, 1> out, const Point<1> origin, const Point<1> strides, const unsigned epoch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset; const unsigned long long key = x * strides[0]; out[x] = RandomGenerator::rand_double(epoch, HI_BITS(key), LO_BITS(key)); } __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_uniform_rand_2d(const AccessorWO<double, 2> out, const Point<2> origin, const Point<1> pitch, const Point<2> strides, const unsigned epoch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset / pitch[0]; const coord_t y = origin[1] + offset % pitch[0]; const unsigned long long key = x * strides[0] + y * strides[1]; out[x][y] = RandomGenerator::rand_double(epoch, HI_BITS(key), LO_BITS(key)); } __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_uniform_rand_3d(const AccessorWO<double, 3> out, const Point<3> origin, const Point<2> pitch, const Point<3> strides, const unsigned epoch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset / pitch[0]; const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1]; const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1]; const unsigned long long key = x * strides[0] + y * strides[1] + z * strides[2]; out[x][y][z] = RandomGenerator::rand_double(epoch, HI_BITS(key), LO_BITS(key)); } /*static*/ void RandUniformTask::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) { LegateDeserializer derez(task->args, task->arglen); const unsigned epoch = derez.unpack_32bit_uint(); const int dim = derez.unpack_dimension(); switch (dim) { case 1: { const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez); if (rect.empty()) break; const Point<1> strides = derez.unpack_point<1>(); const AccessorWO<double, 1> out = derez.unpack_accessor_WO<double, 1>(regions[0], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; hipLaunchKernelGGL(( legate_uniform_rand_1d), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, rect.lo, strides, epoch, volume); break; } case 2: { const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez); if (rect.empty()) break; const Point<2> strides = derez.unpack_point<2>(); const AccessorWO<double, 2> out = derez.unpack_accessor_WO<double, 2>(regions[0], rect); const size_t volume = 
rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; const coord_t pitch = rect.hi[1] - rect.lo[1] + 1; hipLaunchKernelGGL(( legate_uniform_rand_2d), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, rect.lo, pitch, strides, epoch, volume); break; } case 3: { const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez); if (rect.empty()) break; const Point<3> strides = derez.unpack_point<3>(); const AccessorWO<double, 3> out = derez.unpack_accessor_WO<double, 3>(regions[0], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; const coord_t diffy = rect.hi[1] - rect.lo[1] + 1; const coord_t diffz = rect.hi[2] - rect.lo[2] + 1; const coord_t pitch[2] = {diffy * diffz, diffz}; hipLaunchKernelGGL(( legate_uniform_rand_3d), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, rect.lo, Point<2>(pitch), strides, epoch, volume); break; } default: assert(false); } } __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_normal_rand_1d(const AccessorWO<double, 1> out, const Point<1> origin, const Point<1> strides, const unsigned epoch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset; const unsigned long long key = x * strides[0]; out[x] = erfinv(2.0 * RandomGenerator::rand_double(epoch, HI_BITS(key), LO_BITS(key)) - 1.0); } __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_normal_rand_2d(const AccessorWO<double, 2> out, const Point<2> origin, const Point<1> pitch, const Point<2> strides, const unsigned epoch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset / pitch[0]; const coord_t y = origin[1] + offset % pitch[0]; const unsigned long long key = x * strides[0] + y * strides[1]; out[x][y] = erfinv(2.0 * RandomGenerator::rand_double(epoch, HI_BITS(key), LO_BITS(key)) - 1.0); } __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_normal_rand_3d(const AccessorWO<double, 3> out, const Point<3> origin, const Point<2> pitch, const Point<3> strides, const unsigned epoch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset / pitch[0]; const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1]; const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1]; const unsigned long long key = x * strides[0] + y * strides[1] + z * strides[2]; out[x][y][z] = erfinv(2.0 * RandomGenerator::rand_double(epoch, HI_BITS(key), LO_BITS(key)) - 1.0); } /*static*/ void RandNormalTask::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) { LegateDeserializer derez(task->args, task->arglen); const unsigned epoch = derez.unpack_32bit_uint(); const int dim = derez.unpack_dimension(); switch (dim) { case 1: { const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez); if (rect.empty()) break; const Point<1> strides = derez.unpack_point<1>(); const AccessorWO<double, 1> out = derez.unpack_accessor_WO<double, 1>(regions[0], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; hipLaunchKernelGGL(( legate_normal_rand_1d), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, rect.lo, strides, epoch, volume); break; } case 2: { const Rect<2> 
rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez); if (rect.empty()) break; const Point<2> strides = derez.unpack_point<2>(); const AccessorWO<double, 2> out = derez.unpack_accessor_WO<double, 2>(regions[0], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; const coord_t pitch = rect.hi[1] - rect.lo[1] + 1; hipLaunchKernelGGL(( legate_normal_rand_2d), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, rect.lo, pitch, strides, epoch, volume); break; } case 3: { const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez); if (rect.empty()) break; const Point<3> strides = derez.unpack_point<3>(); const AccessorWO<double, 3> out = derez.unpack_accessor_WO<double, 3>(regions[0], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; const coord_t diffy = rect.hi[1] - rect.lo[1] + 1; const coord_t diffz = rect.hi[2] - rect.lo[2] + 1; const coord_t pitch[2] = {diffy * diffz, diffz}; hipLaunchKernelGGL(( legate_normal_rand_3d), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, rect.lo, Point<2>(pitch), strides, epoch, volume); break; } default: assert(false); } } template<typename T> __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_integer_rand_1d(const AccessorWO<T, 1> out, const Point<1> origin, const Point<1> strides, const unsigned long long low, const unsigned long long diff, const unsigned epoch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset; const unsigned long long key = x * strides[0]; out[x] = low + RandomGenerator::rand_long(epoch, HI_BITS(key), LO_BITS(key), diff); } template<typename T> __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_integer_rand_2d(const AccessorWO<T, 2> out, const Point<2> origin, const Point<1> pitch, const Point<2> strides, const unsigned long long low, const unsigned long long diff, const unsigned epoch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset / pitch[0]; const coord_t y = origin[1] + offset % pitch[0]; const unsigned long long key = x * strides[0] + y * strides[1]; out[x][y] = low + RandomGenerator::rand_long(epoch, HI_BITS(key), LO_BITS(key), diff); } template<typename T> __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_integer_rand_3d(const AccessorWO<T, 3> out, const Point<3> origin, const Point<2> pitch, const Point<3> strides, const unsigned long long low, const unsigned long long diff, const unsigned epoch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset / pitch[0]; const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1]; const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1]; const unsigned long long key = x * strides[0] + y * strides[1] + z * strides[2]; out[x][y][z] = low + RandomGenerator::rand_long(epoch, HI_BITS(key), LO_BITS(key), diff); } template<typename T> /*static*/ void RandIntegerTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) { LegateDeserializer derez(task->args, task->arglen); const unsigned epoch = derez.unpack_32bit_uint(); const T low = derez.unpack_value<T>(); const T high = derez.unpack_value<T>(); assert(low < high); const 
unsigned long long diff = high - low; const int dim = derez.unpack_dimension(); switch (dim) { case 1: { const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez); if (rect.empty()) break; const Point<1> strides = derez.unpack_point<1>(); const AccessorWO<T, 1> out = derez.unpack_accessor_WO<T, 1>(regions[0], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; hipLaunchKernelGGL(( legate_integer_rand_1d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, rect.lo, strides, low, diff, epoch, volume); break; } case 2: { const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez); if (rect.empty()) break; const Point<2> strides = derez.unpack_point<2>(); const AccessorWO<T, 2> out = derez.unpack_accessor_WO<T, 2>(regions[0], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; const coord_t pitch = rect.hi[1] - rect.lo[1] + 1; hipLaunchKernelGGL(( legate_integer_rand_2d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, rect.lo, pitch, strides, low, diff, epoch, volume); break; } case 3: { const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez); if (rect.empty()) break; const Point<3> strides = derez.unpack_point<3>(); const AccessorWO<T, 3> out = derez.unpack_accessor_WO<T, 3>(regions[0], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; const coord_t diffy = rect.hi[1] - rect.lo[1] + 1; const coord_t diffz = rect.hi[2] - rect.lo[2] + 1; const coord_t pitch[2] = {diffy * diffz, diffz}; hipLaunchKernelGGL(( legate_integer_rand_3d<T>), dim3(blocks), dim3(THREADS_PER_BLOCK), 0, 0, out, rect.lo, Point<2>(pitch), strides, low, diff, epoch, volume); break; } default: assert(false); } } INSTANTIATE_INT_VARIANT(RandIntegerTask, gpu_variant) } // namespace numpy } // namespace legate
a56c5fe6be02ddd20dd882733398d92e668faadb.cu
/* Copyright 2021 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "cuda_help.h" #include "proj.h" #include "rand.h" using namespace Legion; namespace legate { namespace numpy { __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_uniform_rand_1d(const AccessorWO<double, 1> out, const Point<1> origin, const Point<1> strides, const unsigned epoch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset; const unsigned long long key = x * strides[0]; out[x] = RandomGenerator::rand_double(epoch, HI_BITS(key), LO_BITS(key)); } __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_uniform_rand_2d(const AccessorWO<double, 2> out, const Point<2> origin, const Point<1> pitch, const Point<2> strides, const unsigned epoch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset / pitch[0]; const coord_t y = origin[1] + offset % pitch[0]; const unsigned long long key = x * strides[0] + y * strides[1]; out[x][y] = RandomGenerator::rand_double(epoch, HI_BITS(key), LO_BITS(key)); } __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_uniform_rand_3d(const AccessorWO<double, 3> out, const Point<3> origin, const Point<2> pitch, const Point<3> strides, const unsigned epoch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset / pitch[0]; const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1]; const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1]; const unsigned long long key = x * strides[0] + y * strides[1] + z * strides[2]; out[x][y][z] = RandomGenerator::rand_double(epoch, HI_BITS(key), LO_BITS(key)); } /*static*/ void RandUniformTask::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) { LegateDeserializer derez(task->args, task->arglen); const unsigned epoch = derez.unpack_32bit_uint(); const int dim = derez.unpack_dimension(); switch (dim) { case 1: { const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez); if (rect.empty()) break; const Point<1> strides = derez.unpack_point<1>(); const AccessorWO<double, 1> out = derez.unpack_accessor_WO<double, 1>(regions[0], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; legate_uniform_rand_1d<<<blocks, THREADS_PER_BLOCK>>>(out, rect.lo, strides, epoch, volume); break; } case 2: { const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez); if (rect.empty()) break; const Point<2> strides = derez.unpack_point<2>(); const AccessorWO<double, 2> out = derez.unpack_accessor_WO<double, 2>(regions[0], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; const coord_t pitch = rect.hi[1] - 
rect.lo[1] + 1; legate_uniform_rand_2d<<<blocks, THREADS_PER_BLOCK>>>(out, rect.lo, pitch, strides, epoch, volume); break; } case 3: { const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez); if (rect.empty()) break; const Point<3> strides = derez.unpack_point<3>(); const AccessorWO<double, 3> out = derez.unpack_accessor_WO<double, 3>(regions[0], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; const coord_t diffy = rect.hi[1] - rect.lo[1] + 1; const coord_t diffz = rect.hi[2] - rect.lo[2] + 1; const coord_t pitch[2] = {diffy * diffz, diffz}; legate_uniform_rand_3d<<<blocks, THREADS_PER_BLOCK>>>(out, rect.lo, Point<2>(pitch), strides, epoch, volume); break; } default: assert(false); } } __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_normal_rand_1d(const AccessorWO<double, 1> out, const Point<1> origin, const Point<1> strides, const unsigned epoch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset; const unsigned long long key = x * strides[0]; out[x] = erfinv(2.0 * RandomGenerator::rand_double(epoch, HI_BITS(key), LO_BITS(key)) - 1.0); } __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_normal_rand_2d(const AccessorWO<double, 2> out, const Point<2> origin, const Point<1> pitch, const Point<2> strides, const unsigned epoch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset / pitch[0]; const coord_t y = origin[1] + offset % pitch[0]; const unsigned long long key = x * strides[0] + y * strides[1]; out[x][y] = erfinv(2.0 * RandomGenerator::rand_double(epoch, HI_BITS(key), LO_BITS(key)) - 1.0); } __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_normal_rand_3d(const AccessorWO<double, 3> out, const Point<3> origin, const Point<2> pitch, const Point<3> strides, const unsigned epoch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset / pitch[0]; const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1]; const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1]; const unsigned long long key = x * strides[0] + y * strides[1] + z * strides[2]; out[x][y][z] = erfinv(2.0 * RandomGenerator::rand_double(epoch, HI_BITS(key), LO_BITS(key)) - 1.0); } /*static*/ void RandNormalTask::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) { LegateDeserializer derez(task->args, task->arglen); const unsigned epoch = derez.unpack_32bit_uint(); const int dim = derez.unpack_dimension(); switch (dim) { case 1: { const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez); if (rect.empty()) break; const Point<1> strides = derez.unpack_point<1>(); const AccessorWO<double, 1> out = derez.unpack_accessor_WO<double, 1>(regions[0], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; legate_normal_rand_1d<<<blocks, THREADS_PER_BLOCK>>>(out, rect.lo, strides, epoch, volume); break; } case 2: { const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez); if (rect.empty()) break; const Point<2> strides = derez.unpack_point<2>(); const AccessorWO<double, 2> out = derez.unpack_accessor_WO<double, 2>(regions[0], rect); const 
size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; const coord_t pitch = rect.hi[1] - rect.lo[1] + 1; legate_normal_rand_2d<<<blocks, THREADS_PER_BLOCK>>>(out, rect.lo, pitch, strides, epoch, volume); break; } case 3: { const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez); if (rect.empty()) break; const Point<3> strides = derez.unpack_point<3>(); const AccessorWO<double, 3> out = derez.unpack_accessor_WO<double, 3>(regions[0], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; const coord_t diffy = rect.hi[1] - rect.lo[1] + 1; const coord_t diffz = rect.hi[2] - rect.lo[2] + 1; const coord_t pitch[2] = {diffy * diffz, diffz}; legate_normal_rand_3d<<<blocks, THREADS_PER_BLOCK>>>(out, rect.lo, Point<2>(pitch), strides, epoch, volume); break; } default: assert(false); } } template<typename T> __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_integer_rand_1d(const AccessorWO<T, 1> out, const Point<1> origin, const Point<1> strides, const unsigned long long low, const unsigned long long diff, const unsigned epoch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset; const unsigned long long key = x * strides[0]; out[x] = low + RandomGenerator::rand_long(epoch, HI_BITS(key), LO_BITS(key), diff); } template<typename T> __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_integer_rand_2d(const AccessorWO<T, 2> out, const Point<2> origin, const Point<1> pitch, const Point<2> strides, const unsigned long long low, const unsigned long long diff, const unsigned epoch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset / pitch[0]; const coord_t y = origin[1] + offset % pitch[0]; const unsigned long long key = x * strides[0] + y * strides[1]; out[x][y] = low + RandomGenerator::rand_long(epoch, HI_BITS(key), LO_BITS(key), diff); } template<typename T> __global__ void __launch_bounds__(THREADS_PER_BLOCK, MIN_CTAS_PER_SM) legate_integer_rand_3d(const AccessorWO<T, 3> out, const Point<3> origin, const Point<2> pitch, const Point<3> strides, const unsigned long long low, const unsigned long long diff, const unsigned epoch, const size_t max) { const size_t offset = blockIdx.x * blockDim.x + threadIdx.x; if (offset >= max) return; const coord_t x = origin[0] + offset / pitch[0]; const coord_t y = origin[1] + (offset % pitch[0]) / pitch[1]; const coord_t z = origin[2] + (offset % pitch[0]) % pitch[1]; const unsigned long long key = x * strides[0] + y * strides[1] + z * strides[2]; out[x][y][z] = low + RandomGenerator::rand_long(epoch, HI_BITS(key), LO_BITS(key), diff); } template<typename T> /*static*/ void RandIntegerTask<T>::gpu_variant(const Task* task, const std::vector<PhysicalRegion>& regions, Context ctx, Runtime* runtime) { LegateDeserializer derez(task->args, task->arglen); const unsigned epoch = derez.unpack_32bit_uint(); const T low = derez.unpack_value<T>(); const T high = derez.unpack_value<T>(); assert(low < high); const unsigned long long diff = high - low; const int dim = derez.unpack_dimension(); switch (dim) { case 1: { const Rect<1> rect = NumPyProjectionFunctor::unpack_shape<1>(task, derez); if (rect.empty()) break; const Point<1> strides = derez.unpack_point<1>(); const AccessorWO<T, 1> out = 
derez.unpack_accessor_WO<T, 1>(regions[0], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; legate_integer_rand_1d<T><<<blocks, THREADS_PER_BLOCK>>>(out, rect.lo, strides, low, diff, epoch, volume); break; } case 2: { const Rect<2> rect = NumPyProjectionFunctor::unpack_shape<2>(task, derez); if (rect.empty()) break; const Point<2> strides = derez.unpack_point<2>(); const AccessorWO<T, 2> out = derez.unpack_accessor_WO<T, 2>(regions[0], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; const coord_t pitch = rect.hi[1] - rect.lo[1] + 1; legate_integer_rand_2d<T><<<blocks, THREADS_PER_BLOCK>>>(out, rect.lo, pitch, strides, low, diff, epoch, volume); break; } case 3: { const Rect<3> rect = NumPyProjectionFunctor::unpack_shape<3>(task, derez); if (rect.empty()) break; const Point<3> strides = derez.unpack_point<3>(); const AccessorWO<T, 3> out = derez.unpack_accessor_WO<T, 3>(regions[0], rect); const size_t volume = rect.volume(); const size_t blocks = (volume + THREADS_PER_BLOCK - 1) / THREADS_PER_BLOCK; const coord_t diffy = rect.hi[1] - rect.lo[1] + 1; const coord_t diffz = rect.hi[2] - rect.lo[2] + 1; const coord_t pitch[2] = {diffy * diffz, diffz}; legate_integer_rand_3d<T><<<blocks, THREADS_PER_BLOCK>>>(out, rect.lo, Point<2>(pitch), strides, low, diff, epoch, volume); break; } default: assert(false); } } INSTANTIATE_INT_VARIANT(RandIntegerTask, gpu_variant) } // namespace numpy } // namespace legate
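The 2-D/3-D kernels above recover multi-dimensional coordinates from a flat thread offset using the precomputed pitch values (pitch[0] = diffy * diffz, pitch[1] = diffz). The following standalone host-side sketch is not part of the Legate sources; the extents and origin are made up. It only checks that this decode inverts the row-major enumeration the launches assume.

// Host-side sanity check for the pitch-based offset -> (x, y, z) decode used
// by the 3-D kernels above. Hypothetical origin and extents.
#include <cassert>
#include <cstdio>

int main() {
  const long lox = 2, loy = 3, loz = 5;        // hypothetical rect.lo
  const long diffx = 4, diffy = 6, diffz = 7;  // hypothetical extents
  const long pitch0 = diffy * diffz, pitch1 = diffz;
  long offset = 0;
  for (long x = 0; x < diffx; ++x)
    for (long y = 0; y < diffy; ++y)
      for (long z = 0; z < diffz; ++z, ++offset) {
        // decode exactly as legate_*_rand_3d does
        const long dx = lox + offset / pitch0;
        const long dy = loy + (offset % pitch0) / pitch1;
        const long dz = loz + (offset % pitch0) % pitch1;
        assert(dx == lox + x && dy == loy + y && dz == loz + z);
      }
  std::printf("pitch decode round-trips for %ld offsets\n", offset);
  return 0;
}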
43091477433bf32bab0285fb59912ee59d4490be.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <stdlib.h> #include <algorithm> #include "radixselect.cuh" //#include "radixselectNormalInplaceWorking.cuh" #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <fstream> #include <random> //#include <random> // #define Enabletest 1 using namespace std; typedef unsigned int data_t; typedef int index_t; int compare (const void * a, const void * b) { return ( *(int*)a - *(int*)b );//in ascending order } template<typename data_t,typename index_t> index_t power(index_t x,index_t n) { index_t number=1; for (index_t i=0; i<n ;i++) { number*=x; } return number; } void getminmax(data_t* arr,index_t n,data_t& max,data_t& min) { for (index_t i=1;i<n;i++) { if (arr[i]> max) { max=arr[i]; } if (arr[i]<min) { min=arr[i]; } } return; } template<typename data_t,typename index_t> bool IsPowerof2(index_t x) { return (x != 0) && ((x & (x - 1)) == 0); } int main(int argc,char** argv) { cout<<"./exe num_element k NBitsPerDigit beta"<<endl; cout<<"Size of unsigned int"<<sizeof(unsigned int)<<endl; if (argc != 5) {cout<<"wrong input"<<endl;exit(-1);} index_t num_pow = atol(argv[1]); index_t base=2; index_t num_element = power<data_t,index_t>(base,num_pow); index_t k= atol(argv[2]); index_t NBits=atol(argv[3]);//atol(argv[3]); int sd[]={10,100000,1000000,100,100000000}; int beta=atoi(argv[4]);//SampleBeta function is designed for Beta=3 only. So we need to update the SampleBetafunction in radix select if we want to change Beta value // H_ERR(hipSetDevice(1)); data_t* vec= (data_t*)malloc(sizeof(data_t)*num_element);//new data_t[num_element]; data_t* vec1= (data_t*)malloc(sizeof(data_t)*num_element);//new data_t[num_element]; std::random_device rd; std::mt19937 gen(rd()); unsigned int value; int over; int minvalue=2147483643; bool test=false; index_t alpha=0.5*(num_pow-log(k)/log(2)+3); bool defaultContribution=true; if (alpha <=5) defaultContribution=false; index_t SubRangesize=pow(2,alpha); // for (int dis=3;dis<4;dis++) { // std::uniform_int_distribution <unsigned int> d(0, 2147483643); int minvalue=2147483643; // std::uniform_int_distribution <unsigned int> d_Lower(0, 100000000);//(0 to 100 million) std::uniform_int_distribution <unsigned int> d(0, 4294967295); // for (int dis=3;dis<4;dis++) // { for (index_t i=0;i<num_element;i++) { // vec[i]=rand()%2147483648;//2^31 -1 value=d(gen); if (minvalue > value) { minvalue=value; } // if (value > 2147483650) test=true; if (value > 4294967295) { cout<<"Overflow of unsigned int detected"<<endl; return -1; } vec[i]=value; vec1[i]=vec[i]; } if (minvalue < 0) { cout<<"-ve value detected:"<<minvalue<<endl; return -1; } cout<<"Minimum value:"<<minvalue<<endl; if (test) cout<<"Data generated Ok"<<endl; else cout<<"Data generated not Ok"<<endl; // sort(vec, vec + num_element); // for (int Kiteration=atol(argv[2]);Kiteration<536870912;Kiteration=Kiteration*2) { // k=Kiteration; // index_t alpha=atol(argv[4]); // int beta=3;//SampleBeta function is designed for Beta=3 only. So we need to update the SampleBetafunction in radix select if we want to change Beta value index_t num_bucket=1<<NBits; int CurrentDigit=(sizeof(data_t)*8/NBits)-1; index_t NSubranges=num_element/SubRangesize; int NthreadstoworkInreduction=32; if (SubRangesize<32) { NthreadstoworkInreduction=SubRangesize; } cout<<"Number of Subranges:"<<NSubranges<<endl; if (NSubranges<k) { cout<<"Small number of subranges!. 
Decrease the value of alpha!"<<endl; // exit(-1); } if ((!IsPowerof2<data_t,index_t>(NBits)) || (NBits > sizeof(data_t)*8)) { cout<<"Enter correct number of bits per digit"<<endl; return -1; } // data_t* vec= (data_t*)malloc(sizeof(data_t)*num_element);//new data_t[num_element]; // data_t* vec1= (data_t*)malloc(sizeof(data_t)*num_element);//new data_t[num_element]; // std::random_device rd; // std::mt19937 gen(rd()); // float value; // float minvalue=10000000; // for (index_t i=0;i<num_element;i++) // { // std::normal_distribution<float> d(10000000, sd[d]);//Mean =100 mill , sd=100 // // vec[i]=rand()%2147483648;//2^31 -1 // value=d(gen); // if (minvalue > value) // { // minvalue=value; // } // if (value > 4294967295) // { // cout<<"Overflow of unsigned int detected"<<endl; // } // vec[i]=value; // vec1[i]=vec[i]; // } // cout<<endl; // if (minvalue < 0) // { // cout<<"-ve value detected"<<endl; // } cout<<"Starting TopK with Npow:"<<num_pow<<" K:"<<k<<" alpha:"<<alpha<<"DistributionU(0,2^31-1)"<<endl; std::fstream statusLog; // timeLog.open("timeRadixSampleOCT11_N_K_alphaVaried.csv",std::fstream::out | std::fstream::app); cout<<vec[0]; cout<<endl; data_t* TopArray=new data_t[k]; data_t TopKElement=0; data_t* vec_d; H_ERR(hipMalloc((void**) &vec_d,sizeof(data_t)*num_element)); H_ERR(hipMemcpy(vec_d,vec,sizeof(data_t)*num_element,hipMemcpyHostToDevice)); // raelse dix_select_inplace<data_t,index_t>(vec_d,num_element,k,num_bucket,TopKElement,NBits,CurrentDigit,0); double timeforMaxsample=0;double timeforFirstTopk=0;double timeforSecondTopk=0;double timeforNormalRadixSelect=0;double timeforConcatenation=0; data_t* Max_d; H_ERR(hipMalloc((void**) &Max_d,sizeof(data_t)*NSubranges*beta));// updated for Beta index_t* SubrangeId_d; H_ERR(hipMalloc((void**) &SubrangeId_d,sizeof(index_t)*NSubranges*beta));//updated for beta int NThreadsPerBlock=256;//only shared memory // int NThreadsPerBlock=1024;//Shared memory with subwarp int SizeOfSubWarp=8; int pow_size_Subwarp=3; // int NSharedMemoryElements=NThreadsPerBlock<<alpha;//only shared Memory int NSharedMemoryElements=NThreadsPerBlock<<5;//3 is giving best result in different values of SubWarp size //Each thread responsible for 32 elements and contribute to 8 Subranges from a group of 4 elements int SizeOfAllocation=NSharedMemoryElements+(NSharedMemoryElements >> 5); //sampleMax_multirange<data_t,index_t><<<4096,512>>>(A_d,Max_d,N,NSubranges,SubRangesize,alpha,SubrangeId_d,Nthreadstowork, NSubrangesperWarp, SubWarpSize,NThreadsPerSubRange); int NumberOfSpace_WithPadding=NSharedMemoryElements+(NSharedMemoryElements >>5); int NSubRangesPerBlock=NSharedMemoryElements/SizeOfSubWarp;//can be in CPU int NSubWarps_InBlock=NThreadsPerBlock >> pow_size_Subwarp;// Can be in CPU //Note NTotalVirtualSubWarpsInBlock=NSubrangesDealtBy1Block as 1 subwarp is responsible for 1 Subrange int NElementsPerBlock_ReadFromGlobal=NSubRangesPerBlock*SubRangesize;//1 Subwarp works for 1 subrange --> Can be in CPU int TotalBlocksrequired=num_element/NElementsPerBlock_ReadFromGlobal; if (TotalBlocksrequired<1) { cout<<"reduce blockDim or sizeofSubrange(alpha), for the kernel to work"<<endl; exit(-1); } cout<<"Size of shared memory per block:"<<SizeOfAllocation*sizeof(data_t)/1024.0 <<"KB"<<endl; // statusLog.open("Status_alpha_0_3_4_5_TotalSOK_Radix.csv",std::fstream::out | std::fstream::app); statusLog.open("StatusFile.csv",std::fstream::out | std::fstream::app); statusLog<<endl<<endl<<"Started Radix select with:2^"<<num_pow<<" elements "<<k<<" as Kth element and "<<alpha<<"as 
alpha!."<<"Distribution:U(0,2^31-1)"<<endl; index_t* SelectedSubrangeId_d; H_ERR(hipMalloc((void**) &SelectedSubrangeId_d,sizeof(index_t)*k*beta));//updated *3 for beta index_t* CountSelectedSubrange_d; index_t* CountLonelyElements_d; H_ERR(hipMalloc((void**) &CountSelectedSubrange_d,sizeof(index_t))); H_ERR(hipMalloc((void**) &CountLonelyElements_d,sizeof(index_t))); H_ERR(hipMemset(CountSelectedSubrange_d, 0, sizeof(index_t))); H_ERR(hipMemset(CountLonelyElements_d, 0, sizeof(index_t))); data_t* ConcatenatedRange_d; index_t* write_pos_d; H_ERR(hipMalloc((void**) &ConcatenatedRange_d,sizeof(data_t)*k*SubRangesize)); H_ERR(hipMalloc((void**) &write_pos_d,sizeof(index_t))); double start=wtime(); if (alpha==0) { timeforNormalRadixSelect=wtime(); radix_select<data_t,index_t>(vec_d,num_element,k,num_bucket,TopKElement,NBits,CurrentDigit); timeforNormalRadixSelect=wtime()-timeforNormalRadixSelect; } else// if(NSubranges > k) { sample_radix_select<data_t,index_t>(vec_d,num_element,k,num_bucket,TopKElement,NBits,CurrentDigit,NSubranges,SubRangesize,alpha,timeforMaxsample,timeforFirstTopk,timeforSecondTopk,timeforConcatenation,Max_d,SubrangeId_d,NSharedMemoryElements,SizeOfSubWarp,pow_size_Subwarp,NSubWarps_InBlock,NSubRangesPerBlock,NElementsPerBlock_ReadFromGlobal,TotalBlocksrequired,SizeOfAllocation,NThreadsPerBlock,beta,defaultContribution,NthreadstoworkInreduction,SelectedSubrangeId_d,CountLonelyElements_d,write_pos_d,ConcatenatedRange_d,CountSelectedSubrange_d); } // else // { // timeforNormalRadixSelect=wtime(); // radix_select<data_t,index_t>(vec_d,num_element,k,num_bucket,TopKElement,NBits,CurrentDigit); // timeforNormalRadixSelect=wtime()-timeforNormalRadixSelect; // } double totalTime=wtime()-start; cout<<"The kth element from top is:"<<TopKElement<<endl; #ifdef Enabletest sort(vec1, vec1 + num_element); cout<<endl; if (vec1[num_element-k]==TopKElement) { cout<<"Success!"<<endl; } else { cout<<"Not Success!"<<endl; } cout<<"Required value"<<vec1[num_element-k]<<endl; assert(vec1[num_element-k]==TopKElement); #endif statusLog<<"Successfully Finished Radix select with:2^"<<num_pow<<" elements "<<k<<" as Kth element and "<<alpha<<"as alpha!."<<endl; statusLog.close(); cout<<"Sampling Time:"<<timeforMaxsample*1000<<" ms"<<endl; cout<<"Time for First TopK:"<<timeforFirstTopk*1000<<" ms"<<endl; cout<<"Time for Concatenation:"<<timeforConcatenation*1000<<" ms"<<endl; cout<<"Time for Second TopK:"<<timeforSecondTopk*1000<<" ms"<<endl; cout<<"Time for Normal Radix Select:"<<timeforNormalRadixSelect*1000<<" ms"<<endl; cout<<"Total Time:"<<totalTime*1000<<" ms"<<endl; std::fstream timeLog; // timeLog.open("N_29UniformDistributedAutoTuneAdaptive22Feb_TitanSorted_Unsorted.csv",std::fstream::out | std::fstream::app); // timeLog.open("N_29TestingFor2^32.csv",std::fstream::out | std::fstream::app); timeLog.open("Skipped3Digits_Radix.csv",std::fstream::out | std::fstream::app); if (defaultContribution) { timeLog<<"D"<<";"; } else { timeLog<<"B"<<";"; } timeLog<<" "<<num_pow<<";"<<k<<";"<<alpha<<";"<<beta<<";"<<timeforMaxsample*1000<<";"<<timeforFirstTopk*1000<<";"<<timeforConcatenation*1000<<";"<<timeforSecondTopk*1000<<";"<<timeforNormalRadixSelect*1000<<";"<<totalTime*1000<<endl; timeLog.close(); // H_ERR(hipFree(vec_d));H_ERR(hipFree(Max_d));H_ERR(hipFree(SubrangeId_d)); } } // free(vec);free(vec1); return 0; }
43091477433bf32bab0285fb59912ee59d4490be.cu
#include <iostream> #include <stdlib.h> #include <algorithm> #include "radixselect.cuh" //#include "radixselectNormalInplaceWorking.cuh" #include <cuda.h> #include <cuda_runtime_api.h> #include <fstream> #include <random> //#include <random> // #define Enabletest 1 using namespace std; typedef unsigned int data_t; typedef int index_t; int compare (const void * a, const void * b) { return ( *(int*)a - *(int*)b );//in ascending order } template<typename data_t,typename index_t> index_t power(index_t x,index_t n) { index_t number=1; for (index_t i=0; i<n ;i++) { number*=x; } return number; } void getminmax(data_t* arr,index_t n,data_t& max,data_t& min) { for (index_t i=1;i<n;i++) { if (arr[i]> max) { max=arr[i]; } if (arr[i]<min) { min=arr[i]; } } return; } template<typename data_t,typename index_t> bool IsPowerof2(index_t x) { return (x != 0) && ((x & (x - 1)) == 0); } int main(int argc,char** argv) { cout<<"./exe num_element k NBitsPerDigit beta"<<endl; cout<<"Size of unsigned int"<<sizeof(unsigned int)<<endl; if (argc != 5) {cout<<"wrong input"<<endl;exit(-1);} index_t num_pow = atol(argv[1]); index_t base=2; index_t num_element = power<data_t,index_t>(base,num_pow); index_t k= atol(argv[2]); index_t NBits=atol(argv[3]);//atol(argv[3]); int sd[]={10,100000,1000000,100,100000000}; int beta=atoi(argv[4]);//SampleBeta function is designed for Beta=3 only. So we need to update the SampleBetafunction in radix select if we want to change Beta value // H_ERR(cudaSetDevice(1)); data_t* vec= (data_t*)malloc(sizeof(data_t)*num_element);//new data_t[num_element]; data_t* vec1= (data_t*)malloc(sizeof(data_t)*num_element);//new data_t[num_element]; std::random_device rd; std::mt19937 gen(rd()); unsigned int value; int over; int minvalue=2147483643; bool test=false; index_t alpha=0.5*(num_pow-log(k)/log(2)+3); bool defaultContribution=true; if (alpha <=5) defaultContribution=false; index_t SubRangesize=pow(2,alpha); // for (int dis=3;dis<4;dis++) { // std::uniform_int_distribution <unsigned int> d(0, 2147483643); int minvalue=2147483643; // std::uniform_int_distribution <unsigned int> d_Lower(0, 100000000);//(0 to 100 million) std::uniform_int_distribution <unsigned int> d(0, 4294967295); // for (int dis=3;dis<4;dis++) // { for (index_t i=0;i<num_element;i++) { // vec[i]=rand()%2147483648;//2^31 -1 value=d(gen); if (minvalue > value) { minvalue=value; } // if (value > 2147483650) test=true; if (value > 4294967295) { cout<<"Overflow of unsigned int detected"<<endl; return -1; } vec[i]=value; vec1[i]=vec[i]; } if (minvalue < 0) { cout<<"-ve value detected:"<<minvalue<<endl; return -1; } cout<<"Minimum value:"<<minvalue<<endl; if (test) cout<<"Data generated Ok"<<endl; else cout<<"Data generated not Ok"<<endl; // sort(vec, vec + num_element); // for (int Kiteration=atol(argv[2]);Kiteration<536870912;Kiteration=Kiteration*2) { // k=Kiteration; // index_t alpha=atol(argv[4]); // int beta=3;//SampleBeta function is designed for Beta=3 only. So we need to update the SampleBetafunction in radix select if we want to change Beta value index_t num_bucket=1<<NBits; int CurrentDigit=(sizeof(data_t)*8/NBits)-1; index_t NSubranges=num_element/SubRangesize; int NthreadstoworkInreduction=32; if (SubRangesize<32) { NthreadstoworkInreduction=SubRangesize; } cout<<"Number of Subranges:"<<NSubranges<<endl; if (NSubranges<k) { cout<<"Small number of subranges!. 
Decrease the value of alpha!"<<endl; // exit(-1); } if ((!IsPowerof2<data_t,index_t>(NBits)) || (NBits > sizeof(data_t)*8)) { cout<<"Enter correct number of bits per digit"<<endl; return -1; } // data_t* vec= (data_t*)malloc(sizeof(data_t)*num_element);//new data_t[num_element]; // data_t* vec1= (data_t*)malloc(sizeof(data_t)*num_element);//new data_t[num_element]; // std::random_device rd; // std::mt19937 gen(rd()); // float value; // float minvalue=10000000; // for (index_t i=0;i<num_element;i++) // { // std::normal_distribution<float> d(10000000, sd[d]);//Mean =100 mill , sd=100 // // vec[i]=rand()%2147483648;//2^31 -1 // value=d(gen); // if (minvalue > value) // { // minvalue=value; // } // if (value > 4294967295) // { // cout<<"Overflow of unsigned int detected"<<endl; // } // vec[i]=value; // vec1[i]=vec[i]; // } // cout<<endl; // if (minvalue < 0) // { // cout<<"-ve value detected"<<endl; // } cout<<"Starting TopK with Npow:"<<num_pow<<" K:"<<k<<" alpha:"<<alpha<<"DistributionU(0,2^31-1)"<<endl; std::fstream statusLog; // timeLog.open("timeRadixSampleOCT11_N_K_alphaVaried.csv",std::fstream::out | std::fstream::app); cout<<vec[0]; cout<<endl; data_t* TopArray=new data_t[k]; data_t TopKElement=0; data_t* vec_d; H_ERR(cudaMalloc((void**) &vec_d,sizeof(data_t)*num_element)); H_ERR(cudaMemcpy(vec_d,vec,sizeof(data_t)*num_element,cudaMemcpyHostToDevice)); // raelse dix_select_inplace<data_t,index_t>(vec_d,num_element,k,num_bucket,TopKElement,NBits,CurrentDigit,0); double timeforMaxsample=0;double timeforFirstTopk=0;double timeforSecondTopk=0;double timeforNormalRadixSelect=0;double timeforConcatenation=0; data_t* Max_d; H_ERR(cudaMalloc((void**) &Max_d,sizeof(data_t)*NSubranges*beta));// updated for Beta index_t* SubrangeId_d; H_ERR(cudaMalloc((void**) &SubrangeId_d,sizeof(index_t)*NSubranges*beta));//updated for beta int NThreadsPerBlock=256;//only shared memory // int NThreadsPerBlock=1024;//Shared memory with subwarp int SizeOfSubWarp=8; int pow_size_Subwarp=3; // int NSharedMemoryElements=NThreadsPerBlock<<alpha;//only shared Memory int NSharedMemoryElements=NThreadsPerBlock<<5;//3 is giving best result in different values of SubWarp size //Each thread responsible for 32 elements and contribute to 8 Subranges from a group of 4 elements int SizeOfAllocation=NSharedMemoryElements+(NSharedMemoryElements >> 5); //sampleMax_multirange<data_t,index_t><<<4096,512>>>(A_d,Max_d,N,NSubranges,SubRangesize,alpha,SubrangeId_d,Nthreadstowork, NSubrangesperWarp, SubWarpSize,NThreadsPerSubRange); int NumberOfSpace_WithPadding=NSharedMemoryElements+(NSharedMemoryElements >>5); int NSubRangesPerBlock=NSharedMemoryElements/SizeOfSubWarp;//can be in CPU int NSubWarps_InBlock=NThreadsPerBlock >> pow_size_Subwarp;// Can be in CPU //Note NTotalVirtualSubWarpsInBlock=NSubrangesDealtBy1Block as 1 subwarp is responsible for 1 Subrange int NElementsPerBlock_ReadFromGlobal=NSubRangesPerBlock*SubRangesize;//1 Subwarp works for 1 subrange --> Can be in CPU int TotalBlocksrequired=num_element/NElementsPerBlock_ReadFromGlobal; if (TotalBlocksrequired<1) { cout<<"reduce blockDim or sizeofSubrange(alpha), for the kernel to work"<<endl; exit(-1); } cout<<"Size of shared memory per block:"<<SizeOfAllocation*sizeof(data_t)/1024.0 <<"KB"<<endl; // statusLog.open("Status_alpha_0_3_4_5_TotalSOK_Radix.csv",std::fstream::out | std::fstream::app); statusLog.open("StatusFile.csv",std::fstream::out | std::fstream::app); statusLog<<endl<<endl<<"Started Radix select with:2^"<<num_pow<<" elements "<<k<<" as Kth element and 
"<<alpha<<"as alpha!."<<"Distribution:U(0,2^31-1)"<<endl; index_t* SelectedSubrangeId_d; H_ERR(cudaMalloc((void**) &SelectedSubrangeId_d,sizeof(index_t)*k*beta));//updated *3 for beta index_t* CountSelectedSubrange_d; index_t* CountLonelyElements_d; H_ERR(cudaMalloc((void**) &CountSelectedSubrange_d,sizeof(index_t))); H_ERR(cudaMalloc((void**) &CountLonelyElements_d,sizeof(index_t))); H_ERR(cudaMemset(CountSelectedSubrange_d, 0, sizeof(index_t))); H_ERR(cudaMemset(CountLonelyElements_d, 0, sizeof(index_t))); data_t* ConcatenatedRange_d; index_t* write_pos_d; H_ERR(cudaMalloc((void**) &ConcatenatedRange_d,sizeof(data_t)*k*SubRangesize)); H_ERR(cudaMalloc((void**) &write_pos_d,sizeof(index_t))); double start=wtime(); if (alpha==0) { timeforNormalRadixSelect=wtime(); radix_select<data_t,index_t>(vec_d,num_element,k,num_bucket,TopKElement,NBits,CurrentDigit); timeforNormalRadixSelect=wtime()-timeforNormalRadixSelect; } else// if(NSubranges > k) { sample_radix_select<data_t,index_t>(vec_d,num_element,k,num_bucket,TopKElement,NBits,CurrentDigit,NSubranges,SubRangesize,alpha,timeforMaxsample,timeforFirstTopk,timeforSecondTopk,timeforConcatenation,Max_d,SubrangeId_d,NSharedMemoryElements,SizeOfSubWarp,pow_size_Subwarp,NSubWarps_InBlock,NSubRangesPerBlock,NElementsPerBlock_ReadFromGlobal,TotalBlocksrequired,SizeOfAllocation,NThreadsPerBlock,beta,defaultContribution,NthreadstoworkInreduction,SelectedSubrangeId_d,CountLonelyElements_d,write_pos_d,ConcatenatedRange_d,CountSelectedSubrange_d); } // else // { // timeforNormalRadixSelect=wtime(); // radix_select<data_t,index_t>(vec_d,num_element,k,num_bucket,TopKElement,NBits,CurrentDigit); // timeforNormalRadixSelect=wtime()-timeforNormalRadixSelect; // } double totalTime=wtime()-start; cout<<"The kth element from top is:"<<TopKElement<<endl; #ifdef Enabletest sort(vec1, vec1 + num_element); cout<<endl; if (vec1[num_element-k]==TopKElement) { cout<<"Success!"<<endl; } else { cout<<"Not Success!"<<endl; } cout<<"Required value"<<vec1[num_element-k]<<endl; assert(vec1[num_element-k]==TopKElement); #endif statusLog<<"Successfully Finished Radix select with:2^"<<num_pow<<" elements "<<k<<" as Kth element and "<<alpha<<"as alpha!."<<endl; statusLog.close(); cout<<"Sampling Time:"<<timeforMaxsample*1000<<" ms"<<endl; cout<<"Time for First TopK:"<<timeforFirstTopk*1000<<" ms"<<endl; cout<<"Time for Concatenation:"<<timeforConcatenation*1000<<" ms"<<endl; cout<<"Time for Second TopK:"<<timeforSecondTopk*1000<<" ms"<<endl; cout<<"Time for Normal Radix Select:"<<timeforNormalRadixSelect*1000<<" ms"<<endl; cout<<"Total Time:"<<totalTime*1000<<" ms"<<endl; std::fstream timeLog; // timeLog.open("N_29UniformDistributedAutoTuneAdaptive22Feb_TitanSorted_Unsorted.csv",std::fstream::out | std::fstream::app); // timeLog.open("N_29TestingFor2^32.csv",std::fstream::out | std::fstream::app); timeLog.open("Skipped3Digits_Radix.csv",std::fstream::out | std::fstream::app); if (defaultContribution) { timeLog<<"D"<<";"; } else { timeLog<<"B"<<";"; } timeLog<<" "<<num_pow<<";"<<k<<";"<<alpha<<";"<<beta<<";"<<timeforMaxsample*1000<<";"<<timeforFirstTopk*1000<<";"<<timeforConcatenation*1000<<";"<<timeforSecondTopk*1000<<";"<<timeforNormalRadixSelect*1000<<";"<<totalTime*1000<<endl; timeLog.close(); // H_ERR(cudaFree(vec_d));H_ERR(cudaFree(Max_d));H_ERR(cudaFree(SubrangeId_d)); } } // free(vec);free(vec1); return 0; }
5b8dfb1bc28497c26a95d055c0c166e2092c8140.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // BUG IN SRAD APPLICATIONS SEEMS TO BE SOMEWHERE IN THIS CODE, WRONG MEMORY ACCESS // srad kernel __global__ void srad( fp d_lambda, int d_Nr, int d_Nc, long d_Ne, int *d_iN, int *d_iS, int *d_jE, int *d_jW, fp *d_dN, fp *d_dS, fp *d_dE, fp *d_dW, fp d_q0sqr, fp *d_c, fp *d_I){ // indexes int bx = blockIdx.x; // get current horizontal block index int tx = threadIdx.x; // get current horizontal thread index int ei = bx*NUMBER_THREADS+tx; // more threads than actual elements !!! int row; // column, x position int col; // row, y position // variables fp d_Jc; fp d_dN_loc, d_dS_loc, d_dW_loc, d_dE_loc; fp d_c_loc; fp d_G2,d_L,d_num,d_den,d_qsqr; // figure out row/col location in new matrix row = (ei+1) % d_Nr - 1; // (0-n) row col = (ei+1) / d_Nr + 1 - 1; // (0-n) column if((ei+1) % d_Nr == 0){ row = d_Nr - 1; col = col - 1; } if(ei<d_Ne){ // make sure that only threads matching jobs run // directional derivatives, ICOV, diffusion coefficent d_Jc = d_I[ei]; // get value of the current element // directional derivates (every element of IMAGE)(try to copy to shared memory or temp files) d_dN_loc = sub(d_I[d_iN[row] + d_Nr*col],d_Jc); // north direction derivative d_dS_loc = sub(d_I[d_iS[row] + d_Nr*col],d_Jc); // south direction derivative d_dW_loc = sub(d_I[row + d_Nr*d_jW[col]],d_Jc); // west direction derivative d_dE_loc = sub(d_I[row + d_Nr*d_jE[col]],d_Jc); // east direction derivative // normalized discrete gradient mag squared (equ 52,53) d_G2 = AB_DIV(sum(sum(mul(d_dN_loc,d_dN_loc),mul(d_dS_loc,d_dS_loc)), sum(mul(d_dW_loc,d_dW_loc),mul(d_dE_loc,d_dE_loc))), mul(d_Jc,d_Jc)); // gradient (based on derivatives) // normalized discrete laplacian (equ 54) d_L = AB_DIV(sum(sum(d_dN_loc,d_dS_loc),sum(d_dW_loc,d_dE_loc)), d_Jc); // laplacian (based on derivatives) // ICOV (equ 31/35) d_num = sub(mul(0.5,d_G2),mul(X_DIV(16.0),mul(d_L,d_L))) ; // num (based on gradient and laplacian) d_den = 1 + mul(0.25,d_L); // den (based on laplacian) d_qsqr = AB_DIV(d_num,mul(d_den,d_den)); // qsqr (based on num and den) // diffusion coefficent (equ 33) (every element of IMAGE) d_den = AB_DIV(sub(d_qsqr,d_q0sqr), mul(d_q0sqr,(1+d_q0sqr))) ; // den (based on qsqr and q0sqr) d_c_loc = X_DIV(sum(1.0,d_den)) ; // diffusion coefficient (based on den) // saturate diffusion coefficent to 0-1 range if (d_c_loc < 0){ // if diffusion coefficient < 0 d_c_loc = 0; // ... set to 0 } else if (d_c_loc > 1){ // if diffusion coefficient > 1 d_c_loc = 1; // ... set to 1 } // save data to global memory d_dN[ei] = d_dN_loc; d_dS[ei] = d_dS_loc; d_dW[ei] = d_dW_loc; d_dE[ei] = d_dE_loc; d_c[ei] = d_c_loc; } }
5b8dfb1bc28497c26a95d055c0c166e2092c8140.cu
// BUG IN SRAD APPLICATIONS SEEMS TO BE SOMEWHERE IN THIS CODE, WRONG MEMORY ACCESS

// srad kernel
__global__ void srad(fp d_lambda,
                     int d_Nr,
                     int d_Nc,
                     long d_Ne,
                     int *d_iN,
                     int *d_iS,
                     int *d_jE,
                     int *d_jW,
                     fp *d_dN,
                     fp *d_dS,
                     fp *d_dE,
                     fp *d_dW,
                     fp d_q0sqr,
                     fp *d_c,
                     fp *d_I){

    // indexes
    int bx = blockIdx.x;                // get current horizontal block index
    int tx = threadIdx.x;               // get current horizontal thread index
    int ei = bx*NUMBER_THREADS+tx;      // more threads than actual elements !!!
    int row;                            // column, x position
    int col;                            // row, y position

    // variables
    fp d_Jc;
    fp d_dN_loc, d_dS_loc, d_dW_loc, d_dE_loc;
    fp d_c_loc;
    fp d_G2,d_L,d_num,d_den,d_qsqr;

    // figure out row/col location in new matrix
    row = (ei+1) % d_Nr - 1;            // (0-n) row
    col = (ei+1) / d_Nr + 1 - 1;        // (0-n) column
    if((ei+1) % d_Nr == 0){
        row = d_Nr - 1;
        col = col - 1;
    }

    if(ei<d_Ne){                        // make sure that only threads matching jobs run

        // directional derivatives, ICOV, diffusion coefficent
        d_Jc = d_I[ei];                 // get value of the current element

        // directional derivates (every element of IMAGE)(try to copy to shared memory or temp files)
        d_dN_loc = sub(d_I[d_iN[row] + d_Nr*col],d_Jc);     // north direction derivative
        d_dS_loc = sub(d_I[d_iS[row] + d_Nr*col],d_Jc);     // south direction derivative
        d_dW_loc = sub(d_I[row + d_Nr*d_jW[col]],d_Jc);     // west direction derivative
        d_dE_loc = sub(d_I[row + d_Nr*d_jE[col]],d_Jc);     // east direction derivative

        // normalized discrete gradient mag squared (equ 52,53)
        d_G2 = AB_DIV(sum(sum(mul(d_dN_loc,d_dN_loc),mul(d_dS_loc,d_dS_loc)),
                          sum(mul(d_dW_loc,d_dW_loc),mul(d_dE_loc,d_dE_loc))),
                      mul(d_Jc,d_Jc));  // gradient (based on derivatives)

        // normalized discrete laplacian (equ 54)
        d_L = AB_DIV(sum(sum(d_dN_loc,d_dS_loc),sum(d_dW_loc,d_dE_loc)), d_Jc); // laplacian (based on derivatives)

        // ICOV (equ 31/35)
        d_num  = sub(mul(0.5,d_G2),mul(X_DIV(16.0),mul(d_L,d_L))) ;  // num (based on gradient and laplacian)
        d_den  = 1 + mul(0.25,d_L);                                  // den (based on laplacian)
        d_qsqr = AB_DIV(d_num,mul(d_den,d_den));                     // qsqr (based on num and den)

        // diffusion coefficent (equ 33) (every element of IMAGE)
        d_den = AB_DIV(sub(d_qsqr,d_q0sqr), mul(d_q0sqr,(1+d_q0sqr))) ; // den (based on qsqr and q0sqr)
        d_c_loc = X_DIV(sum(1.0,d_den)) ;                               // diffusion coefficient (based on den)

        // saturate diffusion coefficent to 0-1 range
        if (d_c_loc < 0){               // if diffusion coefficient < 0
            d_c_loc = 0;                // ... set to 0
        }
        else if (d_c_loc > 1){          // if diffusion coefficient > 1
            d_c_loc = 1;                // ... set to 1
        }

        // save data to global memory
        d_dN[ei] = d_dN_loc;
        d_dS[ei] = d_dS_loc;
        d_dW[ei] = d_dW_loc;
        d_dE[ei] = d_dE_loc;
        d_c[ei] = d_c_loc;

    }

}
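Since the file's header comment suspects a wrong memory access, the index recovery is the natural thing to check first. The host-side sketch below is not from the Rodinia sources and the image size is made up; it verifies that the kernel's row/col arithmetic inverts the column-major linear index ei = row + d_Nr*col used by the d_I loads.

// Host-side check of the row/col recovery in srad() above.
#include <cassert>
#include <cstdio>

int main() {
  const int d_Nr = 5, d_Nc = 7;                    // hypothetical image size
  for (int ei = 0; ei < d_Nr * d_Nc; ++ei) {
    int row = (ei + 1) % d_Nr - 1;                 // same arithmetic as the kernel
    int col = (ei + 1) / d_Nr + 1 - 1;
    if ((ei + 1) % d_Nr == 0) { row = d_Nr - 1; col = col - 1; }
    assert(ei == row + d_Nr * col);                // round-trips for every element
  }
  std::printf("row/col decode matches column-major indexing for %d elements\n",
              d_Nr * d_Nc);
  return 0;
}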
f15de506ed94c29b467c7b3b07b7021e995e5a58.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <armadillo> #include <vector> #include <iomanip> #include<assert.h> #include <cusolverDn.h> using namespace std; using namespace arma; void EigenSolve(int m, double *d_A, double *d_W) { hipError_t cudaStat1, cudaStat3; cusolverStatus_t cusolver_status; hipsolverDnHandle_t cusolverH = NULL; cusolver_status = CUSOLVER_STATUS_SUCCESS; int lwork = 0, info_gpu = 0, *devInfo = NULL; double *d_work = NULL; cudaStat3 = hipMalloc ((void**)&devInfo, sizeof(int)); assert(hipSuccess == cudaStat3); cusolver_status = hipsolverDnCreate(&cusolverH); assert(CUSOLVER_STATUS_SUCCESS == cusolver_status); int lda=m; hipsolverEigMode_t jobz = HIPSOLVER_EIG_MODE_VECTOR; // compute eigenvalues and eigenvectors. hipblasFillMode_t uplo = HIPBLAS_FILL_MODE_LOWER; cusolver_status = hipsolverDnDsyevd_bufferSize( cusolverH, jobz, uplo, m, d_A, lda, d_W, &lwork); assert (cusolver_status == CUSOLVER_STATUS_SUCCESS); cudaStat1 = hipMalloc((void**)&d_work, sizeof(double)*lwork); assert(hipSuccess == cudaStat1); cusolver_status = hipsolverDnDsyevd( cusolverH, jobz, uplo, m, d_A, lda, d_W, d_work, lwork, devInfo); cudaStat1 = hipDeviceSynchronize(); assert(CUSOLVER_STATUS_SUCCESS == cusolver_status); assert(hipSuccess == cudaStat1); cudaStat3 = hipMemcpy(&info_gpu, devInfo, sizeof(int), hipMemcpyDeviceToHost); assert(0 == info_gpu); if (devInfo ) hipFree(devInfo); if (d_work ) hipFree(d_work ); if (cusolverH) hipsolverDnDestroy(cusolverH); } void transpose(int height, int width, double *A, double *B) { double const alpha(1.0); double const beta(0.0); hipblasHandle_t handle; hipblasCreate(&handle); hipblasDgeam(handle, HIPBLAS_OP_T, HIPBLAS_OP_N, height, width, &alpha, (const double*)A, width , &beta , (const double*)A, height, B, height); hipblasDestroy(handle); } __global__ void build_tridiag_ker(int m, double *alpha, double *beta, double *M) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; M[col*m+row] = (row==col)? (alpha[col]):( (col==row+1 || col==row-1)? (beta[min(col,row)]):0 ); } void build_tridiag(int m, double *alpha, double *beta, double *M) { dim3 dimBlock(m, m); dim3 dimGrid(1, 1); hipLaunchKernelGGL(( build_tridiag_ker), dim3(dimGrid),dim3(dimBlock), 0, 0, m, alpha, beta, M); hipDeviceSynchronize(); } double norm2(int len, double *A) { hipblasStatus_t stat; hipblasHandle_t manija; stat=hipblasCreate(&manija); double result; stat = hipblasDnrm2(manija, len, A, 1, &result); if (stat != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! norm2 error\n"); exit(1); } return result; } void Dgemm(int N, int K, int M, double *alpha, double *beta, double *A, double *B, double *C) { hipblasStatus_t stat; hipblasHandle_t manija; stat=hipblasCreate(&manija); stat = hipblasDgemm(manija,HIPBLAS_OP_N,HIPBLAS_OP_N, N,M,K, alpha, (const double*)A,N, (const double*)B,K, beta, C,N); if (stat != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! Dgemm error\n"); exit(1); } } void Dgemm(int N, int K, int M, double alpha, double beta, double *A, double *B, double *C) { double al=alpha, bet=beta; Dgemm(N, K, M, &al, &bet, A, B, C); } void Dscal(int len, double alpha, double *v) { hipblasStatus_t stat; hipblasHandle_t manija; stat=hipblasCreate(&manija); const double al=alpha; stat = hipblasDscal(manija, len, &al, v, 1); if (stat != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! 
Daxpy error\n"); exit(1); } } void apply(int end, int m1, int m2, int m_op, int d, double *s1, double *s_out, double *s_op, double *left, double *right) { double *aux0, *aux1; hipMalloc((void**)&aux0, m1*d*m2*m_op*sizeof(double) ); hipMalloc((void**)&aux1, m1*d*m2*m_op*sizeof(double) ); int m1L, m1R, m2L, m2R, wL, wR; if(end==-1) {m1L=1; wL=1; m2L=1;} else {m1L=m1; wL=m_op; m2L=m2;} if(end== 1) {m1R=1; wR=1; m2R=1;} else {m1R=m1; wR=m_op; m2R=m2;} Dgemm(m2L* wL, m1L, d*m1R, 1,0, left, s1, aux1); transpose(m1R, m2L* wL*d, aux1, aux0); Dgemm(m1R*m2L, wL*d, d*wR, 1,0, aux0, s_op, aux1); transpose(m2L*d*wR, m1R, aux1, aux0); transpose(wR*m1R, m2R, right, aux1); Dgemm( m2L*d, wR*m1R, m2R, 1,0, aux0, aux1, s_out); hipFree(aux0); hipFree(aux1); } void Daxpy(int len, double alpha, double *A, double *B) { hipblasStatus_t stat; hipblasHandle_t manija; stat=hipblasCreate(&manija); double al = alpha; stat = hipblasDaxpy(manija, len, &al, A, 1, B, 1); if (stat != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! Daxpy error\n"); exit(1); } } double matnorm(mat &m) { int nrows=m.n_rows, ncols=m.n_cols; m.reshape(1,nrows*ncols); mat aux(1,1); aux=m*m.t(); m.reshape(nrows,ncols); return(sqrt(aux(0,0))); } double cubenorm(cube &m) { int nrows=m.n_rows, ncols=m.n_cols, nslices=m.n_slices; m.reshape(1,1,nrows*ncols*nslices); mat aux(1,1); aux=m.tube(0,0); aux=aux*aux.t(); m.reshape(nrows,ncols,nslices); return(aux(0,0)); } void decomp(mat &Q, mat &R, mat &C, int mtrunc=0) { if(mtrunc>C.n_cols){cout<<"cannot truncate to bigger dimension -> QR decomposition assumed instead"<<endl; mtrunc=0;} mat left, right; int dim; if(mtrunc==0) { qr(left, right, C); dim=C.n_cols; } else { dim=mtrunc; vec ss; svd(left, ss, right, C); vec vaux=zeros<vec>(dim); int nrows=ss.n_rows; int copy=min(dim,nrows); vaux.head(copy)=ss.head(copy); mat aux=diagmat(vaux); right=right.head_cols(dim); right=aux*right.t(); } Q=zeros<mat>(C.n_rows,dim); R=zeros<mat>(dim,C.n_cols); int cols2copy=min(Q.n_cols,left.n_cols); int rows2copy=min(R.n_rows,right.n_rows); Q.head_cols(cols2copy)=left.head_cols(cols2copy); R.head_rows(rows2copy)=right.head_rows(rows2copy); } struct MPS { int n, m, d, link; mat s0, sf; vector<cube> C; bool canonizedQ; MPS(int n, int m, int d):n(n), m(m), d(d) { canonizedQ=false; s0=mat(d,m,fill::randn); sf=mat(m,d,fill::randn); C.resize(n-2); for (int i = 0; i < n-2; i++) { C[i]=cube(m,d,m,fill::randn); } } void lgauge(int sitio, int mtrunc=0) { if(sitio==0)return; if(mtrunc>=m) mtrunc=0; int o=(mtrunc!=0) ? 
mtrunc : m; mat Q0(d,o), R(o,m), Q(o*d,o), aux(o,d*m); decomp(Q0,R,s0,mtrunc); s0=Q0; for (int i = 0; i < n-2; i++) { C[i].reshape(m,d*m,1); aux=R*(C[i].slice(0)); if(i+1==sitio){C[i].slice(0)=aux; C[i].reshape(m,d,m); return;} aux.reshape(o*d,m); decomp(Q,R,aux,mtrunc); C[i]=cube(o*d,o,1); C[i].slice(0)=Q; C[i].reshape(o,d,o); } sf=R*sf; } void truncate(int mtrunc) { if(mtrunc>=m) cout<<"cannot truncate to bigger dimension"<<endl; if(!canonizedQ || link!=0) cout<<"MPS must be 0-canonized for being truncated"<<endl; lgauge(n-1,mtrunc); m=mtrunc; canonize(0); } void rgauge(int sitio) { mat Q0(d,m), R(m,m), aux(m,d*m); if(sitio==n-1)return; mat tr=sf.t(); decomp(Q0,R,tr); sf=Q0.t(); mat Q(m*d,m); for (int i = n-3; i >= 0; i--) { C[i].reshape(m*d,m,1); aux=(C[i].slice(0))*R.t(); if(i+1==sitio){C[i].slice(0)=aux; C[i].reshape(m,d,m); return;} aux.reshape(m,d*m); aux=aux.t(); decomp(Q,R,aux); C[i].reshape(m,d*m,1); C[i].slice(0)=Q.t(); C[i].reshape(m,d,m); } s0=s0*R.t(); } void canonize(int sitio) { lgauge(sitio); rgauge(sitio); canonizedQ=true; link=sitio; } void sweep(bool forwardQ) { if(!canonizedQ){cout<<"cannot sweep a non-canonized MPS"<<endl; return;} if((forwardQ && link==n-1) || (!forwardQ && link==0) ){cout<<"cannot sweep out bounds"<<endl; return;} int i=link-1; if(forwardQ){link++;}else{link--;} if(i==-1){lgauge(1); return;} if(i==n-2){rgauge(n-2); return;} if(forwardQ) { mat Q(m*d,m), R(m,m), aux(m*d,m); C[i].reshape(m*d,m,1); aux=C[i].slice(0); decomp(Q,R,aux); C[i].slice(0)=Q; C[i].reshape(m,d,m); if(i==n-3){sf=R*sf; return;} C[i+1].reshape(m,d*m,1); aux=R*(C[i+1].slice(0)); C[i+1].slice(0)=aux; C[i+1].reshape(m,d,m); } else { mat Q(m*d,m), R(m,m), aux(m,d*m); C[i].reshape(m,d*m,1); aux=C[i].slice(0); aux=aux.t(); decomp(Q,R,aux); C[i].slice(0)=Q.t(); C[i].reshape(m,d,m); if(i==0){s0=s0*R.t(); return;} C[i-1].reshape(m*d,m,1); aux=(C[i-1].slice(0))*R.t(); C[i-1].slice(0)=aux; C[i-1].reshape(m,d,m); } } double project(vector<int> state) { if(state.size()!=n)cout<<"cannot project into a different-sized state"<<endl; int index=state[0]; mat v(1,m); v=(s0.row(index)); mat aux(m,m); for (int i = 0; i < n-2; ++i) { index=state[i+1]; for (int j = 0; j < m; j++) { for (int k = 0; k < m; k++) { aux(j,k)=C[i](j,index,k); } } v=v*aux; } index=state[n-1]; v=v*(sf.col(index)); return(v(0,0)); } double MPSnorm() { if(!canonizedQ){cout<<"cannot calculate the norm of a non-canonized MPS"<<endl; return(0);} if(link==0)return(matnorm(s0)); if(link==n-1)return(matnorm(sf)); int index=link-1; return(cubenorm(C[index]) ); } void normalize() { if(!canonizedQ){cout<<"cannot normalize a non-canonized MPS"<<endl; return;} double fact=MPSnorm(); if(link==0) {s0/=fact; return;} if(link==n-1) {sf/=fact; return;} C[link-1]/=fact; } }; double firstoverlap(MPS &v1, MPS &v2) { if(!v1.canonizedQ || !v2.canonizedQ){cout<<"cannot overlap a non-canonized MPS"<<endl; return 0;} if(v1.link!=v2.link || v1.n!=v2.n || v1.m!=v2.m || v1.d!=v2.d){cout<<"cannot calculate overlap between different-shaped MPSs"<<endl; return 0;} int link=v1.link, n=v1.n, m=v1.m, d=v1.d; mat Q(m,m), R(m,m), aux(m,m); double sum; Q=(v1.s0).t()*(v2.s0); for(int site=0; site<link-1; ++site) { for(int i=0; i<m; ++i) for(int j=0; j<m; ++j) { sum=0; for(int k=0; k<m; ++k) for(int l=0; l<m; ++l) for(int o=0; o<d; ++o) { sum+=Q(k,l)*v1.C[site](k,o,i)*v2.C[site](l,o,j); } aux(i,j)=sum; } Q=aux; } R=(v1.sf)*(v2.sf).t(); for(int site=n-3; site>link-1; --site) { for(int i=0; i<m; ++i) for(int j=0; j<m; ++j) { sum=0; for(int k=0; k<m; ++k) for(int l=0; 
l<m; ++l) for(int o=0; o<d; ++o) { sum+=R(k,l)*v1.C[site](i,o,k)*v2.C[site](j,o,l); } aux(i,j)=sum; } R=aux; } sum=0; for(int k=0; k<m; ++k) for(int l=0; l<m; ++l) for(int kk=0; kk<m; ++kk) for(int ll=0; ll<m; ++ll) for(int o=0; o<d; ++o) { sum+= v1.C[link-1](k,o,kk)*v2.C[link-1](l,o,ll)*Q(k,l)*R(kk,ll); } return(sum); } double firstsandwich(MPS &v1, MPS &v2, MPS &op) { if( !v1.canonizedQ || !v2.canonizedQ || !op.canonizedQ ){cout<<"cannot sandwich non-canonized MPS or MPO"<<endl; return 0;} if( (v1.link!=op.link || v2.link!=op.link) || (v1.n!=op.n || v2.n!=op.n) || (v1.m!=op.m || v2.m!=op.m) || (v1.d!=sqrt(op.d) || v2.d!=sqrt(op.d)) ){cout<<"cannot sandwich different-shaped MPS or MPO"<<endl; return 0;} int link=v1.link, n=v1.n, m=v1.m, d=v1.d; cube Q(m,m,m), R(m,m,m), aux(m,m,m); double sum; for(int i=0; i<m; ++i) for(int j=0; j<m; ++j) for(int k=0; k<m; ++k) { sum=0; for(int o=0; o<d; ++o) for(int p=0; p<d; ++p) { sum+=v1.s0(o,i)*op.s0(d*o+p,j)*v2.s0(p,k); } aux(i,j,k)=sum; } Q=aux; for(int site=0; site<link-1; ++site) { for(int i=0; i<m; ++i) for(int j=0; j<m; ++j) for(int k=0; k<m; ++k) { sum=0; for(int ii=0; ii<m; ++ii) for(int jj=0; jj<m; ++jj) for(int kk=0; kk<m; ++kk) for(int o=0; o<d; ++o) for(int p=0; p<d; ++p) { sum+=Q(ii,jj,kk)*v1.C[site](ii,o,i)*op.C[site](jj,d*o+p,j)*v2.C[site](kk,p,k); } aux(i,j,k)=sum; } Q=aux; } for(int i=0; i<m; ++i) for(int j=0; j<m; ++j) for(int k=0; k<m; ++k) { sum=0; for(int o=0; o<d; ++o) for(int p=0; p<d; ++p) { sum+=v1.sf(i,o)*op.sf(j,d*o+p)*v2.sf(k,p); } aux(i,j,k)=sum; } R=aux; for(int site=n-3; site>link-1; --site) { for(int i=0; i<m; ++i) for(int j=0; j<m; ++j) for(int k=0; k<m; ++k) { sum=0; for(int ii=0; ii<m; ++ii) for(int jj=0; jj<m; ++jj) for(int kk=0; kk<m; ++kk) for(int o=0; o<d; ++o) for(int p=0; p<d; ++p) { sum+=R(ii,jj,kk)*v1.C[site](i,o,ii)*op.C[site](j,d*o+p,jj)*v2.C[site](k,p,kk); } aux(i,j,k)=sum; } R=aux; } sum=0; for(int i=0; i<m; ++i) for(int j=0; j<m; ++j) for(int k=0; k<m; ++k) for(int ii=0; ii<m; ++ii) for(int jj=0; jj<m; ++jj) for(int kk=0; kk<m; ++kk) for(int o=0; o<d; ++o) for(int p=0; p<d; ++p) { sum+= Q(i,j,k)*v1.C[link-1](i,o,ii)*op.C[link-1](j,d*o+p,jj)*v2.C[link-1](k,p,kk)*R(ii,jj,kk); } return(sum); } MPS I_op(int n, int d) { MPS O(n,1,d*d); O.canonizedQ=true; mat aux(d,d,fill::eye); aux.reshape(d*d,1); O.s0=aux; aux.reshape(1,d*d); O.sf=aux; O.C.resize(n-2); cube cubo(1,d*d,1); cubo.slice(0)=aux; for (int i = 0; i < n-2; i++) { O.C[i]=cubo; } return(O); } void Apply_Heff(const cube &L, const cube &R, const cube &W, const cube &s, cube &z, int m, int m_W, int d, int mm=0) { if(mm==0) mm=m; cube E(m*mm,m_W,d), A(m*m,m_W,d); double sum; /* for(int ii=0; ii<m; ii++) for(int k=0; k<mm; k++) for(int jj=0; jj<m_W; jj++) for(int p=0; p<d; p++) { sum=0; for(int kk=0; kk<mm; kk++) sum+=R(ii,jj,kk)*s(k,p,kk); A(mm*ii+k,jj,p)=sum; } */ const mat Rm=mat((double *)R.memptr(),m*m_W,m,false); const mat Sm=mat((double *)s.memptr(),m*d,m,false); mat Am=Rm*Sm.t(); for(int ii=0; ii<m; ii++) for(int k=0; k<mm; k++) for(int jj=0; jj<m_W; jj++) for(int p=0; p<d; p++) { A(mm*ii+k,jj,p)=Am(ii+m*jj,k+p*m); } for(int ii=0; ii<m; ii++) for(int k=0; k<mm; k++) for(int j=0; j<m_W; j++) for(int o=0; o<d; o++) { sum=0; for(int jj=0; jj<m_W; jj++) for(int p=0; p<d; p++) sum+=A(mm*ii+k,jj,p)*W(j,d*o+p,jj); E(mm*ii+k,j,o)=sum; } for(int ii=0; ii<m; ii++) for(int i=0; i<m; i++)for(int o=0; o<d; o++) { sum=0; for(int k=0; k<m; k++) for(int j=0; j<m_W; j++) sum+=E(mm*ii+k,j,o)*L(i,j,k); z(i,o,ii)=sum; } } void Apply_Heff_ext(cube &supercube, mat &W, 
mat &s, mat &z, int m, int m_W, int d, bool leftQ) { cube supercube_ext(m,d*d,m); double sum; for (int i=0; i<m; ++i) for (int k=0; k<m; ++k) for (int o=0; o<d; ++o) for (int p=0; p<d; ++p) { sum=0; for (int j=0; j<m_W; ++j) sum+=(leftQ ? W(d*o+p,j) : W(j,d*o+p) )*supercube(i,j,k); supercube_ext(i,d*o+p,k)=sum; } for (int i=0; i<m; ++i) for (int o=0; o<d; ++o) { sum=0; for (int k=0; k<m; ++k) for (int p=0; p<d; ++p) sum+= supercube_ext(i,d*o+p,k)*( leftQ ? s(p,k) : s(k,p) ); if(leftQ) z(o,i)=sum; else z(i,o)=sum; } } void contract_0(cube &supercube, mat &s1, mat &s2, mat &W, int m, int m_W, int d, bool leftQ, int mm=0) { if(mm==0) mm=m; double sum; for(int i=0; i<m; ++i) for(int j=0; j<m_W; ++j) for(int k=0; k<mm; ++k) { sum=0; for(int o=0; o<d; ++o) for(int p=0; p<d; ++p) sum+= (leftQ ? s1(o,i) : s1(i,o)) *(leftQ ? W(d*o+p,j) : W(j,d*o+p))*(leftQ ? s2(p,k) : s2(k,p)); supercube(i,j,k)=sum; } } void contract_step(cube &supercube, cube &newsupercube, cube &s1, cube &s2, cube &W, int m, int m_W, int d, bool leftQ, int mm=0) { if(mm==0) mm=m; cube auxL(m*m_W,d,mm), auxC(m*mm,d*d,m_W); double sum; for(int i=0; i<m; ++i) for(int jj=0; jj<m_W; ++jj) for(int kk=0; kk<mm; ++kk) for(int o=0; o<d; ++o) { sum=0; for(int ii=0; ii<m; ++ii) sum+=supercube(ii,jj,kk)*(leftQ ? s1(ii,o,i) : s1(i,o,ii) ); auxL(m_W*i+jj,o,kk)=sum; } for(int i=0; i<m; ++i) for(int jj=0; jj<m_W; ++jj) for(int k=0; k<mm; ++k) for(int o=0; o<d; ++o) for(int p=0; p<d; ++p) { sum=0; for(int kk=0; kk<mm; ++kk) sum+= auxL(m_W*i+jj,o,kk)*(leftQ ? s2(kk,p,k) : s2(k,p,kk) ); auxC(mm*i+k,d*o+p,jj)=sum; } for(int i=0; i<m; ++i) for(int j=0; j<m_W; ++j) for(int k=0; k<mm; ++k) { sum=0; for(int jj=0; jj<m_W; ++jj) for(int o=0; o<d; ++o) for(int p=0; p<d; ++p) sum+= auxC(mm*i+k,d*o+p,jj)*(leftQ ? W(jj,d*o+p,j) : W(j,d*o+p,jj) ) ; newsupercube(i,j,k)=sum; } } double contract_site(cube &s1, cube &s2, int m, int d) { double sum=0; for(int i=0; i<m; ++i) for(int ii=0; ii<m; ++ii) for(int o=0; o<d; ++o) sum+= s1(i,o,ii)*s2(i,o,ii); return sum; } double sandwich(MPS &v1, MPS &v2, MPS &op) //doesnt work with end-canonization ! 
{ if( (v1.n!=op.n || v2.n!=op.n) || (v1.d!=sqrt(op.d) || v2.d!=sqrt(op.d)) ){cout<<"cannot sandwich different-shaped MPS or MPO"<<endl; return 0;} int link=v1.link, n=v1.n, d=v1.d; cube Q(v1.m,op.m,v2.m), R(v1.m,op.m,v2.m), z(v1.m,d,v1.m); contract_0(Q, v1.s0, v2.s0, op.s0, v1.m, op.m, d, true, v2.m); contract_0(R, v1.sf, v2.sf, op.sf, v1.m, op.m, d, false, v2.m); for(int site=0; site<link-1; ++site) contract_step(Q, Q, v1.C[site], v2.C[site], op.C[site], v1.m, op.m, d, true, v2.m); for(int site=n-3; site>link-1; --site) contract_step(R, R, v1.C[site], v2.C[site], op.C[site], v1.m, op.m, d, false, v2.m); Apply_Heff(Q, R, op.C[link-1], v2.C[link-1], z, v1.m, op.m, d) ; return contract_site(v1.C[link-1], z, v1.m, d); } double overlap(MPS &v1, MPS &v2) { if(!v1.canonizedQ || !v2.canonizedQ){cout<<"cannot overlap a non-canonized MPS"<<endl; return 0;} if(v1.link!=v2.link || v1.n!=v2.n || v1.m!=v2.m || v1.d!=v2.d){cout<<"cannot calculate overlap between different-shaped MPSs"<<endl; return 0;} int link=v1.link, n=v1.n, m=v1.m, d=v1.d; mat Q(m,m), R(m,m); cube aux(m,d,m), aux2(m,d,m); double sum; Q=(v1.s0).t()*(v2.s0); for(int site=0; site<link-1; ++site) { for(int l=0; l<m; ++l) for(int o=0; o<d; ++o) for(int i=0; i<m; ++i) { sum=0; for(int k=0; k<m; ++k) sum+=Q(k,l)*v1.C[site](k,o,i); aux(l,o,i)=sum; } for(int i=0; i<m; ++i) for(int j=0; j<m; ++j) { sum=0; for(int l=0; l<m; ++l) for(int o=0; o<d; ++o) sum+=aux(l,o,i)*v2.C[site](l,o,j); Q(i,j)=sum; } } R=(v1.sf)*(v2.sf).t(); for(int site=n-3; site>link-1; --site) { for(int i=0; i<m; ++i) for(int o=0; o<d; ++o) for(int l=0; l<m; ++l) { sum=0; for(int k=0; k<m; ++k) sum+=R(k,l)*v1.C[site](i,o,k); aux(i,o,l)=sum; } for(int i=0; i<m; ++i) for(int j=0; j<m; ++j) { sum=0; for(int l=0; l<m; ++l) for(int o=0; o<d; ++o) sum+=aux(i,o,l)*v2.C[site](j,o,l); R(i,j)=sum; } } for(int l=0; l<m; ++l) for(int o=0; o<d; ++o) for(int kk=0; kk<m; ++kk) { sum=0; for(int k=0; k<m; ++k) sum+= v1.C[link-1](k,o,kk)*Q(k,l); aux(l,o,kk)=sum; } for(int l=0; l<m; ++l) for(int o=0; o<d; ++o) for(int ll=0; ll<m; ++ll) { sum=0; for(int kk=0; kk<m; ++kk) sum+= aux(l,o,kk)*R(kk,ll); aux2(l,o,ll)=sum; } sum=0; for(int l=0; l<m; ++l) for(int o=0; o<d; ++o) for(int ll=0; ll<m; ++ll) sum+=aux2(l,o,ll)*v2.C[link-1](l,o,ll); return(sum); } double MatLanczos(mat M, double epsilum, int d_red) { int dim=M.n_cols; mat u(dim,1,fill::randu), evec(d_red,d_red), v(dim, d_red), v_prev, w(dim,1); vec eval(d_red), a(d_red), b(d_red-1); double E; bool first=true; while(true) { v.col(0)=u/matnorm(u); u=M*v.col(0); a(0)=dot(u,v.col(0)); w=u-a[0]*v.col(0); for (int i = 1; i < d_red; ++i) { b(i-1)=matnorm(w); v.col(i)=w/b(i-1); u=M*v.col(i); a(i)=dot(u,v.col(i)); w=u-a(i)*v.col(i)-b(i-1)*v.col(i-1); } mat Mred=diagmat(a)+diagmat(b,1)+diagmat(b,-1); eig_sym(eval,evec,Mred); //cout<<eval(0)<<endl; if(!first && abs(E-eval(0))<epsilum) return(eval(0)); E=eval(0); u=v*evec.col(0); first=false; } } void d_Lanczos(int end, int m, int m_op, int d, double *s_eff, double *h_eff, double *left, double *right) { double epsilum=0.001; int K_dim=3; int max_iter=100; int dim=(end==0)?(m*d*m):(m*d); double *T, *v, *eval, *a, *b; hipMalloc((void **)&T, K_dim*K_dim*sizeof(double) ); hipMalloc((void **)&eval, K_dim*sizeof(double) ); hipMalloc((void **)& v, (K_dim+1)*dim*sizeof(double) ); hipMalloc((void **)& a, K_dim *sizeof(double) ); hipMalloc((void **)& b, K_dim *sizeof(double) ); double E; bool first=true; for(int iter=0; iter<max_iter; iter++) { Dscal(dim, 1/norm2(dim, s_eff), s_eff ); hipMemcpy(v+0, s_eff, 
dim*sizeof(double), hipMemcpyDeviceToDevice); for (int i = 0; i < K_dim; ++i) { apply(end, m, m, m_op, d, v+i*dim, v+(i+1)*dim, h_eff, left, right); Dgemm(1,dim,1, 1,0, v+(i+1)*dim, v+i*dim, a+i); if(i==K_dim-1) break; double h_aux; hipMemcpy(&h_aux, a+i, sizeof(double), hipMemcpyDeviceToHost); Daxpy(dim, -h_aux, v+i*dim, v+(i+1)*dim ); if(i>0) { hipMemcpy(&h_aux, b+(i-1), sizeof(double), hipMemcpyDeviceToHost); Daxpy(dim, -h_aux, v+(i-1)*dim, v+(i+1)*dim ); } double beta = norm2(dim, v+(i+1)*dim); hipMemcpy(b+i, &beta, sizeof(double), hipMemcpyHostToDevice); Dscal(dim, 1/beta, v+(i+1)*dim ); } build_tridiag(K_dim, a, b, T); EigenSolve(K_dim, T, eval); Dgemm(dim, K_dim, 1, 1,0, v, T+0, s_eff); double min_val; hipMemcpy(&min_val, eval+0, sizeof(double), hipMemcpyDeviceToHost); if(!first && abs(E- min_val) < epsilum ) break; E=min_val; first=false; } hipFree(T); hipFree(v); hipFree(eval); hipFree(a); hipFree(b); } void vec_to_array(cube &cubo, double *d_ptr) { int dim=cubo.n_rows; double *h_aux; h_aux = (double *) malloc(dim*sizeof(double) ); for(int i=0; i<dim; i++) { h_aux[i] = cubo(i,0,0); } hipMemcpy(d_ptr, h_aux, dim*sizeof(double), hipMemcpyHostToDevice); } void array_to_vec(cube &cubo, double *d_ptr) { int dim=cubo.n_rows; double *h_aux; h_aux = (double *) malloc(dim*sizeof(double) ); hipMemcpy( h_aux, d_ptr, dim*sizeof(double), hipMemcpyDeviceToHost); for(int i=0; i<dim; i++) { cubo(i,0,0) = h_aux[i] ; } } void Lanczos_gpu(cube &L, cube &R, cube &H, cube &S, bool ext, bool leftQ=false) { //cout<<"GPU_Lanczos"<<"\t"; int end = ext? (leftQ? -1:1) :0; int m =(ext && leftQ)? S.n_cols : S.n_rows, m_op=(ext && leftQ)? H.n_cols : H.n_rows, d =(ext && leftQ)? S.n_rows : S.n_cols; int mL, mR, wL, wR; if(end==-1) {mL=1; wL=1;} else {mL=m; wL=m_op;} if(end== 1) {mR=1; wR=1;} else {mR=m; wR=m_op;} int Sdim = mL*d*mR, Hdim = wL*d*d*wR, Ldim = mL*wL*mL, Rdim = mR*wR*mR; cube auxL, auxR, auxH=H, auxS=S; auxH.reshape(Hdim, 1, 1); auxS.reshape(Sdim, 1, 1); double *d_L, *d_R, *d_H, *d_S; hipMalloc((void **)&d_L, Ldim*sizeof(double) ); hipMalloc((void **)&d_R, Rdim*sizeof(double) ); hipMalloc((void **)&d_H, Hdim*sizeof(double) ); hipMalloc((void **)&d_S, Sdim*sizeof(double) ); double val=1; if(ext && leftQ) hipMemcpy(d_L, &val, sizeof(double), hipMemcpyHostToDevice); else {auxL=L; auxL.reshape(Ldim, 1, 1); vec_to_array(auxL, d_L);} if(ext && !leftQ) hipMemcpy(d_R, &val, sizeof(double), hipMemcpyHostToDevice); else {auxR=R; auxR.reshape(Rdim, 1, 1); vec_to_array(auxR, d_R);} vec_to_array(auxH, d_H); vec_to_array(auxS, d_S); d_Lanczos(end, m, m_op, d, d_S, d_H, d_L, d_R); array_to_vec(auxS, d_S); auxS.reshape(mL, d, mR); S=auxS; } void Lanczos_cpu(cube &L, cube &R, cube &H, cube &S, bool ext, bool leftQ=false) { //cout<<"CPU_Lanczos"<<"\t"; double epsilum=0.001; int d_red=3; int max_iter=1000; int m= (ext && leftQ) ? S.n_cols : S.n_rows, m_H=(ext && leftQ) ? H.n_cols : H.n_rows, d= S.size()/(ext ? m : m*m), dim=ext ? 
m*d : m*d*m; mat evec(d_red,d_red), v(dim, d_red), u(dim,1), w(dim,1); vec eval(d_red), a(d_red), b(d_red-1); mat auxm(m,d ); if(leftQ){auxm.reshape(d,m);} cube auxc(m,d,m); double E; bool first=true; if(ext) auxm=S.slice(0); else auxc=S; if(ext) auxm.reshape(dim,1); else auxc.reshape(dim,1,1); if(ext) u=auxm; else u=auxc.slice(0); for(int iter=0; iter<max_iter; iter++) { u/=matnorm(u); v.col(0)=u; if(ext) auxm=u; else auxc.slice(0)=u; if(ext) {if(leftQ) auxm.reshape(d,m); else auxm.reshape(m,d);} else auxc.reshape(m,d,m); if(ext) Apply_Heff_ext(L, H.slice(0), auxm, auxm, m, m_H, d, leftQ); else Apply_Heff(L, R, H, auxc, auxc, m, m_H, d); if(ext) auxm.reshape(dim,1); else auxc.reshape(dim,1,1); if(ext) u=auxm; else u=auxc.slice(0); a(0)=dot(u,v.col(0)); w=u-a[0]*v.col(0); for (int i = 1; i < d_red; ++i) { b(i-1)=matnorm(w); v.col(i)=w/b(i-1); if(ext) auxm=v.col(i); else auxc.slice(0)=v.col(i); if(ext) {if(leftQ) auxm.reshape(d,m); else auxm.reshape(m,d);} else auxc.reshape(m,d,m); if(ext) Apply_Heff_ext(L, H.slice(0), auxm, auxm, m, m_H, d, leftQ); else Apply_Heff(L, R, H, auxc, auxc, m, m_H, d); if(ext) auxm.reshape(dim,1); else auxc.reshape(dim,1,1); if(ext) u=auxm; else u=auxc.slice(0); a(i)=dot(u,v.col(i)); w=u-a(i)*v.col(i)-b(i-1)*v.col(i-1); } mat Mred=diagmat(a)+diagmat(b,1)+diagmat(b,-1); eig_sym(eval,evec,Mred); u=v*evec.col(0); //cout<<eval(0)<<endl; if(!first && abs(E-eval(0))<epsilum) break; E=eval(0); first=false; } if(ext) auxm=u/matnorm(u); else auxc.slice(0)=u/matnorm(u); if(ext) {if(leftQ) auxm.reshape(d,m); else auxm.reshape(m,d);} else auxc.reshape(m,d,m); if(ext) S.slice(0)=auxm; else S=auxc; } #define Lanczos Lanczos_cpu double tb_exact(int L){ double e=0, k, ek; for(int a=0;a<L;a++){ k=M_PI*(a+1)/(L+1); ek=-2*cos(k); if(ek<0){ e+=ek; } } return(e); } double dot(mat u, mat v) { mat aux=u*v.t(); return(aux(0,0)); } MPS MPS_add(MPS &v1, MPS &v2) { assert(v1.n==v2.n && v1.d==v2.d); int n=v1.n, d=v1.d, m1=v1.m, m2=v2.m; MPS add(n,m1+m2,d); for(int o=0; o<d; o++) { for(int i=0; i<m1; i++) { add.s0(o,i) =v1.s0(o,i); add.sf(i,o) =v1.sf(i,o); } for(int i=0; i<m2; i++) { add.s0(o,i+m1)=v2.s0(o,i); add.sf(i+m1,o)=v2.sf(i,o); } } for(int site=0; site<n-2; site++) { add.C[site]=zeros<cube>(m1+m2,d,m1+m2); for(int o=0; o<d; o++) { for(int i=0; i<m1; i++) for(int j=0; j<m1; j++) add.C[site](i,o,j)=v1.C[site](i,o,j); for(int i=0; i<m2; i++) for(int j=0; j<m2; j++) add.C[site](i+m1,o,j+m1)= v2.C[site](i,o,j); } } return add; } MPS MPO_multiply(MPS &H1, MPS &H2) { if(H1.n!=H2.n || H1.d!=H2.d) cout<<"cannot multiply different-shaped MPOs"<<endl; int n=H1.n, d=sqrt(H1.d), m1=H1.m, m2=H2.m; MPS mult(n,m1*m2,d*d); double sum, sum2; for(int o=0; o<d; o++) for(int p=0; p<d; p++) for(int ii=0; ii<m1; ii++) for(int jj=0; jj<m2; jj++) { sum=0, sum2=0; for(int q=0; q<d; q++) {sum+=H1.s0(d*o+q,ii)*H2.s0(d*q+p,jj); sum2+=H1.sf(ii,d*o+q)*H2.sf(jj,d*q+p);} mult.s0(d*o+p,m2*ii+jj)=sum; mult.sf(m2*ii+jj,d*o+p)=sum2; } for(int site=0; site<n-2; site++) { for(int o=0; o<d; o++) for(int p=0; p<d; p++) for(int i=0; i<m1; i++) for(int j=0; j<m2; j++) for(int ii=0; ii<m1; ii++) for(int jj=0; jj<m2; jj++) { sum=0; for(int q=0; q<d; q++) sum+=H1.C[site](i,d*o+q,ii)*H2.C[site](j,d*q+p,jj); mult.C[site](m2*i+j,d*o+p,m2*ii+jj)=sum; } } return mult; } void MPS_scale(MPS &S, double k) { k=pow(k,(double)1/S.n); S.s0*=k; S.sf*=k; for(int i=0;i<S.n-2;i++) S.C[i]*=k; } MPS Ci(int n, int Ni, int plusQ) { int o=(plusQ ? 
2 : 1); MPS O=I_op(n,2); if(Ni==0) {O.s0=zeros<mat>(4,1); O.s0(o, 0 )=1; return O;} if(Ni==n-1){O.sf=zeros<mat>(1,4); O.sf(0, o )=1; return O;} O.C[Ni-1]=zeros<cube>(1,4,1); O.C[Ni-1](0,o,0)=1; return O; } MPS Sgi(int n, int Ni) { MPS O=I_op(n,2); if(Ni==0) {O.s0(0, 0 )=-1; return O;} if(Ni==n-1){O.sf(0, 0 )=-1; return O;} O.C[Ni-1](0,0,0)=-1; return O; } MPS Ni(int n, int Ni) { MPS O=I_op(n,2); if(Ni==0) {O.s0(3,0)=0; return O;} if(Ni==n-1){O.sf(0,3)=0; return O;} O.C[Ni-1](0,3,0)=0; return O; } MPS hopp(int n, int s1, int s2) { MPS h1=I_op(n,2), h2=h1; if(s1==0) { h1.s0=zeros<mat>(4,1); h2.s0=zeros<mat>(4,1); h1.s0(2, 0 )=1; h2.s0(1, 0 )=1; } else { h1.C[s1-1]=zeros<cube>(1,4,1); h2.C[s1-1]=zeros<cube>(1,4,1); h1.C[s1-1](0,2,0)=1; h2.C[s1-1](0,1,0)=1; } if(s2==n-1) { h1.sf=zeros<mat>(1,4); h2.sf=zeros<mat>(1,4); h1.sf(0, 1 )=1; h2.sf(0, 2 )=1; } else { h1.C[s2-1]=zeros<cube>(1,4,1); h2.C[s2-1]=zeros<cube>(1,4,1); h1.C[s2-1](0,1,0)=1; h2.C[s2-1](0,2,0)=1; } for (int i = s1+1; i < s2; ++i) {h1.C[i-1](0,0,0)=-1; h2.C[i-1](0,0,0)=-1;} MPS H=MPS_add(h1,h2); return(H); } MPS TB(int n) { MPS Hi=hopp(n,0,1), H=Hi; for(int i=1; i<n-1; i++) {Hi=hopp(n,i,i+1) ;H=MPS_add(H,Hi);} return H; } MPS DC0(int L, double t1, double t2, double U, double V) { int n=4*L; MPS N0=Ni(n,0), N1=Ni(n,1), N2=Ni(n,2), N3=Ni(n,3); MPS H01=MPO_multiply(N0,N1); MPS H23=MPO_multiply(N2,N3); MPS Hi=MPS_add(H01,H23); MPS_scale(Hi,U); MPS H=Hi; H01=MPS_add(N0,N1); H23=MPS_add(N2,N3); Hi=MPO_multiply(H01,H23); MPS_scale(Hi,V); H=MPS_add(H,Hi); for(int i=1;i<L;i++) { for(int j=0;j<4;j++) {Hi=hopp(n,4*(i-1)+j,4*i+j); H=MPS_add(H,Hi);} N0=Ni(n,4*i); N1=Ni(n,4*i+1); N2=Ni(n,4*i+2); N3=Ni(n,4*i+3); H01=MPO_multiply(N0,N1); H23=MPO_multiply(N2,N3); Hi=MPS_add(H01,H23); MPS_scale(Hi,U); H=MPS_add(H,Hi); H01=MPS_add(N0,N1); H23=MPS_add(N2,N3); Hi=MPO_multiply(H01,H23); MPS_scale(Hi,V); H=MPS_add(H,Hi); } return(H); } MPS DC(int L, double t1, double t2, double U, double V) { int n=4*L; MPS N0=Ni(n,0), N1=Ni(n,L), N2=Ni(n,2*L), N3=Ni(n,3*L); MPS H01=MPO_multiply(N0,N1); MPS H23=MPO_multiply(N2,N3); MPS Hi=MPS_add(H01,H23); MPS_scale(Hi,U); MPS H=Hi; H01=MPS_add(N0,N1); H23=MPS_add(N2,N3); Hi=MPO_multiply(H01,H23); MPS_scale(Hi,V); H=MPS_add(H,Hi); for(int i=1;i<L;i++) { for(int j=0;j<4;j++) {Hi=hopp(n,(i-1)+j*L,i+j*L); MPS_scale(Hi,((j<2) ? 
t1 : t2)); H=MPS_add(H,Hi);} N0=Ni(n,i); N1=Ni(n,i+L); N2=Ni(n,i+2*L); N3=Ni(n,i+3*L); H01=MPO_multiply(N0,N1); H23=MPO_multiply(N2,N3); Hi=MPS_add(H01,H23); MPS_scale(Hi,U); H=MPS_add(H,Hi); H01=MPS_add(N0,N1); H23=MPS_add(N2,N3); Hi=MPO_multiply(H01,H23); MPS_scale(Hi,V); H=MPS_add(H,Hi); } return(H); } double ground(mat B){ vec eval; mat evec; eig_sym(eval,evec,B); double minenerg =eval(0); return(minenerg); } mat I_mat(int l){ int d=1<<l; mat I(d,d,fill::eye); return(I); } mat N_mat(int n, int Ni) { mat N(2,2,fill::zeros); N(0,0)=1; mat M=kron(kron(I_mat(Ni),N), I_mat(n-Ni-1)); return M; } mat NiNj(int n, int N1, int N2) { mat N(2,2,fill::zeros); N(0,0)=1; mat M=kron(kron(kron(I_mat(N1),N), kron(I_mat(N2-N1-1),N) ), I_mat(n-N2-1)); return M; } mat hopp_mat(int n, int n1, int n2) { mat Cp(2,2,fill::zeros), Cm(2,2,fill::zeros); Cp(0,1)=1; Cm(1,0)=1; mat h1=kron( I_mat(n1), kron( kron(Cm,I_mat(n2-n1-1)), kron(Cp,I_mat(n-n2-1)) ) ); mat h2=kron( I_mat(n1), kron( kron(Cp,I_mat(n2-n1-1)), kron(Cm,I_mat(n-n2-1)) ) ); mat h=h1+h2; return h; } mat DC_mat(int L, double t1, double t2, double U, double V) { int n=4*L; //int dim=1<<n; int n0=0, n1=L, n2=2*L, n3=3*L; mat H =U*( NiNj(n,n0,n1) + NiNj(n,n2,n3)); H+=V*( NiNj(n,n0,n2) + NiNj(n,n0,n3) + NiNj(n,n1,n2) + NiNj(n,n1,n3) ); for(int i=1;i<L;i++) { for(int j=0;j<4;j++) H+=((j<2) ? t1 : t2)*hopp_mat(n,(i-1)+j*L,i+j*L); n0=i; n1=i+L; n2=i+2*L; n3=i+3*L; H+=U*( NiNj(n,n0,n1) + NiNj(n,n2,n3)); H+=V*( NiNj(n,n0,n2) + NiNj(n,n0,n3) + NiNj(n,n1,n2) + NiNj(n,n1,n3) ); } return(H); } double DMRG(MPS H, int m, int nloops) { int n=H.n, d=sqrt(H.d), m_H=H.m; H.canonize(1); MPS S(n, m, d); S.canonize(0); S.normalize(); vector<cube> L, R; L.resize(n); R.resize(n); cube Hcube(d*d,m_H,1), Scube(d,m,1); for(int i=0; i<n; i++) {L[i]=cube(m,m_H,m); R[i]=cube(m,m_H,m);} contract_0( R[n-2], S.sf, S.sf, H.sf, m, m_H, d, false); for(int i=n-3; i>=0; i--) contract_step(R[i+1], R[i], S.C[i], S.C[i], H.C[i], m, m_H, d, false); for(int lap=0; lap<nloops; lap++) { if(lap%1==0) cout<<"############################## "<<"[sweep = "<<lap<<"]"<<endl; Hcube.reshape(d*d,m_H,1); Scube.reshape(d,m,1); Hcube.slice(0)=H.s0; Scube.slice(0)=S.s0; Lanczos(R[0], R[0], Hcube, Scube, true, true); S.sweep(true); contract_0( L[1], S.s0, S.s0, H.s0, m, m_H, d, true); for(int site=1; site<n-1; site++) { cout<<"(sitio = "<<site<<")\t"; cout<< sandwich(S, S, H) <<endl; Lanczos(L[site], R[site], H.C[site-1], S.C[site-1], false); S.sweep(true); contract_step(L[site], L[site+1], S.C[site-1], S.C[site-1], H.C[site-1], m, m_H, d, true); } Hcube.reshape(m_H,d*d,1); Scube.reshape(m,d,1); Hcube.slice(0)=H.sf; Scube.slice(0)=S.sf; Lanczos(L[n-1], L[n-1], Hcube, Scube, true, false); S.sweep(false); contract_0( R[n-2], S.sf, S.sf, H.sf, m, m_H, d, false); for(int site=n-2; site>0; site--) { cout<<"(sitio = "<<site<<")\t"; cout<< sandwich(S, S, H) <<endl; Lanczos(L[site], R[site], H.C[site-1], S.C[site-1], false); S.sweep(false); contract_step(R[site], R[site-1], S.C[site-1], S.C[site-1], H.C[site-1], m, m_H, d, false); } } S.sweep(true); return(sandwich(S, S, H)); } int main(int argc, char const *argv[]) { srand(time(0)); arma_rng::set_seed(rand()); int L=3; double t1=1, t2=1, U=0, V=0; int mMPS=10, nsweeps=10; if(argc==2) mMPS= atof(argv[1]); MPS H=DC(L, t1, t2, U, V); //MPS H=TB(L); cout<<endl<<"Energy = \t"<<DMRG(H, mMPS, nsweeps)<<endl; cout<<endl<<"4 * TB_exact = \t"<<4*tb_exact(L)<<endl; return 0; }
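In the DMRG driver above, d_Lanczos (GPU) and Lanczos_cpu run the same three-term Lanczos recurrence on the effective Hamiltonian: they collect the diagonal/off-diagonal coefficients a and b of a small K_dim = d_red = 3 tridiagonal matrix, diagonalize it (cusolverDnDsyevd on the GPU, Armadillo's eig_sym on the CPU), and keep the lowest Ritz pair. A minimal host-only sketch of that recurrence, with an invented 4x4 symmetric matrix standing in for H_eff (names and sizes here are illustrative, not taken from the file):

// Sketch of the Lanczos recurrence behind d_Lanczos / Lanczos_cpu above.
// The 4x4 matrix A is a made-up stand-in for the effective Hamiltonian.
#include <cstdio>
#include <cmath>

static const int N = 4, K = 3;                     // problem size, Krylov dimension (K_dim = 3 in the file)
static const double A[N][N] = {{ 2,-1, 0, 0},
                               {-1, 2,-1, 0},
                               { 0,-1, 2,-1},
                               { 0, 0,-1, 2}};     // symmetric test matrix

static void matvec(const double *x, double *y) {   // y = A x
    for (int i = 0; i < N; ++i) { y[i] = 0; for (int j = 0; j < N; ++j) y[i] += A[i][j] * x[j]; }
}
static double dot(const double *x, const double *y) {
    double s = 0; for (int i = 0; i < N; ++i) s += x[i] * y[i]; return s;
}

int main() {
    double v[K][N], w[N], a[K], b[K-1];
    double u[N] = {1, 1, 1, 1};
    double nrm = std::sqrt(dot(u, u));
    for (int i = 0; i < N; ++i) v[0][i] = u[i] / nrm;          // v0 = u / ||u||
    matvec(v[0], w); a[0] = dot(w, v[0]);
    for (int i = 0; i < N; ++i) w[i] -= a[0] * v[0][i];        // w = A v0 - a0 v0
    for (int k = 1; k < K; ++k) {
        b[k-1] = std::sqrt(dot(w, w));
        for (int i = 0; i < N; ++i) v[k][i] = w[i] / b[k-1];   // next Lanczos vector
        matvec(v[k], w); a[k] = dot(w, v[k]);
        for (int i = 0; i < N; ++i) w[i] -= a[k] * v[k][i] + b[k-1] * v[k-1][i];
    }
    // a[] and b[] are the diagonal / off-diagonal entries of the small tridiagonal
    // matrix that build_tridiag + EigenSolve (GPU) or diagmat + eig_sym (CPU) diagonalize.
    for (int k = 0; k < K; ++k)   printf("a[%d] = %f\n", k, a[k]);
    for (int k = 0; k < K-1; ++k) printf("b[%d] = %f\n", k, b[k]);
    return 0;
}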
f15de506ed94c29b467c7b3b07b7021e995e5a58.cu
#include <iostream> #include <armadillo> #include <vector> #include <iomanip> #include<assert.h> #include <cusolverDn.h> using namespace std; using namespace arma; void EigenSolve(int m, double *d_A, double *d_W) { cudaError_t cudaStat1, cudaStat3; cusolverStatus_t cusolver_status; cusolverDnHandle_t cusolverH = NULL; cusolver_status = CUSOLVER_STATUS_SUCCESS; int lwork = 0, info_gpu = 0, *devInfo = NULL; double *d_work = NULL; cudaStat3 = cudaMalloc ((void**)&devInfo, sizeof(int)); assert(cudaSuccess == cudaStat3); cusolver_status = cusolverDnCreate(&cusolverH); assert(CUSOLVER_STATUS_SUCCESS == cusolver_status); int lda=m; cusolverEigMode_t jobz = CUSOLVER_EIG_MODE_VECTOR; // compute eigenvalues and eigenvectors. cublasFillMode_t uplo = CUBLAS_FILL_MODE_LOWER; cusolver_status = cusolverDnDsyevd_bufferSize( cusolverH, jobz, uplo, m, d_A, lda, d_W, &lwork); assert (cusolver_status == CUSOLVER_STATUS_SUCCESS); cudaStat1 = cudaMalloc((void**)&d_work, sizeof(double)*lwork); assert(cudaSuccess == cudaStat1); cusolver_status = cusolverDnDsyevd( cusolverH, jobz, uplo, m, d_A, lda, d_W, d_work, lwork, devInfo); cudaStat1 = cudaDeviceSynchronize(); assert(CUSOLVER_STATUS_SUCCESS == cusolver_status); assert(cudaSuccess == cudaStat1); cudaStat3 = cudaMemcpy(&info_gpu, devInfo, sizeof(int), cudaMemcpyDeviceToHost); assert(0 == info_gpu); if (devInfo ) cudaFree(devInfo); if (d_work ) cudaFree(d_work ); if (cusolverH) cusolverDnDestroy(cusolverH); } void transpose(int height, int width, double *A, double *B) { double const alpha(1.0); double const beta(0.0); cublasHandle_t handle; cublasCreate(&handle); cublasDgeam(handle, CUBLAS_OP_T, CUBLAS_OP_N, height, width, &alpha, (const double*)A, width , &beta , (const double*)A, height, B, height); cublasDestroy(handle); } __global__ void build_tridiag_ker(int m, double *alpha, double *beta, double *M) { int row = blockIdx.y * blockDim.y + threadIdx.y; int col = blockIdx.x * blockDim.x + threadIdx.x; M[col*m+row] = (row==col)? (alpha[col]):( (col==row+1 || col==row-1)? (beta[min(col,row)]):0 ); } void build_tridiag(int m, double *alpha, double *beta, double *M) { dim3 dimBlock(m, m); dim3 dimGrid(1, 1); build_tridiag_ker<<<dimGrid,dimBlock>>>(m, alpha, beta, M); cudaDeviceSynchronize(); } double norm2(int len, double *A) { cublasStatus_t stat; cublasHandle_t manija; stat=cublasCreate(&manija); double result; stat = cublasDnrm2(manija, len, A, 1, &result); if (stat != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! norm2 error\n"); exit(1); } return result; } void Dgemm(int N, int K, int M, double *alpha, double *beta, double *A, double *B, double *C) { cublasStatus_t stat; cublasHandle_t manija; stat=cublasCreate(&manija); stat = cublasDgemm(manija,CUBLAS_OP_N,CUBLAS_OP_N, N,M,K, alpha, (const double*)A,N, (const double*)B,K, beta, C,N); if (stat != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! Dgemm error\n"); exit(1); } } void Dgemm(int N, int K, int M, double alpha, double beta, double *A, double *B, double *C) { double al=alpha, bet=beta; Dgemm(N, K, M, &al, &bet, A, B, C); } void Dscal(int len, double alpha, double *v) { cublasStatus_t stat; cublasHandle_t manija; stat=cublasCreate(&manija); const double al=alpha; stat = cublasDscal(manija, len, &al, v, 1); if (stat != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! 
Daxpy error\n"); exit(1); } } void apply(int end, int m1, int m2, int m_op, int d, double *s1, double *s_out, double *s_op, double *left, double *right) { double *aux0, *aux1; cudaMalloc((void**)&aux0, m1*d*m2*m_op*sizeof(double) ); cudaMalloc((void**)&aux1, m1*d*m2*m_op*sizeof(double) ); int m1L, m1R, m2L, m2R, wL, wR; if(end==-1) {m1L=1; wL=1; m2L=1;} else {m1L=m1; wL=m_op; m2L=m2;} if(end== 1) {m1R=1; wR=1; m2R=1;} else {m1R=m1; wR=m_op; m2R=m2;} Dgemm(m2L* wL, m1L, d*m1R, 1,0, left, s1, aux1); transpose(m1R, m2L* wL*d, aux1, aux0); Dgemm(m1R*m2L, wL*d, d*wR, 1,0, aux0, s_op, aux1); transpose(m2L*d*wR, m1R, aux1, aux0); transpose(wR*m1R, m2R, right, aux1); Dgemm( m2L*d, wR*m1R, m2R, 1,0, aux0, aux1, s_out); cudaFree(aux0); cudaFree(aux1); } void Daxpy(int len, double alpha, double *A, double *B) { cublasStatus_t stat; cublasHandle_t manija; stat=cublasCreate(&manija); double al = alpha; stat = cublasDaxpy(manija, len, &al, A, 1, B, 1); if (stat != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! Daxpy error\n"); exit(1); } } double matnorm(mat &m) { int nrows=m.n_rows, ncols=m.n_cols; m.reshape(1,nrows*ncols); mat aux(1,1); aux=m*m.t(); m.reshape(nrows,ncols); return(sqrt(aux(0,0))); } double cubenorm(cube &m) { int nrows=m.n_rows, ncols=m.n_cols, nslices=m.n_slices; m.reshape(1,1,nrows*ncols*nslices); mat aux(1,1); aux=m.tube(0,0); aux=aux*aux.t(); m.reshape(nrows,ncols,nslices); return(aux(0,0)); } void decomp(mat &Q, mat &R, mat &C, int mtrunc=0) { if(mtrunc>C.n_cols){cout<<"cannot truncate to bigger dimension -> QR decomposition assumed instead"<<endl; mtrunc=0;} mat left, right; int dim; if(mtrunc==0) { qr(left, right, C); dim=C.n_cols; } else { dim=mtrunc; vec ss; svd(left, ss, right, C); vec vaux=zeros<vec>(dim); int nrows=ss.n_rows; int copy=min(dim,nrows); vaux.head(copy)=ss.head(copy); mat aux=diagmat(vaux); right=right.head_cols(dim); right=aux*right.t(); } Q=zeros<mat>(C.n_rows,dim); R=zeros<mat>(dim,C.n_cols); int cols2copy=min(Q.n_cols,left.n_cols); int rows2copy=min(R.n_rows,right.n_rows); Q.head_cols(cols2copy)=left.head_cols(cols2copy); R.head_rows(rows2copy)=right.head_rows(rows2copy); } struct MPS { int n, m, d, link; mat s0, sf; vector<cube> C; bool canonizedQ; MPS(int n, int m, int d):n(n), m(m), d(d) { canonizedQ=false; s0=mat(d,m,fill::randn); sf=mat(m,d,fill::randn); C.resize(n-2); for (int i = 0; i < n-2; i++) { C[i]=cube(m,d,m,fill::randn); } } void lgauge(int sitio, int mtrunc=0) { if(sitio==0)return; if(mtrunc>=m) mtrunc=0; int o=(mtrunc!=0) ? 
mtrunc : m; mat Q0(d,o), R(o,m), Q(o*d,o), aux(o,d*m); decomp(Q0,R,s0,mtrunc); s0=Q0; for (int i = 0; i < n-2; i++) { C[i].reshape(m,d*m,1); aux=R*(C[i].slice(0)); if(i+1==sitio){C[i].slice(0)=aux; C[i].reshape(m,d,m); return;} aux.reshape(o*d,m); decomp(Q,R,aux,mtrunc); C[i]=cube(o*d,o,1); C[i].slice(0)=Q; C[i].reshape(o,d,o); } sf=R*sf; } void truncate(int mtrunc) { if(mtrunc>=m) cout<<"cannot truncate to bigger dimension"<<endl; if(!canonizedQ || link!=0) cout<<"MPS must be 0-canonized for being truncated"<<endl; lgauge(n-1,mtrunc); m=mtrunc; canonize(0); } void rgauge(int sitio) { mat Q0(d,m), R(m,m), aux(m,d*m); if(sitio==n-1)return; mat tr=sf.t(); decomp(Q0,R,tr); sf=Q0.t(); mat Q(m*d,m); for (int i = n-3; i >= 0; i--) { C[i].reshape(m*d,m,1); aux=(C[i].slice(0))*R.t(); if(i+1==sitio){C[i].slice(0)=aux; C[i].reshape(m,d,m); return;} aux.reshape(m,d*m); aux=aux.t(); decomp(Q,R,aux); C[i].reshape(m,d*m,1); C[i].slice(0)=Q.t(); C[i].reshape(m,d,m); } s0=s0*R.t(); } void canonize(int sitio) { lgauge(sitio); rgauge(sitio); canonizedQ=true; link=sitio; } void sweep(bool forwardQ) { if(!canonizedQ){cout<<"cannot sweep a non-canonized MPS"<<endl; return;} if((forwardQ && link==n-1) || (!forwardQ && link==0) ){cout<<"cannot sweep out bounds"<<endl; return;} int i=link-1; if(forwardQ){link++;}else{link--;} if(i==-1){lgauge(1); return;} if(i==n-2){rgauge(n-2); return;} if(forwardQ) { mat Q(m*d,m), R(m,m), aux(m*d,m); C[i].reshape(m*d,m,1); aux=C[i].slice(0); decomp(Q,R,aux); C[i].slice(0)=Q; C[i].reshape(m,d,m); if(i==n-3){sf=R*sf; return;} C[i+1].reshape(m,d*m,1); aux=R*(C[i+1].slice(0)); C[i+1].slice(0)=aux; C[i+1].reshape(m,d,m); } else { mat Q(m*d,m), R(m,m), aux(m,d*m); C[i].reshape(m,d*m,1); aux=C[i].slice(0); aux=aux.t(); decomp(Q,R,aux); C[i].slice(0)=Q.t(); C[i].reshape(m,d,m); if(i==0){s0=s0*R.t(); return;} C[i-1].reshape(m*d,m,1); aux=(C[i-1].slice(0))*R.t(); C[i-1].slice(0)=aux; C[i-1].reshape(m,d,m); } } double project(vector<int> state) { if(state.size()!=n)cout<<"cannot project into a different-sized state"<<endl; int index=state[0]; mat v(1,m); v=(s0.row(index)); mat aux(m,m); for (int i = 0; i < n-2; ++i) { index=state[i+1]; for (int j = 0; j < m; j++) { for (int k = 0; k < m; k++) { aux(j,k)=C[i](j,index,k); } } v=v*aux; } index=state[n-1]; v=v*(sf.col(index)); return(v(0,0)); } double MPSnorm() { if(!canonizedQ){cout<<"cannot calculate the norm of a non-canonized MPS"<<endl; return(0);} if(link==0)return(matnorm(s0)); if(link==n-1)return(matnorm(sf)); int index=link-1; return(cubenorm(C[index]) ); } void normalize() { if(!canonizedQ){cout<<"cannot normalize a non-canonized MPS"<<endl; return;} double fact=MPSnorm(); if(link==0) {s0/=fact; return;} if(link==n-1) {sf/=fact; return;} C[link-1]/=fact; } }; double firstoverlap(MPS &v1, MPS &v2) { if(!v1.canonizedQ || !v2.canonizedQ){cout<<"cannot overlap a non-canonized MPS"<<endl; return 0;} if(v1.link!=v2.link || v1.n!=v2.n || v1.m!=v2.m || v1.d!=v2.d){cout<<"cannot calculate overlap between different-shaped MPSs"<<endl; return 0;} int link=v1.link, n=v1.n, m=v1.m, d=v1.d; mat Q(m,m), R(m,m), aux(m,m); double sum; Q=(v1.s0).t()*(v2.s0); for(int site=0; site<link-1; ++site) { for(int i=0; i<m; ++i) for(int j=0; j<m; ++j) { sum=0; for(int k=0; k<m; ++k) for(int l=0; l<m; ++l) for(int o=0; o<d; ++o) { sum+=Q(k,l)*v1.C[site](k,o,i)*v2.C[site](l,o,j); } aux(i,j)=sum; } Q=aux; } R=(v1.sf)*(v2.sf).t(); for(int site=n-3; site>link-1; --site) { for(int i=0; i<m; ++i) for(int j=0; j<m; ++j) { sum=0; for(int k=0; k<m; ++k) for(int l=0; 
l<m; ++l) for(int o=0; o<d; ++o) { sum+=R(k,l)*v1.C[site](i,o,k)*v2.C[site](j,o,l); } aux(i,j)=sum; } R=aux; } sum=0; for(int k=0; k<m; ++k) for(int l=0; l<m; ++l) for(int kk=0; kk<m; ++kk) for(int ll=0; ll<m; ++ll) for(int o=0; o<d; ++o) { sum+= v1.C[link-1](k,o,kk)*v2.C[link-1](l,o,ll)*Q(k,l)*R(kk,ll); } return(sum); } double firstsandwich(MPS &v1, MPS &v2, MPS &op) { if( !v1.canonizedQ || !v2.canonizedQ || !op.canonizedQ ){cout<<"cannot sandwich non-canonized MPS or MPO"<<endl; return 0;} if( (v1.link!=op.link || v2.link!=op.link) || (v1.n!=op.n || v2.n!=op.n) || (v1.m!=op.m || v2.m!=op.m) || (v1.d!=sqrt(op.d) || v2.d!=sqrt(op.d)) ){cout<<"cannot sandwich different-shaped MPS or MPO"<<endl; return 0;} int link=v1.link, n=v1.n, m=v1.m, d=v1.d; cube Q(m,m,m), R(m,m,m), aux(m,m,m); double sum; for(int i=0; i<m; ++i) for(int j=0; j<m; ++j) for(int k=0; k<m; ++k) { sum=0; for(int o=0; o<d; ++o) for(int p=0; p<d; ++p) { sum+=v1.s0(o,i)*op.s0(d*o+p,j)*v2.s0(p,k); } aux(i,j,k)=sum; } Q=aux; for(int site=0; site<link-1; ++site) { for(int i=0; i<m; ++i) for(int j=0; j<m; ++j) for(int k=0; k<m; ++k) { sum=0; for(int ii=0; ii<m; ++ii) for(int jj=0; jj<m; ++jj) for(int kk=0; kk<m; ++kk) for(int o=0; o<d; ++o) for(int p=0; p<d; ++p) { sum+=Q(ii,jj,kk)*v1.C[site](ii,o,i)*op.C[site](jj,d*o+p,j)*v2.C[site](kk,p,k); } aux(i,j,k)=sum; } Q=aux; } for(int i=0; i<m; ++i) for(int j=0; j<m; ++j) for(int k=0; k<m; ++k) { sum=0; for(int o=0; o<d; ++o) for(int p=0; p<d; ++p) { sum+=v1.sf(i,o)*op.sf(j,d*o+p)*v2.sf(k,p); } aux(i,j,k)=sum; } R=aux; for(int site=n-3; site>link-1; --site) { for(int i=0; i<m; ++i) for(int j=0; j<m; ++j) for(int k=0; k<m; ++k) { sum=0; for(int ii=0; ii<m; ++ii) for(int jj=0; jj<m; ++jj) for(int kk=0; kk<m; ++kk) for(int o=0; o<d; ++o) for(int p=0; p<d; ++p) { sum+=R(ii,jj,kk)*v1.C[site](i,o,ii)*op.C[site](j,d*o+p,jj)*v2.C[site](k,p,kk); } aux(i,j,k)=sum; } R=aux; } sum=0; for(int i=0; i<m; ++i) for(int j=0; j<m; ++j) for(int k=0; k<m; ++k) for(int ii=0; ii<m; ++ii) for(int jj=0; jj<m; ++jj) for(int kk=0; kk<m; ++kk) for(int o=0; o<d; ++o) for(int p=0; p<d; ++p) { sum+= Q(i,j,k)*v1.C[link-1](i,o,ii)*op.C[link-1](j,d*o+p,jj)*v2.C[link-1](k,p,kk)*R(ii,jj,kk); } return(sum); } MPS I_op(int n, int d) { MPS O(n,1,d*d); O.canonizedQ=true; mat aux(d,d,fill::eye); aux.reshape(d*d,1); O.s0=aux; aux.reshape(1,d*d); O.sf=aux; O.C.resize(n-2); cube cubo(1,d*d,1); cubo.slice(0)=aux; for (int i = 0; i < n-2; i++) { O.C[i]=cubo; } return(O); } void Apply_Heff(const cube &L, const cube &R, const cube &W, const cube &s, cube &z, int m, int m_W, int d, int mm=0) { if(mm==0) mm=m; cube E(m*mm,m_W,d), A(m*m,m_W,d); double sum; /* for(int ii=0; ii<m; ii++) for(int k=0; k<mm; k++) for(int jj=0; jj<m_W; jj++) for(int p=0; p<d; p++) { sum=0; for(int kk=0; kk<mm; kk++) sum+=R(ii,jj,kk)*s(k,p,kk); A(mm*ii+k,jj,p)=sum; } */ const mat Rm=mat((double *)R.memptr(),m*m_W,m,false); const mat Sm=mat((double *)s.memptr(),m*d,m,false); mat Am=Rm*Sm.t(); for(int ii=0; ii<m; ii++) for(int k=0; k<mm; k++) for(int jj=0; jj<m_W; jj++) for(int p=0; p<d; p++) { A(mm*ii+k,jj,p)=Am(ii+m*jj,k+p*m); } for(int ii=0; ii<m; ii++) for(int k=0; k<mm; k++) for(int j=0; j<m_W; j++) for(int o=0; o<d; o++) { sum=0; for(int jj=0; jj<m_W; jj++) for(int p=0; p<d; p++) sum+=A(mm*ii+k,jj,p)*W(j,d*o+p,jj); E(mm*ii+k,j,o)=sum; } for(int ii=0; ii<m; ii++) for(int i=0; i<m; i++)for(int o=0; o<d; o++) { sum=0; for(int k=0; k<m; k++) for(int j=0; j<m_W; j++) sum+=E(mm*ii+k,j,o)*L(i,j,k); z(i,o,ii)=sum; } } void Apply_Heff_ext(cube &supercube, mat &W, 
mat &s, mat &z, int m, int m_W, int d, bool leftQ) { cube supercube_ext(m,d*d,m); double sum; for (int i=0; i<m; ++i) for (int k=0; k<m; ++k) for (int o=0; o<d; ++o) for (int p=0; p<d; ++p) { sum=0; for (int j=0; j<m_W; ++j) sum+=(leftQ ? W(d*o+p,j) : W(j,d*o+p) )*supercube(i,j,k); supercube_ext(i,d*o+p,k)=sum; } for (int i=0; i<m; ++i) for (int o=0; o<d; ++o) { sum=0; for (int k=0; k<m; ++k) for (int p=0; p<d; ++p) sum+= supercube_ext(i,d*o+p,k)*( leftQ ? s(p,k) : s(k,p) ); if(leftQ) z(o,i)=sum; else z(i,o)=sum; } } void contract_0(cube &supercube, mat &s1, mat &s2, mat &W, int m, int m_W, int d, bool leftQ, int mm=0) { if(mm==0) mm=m; double sum; for(int i=0; i<m; ++i) for(int j=0; j<m_W; ++j) for(int k=0; k<mm; ++k) { sum=0; for(int o=0; o<d; ++o) for(int p=0; p<d; ++p) sum+= (leftQ ? s1(o,i) : s1(i,o)) *(leftQ ? W(d*o+p,j) : W(j,d*o+p))*(leftQ ? s2(p,k) : s2(k,p)); supercube(i,j,k)=sum; } } void contract_step(cube &supercube, cube &newsupercube, cube &s1, cube &s2, cube &W, int m, int m_W, int d, bool leftQ, int mm=0) { if(mm==0) mm=m; cube auxL(m*m_W,d,mm), auxC(m*mm,d*d,m_W); double sum; for(int i=0; i<m; ++i) for(int jj=0; jj<m_W; ++jj) for(int kk=0; kk<mm; ++kk) for(int o=0; o<d; ++o) { sum=0; for(int ii=0; ii<m; ++ii) sum+=supercube(ii,jj,kk)*(leftQ ? s1(ii,o,i) : s1(i,o,ii) ); auxL(m_W*i+jj,o,kk)=sum; } for(int i=0; i<m; ++i) for(int jj=0; jj<m_W; ++jj) for(int k=0; k<mm; ++k) for(int o=0; o<d; ++o) for(int p=0; p<d; ++p) { sum=0; for(int kk=0; kk<mm; ++kk) sum+= auxL(m_W*i+jj,o,kk)*(leftQ ? s2(kk,p,k) : s2(k,p,kk) ); auxC(mm*i+k,d*o+p,jj)=sum; } for(int i=0; i<m; ++i) for(int j=0; j<m_W; ++j) for(int k=0; k<mm; ++k) { sum=0; for(int jj=0; jj<m_W; ++jj) for(int o=0; o<d; ++o) for(int p=0; p<d; ++p) sum+= auxC(mm*i+k,d*o+p,jj)*(leftQ ? W(jj,d*o+p,j) : W(j,d*o+p,jj) ) ; newsupercube(i,j,k)=sum; } } double contract_site(cube &s1, cube &s2, int m, int d) { double sum=0; for(int i=0; i<m; ++i) for(int ii=0; ii<m; ++ii) for(int o=0; o<d; ++o) sum+= s1(i,o,ii)*s2(i,o,ii); return sum; } double sandwich(MPS &v1, MPS &v2, MPS &op) //doesnt work with end-canonization ! 
{ if( (v1.n!=op.n || v2.n!=op.n) || (v1.d!=sqrt(op.d) || v2.d!=sqrt(op.d)) ){cout<<"cannot sandwich different-shaped MPS or MPO"<<endl; return 0;} int link=v1.link, n=v1.n, d=v1.d; cube Q(v1.m,op.m,v2.m), R(v1.m,op.m,v2.m), z(v1.m,d,v1.m); contract_0(Q, v1.s0, v2.s0, op.s0, v1.m, op.m, d, true, v2.m); contract_0(R, v1.sf, v2.sf, op.sf, v1.m, op.m, d, false, v2.m); for(int site=0; site<link-1; ++site) contract_step(Q, Q, v1.C[site], v2.C[site], op.C[site], v1.m, op.m, d, true, v2.m); for(int site=n-3; site>link-1; --site) contract_step(R, R, v1.C[site], v2.C[site], op.C[site], v1.m, op.m, d, false, v2.m); Apply_Heff(Q, R, op.C[link-1], v2.C[link-1], z, v1.m, op.m, d) ; return contract_site(v1.C[link-1], z, v1.m, d); } double overlap(MPS &v1, MPS &v2) { if(!v1.canonizedQ || !v2.canonizedQ){cout<<"cannot overlap a non-canonized MPS"<<endl; return 0;} if(v1.link!=v2.link || v1.n!=v2.n || v1.m!=v2.m || v1.d!=v2.d){cout<<"cannot calculate overlap between different-shaped MPSs"<<endl; return 0;} int link=v1.link, n=v1.n, m=v1.m, d=v1.d; mat Q(m,m), R(m,m); cube aux(m,d,m), aux2(m,d,m); double sum; Q=(v1.s0).t()*(v2.s0); for(int site=0; site<link-1; ++site) { for(int l=0; l<m; ++l) for(int o=0; o<d; ++o) for(int i=0; i<m; ++i) { sum=0; for(int k=0; k<m; ++k) sum+=Q(k,l)*v1.C[site](k,o,i); aux(l,o,i)=sum; } for(int i=0; i<m; ++i) for(int j=0; j<m; ++j) { sum=0; for(int l=0; l<m; ++l) for(int o=0; o<d; ++o) sum+=aux(l,o,i)*v2.C[site](l,o,j); Q(i,j)=sum; } } R=(v1.sf)*(v2.sf).t(); for(int site=n-3; site>link-1; --site) { for(int i=0; i<m; ++i) for(int o=0; o<d; ++o) for(int l=0; l<m; ++l) { sum=0; for(int k=0; k<m; ++k) sum+=R(k,l)*v1.C[site](i,o,k); aux(i,o,l)=sum; } for(int i=0; i<m; ++i) for(int j=0; j<m; ++j) { sum=0; for(int l=0; l<m; ++l) for(int o=0; o<d; ++o) sum+=aux(i,o,l)*v2.C[site](j,o,l); R(i,j)=sum; } } for(int l=0; l<m; ++l) for(int o=0; o<d; ++o) for(int kk=0; kk<m; ++kk) { sum=0; for(int k=0; k<m; ++k) sum+= v1.C[link-1](k,o,kk)*Q(k,l); aux(l,o,kk)=sum; } for(int l=0; l<m; ++l) for(int o=0; o<d; ++o) for(int ll=0; ll<m; ++ll) { sum=0; for(int kk=0; kk<m; ++kk) sum+= aux(l,o,kk)*R(kk,ll); aux2(l,o,ll)=sum; } sum=0; for(int l=0; l<m; ++l) for(int o=0; o<d; ++o) for(int ll=0; ll<m; ++ll) sum+=aux2(l,o,ll)*v2.C[link-1](l,o,ll); return(sum); } double MatLanczos(mat M, double epsilum, int d_red) { int dim=M.n_cols; mat u(dim,1,fill::randu), evec(d_red,d_red), v(dim, d_red), v_prev, w(dim,1); vec eval(d_red), a(d_red), b(d_red-1); double E; bool first=true; while(true) { v.col(0)=u/matnorm(u); u=M*v.col(0); a(0)=dot(u,v.col(0)); w=u-a[0]*v.col(0); for (int i = 1; i < d_red; ++i) { b(i-1)=matnorm(w); v.col(i)=w/b(i-1); u=M*v.col(i); a(i)=dot(u,v.col(i)); w=u-a(i)*v.col(i)-b(i-1)*v.col(i-1); } mat Mred=diagmat(a)+diagmat(b,1)+diagmat(b,-1); eig_sym(eval,evec,Mred); //cout<<eval(0)<<endl; if(!first && abs(E-eval(0))<epsilum) return(eval(0)); E=eval(0); u=v*evec.col(0); first=false; } } void d_Lanczos(int end, int m, int m_op, int d, double *s_eff, double *h_eff, double *left, double *right) { double epsilum=0.001; int K_dim=3; int max_iter=100; int dim=(end==0)?(m*d*m):(m*d); double *T, *v, *eval, *a, *b; cudaMalloc((void **)&T, K_dim*K_dim*sizeof(double) ); cudaMalloc((void **)&eval, K_dim*sizeof(double) ); cudaMalloc((void **)& v, (K_dim+1)*dim*sizeof(double) ); cudaMalloc((void **)& a, K_dim *sizeof(double) ); cudaMalloc((void **)& b, K_dim *sizeof(double) ); double E; bool first=true; for(int iter=0; iter<max_iter; iter++) { Dscal(dim, 1/norm2(dim, s_eff), s_eff ); cudaMemcpy(v+0, s_eff, 
dim*sizeof(double), cudaMemcpyDeviceToDevice); for (int i = 0; i < K_dim; ++i) { apply(end, m, m, m_op, d, v+i*dim, v+(i+1)*dim, h_eff, left, right); Dgemm(1,dim,1, 1,0, v+(i+1)*dim, v+i*dim, a+i); if(i==K_dim-1) break; double h_aux; cudaMemcpy(&h_aux, a+i, sizeof(double), cudaMemcpyDeviceToHost); Daxpy(dim, -h_aux, v+i*dim, v+(i+1)*dim ); if(i>0) { cudaMemcpy(&h_aux, b+(i-1), sizeof(double), cudaMemcpyDeviceToHost); Daxpy(dim, -h_aux, v+(i-1)*dim, v+(i+1)*dim ); } double beta = norm2(dim, v+(i+1)*dim); cudaMemcpy(b+i, &beta, sizeof(double), cudaMemcpyHostToDevice); Dscal(dim, 1/beta, v+(i+1)*dim ); } build_tridiag(K_dim, a, b, T); EigenSolve(K_dim, T, eval); Dgemm(dim, K_dim, 1, 1,0, v, T+0, s_eff); double min_val; cudaMemcpy(&min_val, eval+0, sizeof(double), cudaMemcpyDeviceToHost); if(!first && abs(E- min_val) < epsilum ) break; E=min_val; first=false; } cudaFree(T); cudaFree(v); cudaFree(eval); cudaFree(a); cudaFree(b); } void vec_to_array(cube &cubo, double *d_ptr) { int dim=cubo.n_rows; double *h_aux; h_aux = (double *) malloc(dim*sizeof(double) ); for(int i=0; i<dim; i++) { h_aux[i] = cubo(i,0,0); } cudaMemcpy(d_ptr, h_aux, dim*sizeof(double), cudaMemcpyHostToDevice); } void array_to_vec(cube &cubo, double *d_ptr) { int dim=cubo.n_rows; double *h_aux; h_aux = (double *) malloc(dim*sizeof(double) ); cudaMemcpy( h_aux, d_ptr, dim*sizeof(double), cudaMemcpyDeviceToHost); for(int i=0; i<dim; i++) { cubo(i,0,0) = h_aux[i] ; } } void Lanczos_gpu(cube &L, cube &R, cube &H, cube &S, bool ext, bool leftQ=false) { //cout<<"GPU_Lanczos"<<"\t"; int end = ext? (leftQ? -1:1) :0; int m =(ext && leftQ)? S.n_cols : S.n_rows, m_op=(ext && leftQ)? H.n_cols : H.n_rows, d =(ext && leftQ)? S.n_rows : S.n_cols; int mL, mR, wL, wR; if(end==-1) {mL=1; wL=1;} else {mL=m; wL=m_op;} if(end== 1) {mR=1; wR=1;} else {mR=m; wR=m_op;} int Sdim = mL*d*mR, Hdim = wL*d*d*wR, Ldim = mL*wL*mL, Rdim = mR*wR*mR; cube auxL, auxR, auxH=H, auxS=S; auxH.reshape(Hdim, 1, 1); auxS.reshape(Sdim, 1, 1); double *d_L, *d_R, *d_H, *d_S; cudaMalloc((void **)&d_L, Ldim*sizeof(double) ); cudaMalloc((void **)&d_R, Rdim*sizeof(double) ); cudaMalloc((void **)&d_H, Hdim*sizeof(double) ); cudaMalloc((void **)&d_S, Sdim*sizeof(double) ); double val=1; if(ext && leftQ) cudaMemcpy(d_L, &val, sizeof(double), cudaMemcpyHostToDevice); else {auxL=L; auxL.reshape(Ldim, 1, 1); vec_to_array(auxL, d_L);} if(ext && !leftQ) cudaMemcpy(d_R, &val, sizeof(double), cudaMemcpyHostToDevice); else {auxR=R; auxR.reshape(Rdim, 1, 1); vec_to_array(auxR, d_R);} vec_to_array(auxH, d_H); vec_to_array(auxS, d_S); d_Lanczos(end, m, m_op, d, d_S, d_H, d_L, d_R); array_to_vec(auxS, d_S); auxS.reshape(mL, d, mR); S=auxS; } void Lanczos_cpu(cube &L, cube &R, cube &H, cube &S, bool ext, bool leftQ=false) { //cout<<"CPU_Lanczos"<<"\t"; double epsilum=0.001; int d_red=3; int max_iter=1000; int m= (ext && leftQ) ? S.n_cols : S.n_rows, m_H=(ext && leftQ) ? H.n_cols : H.n_rows, d= S.size()/(ext ? m : m*m), dim=ext ? 
m*d : m*d*m; mat evec(d_red,d_red), v(dim, d_red), u(dim,1), w(dim,1); vec eval(d_red), a(d_red), b(d_red-1); mat auxm(m,d ); if(leftQ){auxm.reshape(d,m);} cube auxc(m,d,m); double E; bool first=true; if(ext) auxm=S.slice(0); else auxc=S; if(ext) auxm.reshape(dim,1); else auxc.reshape(dim,1,1); if(ext) u=auxm; else u=auxc.slice(0); for(int iter=0; iter<max_iter; iter++) { u/=matnorm(u); v.col(0)=u; if(ext) auxm=u; else auxc.slice(0)=u; if(ext) {if(leftQ) auxm.reshape(d,m); else auxm.reshape(m,d);} else auxc.reshape(m,d,m); if(ext) Apply_Heff_ext(L, H.slice(0), auxm, auxm, m, m_H, d, leftQ); else Apply_Heff(L, R, H, auxc, auxc, m, m_H, d); if(ext) auxm.reshape(dim,1); else auxc.reshape(dim,1,1); if(ext) u=auxm; else u=auxc.slice(0); a(0)=dot(u,v.col(0)); w=u-a[0]*v.col(0); for (int i = 1; i < d_red; ++i) { b(i-1)=matnorm(w); v.col(i)=w/b(i-1); if(ext) auxm=v.col(i); else auxc.slice(0)=v.col(i); if(ext) {if(leftQ) auxm.reshape(d,m); else auxm.reshape(m,d);} else auxc.reshape(m,d,m); if(ext) Apply_Heff_ext(L, H.slice(0), auxm, auxm, m, m_H, d, leftQ); else Apply_Heff(L, R, H, auxc, auxc, m, m_H, d); if(ext) auxm.reshape(dim,1); else auxc.reshape(dim,1,1); if(ext) u=auxm; else u=auxc.slice(0); a(i)=dot(u,v.col(i)); w=u-a(i)*v.col(i)-b(i-1)*v.col(i-1); } mat Mred=diagmat(a)+diagmat(b,1)+diagmat(b,-1); eig_sym(eval,evec,Mred); u=v*evec.col(0); //cout<<eval(0)<<endl; if(!first && abs(E-eval(0))<epsilum) break; E=eval(0); first=false; } if(ext) auxm=u/matnorm(u); else auxc.slice(0)=u/matnorm(u); if(ext) {if(leftQ) auxm.reshape(d,m); else auxm.reshape(m,d);} else auxc.reshape(m,d,m); if(ext) S.slice(0)=auxm; else S=auxc; } #define Lanczos Lanczos_cpu double tb_exact(int L){ double e=0, k, ek; for(int a=0;a<L;a++){ k=M_PI*(a+1)/(L+1); ek=-2*cos(k); if(ek<0){ e+=ek; } } return(e); } double dot(mat u, mat v) { mat aux=u*v.t(); return(aux(0,0)); } MPS MPS_add(MPS &v1, MPS &v2) { assert(v1.n==v2.n && v1.d==v2.d); int n=v1.n, d=v1.d, m1=v1.m, m2=v2.m; MPS add(n,m1+m2,d); for(int o=0; o<d; o++) { for(int i=0; i<m1; i++) { add.s0(o,i) =v1.s0(o,i); add.sf(i,o) =v1.sf(i,o); } for(int i=0; i<m2; i++) { add.s0(o,i+m1)=v2.s0(o,i); add.sf(i+m1,o)=v2.sf(i,o); } } for(int site=0; site<n-2; site++) { add.C[site]=zeros<cube>(m1+m2,d,m1+m2); for(int o=0; o<d; o++) { for(int i=0; i<m1; i++) for(int j=0; j<m1; j++) add.C[site](i,o,j)=v1.C[site](i,o,j); for(int i=0; i<m2; i++) for(int j=0; j<m2; j++) add.C[site](i+m1,o,j+m1)= v2.C[site](i,o,j); } } return add; } MPS MPO_multiply(MPS &H1, MPS &H2) { if(H1.n!=H2.n || H1.d!=H2.d) cout<<"cannot multiply different-shaped MPOs"<<endl; int n=H1.n, d=sqrt(H1.d), m1=H1.m, m2=H2.m; MPS mult(n,m1*m2,d*d); double sum, sum2; for(int o=0; o<d; o++) for(int p=0; p<d; p++) for(int ii=0; ii<m1; ii++) for(int jj=0; jj<m2; jj++) { sum=0, sum2=0; for(int q=0; q<d; q++) {sum+=H1.s0(d*o+q,ii)*H2.s0(d*q+p,jj); sum2+=H1.sf(ii,d*o+q)*H2.sf(jj,d*q+p);} mult.s0(d*o+p,m2*ii+jj)=sum; mult.sf(m2*ii+jj,d*o+p)=sum2; } for(int site=0; site<n-2; site++) { for(int o=0; o<d; o++) for(int p=0; p<d; p++) for(int i=0; i<m1; i++) for(int j=0; j<m2; j++) for(int ii=0; ii<m1; ii++) for(int jj=0; jj<m2; jj++) { sum=0; for(int q=0; q<d; q++) sum+=H1.C[site](i,d*o+q,ii)*H2.C[site](j,d*q+p,jj); mult.C[site](m2*i+j,d*o+p,m2*ii+jj)=sum; } } return mult; } void MPS_scale(MPS &S, double k) { k=pow(k,(double)1/S.n); S.s0*=k; S.sf*=k; for(int i=0;i<S.n-2;i++) S.C[i]*=k; } MPS Ci(int n, int Ni, int plusQ) { int o=(plusQ ? 
2 : 1); MPS O=I_op(n,2); if(Ni==0) {O.s0=zeros<mat>(4,1); O.s0(o, 0 )=1; return O;} if(Ni==n-1){O.sf=zeros<mat>(1,4); O.sf(0, o )=1; return O;} O.C[Ni-1]=zeros<cube>(1,4,1); O.C[Ni-1](0,o,0)=1; return O; } MPS Sgi(int n, int Ni) { MPS O=I_op(n,2); if(Ni==0) {O.s0(0, 0 )=-1; return O;} if(Ni==n-1){O.sf(0, 0 )=-1; return O;} O.C[Ni-1](0,0,0)=-1; return O; } MPS Ni(int n, int Ni) { MPS O=I_op(n,2); if(Ni==0) {O.s0(3,0)=0; return O;} if(Ni==n-1){O.sf(0,3)=0; return O;} O.C[Ni-1](0,3,0)=0; return O; } MPS hopp(int n, int s1, int s2) { MPS h1=I_op(n,2), h2=h1; if(s1==0) { h1.s0=zeros<mat>(4,1); h2.s0=zeros<mat>(4,1); h1.s0(2, 0 )=1; h2.s0(1, 0 )=1; } else { h1.C[s1-1]=zeros<cube>(1,4,1); h2.C[s1-1]=zeros<cube>(1,4,1); h1.C[s1-1](0,2,0)=1; h2.C[s1-1](0,1,0)=1; } if(s2==n-1) { h1.sf=zeros<mat>(1,4); h2.sf=zeros<mat>(1,4); h1.sf(0, 1 )=1; h2.sf(0, 2 )=1; } else { h1.C[s2-1]=zeros<cube>(1,4,1); h2.C[s2-1]=zeros<cube>(1,4,1); h1.C[s2-1](0,1,0)=1; h2.C[s2-1](0,2,0)=1; } for (int i = s1+1; i < s2; ++i) {h1.C[i-1](0,0,0)=-1; h2.C[i-1](0,0,0)=-1;} MPS H=MPS_add(h1,h2); return(H); } MPS TB(int n) { MPS Hi=hopp(n,0,1), H=Hi; for(int i=1; i<n-1; i++) {Hi=hopp(n,i,i+1) ;H=MPS_add(H,Hi);} return H; } MPS DC0(int L, double t1, double t2, double U, double V) { int n=4*L; MPS N0=Ni(n,0), N1=Ni(n,1), N2=Ni(n,2), N3=Ni(n,3); MPS H01=MPO_multiply(N0,N1); MPS H23=MPO_multiply(N2,N3); MPS Hi=MPS_add(H01,H23); MPS_scale(Hi,U); MPS H=Hi; H01=MPS_add(N0,N1); H23=MPS_add(N2,N3); Hi=MPO_multiply(H01,H23); MPS_scale(Hi,V); H=MPS_add(H,Hi); for(int i=1;i<L;i++) { for(int j=0;j<4;j++) {Hi=hopp(n,4*(i-1)+j,4*i+j); H=MPS_add(H,Hi);} N0=Ni(n,4*i); N1=Ni(n,4*i+1); N2=Ni(n,4*i+2); N3=Ni(n,4*i+3); H01=MPO_multiply(N0,N1); H23=MPO_multiply(N2,N3); Hi=MPS_add(H01,H23); MPS_scale(Hi,U); H=MPS_add(H,Hi); H01=MPS_add(N0,N1); H23=MPS_add(N2,N3); Hi=MPO_multiply(H01,H23); MPS_scale(Hi,V); H=MPS_add(H,Hi); } return(H); } MPS DC(int L, double t1, double t2, double U, double V) { int n=4*L; MPS N0=Ni(n,0), N1=Ni(n,L), N2=Ni(n,2*L), N3=Ni(n,3*L); MPS H01=MPO_multiply(N0,N1); MPS H23=MPO_multiply(N2,N3); MPS Hi=MPS_add(H01,H23); MPS_scale(Hi,U); MPS H=Hi; H01=MPS_add(N0,N1); H23=MPS_add(N2,N3); Hi=MPO_multiply(H01,H23); MPS_scale(Hi,V); H=MPS_add(H,Hi); for(int i=1;i<L;i++) { for(int j=0;j<4;j++) {Hi=hopp(n,(i-1)+j*L,i+j*L); MPS_scale(Hi,((j<2) ? 
t1 : t2)); H=MPS_add(H,Hi);} N0=Ni(n,i); N1=Ni(n,i+L); N2=Ni(n,i+2*L); N3=Ni(n,i+3*L); H01=MPO_multiply(N0,N1); H23=MPO_multiply(N2,N3); Hi=MPS_add(H01,H23); MPS_scale(Hi,U); H=MPS_add(H,Hi); H01=MPS_add(N0,N1); H23=MPS_add(N2,N3); Hi=MPO_multiply(H01,H23); MPS_scale(Hi,V); H=MPS_add(H,Hi); } return(H); } double ground(mat B){ vec eval; mat evec; eig_sym(eval,evec,B); double minenerg =eval(0); return(minenerg); } mat I_mat(int l){ int d=1<<l; mat I(d,d,fill::eye); return(I); } mat N_mat(int n, int Ni) { mat N(2,2,fill::zeros); N(0,0)=1; mat M=kron(kron(I_mat(Ni),N), I_mat(n-Ni-1)); return M; } mat NiNj(int n, int N1, int N2) { mat N(2,2,fill::zeros); N(0,0)=1; mat M=kron(kron(kron(I_mat(N1),N), kron(I_mat(N2-N1-1),N) ), I_mat(n-N2-1)); return M; } mat hopp_mat(int n, int n1, int n2) { mat Cp(2,2,fill::zeros), Cm(2,2,fill::zeros); Cp(0,1)=1; Cm(1,0)=1; mat h1=kron( I_mat(n1), kron( kron(Cm,I_mat(n2-n1-1)), kron(Cp,I_mat(n-n2-1)) ) ); mat h2=kron( I_mat(n1), kron( kron(Cp,I_mat(n2-n1-1)), kron(Cm,I_mat(n-n2-1)) ) ); mat h=h1+h2; return h; } mat DC_mat(int L, double t1, double t2, double U, double V) { int n=4*L; //int dim=1<<n; int n0=0, n1=L, n2=2*L, n3=3*L; mat H =U*( NiNj(n,n0,n1) + NiNj(n,n2,n3)); H+=V*( NiNj(n,n0,n2) + NiNj(n,n0,n3) + NiNj(n,n1,n2) + NiNj(n,n1,n3) ); for(int i=1;i<L;i++) { for(int j=0;j<4;j++) H+=((j<2) ? t1 : t2)*hopp_mat(n,(i-1)+j*L,i+j*L); n0=i; n1=i+L; n2=i+2*L; n3=i+3*L; H+=U*( NiNj(n,n0,n1) + NiNj(n,n2,n3)); H+=V*( NiNj(n,n0,n2) + NiNj(n,n0,n3) + NiNj(n,n1,n2) + NiNj(n,n1,n3) ); } return(H); } double DMRG(MPS H, int m, int nloops) { int n=H.n, d=sqrt(H.d), m_H=H.m; H.canonize(1); MPS S(n, m, d); S.canonize(0); S.normalize(); vector<cube> L, R; L.resize(n); R.resize(n); cube Hcube(d*d,m_H,1), Scube(d,m,1); for(int i=0; i<n; i++) {L[i]=cube(m,m_H,m); R[i]=cube(m,m_H,m);} contract_0( R[n-2], S.sf, S.sf, H.sf, m, m_H, d, false); for(int i=n-3; i>=0; i--) contract_step(R[i+1], R[i], S.C[i], S.C[i], H.C[i], m, m_H, d, false); for(int lap=0; lap<nloops; lap++) { if(lap%1==0) cout<<"############################## "<<"[sweep = "<<lap<<"]"<<endl; Hcube.reshape(d*d,m_H,1); Scube.reshape(d,m,1); Hcube.slice(0)=H.s0; Scube.slice(0)=S.s0; Lanczos(R[0], R[0], Hcube, Scube, true, true); S.sweep(true); contract_0( L[1], S.s0, S.s0, H.s0, m, m_H, d, true); for(int site=1; site<n-1; site++) { cout<<"(sitio = "<<site<<")\t"; cout<< sandwich(S, S, H) <<endl; Lanczos(L[site], R[site], H.C[site-1], S.C[site-1], false); S.sweep(true); contract_step(L[site], L[site+1], S.C[site-1], S.C[site-1], H.C[site-1], m, m_H, d, true); } Hcube.reshape(m_H,d*d,1); Scube.reshape(m,d,1); Hcube.slice(0)=H.sf; Scube.slice(0)=S.sf; Lanczos(L[n-1], L[n-1], Hcube, Scube, true, false); S.sweep(false); contract_0( R[n-2], S.sf, S.sf, H.sf, m, m_H, d, false); for(int site=n-2; site>0; site--) { cout<<"(sitio = "<<site<<")\t"; cout<< sandwich(S, S, H) <<endl; Lanczos(L[site], R[site], H.C[site-1], S.C[site-1], false); S.sweep(false); contract_step(R[site], R[site-1], S.C[site-1], S.C[site-1], H.C[site-1], m, m_H, d, false); } } S.sweep(true); return(sandwich(S, S, H)); } int main(int argc, char const *argv[]) { srand(time(0)); arma_rng::set_seed(rand()); int L=3; double t1=1, t2=1, U=0, V=0; int mMPS=10, nsweeps=10; if(argc==2) mMPS= atof(argv[1]); MPS H=DC(L, t1, t2, U, V); //MPS H=TB(L); cout<<endl<<"Energy = \t"<<DMRG(H, mMPS, nsweeps)<<endl; cout<<endl<<"4 * TB_exact = \t"<<4*tb_exact(L)<<endl; return 0; }
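The DMRG/MPS pair above (the hipified source and f15de506ed94c29b467c7b3b07b7021e995e5a58.cu) differs only in the spellings that hipify rewrites: cudaMalloc/cudaMemcpy/cudaFree become hipMalloc/hipMemcpy/hipFree, cublas*/CUBLAS_STATUS_SUCCESS become hipblas*/HIPBLAS_STATUS_SUCCESS, and the <<<grid, block>>> launch becomes hipLaunchKernelGGL. A minimal, self-contained CUDA sketch of those mappings, with the HIP spelling noted in trailing comments (the saxpy kernel and all sizes are invented for illustration, not taken from the files):

// Minimal CUDA sketch; HIP equivalents produced by hipify are shown in comments.
#include <cuda_runtime.h>   // HIP: #include "hip/hip_runtime.h"
#include <cstdio>
#include <vector>

__global__ void saxpy(int n, double a, const double *x, double *y) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;           // identical in both dialects
    if (i < n) y[i] += a * x[i];
}

int main() {
    const int n = 1 << 10;
    std::vector<double> hx(n, 1.0), hy(n, 2.0);
    double *x, *y;
    cudaMalloc((void**)&x, n * sizeof(double));               // hipMalloc((void**)&x, ...)
    cudaMalloc((void**)&y, n * sizeof(double));
    cudaMemcpy(x, hx.data(), n * sizeof(double), cudaMemcpyHostToDevice);  // hipMemcpy(..., hipMemcpyHostToDevice)
    cudaMemcpy(y, hy.data(), n * sizeof(double), cudaMemcpyHostToDevice);
    saxpy<<<(n + 255) / 256, 256>>>(n, 3.0, x, y);            // hipLaunchKernelGGL(saxpy, dim3((n+255)/256), dim3(256), 0, 0, n, 3.0, x, y)
    cudaDeviceSynchronize();                                  // hipDeviceSynchronize()
    cudaMemcpy(hy.data(), y, n * sizeof(double), cudaMemcpyDeviceToHost);
    printf("y[0] = %f (expected 5.0)\n", hy[0]);
    cudaFree(x); cudaFree(y);                                 // hipFree(x); hipFree(y)
    return 0;
}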
dd1b6ec0dc6130a3d2d0814d142052f2b28ead96.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zgeellrtmv.cu normal z -> c, Tue Sep 2 12:38:32 2014 */ #include "common_magma.h" //F. Vzquez, G. Ortega, J.J. Fernndez, E.M. Garzn, Almeria University __global__ void cgeellrtmv_kernel_32( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowlength, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = (d_rowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ]; //int col = d_colind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ]; int col = d_colind [ k*(T)+(i*alignment)+idp ]; dot += val * d_x[ col ]; } shared[idb] = dot; if( idp < 16 ){ shared[idb]+=shared[idb+16]; if( idp < 8 ) shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { d_y[i] = (shared[idb]+shared[idb+1])*alpha + beta*d_y [i]; } } } } //F. Vzquez, G. Ortega, J.J. Fernndez, E.M. Garzn, Almeria University __global__ void cgeellrtmv_kernel_16( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowlength, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = (d_rowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ]; //int col = d_colind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ]; int col = d_colind [ k*(T)+(i*alignment)+idp ]; dot += val * d_x[ col ]; } shared[idb] = dot; if( idp < 8 ){ shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { d_y[i] = (shared[idb]+shared[idb+1])*alpha + beta*d_y [i]; } } } } //F. Vzquez, G. Ortega, J.J. Fernndez, E.M. 
Garzn, Almeria University __global__ void cgeellrtmv_kernel_8( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowlength, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = (d_rowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ]; //int col = d_colind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ]; int col = d_colind [ k*(T)+(i*alignment)+idp ]; dot += val * d_x[ col ]; } shared[idb] = dot; if( idp < 4 ){ shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { d_y[i] = (shared[idb]+shared[idb+1])*alpha + beta*d_y [i]; } } } } /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. Input format is ELLRT. The ideas are taken from "Improving the performance of the sparse matrix vector product with GPUs", (CIT 2010), and modified to provide correct values. Arguments --------- @param transA magma_trans_t transposition parameter for A @param m magma_int_t number of rows @param n magma_int_t number of columns @param nnz_per_row magma_int_t max number of nonzeros in a row @param alpha magmaFloatComplex scalar alpha @param d_val magmaFloatComplex* val array @param d_colind magma_int_t* col indices @param d_rowlength magma_int_t* number of elements in each row @param d_x magmaFloatComplex* input vector x @param beta magmaFloatComplex scalar beta @param d_y magmaFloatComplex* output vector y @param blocksize magma_int_t threads per block @param alignment magma_int_t threads assigned to each row @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_cgeellrtmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowlength, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y, magma_int_t alignment, magma_int_t blocksize ){ int num_blocks = ( (m+blocksize-1)/blocksize); int num_threads = alignment*blocksize; int real_row_length = ((int)(nnz_per_row+alignment-1)/alignment) *alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = sqrt(num_blocks); int dimgrid2 = (num_blocks + dimgrid1 -1 ) / dimgrid1; dim3 grid( dimgrid1, dimgrid2, 1); int Ms = alignment * blocksize * sizeof( magmaFloatComplex ); // printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms); if( alignment == 32 ){ hipLaunchKernelGGL(( cgeellrtmv_kernel_32), dim3(grid), dim3(num_threads) , Ms, magma_stream , m, n, alpha, d_val, d_colind, d_rowlength, d_x, beta, d_y, alignment, real_row_length ); } else if( alignment == 16 ){ hipLaunchKernelGGL(( cgeellrtmv_kernel_16), dim3(grid), dim3(num_threads) , Ms, magma_stream , m, n, alpha, d_val, d_colind, d_rowlength, d_x, 
beta, d_y, alignment, real_row_length ); } else if( alignment == 8 ){ hipLaunchKernelGGL(( cgeellrtmv_kernel_8), dim3(grid), dim3(num_threads) , Ms, magma_stream , m, n, alpha, d_val, d_colind, d_rowlength, d_x, beta, d_y, alignment, real_row_length ); } else{ printf("error: alignment %d not supported.\n", alignment); exit(-1); } return MAGMA_SUCCESS; }
dd1b6ec0dc6130a3d2d0814d142052f2b28ead96.cu
/* -- MAGMA (version 1.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date September 2014 @generated from zgeellrtmv.cu normal z -> c, Tue Sep 2 12:38:32 2014 */ #include "common_magma.h" //F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University __global__ void cgeellrtmv_kernel_32( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowlength, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = (d_rowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ]; //int col = d_colind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ]; int col = d_colind [ k*(T)+(i*alignment)+idp ]; dot += val * d_x[ col ]; } shared[idb] = dot; if( idp < 16 ){ shared[idb]+=shared[idb+16]; if( idp < 8 ) shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { d_y[i] = (shared[idb]+shared[idb+1])*alpha + beta*d_y [i]; } } } } //F. Vázquez, G. Ortega, J.J. Fernández, E.M. Garzón, Almeria University __global__ void cgeellrtmv_kernel_16( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowlength, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = (d_rowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ]; //int col = d_colind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ]; int col = d_colind [ k*(T)+(i*alignment)+idp ]; dot += val * d_x[ col ]; } shared[idb] = dot; if( idp < 8 ){ shared[idb]+=shared[idb+8]; if( idp < 4 ) shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { d_y[i] = (shared[idb]+shared[idb+1])*alpha + beta*d_y [i]; } } } } //F. Vázquez, G. Ortega, J.J. Fernández, E.M. 
Garzón, Almeria University __global__ void cgeellrtmv_kernel_8( int num_rows, int num_cols, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowlength, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y, int T, int alignment ) { int idx = blockIdx.y * gridDim.x * blockDim.x + blockDim.x * blockIdx.x + threadIdx.x ; // global thread index int idb = threadIdx.x ; // local thread index int idp = idb%T; // number of threads assigned to one row int i = idx/T; // row index extern __shared__ magmaFloatComplex shared[]; if(i < num_rows ){ magmaFloatComplex dot = MAGMA_C_MAKE(0.0, 0.0); int max_ = (d_rowlength[i]+T-1)/T; // number of elements each thread handles for ( int k = 0; k < max_ ; k++ ){ // original code in paper (not working for me) //magmaFloatComplex val = d_val[ k*(T*alignment)+(i*T)+idp ]; //int col = d_colind [ k*(T*alignment)+(i*T)+idp ]; // new code (working for me) magmaFloatComplex val = d_val[ k*(T)+(i*alignment)+idp ]; int col = d_colind [ k*(T)+(i*alignment)+idp ]; dot += val * d_x[ col ]; } shared[idb] = dot; if( idp < 4 ){ shared[idb]+=shared[idb+4]; if( idp < 2 ) shared[idb]+=shared[idb+2]; if( idp == 0 ) { d_y[i] = (shared[idb]+shared[idb+1])*alpha + beta*d_y [i]; } } } } /** Purpose ------- This routine computes y = alpha * A * x + beta * y on the GPU. Input format is ELLRT. The ideas are taken from "Improving the performance of the sparse matrix vector product with GPUs", (CIT 2010), and modified to provide correct values. Arguments --------- @param transA magma_trans_t transposition parameter for A @param m magma_int_t number of rows @param n magma_int_t number of columns @param nnz_per_row magma_int_t max number of nonzeros in a row @param alpha magmaFloatComplex scalar alpha @param d_val magmaFloatComplex* val array @param d_colind magma_int_t* col indices @param d_rowlength magma_int_t* number of elements in each row @param d_x magmaFloatComplex* input vector x @param beta magmaFloatComplex scalar beta @param d_y magmaFloatComplex* output vector y @param blocksize magma_int_t threads per block @param alignment magma_int_t threads assigned to each row @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_cgeellrtmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t nnz_per_row, magmaFloatComplex alpha, magmaFloatComplex *d_val, magma_index_t *d_colind, magma_index_t *d_rowlength, magmaFloatComplex *d_x, magmaFloatComplex beta, magmaFloatComplex *d_y, magma_int_t alignment, magma_int_t blocksize ){ int num_blocks = ( (m+blocksize-1)/blocksize); int num_threads = alignment*blocksize; int real_row_length = ((int)(nnz_per_row+alignment-1)/alignment) *alignment; magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 && num_threads > 256 ) printf("error: too much shared memory requested.\n"); int dimgrid1 = sqrt(num_blocks); int dimgrid2 = (num_blocks + dimgrid1 -1 ) / dimgrid1; dim3 grid( dimgrid1, dimgrid2, 1); int Ms = alignment * blocksize * sizeof( magmaFloatComplex ); // printf("launch kernel: %dx%d %d %d\n", grid.x, grid.y, num_threads , Ms); if( alignment == 32 ){ cgeellrtmv_kernel_32<<< grid, num_threads , Ms, magma_stream >>> ( m, n, alpha, d_val, d_colind, d_rowlength, d_x, beta, d_y, alignment, real_row_length ); } else if( alignment == 16 ){ cgeellrtmv_kernel_16<<< grid, num_threads , Ms, magma_stream >>> ( m, n, alpha, d_val, d_colind, d_rowlength, d_x, beta, d_y, alignment, real_row_length ); } else if( 
alignment == 8 ){ cgeellrtmv_kernel_8<<< grid, num_threads , Ms, magma_stream >>> ( m, n, alpha, d_val, d_colind, d_rowlength, d_x, beta, d_y, alignment, real_row_length ); } else{ printf("error: alignment %d not supported.\n", alignment); exit(-1); } return MAGMA_SUCCESS; }
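The three ELLRT kernels in this pair share one access pattern: T threads cooperate on a row (i = idx/T, lane idp = idx%T), each lane strides through the row's value slab in steps of T, and the T partial dot products are combined with the shared-memory reduction at the end. The sketch below is a hypothetical CPU helper, not MAGMA code, that builds the padded value/column arrays in the layout the working indexing d_val[ k*T + i*alignment + idp ] assumes; note that inside the kernels the parameter named "alignment" actually receives real_row_length from the host wrapper, so each row i owns a contiguous, zero-padded slab of real_row_length entries.

// Hypothetical helper (not part of MAGMA) sketching the padded ELLRT layout.
#include <algorithm>
#include <utility>
#include <vector>

struct EllrtToy {
    int num_rows;
    int real_row_length;            // ceil(max_row_nnz / alignment) * alignment
    std::vector<float> val;         // num_rows * real_row_length, one slab per row
    std::vector<int> colind;        // same shape as val
    std::vector<int> rowlength;     // true nnz per row (what d_rowlength holds)
};

EllrtToy build_ellrt(const std::vector<std::vector<std::pair<int, float> > >& rows,
                     int alignment) {
    int max_nnz = 0;
    for (size_t i = 0; i < rows.size(); ++i)
        max_nnz = std::max(max_nnz, (int)rows[i].size());
    EllrtToy A;
    A.num_rows = (int)rows.size();
    A.real_row_length = ((max_nnz + alignment - 1) / alignment) * alignment;
    A.val.assign(A.num_rows * A.real_row_length, 0.0f);
    A.colind.assign(A.num_rows * A.real_row_length, 0);  // padded entries: column 0, value 0
    A.rowlength.resize(A.num_rows);
    for (int i = 0; i < A.num_rows; ++i) {
        A.rowlength[i] = (int)rows[i].size();
        for (int k = 0; k < (int)rows[i].size(); ++k) {
            A.val[i * A.real_row_length + k] = rows[i][k].second;
            A.colind[i * A.real_row_length + k] = rows[i][k].first;
        }
    }
    return A;
}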
max_pool_with_index.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/operators/max_pool_with_index_gpu.h" #include "caffe2/utils/conversions.h" namespace caffe2 { namespace { /*** * Note: CUDA kernels are minor changes from those at: * https://github.com/BVLC/caffe/blob/master/src/caffe/layers/pooling_layer.cu * Originally licensed under BSD **/ template <typename Dtype> __global__ void MaxPoolForward( const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, int* mask) { CUDA_1D_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); float maxval = -FLT_MAX; int maxidx = -1; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (convert::To<Dtype, float>(bottom_slice[h * width + w]) > maxval) { maxidx = h * width + w; maxval = convert::To<Dtype, float>(bottom_slice[maxidx]); } } } top_data[index] = convert::To<float, Dtype>(maxval); mask[index] = maxidx; } } template <typename Dtype> __global__ void MaxPoolBackward( const int nthreads, const Dtype* const top_diff, const int* const mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int phend = min((h + pad_h) / stride_h + 1, pooled_height); const int pwstart = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); float gradient = 0; const int offset = (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + offset; const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += convert::To<Dtype, float>(top_diff_slice[ph * pooled_width + pw]); } } } bottom_diff[index] = convert::To<float, Dtype>(gradient); } } }; template <typename T> bool MaxPoolWithIndexOp::DoRunWithType() { auto& X = Input(0); auto sizes = ConvPoolOpBase<CUDAContext>::GetOutputSize(X, X.dim32(1)); auto* Y = Output(0, sizes, at::dtype<T>()); int output_size = Y->numel(); auto* mask = Output(1, {output_size}, at::dtype<int>()); hipLaunchKernelGGL(( MaxPoolForward<T>) , dim3(CAFFE_GET_BLOCKS(output_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), output_size, X.data<T>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->template mutable_data<T>(), mask->template mutable_data<int>()); C10_HIP_KERNEL_LAUNCH_CHECK(); return true; } bool MaxPoolWithIndexOp::RunOnDevice() { auto& X = Input(0); CAFFE_ENFORCE(X.dim() == 4, "Operator only supports 4D tensors"); if (X.IsType<float>()) { return DoRunWithType<float>(); } else if (X.IsType<at::Half>()) { return DoRunWithType<at::Half>(); } else { CAFFE_THROW("Unsupported input type"); } } template <typename T> bool MaxPoolWithIndexGradientOp::DoRunWithType() { auto& X = Input(0); auto& dY = Input(1); auto& mask = Input(2); CAFFE_ENFORCE(X.dim() == 4, "Operator only supports 4D tensors"); auto* dX = Output(0, X.sizes(), at::dtype<T>()); ConvPoolOpBase<CUDAContext>::ComputePads(vector<int>{X.dim32(2), X.dim32(3)}); hipLaunchKernelGGL(( MaxPoolBackward<T>), dim3(CAFFE_GET_BLOCKS(X.numel())), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), X.numel(), dY.data<T>(), mask.data<int>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<T>()); C10_HIP_KERNEL_LAUNCH_CHECK(); return true; } bool MaxPoolWithIndexGradientOp::RunOnDevice() { auto& X = Input(0); if (X.IsType<float>()) { return DoRunWithType<float>(); } else if (X.IsType<at::Half>()) { return DoRunWithType<at::Half>(); } else { CAFFE_THROW("Unsupported input type"); } } namespace { REGISTER_CUDA_OPERATOR(MaxPoolWithIndex, MaxPoolWithIndexOp); REGISTER_CUDA_OPERATOR(MaxPoolWithIndexGradient, MaxPoolWithIndexGradientOp); class GetMaxPoolWithIndexGradient : public GradientMakerBase { using GradientMakerBase::GradientMakerBase; vector<OperatorDef> GetGradientDefs() override { return SingleGradientDef( "MaxPoolWithIndexGradient", "", vector<string>{I(0), GO(0), O(1)}, vector<string>{GI(0)}); } }; REGISTER_GRADIENT(MaxPoolWithIndex, GetMaxPoolWithIndexGradient); OPERATOR_SCHEMA(MaxPoolWithIndexGradient); OPERATOR_SCHEMA(MaxPoolWithIndex) .NumInputs(1) .NumOutputs(2) .TensorInferenceFunction(ConvPoolOpBase<CPUContext>::TensorInferenceForPool) .SetDoc(R"DOC( MaxPoolWithIndex consumes an input blob X and applies max pooling across the blob according to kernel sizes, stride sizes and pad lengths defined by the ConvPoolOpBase operator. 
It also produces an explicit mask that defines the location that all maximum values were found, which is re-used in the gradient pass. This op is deterministic. )DOC") .Input( 0, "X", "Input data tensor from the previous operator; dimensions " "depend on whether the NCHW or NHWC operators are being used. For " "example, in the former, the input has size (N x C x H x W), where N is" " the batch size, C is the number of channels, and H and W are the " "height and the width of the data. The corresponding permutation of " "dimensions is used in the latter case. ") .Output( 0, "Y", "Output data tensor from average pooling across the input " "tensor. Dimensions will vary based on various kernel, stride, and pad " "sizes.") .Output( 1, "Index", "Mask of location indices of the found maximum values, " " used in the gradient operator to accumulate dY values to the " "appropriate locations in Y"); }; }; // namespace caffe2
max_pool_with_index.cu
#include "caffe2/operators/max_pool_with_index_gpu.h" #include "caffe2/utils/conversions.h" namespace caffe2 { namespace { /*** * Note: CUDA kernels are minor changes from those at: * https://github.com/BVLC/caffe/blob/master/src/caffe/layers/pooling_layer.cu * Originally licensed under BSD **/ template <typename Dtype> __global__ void MaxPoolForward( const int nthreads, const Dtype* const bottom_data, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const top_data, int* mask) { CUDA_1D_KERNEL_LOOP(index, nthreads) { const int pw = index % pooled_width; const int ph = (index / pooled_width) % pooled_height; const int c = (index / pooled_width / pooled_height) % channels; const int n = index / pooled_width / pooled_height / channels; int hstart = ph * stride_h - pad_h; int wstart = pw * stride_w - pad_w; const int hend = min(hstart + kernel_h, height); const int wend = min(wstart + kernel_w, width); hstart = max(hstart, 0); wstart = max(wstart, 0); float maxval = -FLT_MAX; int maxidx = -1; const Dtype* const bottom_slice = bottom_data + (n * channels + c) * height * width; for (int h = hstart; h < hend; ++h) { for (int w = wstart; w < wend; ++w) { if (convert::To<Dtype, float>(bottom_slice[h * width + w]) > maxval) { maxidx = h * width + w; maxval = convert::To<Dtype, float>(bottom_slice[maxidx]); } } } top_data[index] = convert::To<float, Dtype>(maxval); mask[index] = maxidx; } } template <typename Dtype> __global__ void MaxPoolBackward( const int nthreads, const Dtype* const top_diff, const int* const mask, const int num, const int channels, const int height, const int width, const int pooled_height, const int pooled_width, const int kernel_h, const int kernel_w, const int stride_h, const int stride_w, const int pad_h, const int pad_w, Dtype* const bottom_diff) { CUDA_1D_KERNEL_LOOP(index, nthreads) { // find out the local index // find out the local offset const int w = index % width; const int h = (index / width) % height; const int c = (index / width / height) % channels; const int n = index / width / height / channels; const int phstart = (h + pad_h < kernel_h) ? 0 : (h + pad_h - kernel_h) / stride_h + 1; const int phend = min((h + pad_h) / stride_h + 1, pooled_height); const int pwstart = (w + pad_w < kernel_w) ? 
0 : (w + pad_w - kernel_w) / stride_w + 1; const int pwend = min((w + pad_w) / stride_w + 1, pooled_width); float gradient = 0; const int offset = (n * channels + c) * pooled_height * pooled_width; const Dtype* const top_diff_slice = top_diff + offset; const int* const mask_slice = mask + offset; for (int ph = phstart; ph < phend; ++ph) { for (int pw = pwstart; pw < pwend; ++pw) { if (mask_slice[ph * pooled_width + pw] == h * width + w) { gradient += convert::To<Dtype, float>(top_diff_slice[ph * pooled_width + pw]); } } } bottom_diff[index] = convert::To<float, Dtype>(gradient); } } }; template <typename T> bool MaxPoolWithIndexOp::DoRunWithType() { auto& X = Input(0); auto sizes = ConvPoolOpBase<CUDAContext>::GetOutputSize(X, X.dim32(1)); auto* Y = Output(0, sizes, at::dtype<T>()); int output_size = Y->numel(); auto* mask = Output(1, {output_size}, at::dtype<int>()); MaxPoolForward<T> <<<CAFFE_GET_BLOCKS(output_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( output_size, X.data<T>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), Y->dim32(2), Y->dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), Y->template mutable_data<T>(), mask->template mutable_data<int>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); return true; } bool MaxPoolWithIndexOp::RunOnDevice() { auto& X = Input(0); CAFFE_ENFORCE(X.dim() == 4, "Operator only supports 4D tensors"); if (X.IsType<float>()) { return DoRunWithType<float>(); } else if (X.IsType<at::Half>()) { return DoRunWithType<at::Half>(); } else { CAFFE_THROW("Unsupported input type"); } } template <typename T> bool MaxPoolWithIndexGradientOp::DoRunWithType() { auto& X = Input(0); auto& dY = Input(1); auto& mask = Input(2); CAFFE_ENFORCE(X.dim() == 4, "Operator only supports 4D tensors"); auto* dX = Output(0, X.sizes(), at::dtype<T>()); ConvPoolOpBase<CUDAContext>::ComputePads(vector<int>{X.dim32(2), X.dim32(3)}); MaxPoolBackward<T><<< CAFFE_GET_BLOCKS(X.numel()), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( X.numel(), dY.data<T>(), mask.data<int>(), X.dim32(0), X.dim32(1), X.dim32(2), X.dim32(3), dY.dim32(2), dY.dim32(3), kernel_h(), kernel_w(), stride_h(), stride_w(), pad_t(), pad_l(), dX->template mutable_data<T>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); return true; } bool MaxPoolWithIndexGradientOp::RunOnDevice() { auto& X = Input(0); if (X.IsType<float>()) { return DoRunWithType<float>(); } else if (X.IsType<at::Half>()) { return DoRunWithType<at::Half>(); } else { CAFFE_THROW("Unsupported input type"); } } namespace { REGISTER_CUDA_OPERATOR(MaxPoolWithIndex, MaxPoolWithIndexOp); REGISTER_CUDA_OPERATOR(MaxPoolWithIndexGradient, MaxPoolWithIndexGradientOp); class GetMaxPoolWithIndexGradient : public GradientMakerBase { using GradientMakerBase::GradientMakerBase; vector<OperatorDef> GetGradientDefs() override { return SingleGradientDef( "MaxPoolWithIndexGradient", "", vector<string>{I(0), GO(0), O(1)}, vector<string>{GI(0)}); } }; REGISTER_GRADIENT(MaxPoolWithIndex, GetMaxPoolWithIndexGradient); OPERATOR_SCHEMA(MaxPoolWithIndexGradient); OPERATOR_SCHEMA(MaxPoolWithIndex) .NumInputs(1) .NumOutputs(2) .TensorInferenceFunction(ConvPoolOpBase<CPUContext>::TensorInferenceForPool) .SetDoc(R"DOC( MaxPoolWithIndex consumes an input blob X and applies max pooling across the blob according to kernel sizes, stride sizes and pad lengths defined by the ConvPoolOpBase operator. It also produces an explicit mask that defines the location that all maximum values were found, which is re-used in the gradient pass. This op is deterministic. 
)DOC") .Input( 0, "X", "Input data tensor from the previous operator; dimensions " "depend on whether the NCHW or NHWC operators are being used. For " "example, in the former, the input has size (N x C x H x W), where N is" " the batch size, C is the number of channels, and H and W are the " "height and the width of the data. The corresponding permutation of " "dimensions is used in the latter case. ") .Output( 0, "Y", "Output data tensor from average pooling across the input " "tensor. Dimensions will vary based on various kernel, stride, and pad " "sizes.") .Output( 1, "Index", "Mask of location indices of the found maximum values, " " used in the gradient operator to accumulate dY values to the " "appropriate locations in Y"); }; }; // namespace caffe2
forward.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "head.h" #define tpb 256 extern double *d_t; extern double *d_it; extern double *d_V; extern double *d_dV2; extern double *d_Vnew; extern double *d_m; extern double *d_h; extern double *d_jj; extern double *d_d; extern double *d_f; extern double *d_X; extern double *d_cai; /* extern double *d_m0; extern double *d_h0; extern double *d_jj0; extern double *d_d0; extern double *d_f0; extern double *d_X0; */ extern double *d_dVdt; //extern double *dcai; extern double *d_isi; extern double *d_D1V; extern double *d_D2V; //extern int ncount; __global__ void boundary(double *d_V){ int k = blockDim.x * blockIdx.x + threadIdx.x; if(k<nx){ d_V[(k+1)*(nx+2)] = d_V[(k+1)*(nx+2)+1]; d_V[(k+1)*(nx+2)+(nx+1)] = d_V[(k+1)*(nx+2)+nx]; d_V[k+1] = d_V[k+1+(nx+2)]; d_V[(ny+1)*(nx+2)+k+1] = d_V[ny*(nx+2)+k+1]; } } void bc(){ int bpg; //tpb = 256; bpg = (nx+tpb-1)/tpb; hipLaunchKernelGGL(( boundary), dim3(bpg), dim3(tpb), 0, 0, d_V); //hipDeviceSynchronize(); } __global__ void comp_dV2(double *d_V ,double *d_dV2){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int i = (int)(k/nx); int id = k+(nx+2)+1+(2*i); d_dV2[k] = D*((d_V[id+1] + d_V[id-1] - 2*d_V[id]) / (dx*dx) + (d_V[id+(nx+2)] + d_V[id-(nx+2)] - 2*d_V[id])/(dy*dy)); } } void dV2(){ int bpg; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( comp_dV2), dim3(bpg), dim3(tpb), 0, 0, d_V, d_dV2); //hipDeviceSynchronize(); } __device__ void comp_it(double *d_V, double *d_m, double *d_h, double *d_jj, double *d_d, double *d_f, double *d_cai, double *d_isi, double *d_X, double *d_it, int I, int i, int k, double *d_t) { d_it[k] = 0.0; int id = k+nx+2+1+2*i; //comp_ina //double gna = 23; //double ena = ((R*temp) / frdy)*log(nao / nai); /* double am = 0.32*(d_V[k+nx+2+1+2*i] + 47.13) / (1 - exp(-0.1*(d_V[k+nx+2+1+2*i] + 47.13))); double bm = 0.08*exp(-d_V[k+nx+2+1+2*i] / 11); double ah, bh, aj ,bj; if (d_V[k+nx+2+1+2*i] < -40.0) { ah = 0.135*exp((80 + d_V[k+nx+2+1+2*i]) / -6.8); bh = 3.56*exp(0.079*d_V[k+nx+2+1+2*i]) + 310000 * exp(0.35*d_V[k+nx+2+1+2*i]); aj = (-127140 * exp(0.2444*d_V[k+nx+2+1+2*i]) - 0.00003474*exp(-0.04391*d_V[k+nx+2+1+2*i]))* ((d_V[k+nx+2+1+2*i] + 37.78)/(1 + exp(0.311*(d_V[k+nx+2+1+2*i] + 79.23)))); bj = (0.1212*exp(-0.01052*d_V[k+nx+2+1+2*i])) / (1 + exp(-0.1378*(d_V[k+nx+2+1+2*i] + 40.14))); } else { ah = 0; bh = 1 / (0.13*(1 + exp((d_V[k+nx+2+1+2*i] + 10.66) / -11.1))); aj = 0; bj = (0.3*exp(-0.0000002535*d_V[k+nx+2+1+2*i])) / (1 + exp(-0.1*(d_V[k+nx+2+1+2*i] + 32))); } double mtau = 1 / (am + bm); double htau = 1 / (ah + bh); double jtau = 1 / (aj + bj); double mss = am*mtau; double hss = ah*htau; double jss = aj*jtau; d_m0[k] = mss - (mss - d_m[k])*exp(-d_t[k] / mtau); d_h0[k] = hss - (hss - d_h[k])*exp(-d_t[k] / htau); d_jj0[k] = jss - (jss - d_jj[k])*exp(-d_t[k] / jtau); */ d_it[k] += gna*d_m[k] * d_m[k] * d_m[k] * d_h[k] * d_jj[k] * (d_V[id] - ena); //comp_ical __shared__ double esi[tpb]; //__shared__ double isi[tpb]; esi[I] = 7.7 - 13.0287*log(d_cai[k]); /* double ad = 50 * 0.095*exp(-0.01*(d_V[k+nx+2+1+2*i] - 5)) / (1 + exp(-0.072*(d_V[k+nx+2+1+2*i] - 5))); double bd = 50 * 0.07*exp(-0.017*(d_V[k+nx+2+1+2*i] + 44)) / (1 + exp(0.05*(d_V[k+nx+2+1+2*i] + 44))); double af = 50 * 0.012*exp(-0.008*(d_V[k+nx+2+1+2*i] + 28)) / (1 + exp(0.15*(d_V[k+nx+2+1+2*i] + 28))); double bf = 50 * 0.0065*exp(-0.02*(d_V[k+nx+2+1+2*i] + 30)) / (1 + exp(-0.2*(d_V[k+nx+2+1+2*i] + 30))); double taud = 1 / (ad + bd); double tauf = 1 / (af + bf); 
double dss = ad*taud; double fss = af*tauf; d_d0[k] = dss - (dss - d_d[k])*exp(-d_t[k] / taud); d_f0[k] = fss - (fss - d_f[k])*exp(-d_t[k] / tauf); */ d_isi[k] = 0.09*d_d[k] * d_f[k] * (d_V[id] - esi[I]); //dcai[k] = -0.0001*isi[I] + 0.07*(0.0001 - d_cai[k]); //d_cai[k] = d_cai[k] + dcai*dt; d_it[k] = d_it[k] + d_isi[k]; //comp_ik /* double gk = 0.282*sqrt(ko / 5.4); double ek = ((R*temp) / frdy)*log(ko / ki); //double prnak = 0.01833; //ek = ((R*temp) / frdy)*log((ko + prnak*nao) / (ki + prnak*nai)); double ax = 50 * 0.0005*exp(0.083*(d_V[k+nx+2+1+2*i] + 50)) / (1 + exp(0.057*(d_V[k+nx+2+1+2*i] + 50))); double bx = 50 * 0.0013*exp(-0.06*(d_V[k+nx+2+1+2*i] + 20)) / (1 + exp(-0.04*(d_V[k+nx+2+1+2*i] + 20))); double taux = 1 / (ax + bx); double xss = ax*taux; d_X0[k] = xss - (xss - d_X[k])*exp(-d_t[k] / taux); */ double Xi; if (d_V[id] > -100) { Xi = 2.837*(exp(0.04*(d_V[id] + 77)) - 1)/((d_V[id] + 77)*exp(0.04*(d_V[id] + 35))); } else { Xi = 1; } d_it[k] += gk*d_X[k] * Xi*(d_V[id] - ek); //comp_ik1 //double gk1 = 0.6047*(sqrt(ko / 5.4)); //double ek1 = ((R*temp) / frdy)*log(ko / ki); double ak1 = 1.02 / (1 + exp(0.2385*(d_V[id] - ek1 - 59.215))); double bk1 = (0.49124*exp(0.08032*(d_V[id] - ek1 + 5.476))+exp(0.06175*(d_V[id] - ek1 - 594.31)))/(1 + exp(-0.5143*(d_V[id] - ek1 + 4.753))); double K1ss = ak1 / (ak1 + bk1); d_it[k] += gk1*K1ss*(d_V[id] - ek1); //comp_ikp //double gkp = 0.0183; //double ekp = ((R*temp) / frdy)*log(ko / ki); double kp = 1 / (1 + exp((7.488 - d_V[id]) / 5.98)); d_it[k] += gkp*kp*(d_V[id] - ekp); //comp_ib d_it[k] += 0.03921*(d_V[id] + 59.87); } __global__ void comp_dVdt(double *d_V, double *d_m, double *d_h, double *d_jj, double *d_d, double *d_f, double *d_cai, double *d_isi, double *d_X, double *d_it, double *d_dVdt, double *d_t){ int k = threadIdx.x + blockIdx.x * blockDim.x; int I = threadIdx.x; if(k<nx*ny){ int i = (int)(k/nx); d_t[k] = dt_max; comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, d_isi, d_X, d_it, I, i, k, d_t); d_dVdt[k] = -d_it[k]; } } void dVdt(){ int bpg; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( comp_dVdt), dim3(bpg), dim3(tpb), 0, 0, d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, d_isi, d_X, d_it, d_dVdt, d_t); } __global__ void plane_waves(double *d_dVdt){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<ny*5){ int i, j, id; i = (int)(k/5); j = k-i*5; id = i*nx+j; d_dVdt[id] = d_dVdt[id] + (-st); } } void stimu(){ int bpg; //int tpb; //tpb = 256; bpg = (ny*5+tpb-1)/tpb; hipLaunchKernelGGL(( plane_waves), dim3(bpg), dim3(tpb), 0, 0, d_dVdt); //hipDeviceSynchronize(); } /* Calculating the initial Ions Current*/ __device__ void comp_fluxes(double *d_cai, double *d_isi, int k, double *d_t){ d_cai[k] = d_cai[k] + (-0.0001*d_isi[k] + 0.07*(0.0001 - d_cai[k]))*d_t[k]; } /* update the gate value*/ __device__ void Rush_Larsen(double *d_V, double *d_m, double *d_h, double *d_jj, double *d_d, double *d_f, double *d_X, int i, int k, double *d_t){ //Fast sodium current //gate variables can not be shared, should be local due to data racing !!!!!!!! 
int id = k+nx+2+1+2*i; //double bm = 0.08*exp(-d_V[k+nx+2+1+2*i] / 11); double am = 0.32*(d_V[id] + 47.13) / (1 - exp(-0.1*(d_V[id] + 47.13))); double bm = 0.08*exp(-d_V[id] / 11); double ah, bh, aj, bj; if (d_V[id] < -40.0) { ah = 0.135*exp((80 + d_V[id]) / -6.8); bh = 3.56*exp(0.079*d_V[id]) + 310000.0 * exp(0.35*d_V[id]); aj = (-127140 * exp(0.2444*d_V[id]) - 0.00003474*exp(-0.04391*d_V[id]))*((d_V[id] + 37.78) / (1.0 + exp(0.311*(d_V[id] + 79.23)))); bj = (0.1212*exp(-0.01052*d_V[id])) / (1.0 + exp(-0.1378*(d_V[id] + 40.14))); } else { ah = 0.0; bh = 1.0 / (0.13*(1.0 + exp((d_V[id] + 10.66) / -11.1))); aj = 0.0; bj = (0.3*exp(-0.0000002535*d_V[id])) / (1.0 + exp(-0.1*(d_V[id] + 32.0))); } double mtau = 1.0 / (am + bm); double htau = 1.0 / (ah + bh); double jtau = 1.0 / (aj + bj); double mss = am*mtau; double hss = ah*htau; double jss = aj*jtau; d_m[k] = mss - (mss - d_m[k])*exp(-d_t[k] / mtau); d_h[k] = hss - (hss - d_h[k])*exp(-d_t[k] / htau); d_jj[k] = jss - (jss - d_jj[k])*exp(-d_t[k] / jtau); //Slow inward current double ad = 50*0.095*exp(-0.01*(d_V[id] - 5)) / (1.0 + exp(-0.072*(d_V[id] - 5))); double bd = 50*0.07*exp(-0.017*(d_V[id] + 44)) / (1.0 + exp(0.05*(d_V[id] + 44))); double af = 50*0.012*exp(-0.008*(d_V[id] + 28)) / (1.0 + exp(0.15*(d_V[id] + 28))); double bf = 50*0.0065*exp(-0.02*(d_V[id] + 30)) / (1.0 + exp(-0.2*(d_V[id] + 30))); double taud = 1.0 / (ad + bd); double tauf = 1.0 / (af + bf); double dss = ad*taud; double fss = af*tauf; d_d[k] = dss - (dss - d_d[k])*exp(-d_t[k] / taud); d_f[k] = fss - (fss - d_f[k])*exp(-d_t[k] / tauf); //Time-dependent potassium current double ax = 50*0.0005*exp(0.083*(d_V[id] + 50)) / (1 + exp(0.057*(d_V[id] + 50))); double bx = 50*0.0013*exp(-0.06*(d_V[id] + 20)) / (1 + exp(-0.04*(d_V[id] + 20))); double taux = 1 / (ax + bx); double xss = ax*taux; d_X[k] = xss - (xss - d_X[k])*exp(-d_t[k] / taux); } __global__ void firsttime(double *d_V, double *d_m, double *d_h, double *d_jj, double *d_d, double *d_f, double *d_X, double *d_cai, double *d_isi, double *d_dVdt, double *d_D1V, double *d_t){ // in order to get D1V[i][j], for computing D2V[i][j] in CCL(i, j, dt_max); /* The first time step*/ int k = threadIdx.x + blockIdx.x * blockDim.x; int i = (int)(k/nx); int id = k+nx+2+1+2*i; if(k<nx*ny){ d_D1V[k] = d_dVdt[k]; comp_fluxes(d_cai, d_isi, k, d_t); Rush_Larsen(d_V, d_m, d_h, d_jj, d_d, d_f, d_X, i, k, d_t); d_V[id] = d_V[id] + dt_max * d_dVdt[k]; } } void First(){ int bpg; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( firsttime), dim3(bpg), dim3(tpb), 0, 0, d_V, d_m, d_h, d_jj, d_d, d_f, d_X, d_cai, d_isi, d_dVdt, d_D1V, d_t); //hipDeviceSynchronize(); } __device__ void CCL(double *d_dVdt, double *d_D1V, double *d_D2V, int k, double *d_t){ /* double dt_range; dt_range = d_t[k]*2*(dt_univ > d_t[k]*2) + dt_univ*(dt_univ <= d_t[k]*2); d_D2V[k] = (d_dVdt[k] - d_D1V[k]) / d_t[k]; double DiscriminantP = 0.0, DiscriminantN = 0.0, dtz = 0.0; DiscriminantP = d_dVdt[k] * d_dVdt[k] + 2 * d_D2V[k] * Voffset*(d_dVdt[k] >= 0); DiscriminantN = d_dVdt[k] * d_dVdt[k] - 2 * d_D2V[k] * Voffset*(d_dVdt[k] < 0); dtz = -d_dVdt[k] / d_D2V[k]; d_t[k] = (-d_dVdt[k] + sqrt(DiscriminantP)) / d_D2V[k]*(d_dVdt[k] >= 0)*(d_D2V[k]>0)+(-d_dVdt[k] + sqrt(DiscriminantP)) / d_D2V[k]*(d_dVdt[k] >= 0)*(d_D2V[k]<0)*(DiscriminantP >= 0)+dtz*(d_dVdt[k] >= 0)*(d_D2V[k]<0)*(DiscriminantP < 0)+(-d_dVdt[k]+sqrt(DiscriminantN)) / d_D2V[k]*(d_dVdt[k] < 0)*(d_D2V[k]>0)*(DiscriminantN >= 0)+dtz*(d_dVdt[k] < 0)*(d_D2V[k]>0)*(DiscriminantN < 0)+(-d_dVdt[k] + sqrt(DiscriminantN)) / 
d_D2V[k]*(d_dVdt[k] < 0)*(d_D2V[k]<0); d_t[k] = d_t[k]*(d_t[k]<=dt_range && d_t[k]>=dt_min)+dt_range*(d_t[k]>dt_range)+dt_min*(d_t[k]<dt_min); */ double dt_range; if (dt_univ > d_t[k] * 2){ dt_range = d_t[k] * 2; } else{ dt_range = dt_univ; } d_D2V[k] = (d_dVdt[k] - d_D1V[k]) / d_t[k]; double DiscriminantP = 0, DiscriminantN = 0, dtz = 0; if (d_dVdt[k] >= 0){ DiscriminantP = d_dVdt[k] * d_dVdt[k] + 2 * d_D2V[k] * Voffset; if (d_D2V[k]>0){ d_t[k] = (-d_dVdt[k] + sqrt(DiscriminantP)) / d_D2V[k]; } else if (d_D2V[k]<0){ dtz = -d_dVdt[k] / d_D2V[k]; if (DiscriminantP >= 0){ d_t[k] = (-d_dVdt[k] + sqrt(DiscriminantP)) / d_D2V[k]; } else{ d_t[k] = dtz; } } } else{ DiscriminantN = d_dVdt[k] * d_dVdt[k] - 2 * d_D2V[k] * Voffset; if (d_D2V[k]>0){ dtz = -d_dVdt[k] / d_D2V[k]; if (DiscriminantN >= 0){ d_t[k] = (-d_dVdt[k] - sqrt(DiscriminantN)) / d_D2V[k]; } else{ d_t[k] = dtz; } } else if (d_D2V[k]<0){ d_t[k] = (-d_dVdt[k] - sqrt(DiscriminantN)) / d_D2V[k]; } } if (d_t[k]>dt_range){ d_t[k] = dt_range; } if (d_t[k]<dt_min){ d_t[k] = dt_min; } } __device__ void CCL_dtmax(double *d_dVdt, double *d_D1V, double *d_D2V, int k, double *d_t){ double dt_range; if (dt_univ > dt_max * 2){ dt_range = dt_max * 2; } else{ dt_range = dt_univ; } d_D2V[k] = (d_dVdt[k] - d_D1V[k]) / dt_max; double DiscriminantP = 0, DiscriminantN = 0, dtz = 0; if (d_dVdt[k] >= 0){ DiscriminantP = d_dVdt[k] * d_dVdt[k] + 2 * d_D2V[k] * Voffset; if (d_D2V[k]>0){ d_t[k] = (-d_dVdt[k] + sqrt(DiscriminantP)) / d_D2V[k]; } else if (d_D2V[k]<0){ dtz = -d_dVdt[k] / d_D2V[k]; if (DiscriminantP >= 0){ d_t[k] = (-d_dVdt[k] + sqrt(DiscriminantP)) / d_D2V[k]; } else{ d_t[k] = dtz; } } } else{ DiscriminantN = d_dVdt[k] * d_dVdt[k] - 2 * d_D2V[k] * Voffset; if (d_D2V[k]>0){ dtz = -d_dVdt[k] / d_D2V[k]; if (DiscriminantN >= 0){ d_t[k] = (-d_dVdt[k] - sqrt(DiscriminantN)) / d_D2V[k]; } else{ d_t[k] = dtz; } } else if (d_D2V[k]<0){ d_t[k] = (-d_dVdt[k] - sqrt(DiscriminantN)) / d_D2V[k]; } } if (d_t[k]>dt_range){ d_t[k] = dt_range; } if (d_t[k]<dt_min){ d_t[k] = dt_min; } } __global__ void ODE_CCL(double *d_V, double *d_m, double *d_h, double *d_jj, double *d_d, double *d_f, double *d_cai, double *d_isi, double *d_X, double *d_it, double *d_dVdt, double *d_D1V, double *d_D2V, double *d_t, int ncount){ int k = threadIdx.x + blockIdx.x * blockDim.x; int i = (int)(k/nx); int j = k - i*nx; int id = k+nx+2+1+2*i; int I = threadIdx.x; double dt_sum; if(k<nx*ny){ //***** adjust or correct time step---CCL method **/ /* dt_sum = 0.0; CCL_dtmax(d_dVdt, d_D1V, d_D2V, k, d_t); dt_sum = dt_sum + d_t[k]; while(dt_sum<dt_max){ d_D1V[k] = d_dVdt[k]; comp_fluxes(d_cai, d_isi, k, d_t); Rush_Larsen(d_V, d_m, d_h, d_jj, d_d, d_f, d_X, i, k, d_t); d_V[id] = d_V[id] + d_t[k] * d_dVdt[k] + d_t[k] * d_t[k] * d_D2V[k] / 2; comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, d_isi, d_X, d_it, I, i, k, d_t); d_dVdt[k] = -d_it[k] + (-st)*(ncount >= 1 && ncount <= stimtime && j >= 0 && j <= 4); CCL(d_dVdt, d_D1V, d_D2V, k, d_t); dt_sum = dt_sum + d_t[k]; } d_t[k] = dt_max - (dt_sum - d_t[k]);// here is a new dt !!! 
d_D1V[k] = d_dVdt[k]; comp_fluxes(d_cai, d_isi, k, d_t); Rush_Larsen(d_V, d_m, d_h, d_jj, d_d, d_f, d_X, i, k, d_t); d_V[id] = d_V[id] + d_t[k] * d_dVdt[k] + d_t[k] * d_t[k] * d_D2V[k] / 2; */ dt_sum = 0.0; CCL_dtmax(d_dVdt, d_D1V, d_D2V, k, d_t); do{ dt_sum = dt_sum + d_t[k]; if (dt_sum<dt_max){ d_D1V[k] = d_dVdt[k]; comp_fluxes(d_cai, d_isi, k, d_t); Rush_Larsen(d_V, d_m, d_h, d_jj, d_d, d_f, d_X, i, k, d_t); d_V[id] = d_V[id] + d_t[k] * d_dVdt[k] + d_t[k] * d_t[k] * d_D2V[k] / 2; }else{ d_t[k] = dt_max - (dt_sum - d_t[k]); d_D1V[k] = d_dVdt[k]; comp_fluxes(d_cai, d_isi, k, d_t); Rush_Larsen(d_V, d_m, d_h, d_jj, d_d, d_f, d_X, i, k, d_t); d_V[id] = d_V[id] + d_t[k] * d_dVdt[k] + d_t[k] * d_t[k] * d_D2V[k] / 2; break; } comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, d_isi, d_X, d_it, I, i, k, d_t); if(ncount >= 1 && ncount <= stimtime && j >= 0 && j <= 4){ d_dVdt[k] = -d_it[k] + (-st); }else{ d_dVdt[k] = -d_it[k]; } CCL(d_dVdt, d_D1V, d_D2V, k, d_t); } while (true); } } void ODE(int ncount){ int bpg; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( ODE_CCL), dim3(bpg), dim3(tpb), 0, 0, d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, d_isi, d_X, d_it, d_dVdt, d_D1V, d_D2V, d_t, ncount); //hipDeviceSynchronize(); } __global__ void Euler(double *d_V, double *d_dV2, double *d_Vnew){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int i = (int)(k/nx); d_Vnew[k] = d_V[k+nx+2+1+2*i] + dt_max/2 *d_dV2[k]; d_V[k+nx+2+1+2*i] = d_Vnew[k]; } } void Forward_Euler(){ int bpg; //int tpb; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; hipLaunchKernelGGL(( Euler), dim3(bpg), dim3(tpb), 0, 0, d_V, d_dV2, d_Vnew); //hipDeviceSynchronize(); }
forward.cu
#include "head.h" #define tpb 256 extern double *d_t; extern double *d_it; extern double *d_V; extern double *d_dV2; extern double *d_Vnew; extern double *d_m; extern double *d_h; extern double *d_jj; extern double *d_d; extern double *d_f; extern double *d_X; extern double *d_cai; /* extern double *d_m0; extern double *d_h0; extern double *d_jj0; extern double *d_d0; extern double *d_f0; extern double *d_X0; */ extern double *d_dVdt; //extern double *dcai; extern double *d_isi; extern double *d_D1V; extern double *d_D2V; //extern int ncount; __global__ void boundary(double *d_V){ int k = blockDim.x * blockIdx.x + threadIdx.x; if(k<nx){ d_V[(k+1)*(nx+2)] = d_V[(k+1)*(nx+2)+1]; d_V[(k+1)*(nx+2)+(nx+1)] = d_V[(k+1)*(nx+2)+nx]; d_V[k+1] = d_V[k+1+(nx+2)]; d_V[(ny+1)*(nx+2)+k+1] = d_V[ny*(nx+2)+k+1]; } } void bc(){ int bpg; //tpb = 256; bpg = (nx+tpb-1)/tpb; boundary<<<bpg, tpb>>>(d_V); //cudaDeviceSynchronize(); } __global__ void comp_dV2(double *d_V ,double *d_dV2){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int i = (int)(k/nx); int id = k+(nx+2)+1+(2*i); d_dV2[k] = D*((d_V[id+1] + d_V[id-1] - 2*d_V[id]) / (dx*dx) + (d_V[id+(nx+2)] + d_V[id-(nx+2)] - 2*d_V[id])/(dy*dy)); } } void dV2(){ int bpg; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; comp_dV2<<<bpg, tpb>>>(d_V, d_dV2); //cudaDeviceSynchronize(); } __device__ void comp_it(double *d_V, double *d_m, double *d_h, double *d_jj, double *d_d, double *d_f, double *d_cai, double *d_isi, double *d_X, double *d_it, int I, int i, int k, double *d_t) { d_it[k] = 0.0; int id = k+nx+2+1+2*i; //comp_ina //double gna = 23; //double ena = ((R*temp) / frdy)*log(nao / nai); /* double am = 0.32*(d_V[k+nx+2+1+2*i] + 47.13) / (1 - exp(-0.1*(d_V[k+nx+2+1+2*i] + 47.13))); double bm = 0.08*exp(-d_V[k+nx+2+1+2*i] / 11); double ah, bh, aj ,bj; if (d_V[k+nx+2+1+2*i] < -40.0) { ah = 0.135*exp((80 + d_V[k+nx+2+1+2*i]) / -6.8); bh = 3.56*exp(0.079*d_V[k+nx+2+1+2*i]) + 310000 * exp(0.35*d_V[k+nx+2+1+2*i]); aj = (-127140 * exp(0.2444*d_V[k+nx+2+1+2*i]) - 0.00003474*exp(-0.04391*d_V[k+nx+2+1+2*i]))* ((d_V[k+nx+2+1+2*i] + 37.78)/(1 + exp(0.311*(d_V[k+nx+2+1+2*i] + 79.23)))); bj = (0.1212*exp(-0.01052*d_V[k+nx+2+1+2*i])) / (1 + exp(-0.1378*(d_V[k+nx+2+1+2*i] + 40.14))); } else { ah = 0; bh = 1 / (0.13*(1 + exp((d_V[k+nx+2+1+2*i] + 10.66) / -11.1))); aj = 0; bj = (0.3*exp(-0.0000002535*d_V[k+nx+2+1+2*i])) / (1 + exp(-0.1*(d_V[k+nx+2+1+2*i] + 32))); } double mtau = 1 / (am + bm); double htau = 1 / (ah + bh); double jtau = 1 / (aj + bj); double mss = am*mtau; double hss = ah*htau; double jss = aj*jtau; d_m0[k] = mss - (mss - d_m[k])*exp(-d_t[k] / mtau); d_h0[k] = hss - (hss - d_h[k])*exp(-d_t[k] / htau); d_jj0[k] = jss - (jss - d_jj[k])*exp(-d_t[k] / jtau); */ d_it[k] += gna*d_m[k] * d_m[k] * d_m[k] * d_h[k] * d_jj[k] * (d_V[id] - ena); //comp_ical __shared__ double esi[tpb]; //__shared__ double isi[tpb]; esi[I] = 7.7 - 13.0287*log(d_cai[k]); /* double ad = 50 * 0.095*exp(-0.01*(d_V[k+nx+2+1+2*i] - 5)) / (1 + exp(-0.072*(d_V[k+nx+2+1+2*i] - 5))); double bd = 50 * 0.07*exp(-0.017*(d_V[k+nx+2+1+2*i] + 44)) / (1 + exp(0.05*(d_V[k+nx+2+1+2*i] + 44))); double af = 50 * 0.012*exp(-0.008*(d_V[k+nx+2+1+2*i] + 28)) / (1 + exp(0.15*(d_V[k+nx+2+1+2*i] + 28))); double bf = 50 * 0.0065*exp(-0.02*(d_V[k+nx+2+1+2*i] + 30)) / (1 + exp(-0.2*(d_V[k+nx+2+1+2*i] + 30))); double taud = 1 / (ad + bd); double tauf = 1 / (af + bf); double dss = ad*taud; double fss = af*tauf; d_d0[k] = dss - (dss - d_d[k])*exp(-d_t[k] / taud); d_f0[k] = fss - (fss - d_f[k])*exp(-d_t[k] / tauf); */ d_isi[k] 
= 0.09*d_d[k] * d_f[k] * (d_V[id] - esi[I]); //dcai[k] = -0.0001*isi[I] + 0.07*(0.0001 - d_cai[k]); //d_cai[k] = d_cai[k] + dcai*dt; d_it[k] = d_it[k] + d_isi[k]; //comp_ik /* double gk = 0.282*sqrt(ko / 5.4); double ek = ((R*temp) / frdy)*log(ko / ki); //double prnak = 0.01833; //ek = ((R*temp) / frdy)*log((ko + prnak*nao) / (ki + prnak*nai)); double ax = 50 * 0.0005*exp(0.083*(d_V[k+nx+2+1+2*i] + 50)) / (1 + exp(0.057*(d_V[k+nx+2+1+2*i] + 50))); double bx = 50 * 0.0013*exp(-0.06*(d_V[k+nx+2+1+2*i] + 20)) / (1 + exp(-0.04*(d_V[k+nx+2+1+2*i] + 20))); double taux = 1 / (ax + bx); double xss = ax*taux; d_X0[k] = xss - (xss - d_X[k])*exp(-d_t[k] / taux); */ double Xi; if (d_V[id] > -100) { Xi = 2.837*(exp(0.04*(d_V[id] + 77)) - 1)/((d_V[id] + 77)*exp(0.04*(d_V[id] + 35))); } else { Xi = 1; } d_it[k] += gk*d_X[k] * Xi*(d_V[id] - ek); //comp_ik1 //double gk1 = 0.6047*(sqrt(ko / 5.4)); //double ek1 = ((R*temp) / frdy)*log(ko / ki); double ak1 = 1.02 / (1 + exp(0.2385*(d_V[id] - ek1 - 59.215))); double bk1 = (0.49124*exp(0.08032*(d_V[id] - ek1 + 5.476))+exp(0.06175*(d_V[id] - ek1 - 594.31)))/(1 + exp(-0.5143*(d_V[id] - ek1 + 4.753))); double K1ss = ak1 / (ak1 + bk1); d_it[k] += gk1*K1ss*(d_V[id] - ek1); //comp_ikp //double gkp = 0.0183; //double ekp = ((R*temp) / frdy)*log(ko / ki); double kp = 1 / (1 + exp((7.488 - d_V[id]) / 5.98)); d_it[k] += gkp*kp*(d_V[id] - ekp); //comp_ib d_it[k] += 0.03921*(d_V[id] + 59.87); } __global__ void comp_dVdt(double *d_V, double *d_m, double *d_h, double *d_jj, double *d_d, double *d_f, double *d_cai, double *d_isi, double *d_X, double *d_it, double *d_dVdt, double *d_t){ int k = threadIdx.x + blockIdx.x * blockDim.x; int I = threadIdx.x; if(k<nx*ny){ int i = (int)(k/nx); d_t[k] = dt_max; comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, d_isi, d_X, d_it, I, i, k, d_t); d_dVdt[k] = -d_it[k]; } } void dVdt(){ int bpg; bpg = (nx*ny+tpb-1)/tpb; comp_dVdt<<<bpg, tpb>>>(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, d_isi, d_X, d_it, d_dVdt, d_t); } __global__ void plane_waves(double *d_dVdt){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<ny*5){ int i, j, id; i = (int)(k/5); j = k-i*5; id = i*nx+j; d_dVdt[id] = d_dVdt[id] + (-st); } } void stimu(){ int bpg; //int tpb; //tpb = 256; bpg = (ny*5+tpb-1)/tpb; plane_waves<<<bpg, tpb>>>(d_dVdt); //cudaDeviceSynchronize(); } /* Calculating the initial Ions Current*/ __device__ void comp_fluxes(double *d_cai, double *d_isi, int k, double *d_t){ d_cai[k] = d_cai[k] + (-0.0001*d_isi[k] + 0.07*(0.0001 - d_cai[k]))*d_t[k]; } /* update the gate value*/ __device__ void Rush_Larsen(double *d_V, double *d_m, double *d_h, double *d_jj, double *d_d, double *d_f, double *d_X, int i, int k, double *d_t){ //Fast sodium current //gate variables can not be shared, should be local due to data racing !!!!!!!! 
int id = k+nx+2+1+2*i; //double bm = 0.08*exp(-d_V[k+nx+2+1+2*i] / 11); double am = 0.32*(d_V[id] + 47.13) / (1 - exp(-0.1*(d_V[id] + 47.13))); double bm = 0.08*exp(-d_V[id] / 11); double ah, bh, aj, bj; if (d_V[id] < -40.0) { ah = 0.135*exp((80 + d_V[id]) / -6.8); bh = 3.56*exp(0.079*d_V[id]) + 310000.0 * exp(0.35*d_V[id]); aj = (-127140 * exp(0.2444*d_V[id]) - 0.00003474*exp(-0.04391*d_V[id]))*((d_V[id] + 37.78) / (1.0 + exp(0.311*(d_V[id] + 79.23)))); bj = (0.1212*exp(-0.01052*d_V[id])) / (1.0 + exp(-0.1378*(d_V[id] + 40.14))); } else { ah = 0.0; bh = 1.0 / (0.13*(1.0 + exp((d_V[id] + 10.66) / -11.1))); aj = 0.0; bj = (0.3*exp(-0.0000002535*d_V[id])) / (1.0 + exp(-0.1*(d_V[id] + 32.0))); } double mtau = 1.0 / (am + bm); double htau = 1.0 / (ah + bh); double jtau = 1.0 / (aj + bj); double mss = am*mtau; double hss = ah*htau; double jss = aj*jtau; d_m[k] = mss - (mss - d_m[k])*exp(-d_t[k] / mtau); d_h[k] = hss - (hss - d_h[k])*exp(-d_t[k] / htau); d_jj[k] = jss - (jss - d_jj[k])*exp(-d_t[k] / jtau); //Slow inward current double ad = 50*0.095*exp(-0.01*(d_V[id] - 5)) / (1.0 + exp(-0.072*(d_V[id] - 5))); double bd = 50*0.07*exp(-0.017*(d_V[id] + 44)) / (1.0 + exp(0.05*(d_V[id] + 44))); double af = 50*0.012*exp(-0.008*(d_V[id] + 28)) / (1.0 + exp(0.15*(d_V[id] + 28))); double bf = 50*0.0065*exp(-0.02*(d_V[id] + 30)) / (1.0 + exp(-0.2*(d_V[id] + 30))); double taud = 1.0 / (ad + bd); double tauf = 1.0 / (af + bf); double dss = ad*taud; double fss = af*tauf; d_d[k] = dss - (dss - d_d[k])*exp(-d_t[k] / taud); d_f[k] = fss - (fss - d_f[k])*exp(-d_t[k] / tauf); //Time-dependent potassium current double ax = 50*0.0005*exp(0.083*(d_V[id] + 50)) / (1 + exp(0.057*(d_V[id] + 50))); double bx = 50*0.0013*exp(-0.06*(d_V[id] + 20)) / (1 + exp(-0.04*(d_V[id] + 20))); double taux = 1 / (ax + bx); double xss = ax*taux; d_X[k] = xss - (xss - d_X[k])*exp(-d_t[k] / taux); } __global__ void firsttime(double *d_V, double *d_m, double *d_h, double *d_jj, double *d_d, double *d_f, double *d_X, double *d_cai, double *d_isi, double *d_dVdt, double *d_D1V, double *d_t){ // in order to get D1V[i][j], for computing D2V[i][j] in CCL(i, j, dt_max); /* The first time step*/ int k = threadIdx.x + blockIdx.x * blockDim.x; int i = (int)(k/nx); int id = k+nx+2+1+2*i; if(k<nx*ny){ d_D1V[k] = d_dVdt[k]; comp_fluxes(d_cai, d_isi, k, d_t); Rush_Larsen(d_V, d_m, d_h, d_jj, d_d, d_f, d_X, i, k, d_t); d_V[id] = d_V[id] + dt_max * d_dVdt[k]; } } void First(){ int bpg; bpg = (nx*ny+tpb-1)/tpb; firsttime<<<bpg, tpb>>>(d_V, d_m, d_h, d_jj, d_d, d_f, d_X, d_cai, d_isi, d_dVdt, d_D1V, d_t); //cudaDeviceSynchronize(); } __device__ void CCL(double *d_dVdt, double *d_D1V, double *d_D2V, int k, double *d_t){ /* double dt_range; dt_range = d_t[k]*2*(dt_univ > d_t[k]*2) + dt_univ*(dt_univ <= d_t[k]*2); d_D2V[k] = (d_dVdt[k] - d_D1V[k]) / d_t[k]; double DiscriminantP = 0.0, DiscriminantN = 0.0, dtz = 0.0; DiscriminantP = d_dVdt[k] * d_dVdt[k] + 2 * d_D2V[k] * Voffset*(d_dVdt[k] >= 0); DiscriminantN = d_dVdt[k] * d_dVdt[k] - 2 * d_D2V[k] * Voffset*(d_dVdt[k] < 0); dtz = -d_dVdt[k] / d_D2V[k]; d_t[k] = (-d_dVdt[k] + sqrt(DiscriminantP)) / d_D2V[k]*(d_dVdt[k] >= 0)*(d_D2V[k]>0)+(-d_dVdt[k] + sqrt(DiscriminantP)) / d_D2V[k]*(d_dVdt[k] >= 0)*(d_D2V[k]<0)*(DiscriminantP >= 0)+dtz*(d_dVdt[k] >= 0)*(d_D2V[k]<0)*(DiscriminantP < 0)+(-d_dVdt[k]+sqrt(DiscriminantN)) / d_D2V[k]*(d_dVdt[k] < 0)*(d_D2V[k]>0)*(DiscriminantN >= 0)+dtz*(d_dVdt[k] < 0)*(d_D2V[k]>0)*(DiscriminantN < 0)+(-d_dVdt[k] + sqrt(DiscriminantN)) / d_D2V[k]*(d_dVdt[k] < 0)*(d_D2V[k]<0); 
d_t[k] = d_t[k]*(d_t[k]<=dt_range && d_t[k]>=dt_min)+dt_range*(d_t[k]>dt_range)+dt_min*(d_t[k]<dt_min); */ double dt_range; if (dt_univ > d_t[k] * 2){ dt_range = d_t[k] * 2; } else{ dt_range = dt_univ; } d_D2V[k] = (d_dVdt[k] - d_D1V[k]) / d_t[k]; double DiscriminantP = 0, DiscriminantN = 0, dtz = 0; if (d_dVdt[k] >= 0){ DiscriminantP = d_dVdt[k] * d_dVdt[k] + 2 * d_D2V[k] * Voffset; if (d_D2V[k]>0){ d_t[k] = (-d_dVdt[k] + sqrt(DiscriminantP)) / d_D2V[k]; } else if (d_D2V[k]<0){ dtz = -d_dVdt[k] / d_D2V[k]; if (DiscriminantP >= 0){ d_t[k] = (-d_dVdt[k] + sqrt(DiscriminantP)) / d_D2V[k]; } else{ d_t[k] = dtz; } } } else{ DiscriminantN = d_dVdt[k] * d_dVdt[k] - 2 * d_D2V[k] * Voffset; if (d_D2V[k]>0){ dtz = -d_dVdt[k] / d_D2V[k]; if (DiscriminantN >= 0){ d_t[k] = (-d_dVdt[k] - sqrt(DiscriminantN)) / d_D2V[k]; } else{ d_t[k] = dtz; } } else if (d_D2V[k]<0){ d_t[k] = (-d_dVdt[k] - sqrt(DiscriminantN)) / d_D2V[k]; } } if (d_t[k]>dt_range){ d_t[k] = dt_range; } if (d_t[k]<dt_min){ d_t[k] = dt_min; } } __device__ void CCL_dtmax(double *d_dVdt, double *d_D1V, double *d_D2V, int k, double *d_t){ double dt_range; if (dt_univ > dt_max * 2){ dt_range = dt_max * 2; } else{ dt_range = dt_univ; } d_D2V[k] = (d_dVdt[k] - d_D1V[k]) / dt_max; double DiscriminantP = 0, DiscriminantN = 0, dtz = 0; if (d_dVdt[k] >= 0){ DiscriminantP = d_dVdt[k] * d_dVdt[k] + 2 * d_D2V[k] * Voffset; if (d_D2V[k]>0){ d_t[k] = (-d_dVdt[k] + sqrt(DiscriminantP)) / d_D2V[k]; } else if (d_D2V[k]<0){ dtz = -d_dVdt[k] / d_D2V[k]; if (DiscriminantP >= 0){ d_t[k] = (-d_dVdt[k] + sqrt(DiscriminantP)) / d_D2V[k]; } else{ d_t[k] = dtz; } } } else{ DiscriminantN = d_dVdt[k] * d_dVdt[k] - 2 * d_D2V[k] * Voffset; if (d_D2V[k]>0){ dtz = -d_dVdt[k] / d_D2V[k]; if (DiscriminantN >= 0){ d_t[k] = (-d_dVdt[k] - sqrt(DiscriminantN)) / d_D2V[k]; } else{ d_t[k] = dtz; } } else if (d_D2V[k]<0){ d_t[k] = (-d_dVdt[k] - sqrt(DiscriminantN)) / d_D2V[k]; } } if (d_t[k]>dt_range){ d_t[k] = dt_range; } if (d_t[k]<dt_min){ d_t[k] = dt_min; } } __global__ void ODE_CCL(double *d_V, double *d_m, double *d_h, double *d_jj, double *d_d, double *d_f, double *d_cai, double *d_isi, double *d_X, double *d_it, double *d_dVdt, double *d_D1V, double *d_D2V, double *d_t, int ncount){ int k = threadIdx.x + blockIdx.x * blockDim.x; int i = (int)(k/nx); int j = k - i*nx; int id = k+nx+2+1+2*i; int I = threadIdx.x; double dt_sum; if(k<nx*ny){ //***** adjust or correct time step---CCL method **/ /* dt_sum = 0.0; CCL_dtmax(d_dVdt, d_D1V, d_D2V, k, d_t); dt_sum = dt_sum + d_t[k]; while(dt_sum<dt_max){ d_D1V[k] = d_dVdt[k]; comp_fluxes(d_cai, d_isi, k, d_t); Rush_Larsen(d_V, d_m, d_h, d_jj, d_d, d_f, d_X, i, k, d_t); d_V[id] = d_V[id] + d_t[k] * d_dVdt[k] + d_t[k] * d_t[k] * d_D2V[k] / 2; comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, d_isi, d_X, d_it, I, i, k, d_t); d_dVdt[k] = -d_it[k] + (-st)*(ncount >= 1 && ncount <= stimtime && j >= 0 && j <= 4); CCL(d_dVdt, d_D1V, d_D2V, k, d_t); dt_sum = dt_sum + d_t[k]; } d_t[k] = dt_max - (dt_sum - d_t[k]);// here is a new dt !!! 
d_D1V[k] = d_dVdt[k]; comp_fluxes(d_cai, d_isi, k, d_t); Rush_Larsen(d_V, d_m, d_h, d_jj, d_d, d_f, d_X, i, k, d_t); d_V[id] = d_V[id] + d_t[k] * d_dVdt[k] + d_t[k] * d_t[k] * d_D2V[k] / 2; */ dt_sum = 0.0; CCL_dtmax(d_dVdt, d_D1V, d_D2V, k, d_t); do{ dt_sum = dt_sum + d_t[k]; if (dt_sum<dt_max){ d_D1V[k] = d_dVdt[k]; comp_fluxes(d_cai, d_isi, k, d_t); Rush_Larsen(d_V, d_m, d_h, d_jj, d_d, d_f, d_X, i, k, d_t); d_V[id] = d_V[id] + d_t[k] * d_dVdt[k] + d_t[k] * d_t[k] * d_D2V[k] / 2; }else{ d_t[k] = dt_max - (dt_sum - d_t[k]); d_D1V[k] = d_dVdt[k]; comp_fluxes(d_cai, d_isi, k, d_t); Rush_Larsen(d_V, d_m, d_h, d_jj, d_d, d_f, d_X, i, k, d_t); d_V[id] = d_V[id] + d_t[k] * d_dVdt[k] + d_t[k] * d_t[k] * d_D2V[k] / 2; break; } comp_it(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, d_isi, d_X, d_it, I, i, k, d_t); if(ncount >= 1 && ncount <= stimtime && j >= 0 && j <= 4){ d_dVdt[k] = -d_it[k] + (-st); }else{ d_dVdt[k] = -d_it[k]; } CCL(d_dVdt, d_D1V, d_D2V, k, d_t); } while (true); } } void ODE(int ncount){ int bpg; bpg = (nx*ny+tpb-1)/tpb; ODE_CCL<<<bpg, tpb>>>(d_V, d_m, d_h, d_jj, d_d, d_f, d_cai, d_isi, d_X, d_it, d_dVdt, d_D1V, d_D2V, d_t, ncount); //cudaDeviceSynchronize(); } __global__ void Euler(double *d_V, double *d_dV2, double *d_Vnew){ int k = threadIdx.x + blockIdx.x * blockDim.x; if(k<nx*ny){ int i = (int)(k/nx); d_Vnew[k] = d_V[k+nx+2+1+2*i] + dt_max/2 *d_dV2[k]; d_V[k+nx+2+1+2*i] = d_Vnew[k]; } } void Forward_Euler(){ int bpg; //int tpb; //tpb = 256; bpg = (nx*ny+tpb-1)/tpb; Euler<<<bpg, tpb>>>(d_V, d_dV2, d_Vnew); //cudaDeviceSynchronize(); }
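One detail worth spelling out from the forward pair above: the voltage array d_V carries a one-cell ghost border (hence the boundary kernel), so every interior cell k uses the offset id = k + (nx+2) + 1 + 2*i. The snippet below is a hypothetical standalone check, not part of the simulation code, confirming that this offset is simply row i+1, column j+1 of the padded (nx+2)-by-(ny+2) array.

// Hypothetical sanity check for the flat index used throughout forward.cu.
#include <cassert>

int padded_index(int k, int nx) {
    const int i = k / nx;        // interior row
    const int j = k - i * nx;    // interior column
    return (i + 1) * (nx + 2) + (j + 1);
}

void check_padded_index() {
    const int nx = 8, ny = 4;    // toy grid sizes, assumed for the check only
    for (int k = 0; k < nx * ny; ++k) {
        const int i = k / nx;
        assert(padded_index(k, nx) == k + (nx + 2) + 1 + 2 * i);
    }
}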
3c4b6b7de088e20d3480df72aeccaaa694b0aa77.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/LenSoftMax.cu" #else #include "../common.h" void THNN_(LenSoftMax_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCIndexTensor *len) { THCUNN_assertSameGPU(state, 2, input, output); if ((input->nDimension != 2) && (len->nDimension != 1)) { THError("2D tensor expected for input, 1D tensor expected for len"); } input = THCTensor_(newContiguous)(state, input); THCTensor_(resizeAs)(state, output, input); THCTensor_(zero)(state, output); long batchSize = input->size[0], dim = input->size[1]; long blocksY = 1, blocksZ = 1; dim3 blocks(batchSize, blocksY, blocksZ); dim3 threads(LENSOFTMAX_THREADS); hipLaunchKernelGGL(( cunn_LenSoftMax_updateOutput_kernel<real, accreal, THCIndex_t>), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, output), THCTensor_(data)(state, input), batchSize, dim, THCIndexTensor_(data)(state, len) ); THCudaCheck(hipGetLastError()); THCTensor_(free)(state, input); } void THNN_(LenSoftMax_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCTensor *output, THCIndexTensor *len) { THCUNN_check_nElement(state, input, gradOutput); THCUNN_assertSameGPU(state, 3, output, gradOutput, gradInput); if ((gradInput->nDimension != 2) && (len->nDimension != 1)) { THError("2D tensor expected for input, 1D tensor expected for len"); } output = THCTensor_(newContiguous)(state, output); gradOutput = THCTensor_(newContiguous)(state, gradOutput); THCTensor_(resizeAs)(state, gradInput, output); THCTensor_(zero)(state, gradInput); long batchSize = gradInput->size[0], dim = gradInput->size[1]; long blocksY = 1, blocksZ = 1; dim3 blocks(batchSize, blocksY, blocksZ); dim3 threads(LENSOFTMAX_THREADS); hipLaunchKernelGGL(( cunn_LenSoftMax_updateGradInput_kernel<real, accreal, THCIndex_t>), dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, gradInput), THCTensor_(data)(state, output), THCTensor_(data)(state, gradOutput), batchSize, dim, THCIndexTensor_(data)(state, len) ); THCudaCheck(hipGetLastError()); THCTensor_(free)(state, gradOutput); THCTensor_(free)(state, output); } #endif
3c4b6b7de088e20d3480df72aeccaaa694b0aa77.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/LenSoftMax.cu" #else #include "../common.h" void THNN_(LenSoftMax_updateOutput)( THCState *state, THCTensor *input, THCTensor *output, THCIndexTensor *len) { THCUNN_assertSameGPU(state, 2, input, output); if ((input->nDimension != 2) && (len->nDimension != 1)) { THError("2D tensor expected for input, 1D tensor expected for len"); } input = THCTensor_(newContiguous)(state, input); THCTensor_(resizeAs)(state, output, input); THCTensor_(zero)(state, output); long batchSize = input->size[0], dim = input->size[1]; long blocksY = 1, blocksZ = 1; dim3 blocks(batchSize, blocksY, blocksZ); dim3 threads(LENSOFTMAX_THREADS); cunn_LenSoftMax_updateOutput_kernel<real, accreal, THCIndex_t><<<blocks, threads, 0, THCState_getCurrentStream(state)>>>( THCTensor_(data)(state, output), THCTensor_(data)(state, input), batchSize, dim, THCIndexTensor_(data)(state, len) ); THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, input); } void THNN_(LenSoftMax_updateGradInput)( THCState *state, THCTensor *input, THCTensor *gradOutput, THCTensor *gradInput, THCTensor *output, THCIndexTensor *len) { THCUNN_check_nElement(state, input, gradOutput); THCUNN_assertSameGPU(state, 3, output, gradOutput, gradInput); if ((gradInput->nDimension != 2) && (len->nDimension != 1)) { THError("2D tensor expected for input, 1D tensor expected for len"); } output = THCTensor_(newContiguous)(state, output); gradOutput = THCTensor_(newContiguous)(state, gradOutput); THCTensor_(resizeAs)(state, gradInput, output); THCTensor_(zero)(state, gradInput); long batchSize = gradInput->size[0], dim = gradInput->size[1]; long blocksY = 1, blocksZ = 1; dim3 blocks(batchSize, blocksY, blocksZ); dim3 threads(LENSOFTMAX_THREADS); cunn_LenSoftMax_updateGradInput_kernel<real, accreal, THCIndex_t><<<blocks, threads, 0, THCState_getCurrentStream(state)>>>( THCTensor_(data)(state, gradInput), THCTensor_(data)(state, output), THCTensor_(data)(state, gradOutput), batchSize, dim, THCIndexTensor_(data)(state, len) ); THCudaCheck(cudaGetLastError()); THCTensor_(free)(state, gradOutput); THCTensor_(free)(state, output); } #endif
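The LenSoftMax pair only contains the host-side dispatch; the cunn_LenSoftMax_*_kernel bodies live in another file. Assuming the semantics suggested by the name and arguments (a softmax restricted to the first len[b] columns of row b, with the remaining outputs left at the zero the host code initializes), a plain CPU reference might look like the hypothetical sketch below; this is an assumption, not the library's definition.

// Hypothetical CPU reference for a length-masked softmax (assumed semantics).
#include <algorithm>
#include <cmath>
#include <vector>

void len_softmax_ref(const std::vector<float>& input, std::vector<float>& output,
                     long batchSize, long dim, const std::vector<long>& len) {
    output.assign(batchSize * dim, 0.0f);
    for (long b = 0; b < batchSize; ++b) {
        const long L = std::min(len[b], dim);
        if (L <= 0) continue;
        float maxv = input[b * dim];
        for (long d = 1; d < L; ++d) maxv = std::max(maxv, input[b * dim + d]);
        float sum = 0.0f;
        for (long d = 0; d < L; ++d) {
            output[b * dim + d] = std::exp(input[b * dim + d] - maxv);
            sum += output[b * dim + d];
        }
        for (long d = 0; d < L; ++d) output[b * dim + d] /= sum;
    }
}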
e9952547ac0a5818ce6f0221eacc5ce605907a5a.hip
// !!! This is a file automatically generated by hipify!!! #include "matrix_mul.cuh" #include "als.cuh" #include <algorithm> #include <string> #include <cstdio> #include <fstream> #include <sstream> #include <ostream> #include <vector> #include <omp.h> #include <sys/time.h> #include "hashes.hpp" #include "features_calculate.hpp" using namespace std; void ReplaceHashesInFile(std::string& file, std::string& out_file ); void print_help() { std::cout << "Matrix multiplication tools:" << std::endl; std::cout << "MatrixMul [--als] [--replace-hashes --reco <file with recomendations>] [--likes <files with likes>] --a_file <user matrix file> --a_size <size of user matrix> \ --b_file <item matrix file> \ --b_size <size of item matrix> \ --f_size <count features> [--out <output file>] [--csamples <count simples>] [--it <count iterations> --als-error <count samples for error>] [--count_gpus <count gpus>] \ " << std::endl; std::cout << " --als - calculate mtrix factorization by als" << std::endl; std::cout << " --likes - files with user items likes for als" << std::endl; std::cout << " --als-error - if specified error is calculated on each iteration. users:items" << std::endl; std::cout << " --replace-hashes - replace all urls in reco file to hashes" << std::endl; std::cout << " --reco - file with recomendations" << std::endl; std::cout << " --likes-format [0|1] - format of input file of likes" << std::endl; std::cout << " --als-alfa - alfa for als" << std::endl; std::cout << " --skip-likes-filter - when multiplicate matrixies the likes filter will be ignored" << std::endl; std::cout << " --euclidian_norm - when multiplicate matrixies the euclidian norm will be applied to each vector" << std::endl; std::cout << " --create-features - create features from input triples <user> <ref_group> <group>/ use --p_file as input file path, a_file and b_file as files describes of matrixies " << std::endl; } int main(int argc, char *argv[] ) { string a_file_name; // = "/home/d.soloviev/ml/ok/recipes-all-users.txt"; string b_file_name; // = "/home/d.soloviev/ml/ok/recipes-all-topics.txt"; string p_file_name; // = "/home/d.soloviev/ml/ok/cluster_item_user.txt"; string output_file_name; // = "out.txt"; string likes_file_name; string reco_file; int a_size = 0; // = 17829029; // int a_size = 1782902; // int a_size = 500; int b_size = 0; // = 5356624; // int b_size = 500; int features_size = 50; int block_size = 5000; int n = 100; int csimples = 0; int cit = 10; bool is_als=false; int samples_for_calc_error_users=0; int samples_for_calc_error_items=0; bool replace_hashes=false; int likes_format=0; float als_alfa = 5; bool skip_likes_filter = false; bool euclidian_normalize = false; bool create_features = false; int count_gpus = 0; hipGetDeviceCount(&count_gpus); for( int i=1; i < argc; i++) { std::string sarg = argv[i]; if( sarg == "--a_file") { i++; a_file_name = argv[i]; std::cerr << " A matrix: " << a_file_name << std::endl; }else if( sarg == "--a_size") { i++; a_size = atoi(argv[i]); std::cerr << " A matrix size: "<< a_size << std::endl; }else if( sarg == "--b_file") { i++; b_file_name = argv[i]; std::cerr << " B matrix: " << b_file_name << std::endl; } else if( sarg == "--b_size") { i++; b_size = atoi(argv[i]); std::cerr << " B matrix size: " << b_size << std::endl; }else if( sarg == "--p_file") { i++; p_file_name = argv[i]; std::cerr << " Cluster file: " << p_file_name << std::endl; } else if( sarg == "--f_size") { i++; features_size = atoi(argv[i]); std::cerr << " Count features: " << features_size << std::endl; 
}else if( sarg == "--likes") { i++; likes_file_name = argv[i]; } else if( sarg == "--csamples") { i++; csimples = atoi(argv[i]); } else if( sarg == "--skip-likes-filter") { skip_likes_filter = true; }else if( sarg == "--euclidian_norm") { euclidian_normalize = true; }else if( sarg == "--als") { is_als = true; } else if( sarg == "--als-error") { i++; std::string samples(argv[i]); size_t pos = samples.find(":"); if(pos == std::string::npos) samples_for_calc_error_users = samples_for_calc_error_items = atoi(argv[i]); else{ samples_for_calc_error_users = atoi(samples.substr(0,pos).c_str()); samples_for_calc_error_items = atoi(samples.substr(pos+1).c_str()); } }else if( sarg == "--it") { i++; cit = atoi(argv[i]); }else if( sarg == "--out") { i++; output_file_name = argv[i]; }else if( sarg == "--reco") { i++; reco_file = argv[i]; }else if( sarg == "--replace-hashes") { replace_hashes = true; }else if( sarg == "--likes-format") { i++; likes_format = atoi(argv[i]); }else if( sarg == "--als-alfa") { i++; als_alfa = atof(argv[i]); }else if( sarg == "--create-features") { create_features = true; }else if( sarg == "--count_gpus") { i++; count_gpus = min(count_gpus, atoi(argv[i])); } if( sarg == "--help") { print_help(); exit(0); } } if( create_features && a_file_name.length() != 0 && b_file_name.length() != 0 && p_file_name.length() != 0 && features_size != 0 ) { std::cerr << "Start features calculation." << std::endl; features_calculate fc(a_file_name, a_size, b_file_name, b_size, p_file_name, features_size); fc.compute_features(); std::cerr << "Done." << std::endl; std::ofstream fout(output_file_name.c_str()); std::ostream& out ((output_file_name.length() == 0)? std::cout : fout ); std::cerr << "Start features serialization." << std::endl; fc.serialize(out); fout.close(); std::cerr << "Done." 
<< std::endl; return 0; }else if(create_features) { std::cerr << "Missing one or more input files or features_size" << std::endl; print_help(); exit(1); } if((!replace_hashes && !is_als && (a_file_name.length() == 0 || b_file_name.length() == 0 || (p_file_name.length() == 0 && !skip_likes_filter && !euclidian_normalize) || a_size == 0 || b_size == 0 || features_size == 0 )) || (is_als && features_size == 0 ) ) { std::cerr << "Missing one or more arguments" << std::endl; print_help(); exit(1); } // matrix_mul m(a_file_name, b_file_name, a_size, b_size, features_size); if(!is_als && !replace_hashes) { if (count_gpus == 2) { int num_gpus = 2; std::vector<int> a_sizes(num_gpus); a_sizes[0] = a_size / 2; a_sizes[1] = a_size - a_sizes[0]; std::vector<string> output_file_names(num_gpus); output_file_names[0] = output_file_name; output_file_names[1] = output_file_name + "_temp"; omp_set_num_threads(num_gpus); #pragma omp parallel { unsigned int cpu_thread_id = omp_get_thread_num(); unsigned int num_cpu_threads = omp_get_num_threads(); // set and check the CUDA device for this CPU thread int gpu_id = -1; hipSetDevice(cpu_thread_id); hipGetDevice(&gpu_id); std::cerr << "CPU thread " << cpu_thread_id << " (of " << num_cpu_threads << ") uses CUDA device " << gpu_id << std::endl; std::cerr << "skip_likes_filter: " << skip_likes_filter << std::endl; std::cerr << "euclidian_normalize: " << euclidian_normalize << std::endl; int skip_lines = cpu_thread_id * a_sizes[0]; std::auto_ptr<matrix_mul> m; if( !skip_likes_filter && !euclidian_normalize) { m.reset(new part_matrix_mul(a_file_name, b_file_name, p_file_name, a_sizes[cpu_thread_id], b_size, features_size, skip_lines)); } else if (!euclidian_normalize) { m.reset(new matrix_mul (a_file_name, b_file_name, a_sizes[cpu_thread_id], b_size, features_size, skip_lines));} else { m.reset(new euclidian_norm_matrix_mul (a_file_name, b_file_name, a_sizes[cpu_thread_id], b_size, features_size, skip_lines));} std::ofstream fout(output_file_names[cpu_thread_id].c_str()); std::ostream& out ((output_file_names[cpu_thread_id].length() == 0)? std::cout : fout ); m->calculate(out, n, block_size); fout.close(); } std::ofstream part_1(output_file_names[0].c_str(), std::ios_base::binary | std::ios_base::app); std::ifstream part_2(output_file_names[1].c_str(), std::ios_base::binary); part_1.seekp(0, std::ios_base::end); part_1 << part_2.rdbuf(); part_1.close(); part_2.close(); remove(output_file_names[1].c_str()); } else { std::cerr << "skip_likes_filter: " << skip_likes_filter << std::endl; std::cerr << "euclidian_normalize: " << euclidian_normalize << std::endl; std::auto_ptr<matrix_mul> m; if( !skip_likes_filter && !euclidian_normalize) { m.reset(new part_matrix_mul(a_file_name, b_file_name, p_file_name, a_size, b_size, features_size)); } else if (!euclidian_normalize) { m.reset(new matrix_mul (a_file_name, b_file_name, a_size, b_size, features_size));} else { m.reset(new euclidian_norm_matrix_mul (a_file_name, b_file_name, a_size, b_size, features_size));} std::ofstream fout(output_file_name.c_str()); std::ostream& out ((output_file_name.length() == 0)? std::cout : fout ); m->calculate(out, n, block_size); fout.close(); } }else if(is_als) { std::ifstream f_stream(likes_file_name.c_str() ); std::istream& in ( (likes_file_name.length() == 0) ? 
std::cin : f_stream); std::cerr << " Count ALS iteration " << cit << std::endl; std::cerr << " Start Matrix Factorization - ALS " << std::endl; std::cerr << " Input file format - " << likes_format << std::endl; std::cerr << " ALS alfa - " << als_alfa << std::endl; std::cerr << " ALS count gpus - " << count_gpus << std::endl; //30 als als_alg(in, features_size, als_alfa, 30, csimples, samples_for_calc_error_users, samples_for_calc_error_items, likes_format, count_gpus); /// struct timeval t1; struct timeval t2; hipDeviceSynchronize(); gettimeofday(&t1, NULL); als_alg.calculate(cit); hipDeviceSynchronize(); gettimeofday(&t2, NULL); std::cout << "als calc time: " << t2.tv_sec - t1.tv_sec << std::endl; omp_set_num_threads(4); #pragma omp parallel { int thread_id = omp_get_thread_num(); if (thread_id == 0) { std::ofstream fout_users((output_file_name+".ufea").c_str()); als_alg.serialize_users(fout_users); fout_users.close(); } else if (thread_id == 1) { std::ofstream fout_items((output_file_name+".ifea").c_str()); als_alg.serialize_items(fout_items); fout_items.close(); } else if (thread_id == 2) { std::ofstream fout_umap((output_file_name+".umap").c_str()); als_alg.serialize_users_map(fout_umap); fout_umap.close(); } else if (thread_id == 3) { std::ofstream fout_imap((output_file_name+".imap").c_str()); als_alg.serialize_items_map(fout_imap); fout_imap.close(); } } }else if(replace_hashes) { /// /// /// ReplaceHashesInFile(reco_file, output_file_name); } return 0; } void ReplaceHashesInFile(std::string& file, std::string& out_file ) { std::ifstream in_f(file.c_str()); std::ofstream o_f(out_file.c_str()); std::string line; char const tab_delim = '\t'; std::istream& in(in_f.good()?in_f:std::cin); std::ostream& out(o_f.good()?o_f:std::cout); while(std::getline(in, line)) { std::istringstream line_stream(line); std::string value; int i=0; while(getline(line_stream, value, tab_delim)) { if( i== 0) { out << value; }else { std::transform(value.begin(), value.end(), value.begin(), ::toupper); out << "\t" << hash_64(value.begin(), value.end()); } i++; } out << std::endl; } }
e9952547ac0a5818ce6f0221eacc5ce605907a5a.cu
#include "matrix_mul.cuh" #include "als.cuh" #include <algorithm> #include <string> #include <cstdio> #include <fstream> #include <sstream> #include <ostream> #include <vector> #include <omp.h> #include <sys/time.h> #include "hashes.hpp" #include "features_calculate.hpp" using namespace std; void ReplaceHashesInFile(std::string& file, std::string& out_file ); void print_help() { std::cout << "Matrix multiplication tools:" << std::endl; std::cout << "MatrixMul [--als] [--replace-hashes --reco <file with recomendations>] [--likes <files with likes>] --a_file <user matrix file> --a_size <size of user matrix> \ --b_file <item matrix file> \ --b_size <size of item matrix> \ --f_size <count features> [--out <output file>] [--csamples <count simples>] [--it <count iterations> --als-error <count samples for error>] [--count_gpus <count gpus>] \ " << std::endl; std::cout << " --als - calculate mtrix factorization by als" << std::endl; std::cout << " --likes - files with user items likes for als" << std::endl; std::cout << " --als-error - if specified error is calculated on each iteration. users:items" << std::endl; std::cout << " --replace-hashes - replace all urls in reco file to hashes" << std::endl; std::cout << " --reco - file with recomendations" << std::endl; std::cout << " --likes-format [0|1] - format of input file of likes" << std::endl; std::cout << " --als-alfa - alfa for als" << std::endl; std::cout << " --skip-likes-filter - when multiplicate matrixies the likes filter will be ignored" << std::endl; std::cout << " --euclidian_norm - when multiplicate matrixies the euclidian norm will be applied to each vector" << std::endl; std::cout << " --create-features - create features from input triples <user> <ref_group> <group>/ use --p_file as input file path, a_file and b_file as files describes of matrixies " << std::endl; } int main(int argc, char *argv[] ) { string a_file_name; // = "/home/d.soloviev/ml/ok/recipes-all-users.txt"; string b_file_name; // = "/home/d.soloviev/ml/ok/recipes-all-topics.txt"; string p_file_name; // = "/home/d.soloviev/ml/ok/cluster_item_user.txt"; string output_file_name; // = "out.txt"; string likes_file_name; string reco_file; int a_size = 0; // = 17829029; // int a_size = 1782902; // int a_size = 500; int b_size = 0; // = 5356624; // int b_size = 500; int features_size = 50; int block_size = 5000; int n = 100; int csimples = 0; int cit = 10; bool is_als=false; int samples_for_calc_error_users=0; int samples_for_calc_error_items=0; bool replace_hashes=false; int likes_format=0; float als_alfa = 5; bool skip_likes_filter = false; bool euclidian_normalize = false; bool create_features = false; int count_gpus = 0; cudaGetDeviceCount(&count_gpus); for( int i=1; i < argc; i++) { std::string sarg = argv[i]; if( sarg == "--a_file") { i++; a_file_name = argv[i]; std::cerr << " A matrix: " << a_file_name << std::endl; }else if( sarg == "--a_size") { i++; a_size = atoi(argv[i]); std::cerr << " A matrix size: "<< a_size << std::endl; }else if( sarg == "--b_file") { i++; b_file_name = argv[i]; std::cerr << " B matrix: " << b_file_name << std::endl; } else if( sarg == "--b_size") { i++; b_size = atoi(argv[i]); std::cerr << " B matrix size: " << b_size << std::endl; }else if( sarg == "--p_file") { i++; p_file_name = argv[i]; std::cerr << " Cluster file: " << p_file_name << std::endl; } else if( sarg == "--f_size") { i++; features_size = atoi(argv[i]); std::cerr << " Count features: " << features_size << std::endl; }else if( sarg == "--likes") { i++; likes_file_name = 
argv[i]; } else if( sarg == "--csamples") { i++; csimples = atoi(argv[i]); } else if( sarg == "--skip-likes-filter") { skip_likes_filter = true; }else if( sarg == "--euclidian_norm") { euclidian_normalize = true; }else if( sarg == "--als") { is_als = true; } else if( sarg == "--als-error") { i++; std::string samples(argv[i]); size_t pos = samples.find(":"); if(pos == std::string::npos) samples_for_calc_error_users = samples_for_calc_error_items = atoi(argv[i]); else{ samples_for_calc_error_users = atoi(samples.substr(0,pos).c_str()); samples_for_calc_error_items = atoi(samples.substr(pos+1).c_str()); } }else if( sarg == "--it") { i++; cit = atoi(argv[i]); }else if( sarg == "--out") { i++; output_file_name = argv[i]; }else if( sarg == "--reco") { i++; reco_file = argv[i]; }else if( sarg == "--replace-hashes") { replace_hashes = true; }else if( sarg == "--likes-format") { i++; likes_format = atoi(argv[i]); }else if( sarg == "--als-alfa") { i++; als_alfa = atof(argv[i]); }else if( sarg == "--create-features") { create_features = true; }else if( sarg == "--count_gpus") { i++; count_gpus = min(count_gpus, atoi(argv[i])); } if( sarg == "--help") { print_help(); exit(0); } } if( create_features && a_file_name.length() != 0 && b_file_name.length() != 0 && p_file_name.length() != 0 && features_size != 0 ) { std::cerr << "Start features calculation." << std::endl; features_calculate fc(a_file_name, a_size, b_file_name, b_size, p_file_name, features_size); fc.compute_features(); std::cerr << "Done." << std::endl; std::ofstream fout(output_file_name.c_str()); std::ostream& out ((output_file_name.length() == 0)? std::cout : fout ); std::cerr << "Start features serialization." << std::endl; fc.serialize(out); fout.close(); std::cerr << "Done." << std::endl; return 0; }else if(create_features) { std::cerr << "Missing one or more input files or features_size" << std::endl; print_help(); exit(1); } if((!replace_hashes && !is_als && (a_file_name.length() == 0 || b_file_name.length() == 0 || (p_file_name.length() == 0 && !skip_likes_filter && !euclidian_normalize) || a_size == 0 || b_size == 0 || features_size == 0 )) || (is_als && features_size == 0 ) ) { std::cerr << "Missing one or more arguments" << std::endl; print_help(); exit(1); } // matrix_mul m(a_file_name, b_file_name, a_size, b_size, features_size); if(!is_als && !replace_hashes) { if (count_gpus == 2) { int num_gpus = 2; std::vector<int> a_sizes(num_gpus); a_sizes[0] = a_size / 2; a_sizes[1] = a_size - a_sizes[0]; std::vector<string> output_file_names(num_gpus); output_file_names[0] = output_file_name; output_file_names[1] = output_file_name + "_temp"; omp_set_num_threads(num_gpus); #pragma omp parallel { unsigned int cpu_thread_id = omp_get_thread_num(); unsigned int num_cpu_threads = omp_get_num_threads(); // set and check the CUDA device for this CPU thread int gpu_id = -1; cudaSetDevice(cpu_thread_id); cudaGetDevice(&gpu_id); std::cerr << "CPU thread " << cpu_thread_id << " (of " << num_cpu_threads << ") uses CUDA device " << gpu_id << std::endl; std::cerr << "skip_likes_filter: " << skip_likes_filter << std::endl; std::cerr << "euclidian_normalize: " << euclidian_normalize << std::endl; int skip_lines = cpu_thread_id * a_sizes[0]; std::auto_ptr<matrix_mul> m; if( !skip_likes_filter && !euclidian_normalize) { m.reset(new part_matrix_mul(a_file_name, b_file_name, p_file_name, a_sizes[cpu_thread_id], b_size, features_size, skip_lines)); } else if (!euclidian_normalize) { m.reset(new matrix_mul (a_file_name, b_file_name, a_sizes[cpu_thread_id], 
b_size, features_size, skip_lines));} else { m.reset(new euclidian_norm_matrix_mul (a_file_name, b_file_name, a_sizes[cpu_thread_id], b_size, features_size, skip_lines));} std::ofstream fout(output_file_names[cpu_thread_id].c_str()); std::ostream& out ((output_file_names[cpu_thread_id].length() == 0)? std::cout : fout ); m->calculate(out, n, block_size); fout.close(); } std::ofstream part_1(output_file_names[0].c_str(), std::ios_base::binary | std::ios_base::app); std::ifstream part_2(output_file_names[1].c_str(), std::ios_base::binary); part_1.seekp(0, std::ios_base::end); part_1 << part_2.rdbuf(); part_1.close(); part_2.close(); remove(output_file_names[1].c_str()); } else { std::cerr << "skip_likes_filter: " << skip_likes_filter << std::endl; std::cerr << "euclidian_normalize: " << euclidian_normalize << std::endl; std::auto_ptr<matrix_mul> m; if( !skip_likes_filter && !euclidian_normalize) { m.reset(new part_matrix_mul(a_file_name, b_file_name, p_file_name, a_size, b_size, features_size)); } else if (!euclidian_normalize) { m.reset(new matrix_mul (a_file_name, b_file_name, a_size, b_size, features_size));} else { m.reset(new euclidian_norm_matrix_mul (a_file_name, b_file_name, a_size, b_size, features_size));} std::ofstream fout(output_file_name.c_str()); std::ostream& out ((output_file_name.length() == 0)? std::cout : fout ); m->calculate(out, n, block_size); fout.close(); } }else if(is_als) { std::ifstream f_stream(likes_file_name.c_str() ); std::istream& in ( (likes_file_name.length() == 0) ? std::cin : f_stream); std::cerr << " Count ALS iteration " << cit << std::endl; std::cerr << " Start Matrix Factorization - ALS " << std::endl; std::cerr << " Input file format - " << likes_format << std::endl; std::cerr << " ALS alfa - " << als_alfa << std::endl; std::cerr << " ALS count gpus - " << count_gpus << std::endl; //30 als als_alg(in, features_size, als_alfa, 30, csimples, samples_for_calc_error_users, samples_for_calc_error_items, likes_format, count_gpus); /// struct timeval t1; struct timeval t2; cudaDeviceSynchronize(); gettimeofday(&t1, NULL); als_alg.calculate(cit); cudaDeviceSynchronize(); gettimeofday(&t2, NULL); std::cout << "als calc time: " << t2.tv_sec - t1.tv_sec << std::endl; omp_set_num_threads(4); #pragma omp parallel { int thread_id = omp_get_thread_num(); if (thread_id == 0) { std::ofstream fout_users((output_file_name+".ufea").c_str()); als_alg.serialize_users(fout_users); fout_users.close(); } else if (thread_id == 1) { std::ofstream fout_items((output_file_name+".ifea").c_str()); als_alg.serialize_items(fout_items); fout_items.close(); } else if (thread_id == 2) { std::ofstream fout_umap((output_file_name+".umap").c_str()); als_alg.serialize_users_map(fout_umap); fout_umap.close(); } else if (thread_id == 3) { std::ofstream fout_imap((output_file_name+".imap").c_str()); als_alg.serialize_items_map(fout_imap); fout_imap.close(); } } }else if(replace_hashes) { /// /// /// ReplaceHashesInFile(reco_file, output_file_name); } return 0; } void ReplaceHashesInFile(std::string& file, std::string& out_file ) { std::ifstream in_f(file.c_str()); std::ofstream o_f(out_file.c_str()); std::string line; char const tab_delim = '\t'; std::istream& in(in_f.good()?in_f:std::cin); std::ostream& out(o_f.good()?o_f:std::cout); while(std::getline(in, line)) { std::istringstream line_stream(line); std::string value; int i=0; while(getline(line_stream, value, tab_delim)) { if( i== 0) { out << value; }else { std::transform(value.begin(), value.end(), value.begin(), ::toupper); out << "\t" 
<< hash_64(value.begin(), value.end()); } i++; } out << std::endl; } }
064704294eb7fe0bf3a75ae154e8e783d3bb7bf7.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif

#include "mex.h"
#include "matrix.h"

// CUDA
#include "hip/hip_runtime.h"
#include "hip/hip_runtime.h"
#include "rocblas.h"

#include "cudaKernels.h"
#include "cudaCommon.h"

void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
    // At least 2 arguments expected
    // Input and result
    if (nrhs!=4) mexErrMsgTxt("Call form is accumulateBterm(source term, destination term, coefficient, preciditon term)");

    /* mex parameters are:
       0 Source term (that this applies the B operator to)
       1 Destination term (that this stores the result in)
       2 Precondition coefficient (one double)
       3 Precondition term (Accumulates successive scaled B operations */

    // Get GPU array pointers
    ArrayMetadata amd;
    double **srcdst = getGPUSourcePointers(prhs, &amd, 0, 1);
    double **accum  = getGPUSourcePointers(prhs, &amd, 2, 2);

    // Get some control variables sorted out
    int *dims = amd.dim;
    dim3 gridsize;
    gridsize.x = dims[0]/EDGEDIM_BOP;
    gridsize.y = dims[1]/EDGEDIM_BOP;
    gridsize.z = 1;
    if(gridsize.x * EDGEDIM_BOP < dims[0]) gridsize.x++;
    if(gridsize.y * EDGEDIM_BOP < dims[1]) gridsize.y++;

    dim3 blocksize;
    blocksize.x = blocksize.y = EDGEDIM_BOP+2;
    blocksize.z = 1;

    int nx = dims[0];
    int ny = dims[1];
    int nz = dims[2];

    hipLaunchKernelGGL(( Laplacian_B_OperatorKernel), dim3(gridsize), dim3(blocksize), 0, 0,  srcdst[0], srcdst[1], *mxGetPr(prhs[2]), accum[0], nx, ny, nz, 4);
}
064704294eb7fe0bf3a75ae154e8e783d3bb7bf7.cu
#include <stdio.h>
#include <string.h>
#include <stdarg.h>
#ifdef UNIX
#include <stdint.h>
#include <unistd.h>
#endif

#include "mex.h"
#include "matrix.h"

// CUDA
#include "cuda.h"
#include "cuda_runtime.h"
#include "cublas.h"

#include "cudaKernels.h"
#include "cudaCommon.h"

void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) {
    // At least 2 arguments expected
    // Input and result
    if (nrhs!=4) mexErrMsgTxt("Call form is accumulateBterm(source term, destination term, coefficient, preciditon term)");

    /* mex parameters are:
       0 Source term (that this applies the B operator to)
       1 Destination term (that this stores the result in)
       2 Precondition coefficient (one double)
       3 Precondition term (Accumulates successive scaled B operations */

    // Get GPU array pointers
    ArrayMetadata amd;
    double **srcdst = getGPUSourcePointers(prhs, &amd, 0, 1);
    double **accum  = getGPUSourcePointers(prhs, &amd, 2, 2);

    // Get some control variables sorted out
    int *dims = amd.dim;
    dim3 gridsize;
    gridsize.x = dims[0]/EDGEDIM_BOP;
    gridsize.y = dims[1]/EDGEDIM_BOP;
    gridsize.z = 1;
    if(gridsize.x * EDGEDIM_BOP < dims[0]) gridsize.x++;
    if(gridsize.y * EDGEDIM_BOP < dims[1]) gridsize.y++;

    dim3 blocksize;
    blocksize.x = blocksize.y = EDGEDIM_BOP+2;
    blocksize.z = 1;

    int nx = dims[0];
    int ny = dims[1];
    int nz = dims[2];

    Laplacian_B_OperatorKernel<<<gridsize, blocksize>>>( srcdst[0], srcdst[1], *mxGetPr(prhs[2]), accum[0], nx, ny, nz, 4);
}
faafb9dfd585032f2cd84f386bc4adbe67ccaaf6.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion

//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.

//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.

//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.

//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.

//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:

//I = .299f * R + .587f * G + .114f * B

//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.

//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.

#include "utils.h"

__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
  //TODO
  //Fill in the kernel to convert from color to greyscale
  //the mapping from components of a uchar4 to RGBA is:
  // .x -> R ; .y -> G ; .z -> B ; .w -> A
  //
  //The output (greyImage) at each pixel should be the result of
  //applying the formula: output = .299f * R + .587f * G + .114f * B;
  //Note: We will be ignoring the alpha channel for this conversion

  //First create a mapping from the 2D block and grid locations
  //to an absolute 2D location in the image, then use that to
  //calculate a 1D offset
  size_t idx_x = threadIdx.x + blockIdx.x*blockDim.x;
  size_t idx_y = threadIdx.y + blockIdx.y*blockDim.y;

  if (idx_x >= numRows || idx_y >= numCols) return; //it can happen on the "remainder" block
  printf("rows: %d/%d -- cols: %d/%d \n", idx_x, numRows, idx_y, numCols);

  size_t idxvec = idx_x*numCols + idx_y;
  uchar4 rgb_value = rgbaImage[idxvec];
  greyImage[idxvec] = (unsigned char)(.299f*rgb_value.x + .587f*rgb_value.y + .114f*rgb_value.z);
}

void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  //You must fill in the correct sizes for the blockSize and gridSize
  //currently only one block with one thread is being launched
  const int blockWidth = 32;
  const dim3 blockSize(blockWidth,blockWidth, 1);
  unsigned int numBlocksX = (unsigned int)(numRows / blockWidth + 1);
  unsigned int numBlocksY = (unsigned int)(numCols / blockWidth + 1);
  const dim3 gridSize(numBlocksX,numBlocksY, 1);
  printf(" x: %d, y: %d \n", numBlocksX, numBlocksY);

  hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols);

  hipDeviceSynchronize();
  checkCudaErrors(hipGetLastError());
}
faafb9dfd585032f2cd84f386bc4adbe67ccaaf6.cu
// Homework 1
// Color to Greyscale Conversion

//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Grean and Blue is in it.
//The 'A' stands for Alpha and is used for transparency, it will be
//ignored in this homework.

//Each channel Red, Blue, Green and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.

//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.

//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.

//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:

//I = .299f * R + .587f * G + .114f * B

//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.

//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.

#include "utils.h"

__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
                       unsigned char* const greyImage,
                       int numRows, int numCols)
{
  //TODO
  //Fill in the kernel to convert from color to greyscale
  //the mapping from components of a uchar4 to RGBA is:
  // .x -> R ; .y -> G ; .z -> B ; .w -> A
  //
  //The output (greyImage) at each pixel should be the result of
  //applying the formula: output = .299f * R + .587f * G + .114f * B;
  //Note: We will be ignoring the alpha channel for this conversion

  //First create a mapping from the 2D block and grid locations
  //to an absolute 2D location in the image, then use that to
  //calculate a 1D offset
  size_t idx_x = threadIdx.x + blockIdx.x*blockDim.x;
  size_t idx_y = threadIdx.y + blockIdx.y*blockDim.y;

  if (idx_x >= numRows || idx_y >= numCols) return; //it can happen on the "remainder" block
  printf("rows: %d/%d -- cols: %d/%d \n", idx_x, numRows, idx_y, numCols);

  size_t idxvec = idx_x*numCols + idx_y;
  uchar4 rgb_value = rgbaImage[idxvec];
  greyImage[idxvec] = (unsigned char)(.299f*rgb_value.x + .587f*rgb_value.y + .114f*rgb_value.z);
}

void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
                            unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
  //You must fill in the correct sizes for the blockSize and gridSize
  //currently only one block with one thread is being launched
  const int blockWidth = 32;
  const dim3 blockSize(blockWidth,blockWidth, 1);
  unsigned int numBlocksX = (unsigned int)(numRows / blockWidth + 1);
  unsigned int numBlocksY = (unsigned int)(numCols / blockWidth + 1);
  const dim3 gridSize(numBlocksX,numBlocksY, 1);
  printf(" x: %d, y: %d \n", numBlocksX, numBlocksY);

  rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols);

  cudaDeviceSynchronize();
  checkCudaErrors(cudaGetLastError());
}
49f8835572dab9b18b2bf2cb8ea5829b19cbb71a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define DIVUP(x, y) (((x) + (y) - 1) / (y)) /* * Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images. * threadIdx.x determines image * threadIdx.y determines filter * * blockIdx.x determines image batch of B_X * imgsPerThread * blockIdx.y determines filter batch of module and B_Y * filtersPerThread * * images: (numColors, imgPixels, numImages) with stride given * filters: (numColors, filterPixels, numFilters) if conv * (numModules, numColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModules, numImages) * * B_Y one of 4, 8, 16 * B_X one of 16, 32 * imgsPerThread one of 1, 2, 4 * filtersPerThread one of 1, 2, 4, 8 * * Number of filters per module should be divisible by B_Y * filtersPerThread * checkImgBounds indicates whether number of images is divisible by B_X * imgsPerThread * * The imgSize here is the size of the actual image without the padding. 
* */ template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int numColors, bool scale, bool checkImgBounds> __global__ void filterActs_YxX_color(float* images, float* filters, float* targets, const int numImages, const int numFilters, const int imgSize, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesX, const int imgStride, const float scaleTargets, const float scaleOutputs, const bool conv) { __shared__ float shFilters[B_Y*numColors][B_Y * filtersPerThread]; // pre-load B_Y pixels from B_Y*filtersPerThread filters __shared__ float shImages[B_Y*numColors][B_X * imgsPerThread]; // pre-load B_Y pixels from B_X*imgsPerThread images const int imgPixels = imgSize * imgSize; const int filterPixels = filterSize * filterSize; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = blockIdx.y % blocksPerModule; const int tidx = threadIdx.y * B_X + threadIdx.x; const int imgLoadModPosY = (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; images += myImgIdx; filters += filtersPerThread * B_Y * blockFilterIdx + shFilterLoadY * numFilters + shFilterLoadX; if (!conv) { filters += moduleIdx * numColors * filterPixels * numFilters; } targets += moduleIdx * numImages + (blockFilterIdx * B_Y * filtersPerThread + threadIdx.y) * numImages * numModulesX * numModulesX + myImgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for(int f = 0; f < filtersPerThread; f++) { #pragma unroll for(int g = 0; g < imgsPerThread; g++) { prod[f][g] = 0; } } for (int p = 0; p < filterPixels; p += B_Y) { /* * Load B_Y pixels from B_Y*filtersPerThread filters */ if (shFilterLoadY < B_Y) { #pragma unroll for (int p2 = 0; p2 < B_Y; p2 += B_X/filtersPerThread) { if (p + p2 + shFilterLoadY < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = filters[(c * filterPixels + p + p2) * numFilters]; } } else { #pragma unroll for (int c = 0; c < numColors; c++) { shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = 0; } } } } /* * Load B_Y pixels from B_X*imgsPerThread images */ const int pixIdx = p + threadIdx.y; if (pixIdx < filterPixels) { const int x = paddingStart + imgLoadModPosX + pixIdx % filterSize; const int y = paddingStart + imgLoadModPosY + pixIdx / filterSize; if (y >= 0 && y< imgSize && x >= 0 && x < imgSize) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = images[imgStride * (c * imgPixels + y * imgSize + x) + i * B_X]; } } else { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0; } } } } else { // Padding #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0; } } } } __syncthreads(); #pragma unroll for (int i = 0; i < B_Y*numColors; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { #pragma unroll for(int g = 0; g < imgsPerThread; g++) { prod[f][g] += shImages[i][g * B_X + threadIdx.x] * 
shFilters[i][threadIdx.y + f * B_Y]; } } } __syncthreads(); } if (scale) { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[g * B_X + f * B_Y * numImages * numModulesX * numModulesX] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModulesX * numModulesX] + scaleOutputs * prod[f][g]; } } } } else { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[g * B_X + f * B_Y * numImages * numModulesX * numModulesX] = scaleOutputs * prod[f][g]; } } } } } /* * Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images. * threadIdx.x determines image * threadIdx.y determines filter * * blockIdx.x determines image batch of B_X * imgsPerThread * blockIdx.y determines filter batch of B_Y * filtersPerThread * * images: (numImgColors, imgPixels, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModules, numImages) * * B_Y one of 4, 8, 16 * B_X one of 16, 32 * imgsPerThread one of 1, 2, 4 * filtersPerThread one of 1, 2, 4, 8 * colorCache: how many colors to put into shmem * * numFilters should be divisible by B_Y * filtersPerThread * numImages be divisible by B_X * imgsPerThread * numFilterColors should be divisible by colorCache. * numImgColors must be even. * numFilters must be divisible by numGroups. * * The imgSize here is the size of the actual image without the padding. * */ template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache, bool scale, bool checkImgBounds> __global__ void filterActs_YxX_sparse(float* images, float* filters, float* targets, const int numImages, const int numFilters, const int imgSize, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesX, const int imgStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs, const bool conv) { __shared__ float shFilters[B_Y*colorCache][B_Y * filtersPerThread]; // pre-load B_Y pixels from B_Y*filtersPerThread filters __shared__ float shImages[B_Y*colorCache][B_X * imgsPerThread]; // pre-load B_Y pixels from B_X*imgsPerThread images const int imgPixels = imgSize * imgSize; const int filterPixels = filterSize * filterSize; const int numFilterColors = numImgColors / numGroups; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numModules = numModulesX * numModulesX; const int blockColorIdx = numFilterColors * blockGroupIdx; const int tidx = threadIdx.y * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; images += blockColorIdx * imgPixels * imgStride + myImgIdx; filters +=blockFilterIdx + shFilterLoadY 
* numFilters + shFilterLoadX; if (!conv) { filters += moduleIdx * numFilterColors * filterPixels * numFilters; } targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y) * numImages * numModules + myImgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for(int f = 0; f < filtersPerThread; f++) { #pragma unroll for(int g = 0; g < imgsPerThread; g++) { prod[f][g] = 0; } } // __shared__ int imgPos[] for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop) for (int p = 0; p < filterPixels; p += B_Y) { /* * Load B_Y pixels from B_Y*filtersPerThread filters */ if (shFilterLoadY < B_Y) { #pragma unroll for (int p2 = 0; p2 < B_Y; p2 += B_X/filtersPerThread) { if (p + p2 + shFilterLoadY < filterPixels) { #pragma unroll for (int c = 0; c < colorCache; c++) { shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = filters[((oc+c) * filterPixels + p + p2) * numFilters]; } } else { #pragma unroll for (int c = 0; c < colorCache; c++) { shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = 0; } } } } /* * Load B_Y pixels from B_X*imgsPerThread images */ const int pixIdx = p + threadIdx.y; if (pixIdx < filterPixels) { const int x = imgLoadModPosX + pixIdx % filterSize; const int y = imgLoadModPosY + pixIdx / filterSize; if (y >= 0 && y < imgSize && x >= 0 && x < imgSize) { float* m = &images[imgStride * (oc * imgPixels + y * imgSize + x)]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorCache; c++) { shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = m[c * imgStride * imgPixels + i * B_X]; } } else { #pragma unroll for (int c = 0; c < colorCache; c++) { shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0; } } } } else { // Padding #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int c = 0; c < colorCache; c++) { shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0; } } } } __syncthreads(); #pragma unroll for (int i = 0; i < B_Y*colorCache; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { #pragma unroll for(int g = 0; g < imgsPerThread; g++) { prod[f][g] += shImages[i][g * B_X + threadIdx.x] * shFilters[i][threadIdx.y + f * B_Y]; } } } __syncthreads(); } } if (scale) { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[g * B_X + f * B_Y * numImages * numModules] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModules] + scaleOutputs * prod[f][g]; } } } } else { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[g * B_X + f * B_Y * numImages * numModules] = scaleOutputs * prod[f][g]; } } } } } /* * images: (groups, coloursPerGroup, height, width, images) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, rows, cols, groups, numFiltersPerGroup) * * targets: (numFilters, numModules, numImages) */ int _filterActs( int numGroups, int numImgColorsPerGroup, int numImgRows, int numImgCols, int numImages, int numModulesX, int numModulesY, int numFilterRows, int numFilterCols, int numFiltersPerGroup, float * imageptr, float * filterptr, float * targetptr, int paddingStart, int moduleStride, int imgStride, // step from start of img 0 to start of img 1 float scaleTargets, 
float scaleOutput, bool conv) { int numImgColors = numGroups * numImgColorsPerGroup; int numFilterColors = numImgColorsPerGroup; int numModules = numModulesX * numModulesY; int imgSize = numImgRows; int imgPixels = numImgRows * numImgCols; int numFilters = numFiltersPerGroup * numGroups; int filterPixels = numFilterRows * numFilterCols; int filterSize = numFilterRows; //XXX: asserts should be turned to ifs and returns assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0))); assert(numGroups == 1 || numFilterColors % 2 == 0); assert(numFilters % (16 * numGroups) == 0); assert(numImgColors % numGroups == 0); assert(imgSize * imgSize == imgPixels); assert(filterSize * filterSize == filterPixels); // These routines don't handle the case when only part of the image is visited in the convolution assert(paddingStart <= 0 && paddingStart + (numModules-1)*moduleStride + filterSize >= imgSize); assert(moduleStride <= filterSize); dim3 blocks = numFiltersPerGroup % 32 == 0 ? dim3(DIVUP(numImages, 32 * 4), (numModules * numFilters) / (4 * 8)) : dim3(DIVUP(numImages, 32 * 4), (numModules * numFilters) / (4 * 4)); dim3 threads(32, 4); bool checkImgBounds = numImages % 128 != 0; if (numModulesX != numModulesY) { return 1; } if (numImgRows != numImgCols) { return 2; } if (numFilterRows != numFilterCols) { return 3; } if (numImgColors <= 3) { assert(numGroups == 1); // It has to be based on above definitions, but just to be sure. if (scaleTargets == 0) { // don't scale if (numImgColors == 1) { if (checkImgBounds) { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 1, false, true >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 1, false, true >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 1, false, false >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 1, false, false >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numImgColors == 2) { if (checkImgBounds) { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 2, false, true >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, 
moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 2, false, true >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 2, false, false >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 2, false, false >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numImgColors == 3) { if (checkImgBounds) { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 3, false, true >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 3, false, true >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 3, false, false >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 3, false, false >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } } else { // do scale if (numImgColors == 1) { if (checkImgBounds) { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 1, true, true >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 
1, true, true >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 1, true, false >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 1, true, false >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numImgColors == 2) { if (checkImgBounds) { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 2, true, true >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 2, true, true >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 2, true, false >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 2, true, false >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numImgColors == 3) { if (checkImgBounds) { if (numFilters % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 3, true, true >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, true, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 3, true, true >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { 
hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 8, 3, true, false >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, true, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_color < 4, 32, 4, 4, 3, true, false >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } } } else { if (scaleTargets == 0) { // don't scale if (checkImgBounds) { if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 8, 2, false, true >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 4, 2, false, true >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else { if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 8, 2, false, false >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 4, 2, false, false >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } else { // do scale if (checkImgBounds) { if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 8, 2, true, true >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, true >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 4, 2, true, true >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else { if (numFiltersPerGroup % 32 == 0) { hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 
8, 2, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 8, 2, true, false >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else { hipFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, false >, hipFuncCachePreferShared); hipLaunchKernelGGL(( filterActs_YxX_sparse < 4, 32, 4, 4, 2, true, false >) , dim3(blocks), dim3(threads), 0, 0, imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } } { // new scope permits new vars hipError_t err = hipGetLastError(); if (hipSuccess != err) { PyErr_Format(PyExc_RuntimeError, "filterActs failed (%s)", hipGetErrorString(err)); return -1; } } return 0; }
49f8835572dab9b18b2bf2cb8ea5829b19cbb71a.cu
/* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #define DIVUP(x, y) (((x) + (y) - 1) / (y)) /* * Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images. * threadIdx.x determines image * threadIdx.y determines filter * * blockIdx.x determines image batch of B_X * imgsPerThread * blockIdx.y determines filter batch of module and B_Y * filtersPerThread * * images: (numColors, imgPixels, numImages) with stride given * filters: (numColors, filterPixels, numFilters) if conv * (numModules, numColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModules, numImages) * * B_Y one of 4, 8, 16 * B_X one of 16, 32 * imgsPerThread one of 1, 2, 4 * filtersPerThread one of 1, 2, 4, 8 * * Number of filters per module should be divisible by B_Y * filtersPerThread * checkImgBounds indicates whether number of images is divisible by B_X * imgsPerThread * * The imgSize here is the size of the actual image without the padding. 
* */ template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int numColors, bool scale, bool checkImgBounds> __global__ void filterActs_YxX_color(float* images, float* filters, float* targets, const int numImages, const int numFilters, const int imgSize, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesX, const int imgStride, const float scaleTargets, const float scaleOutputs, const bool conv) { __shared__ float shFilters[B_Y*numColors][B_Y * filtersPerThread]; // pre-load B_Y pixels from B_Y*filtersPerThread filters __shared__ float shImages[B_Y*numColors][B_X * imgsPerThread]; // pre-load B_Y pixels from B_X*imgsPerThread images const int imgPixels = imgSize * imgSize; const int filterPixels = filterSize * filterSize; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = blockIdx.y % blocksPerModule; const int tidx = threadIdx.y * B_X + threadIdx.x; const int imgLoadModPosY = (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; images += myImgIdx; filters += filtersPerThread * B_Y * blockFilterIdx + shFilterLoadY * numFilters + shFilterLoadX; if (!conv) { filters += moduleIdx * numColors * filterPixels * numFilters; } targets += moduleIdx * numImages + (blockFilterIdx * B_Y * filtersPerThread + threadIdx.y) * numImages * numModulesX * numModulesX + myImgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for(int f = 0; f < filtersPerThread; f++) { #pragma unroll for(int g = 0; g < imgsPerThread; g++) { prod[f][g] = 0; } } for (int p = 0; p < filterPixels; p += B_Y) { /* * Load B_Y pixels from B_Y*filtersPerThread filters */ if (shFilterLoadY < B_Y) { #pragma unroll for (int p2 = 0; p2 < B_Y; p2 += B_X/filtersPerThread) { if (p + p2 + shFilterLoadY < filterPixels) { #pragma unroll for (int c = 0; c < numColors; c++) { shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = filters[(c * filterPixels + p + p2) * numFilters]; } } else { #pragma unroll for (int c = 0; c < numColors; c++) { shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = 0; } } } } /* * Load B_Y pixels from B_X*imgsPerThread images */ const int pixIdx = p + threadIdx.y; if (pixIdx < filterPixels) { const int x = paddingStart + imgLoadModPosX + pixIdx % filterSize; const int y = paddingStart + imgLoadModPosY + pixIdx / filterSize; if (y >= 0 && y< imgSize && x >= 0 && x < imgSize) { #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = images[imgStride * (c * imgPixels + y * imgSize + x) + i * B_X]; } } else { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0; } } } } else { // Padding #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int c = 0; c < numColors; c++) { shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0; } } } } __syncthreads(); #pragma unroll for (int i = 0; i < B_Y*numColors; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { #pragma unroll for(int g = 0; g < imgsPerThread; g++) { prod[f][g] += shImages[i][g * B_X + threadIdx.x] * 
shFilters[i][threadIdx.y + f * B_Y]; } } } __syncthreads(); } if (scale) { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[g * B_X + f * B_Y * numImages * numModulesX * numModulesX] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModulesX * numModulesX] + scaleOutputs * prod[f][g]; } } } } else { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[g * B_X + f * B_Y * numImages * numModulesX * numModulesX] = scaleOutputs * prod[f][g]; } } } } } /* * Block size B_YxB_X. Each block applies B_Y * filtersPerThread filters to B_X * imgsPerThread images. * threadIdx.x determines image * threadIdx.y determines filter * * blockIdx.x determines image batch of B_X * imgsPerThread * blockIdx.y determines filter batch of B_Y * filtersPerThread * * images: (numImgColors, imgPixels, numImages) with stride given * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, filterPixels, numFilters) otherwise * * targets: (numFilters, numModules, numImages) * * B_Y one of 4, 8, 16 * B_X one of 16, 32 * imgsPerThread one of 1, 2, 4 * filtersPerThread one of 1, 2, 4, 8 * colorCache: how many colors to put into shmem * * numFilters should be divisible by B_Y * filtersPerThread * numImages be divisible by B_X * imgsPerThread * numFilterColors should be divisible by colorCache. * numImgColors must be even. * numFilters must be divisible by numGroups. * * The imgSize here is the size of the actual image without the padding. * */ template <int B_Y, int B_X, int imgsPerThread, int filtersPerThread, int colorCache, bool scale, bool checkImgBounds> __global__ void filterActs_YxX_sparse(float* images, float* filters, float* targets, const int numImages, const int numFilters, const int imgSize, const int filterSize, const int paddingStart, const int moduleStride, const int numModulesX, const int imgStride, const int numImgColors, const int numGroups, const float scaleTargets, const float scaleOutputs, const bool conv) { __shared__ float shFilters[B_Y*colorCache][B_Y * filtersPerThread]; // pre-load B_Y pixels from B_Y*filtersPerThread filters __shared__ float shImages[B_Y*colorCache][B_X * imgsPerThread]; // pre-load B_Y pixels from B_X*imgsPerThread images const int imgPixels = imgSize * imgSize; const int filterPixels = filterSize * filterSize; const int numFilterColors = numImgColors / numGroups; const int blocksPerModule = numFilters / (B_Y*filtersPerThread); const int moduleIdx = blockIdx.y / blocksPerModule; const int blockFilterIdx = filtersPerThread * B_Y * (blockIdx.y % blocksPerModule); const int numFiltersPerGroup = numFilters / numGroups; const int blockGroupIdx = blockFilterIdx / numFiltersPerGroup; const int numModules = numModulesX * numModulesX; const int blockColorIdx = numFilterColors * blockGroupIdx; const int tidx = threadIdx.y * B_X + threadIdx.x; const int imgLoadModPosY = paddingStart + (moduleIdx / numModulesX) * moduleStride; const int imgLoadModPosX = paddingStart + (moduleIdx % numModulesX) * moduleStride; const int shFilterLoadY = tidx / (B_Y * filtersPerThread); const int shFilterLoadX = tidx % (B_Y * filtersPerThread); const int myImgIdx = blockIdx.x * B_X * imgsPerThread + threadIdx.x; images += blockColorIdx * imgPixels * imgStride + myImgIdx; filters +=blockFilterIdx + shFilterLoadY 
* numFilters + shFilterLoadX; if (!conv) { filters += moduleIdx * numFilterColors * filterPixels * numFilters; } targets += moduleIdx * numImages + (blockFilterIdx + threadIdx.y) * numImages * numModules + myImgIdx; float prod[filtersPerThread][imgsPerThread]; #pragma unroll for(int f = 0; f < filtersPerThread; f++) { #pragma unroll for(int g = 0; g < imgsPerThread; g++) { prod[f][g] = 0; } } // __shared__ int imgPos[] for (int oc = 0; oc < numFilterColors; oc += colorCache) { // oc stands for outer color (loop) for (int p = 0; p < filterPixels; p += B_Y) { /* * Load B_Y pixels from B_Y*filtersPerThread filters */ if (shFilterLoadY < B_Y) { #pragma unroll for (int p2 = 0; p2 < B_Y; p2 += B_X/filtersPerThread) { if (p + p2 + shFilterLoadY < filterPixels) { #pragma unroll for (int c = 0; c < colorCache; c++) { shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = filters[((oc+c) * filterPixels + p + p2) * numFilters]; } } else { #pragma unroll for (int c = 0; c < colorCache; c++) { shFilters[shFilterLoadY + p2 + c * B_Y][shFilterLoadX] = 0; } } } } /* * Load B_Y pixels from B_X*imgsPerThread images */ const int pixIdx = p + threadIdx.y; if (pixIdx < filterPixels) { const int x = imgLoadModPosX + pixIdx % filterSize; const int y = imgLoadModPosY + pixIdx / filterSize; if (y >= 0 && y < imgSize && x >= 0 && x < imgSize) { float* m = &images[imgStride * (oc * imgPixels + y * imgSize + x)]; #pragma unroll for (int i = 0; i < imgsPerThread; i++) { if (!checkImgBounds || myImgIdx + i * B_X < numImages) { #pragma unroll for (int c = 0; c < colorCache; c++) { shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = m[c * imgStride * imgPixels + i * B_X]; } } else { #pragma unroll for (int c = 0; c < colorCache; c++) { shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0; } } } } else { // Padding #pragma unroll for (int i = 0; i < imgsPerThread; i++) { #pragma unroll for (int c = 0; c < colorCache; c++) { shImages[threadIdx.y + c * B_Y][threadIdx.x + i * B_X] = 0; } } } } __syncthreads(); #pragma unroll for (int i = 0; i < B_Y*colorCache; i++) { #pragma unroll for(int f = 0; f < filtersPerThread; f++) { #pragma unroll for(int g = 0; g < imgsPerThread; g++) { prod[f][g] += shImages[i][g * B_X + threadIdx.x] * shFilters[i][threadIdx.y + f * B_Y]; } } } __syncthreads(); } } if (scale) { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[g * B_X + f * B_Y * numImages * numModules] = scaleTargets * targets[g * B_X + f * B_Y * numImages * numModules] + scaleOutputs * prod[f][g]; } } } } else { #pragma unroll for (int g = 0; g < imgsPerThread; g++) { if (!checkImgBounds || myImgIdx + g * B_X < numImages) { #pragma unroll for (int f = 0; f < filtersPerThread; f++) { targets[g * B_X + f * B_Y * numImages * numModules] = scaleOutputs * prod[f][g]; } } } } } /* * images: (groups, coloursPerGroup, height, width, images) * filters: (numFilterColors, filterPixels, numFilters) if conv * (numModules, numFilterColors, rows, cols, groups, numFiltersPerGroup) * * targets: (numFilters, numModules, numImages) */ int _filterActs( int numGroups, int numImgColorsPerGroup, int numImgRows, int numImgCols, int numImages, int numModulesX, int numModulesY, int numFilterRows, int numFilterCols, int numFiltersPerGroup, float * imageptr, float * filterptr, float * targetptr, int paddingStart, int moduleStride, int imgStride, // step from start of img 0 to start of img 1 float scaleTargets, 
float scaleOutput, bool conv) { int numImgColors = numGroups * numImgColorsPerGroup; int numFilterColors = numImgColorsPerGroup; int numModules = numModulesX * numModulesY; int imgSize = numImgRows; int imgPixels = numImgRows * numImgCols; int numFilters = numFiltersPerGroup * numGroups; int filterPixels = numFilterRows * numFilterCols; int filterSize = numFilterRows; //XXX: asserts should be turned to ifs and returns assert(numGroups > 1 || (numImgColors > 0 && (numImgColors <= 3 || numImgColors % 2 == 0))); assert(numGroups == 1 || numFilterColors % 2 == 0); assert(numFilters % (16 * numGroups) == 0); assert(numImgColors % numGroups == 0); assert(imgSize * imgSize == imgPixels); assert(filterSize * filterSize == filterPixels); // These routines don't handle the case when only part of the image is visited in the convolution assert(paddingStart <= 0 && paddingStart + (numModules-1)*moduleStride + filterSize >= imgSize); assert(moduleStride <= filterSize); dim3 blocks = numFiltersPerGroup % 32 == 0 ? dim3(DIVUP(numImages, 32 * 4), (numModules * numFilters) / (4 * 8)) : dim3(DIVUP(numImages, 32 * 4), (numModules * numFilters) / (4 * 4)); dim3 threads(32, 4); bool checkImgBounds = numImages % 128 != 0; if (numModulesX != numModulesY) { return 1; } if (numImgRows != numImgCols) { return 2; } if (numFilterRows != numFilterCols) { return 3; } if (numImgColors <= 3) { assert(numGroups == 1); // It has to be based on above definitions, but just to be sure. if (scaleTargets == 0) { // don't scale if (numImgColors == 1) { if (checkImgBounds) { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 1, false, true > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 1, false, true > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 1, false, false > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 1, false, false > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numImgColors == 2) { if (checkImgBounds) { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 2, false, true > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, false, true >, cudaFuncCachePreferShared); 
filterActs_YxX_color < 4, 32, 4, 4, 2, false, true > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 2, false, false > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 2, false, false > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numImgColors == 3) { if (checkImgBounds) { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 3, false, true > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, false, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 3, false, true > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 3, false, false > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, false, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 3, false, false > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } } else { // do scale if (numImgColors == 1) { if (checkImgBounds) { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 1, true, true > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 1, true, true > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 1, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 1, true, false > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, 
filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 1, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 1, true, false > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numImgColors == 2) { if (checkImgBounds) { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 2, true, true > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 2, true, true > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 2, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 2, true, false > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 2, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 2, true, false > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } else if (numImgColors == 3) { if (checkImgBounds) { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 3, true, true > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, true, true >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 3, true, true > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } else { if (numFilters % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 8, 3, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 8, 3, true, false > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_color< 4, 32, 4, 4, 3, true, false >, cudaFuncCachePreferShared); filterActs_YxX_color < 4, 32, 4, 4, 3, true, false > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, scaleTargets, scaleOutput, conv); } } } } } else { if (scaleTargets == 0) { // don't scale if (checkImgBounds) { if 
(numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse < 4, 32, 4, 8, 2, false, true > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse < 4, 32, 4, 4, 2, false, true > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else { if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse < 4, 32, 4, 8, 2, false, false > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse < 4, 32, 4, 4, 2, false, false > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } else { // do scale if (checkImgBounds) { if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse < 4, 32, 4, 8, 2, true, true > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, true >, cudaFuncCachePreferShared); filterActs_YxX_sparse < 4, 32, 4, 4, 2, true, true > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } else { if (numFiltersPerGroup % 32 == 0) { cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 8, 2, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse < 4, 32, 4, 8, 2, true, false > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } else { cudaFuncSetCacheConfig(filterActs_YxX_sparse< 4, 32, 4, 4, 2, false, false >, cudaFuncCachePreferShared); filterActs_YxX_sparse < 4, 32, 4, 4, 2, true, false > <<<blocks, threads>>>(imageptr, filterptr, targetptr, numImages, numFilters, imgSize, filterSize, paddingStart, moduleStride, numModulesX, imgStride, numImgColors, numGroups, scaleTargets, scaleOutput, conv); } } } } { // new scope permits new vars cudaError_t err = cudaGetLastError(); if (CUBLAS_STATUS_SUCCESS != err) { PyErr_Format(PyExc_RuntimeError, "filterActs failed (%s)", cudaGetErrorString(err)); return -1; } } return 0; }
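The CUDA dispatch above finishes with an error check that compares the cudaError_t returned by cudaGetLastError() against CUBLAS_STATUS_SUCCESS; both enumerators happen to be zero, so the comparison behaves as intended, but the conventional runtime-API idiom compares against cudaSuccess. A general sketch of that idiom (not a helper from this project) looks like this:

#include <cuda_runtime.h>
#include <cstdio>

// Call immediately after a kernel launch, e.g. check_last_launch("filterActs").
static int check_last_launch(const char* what)
{
    cudaError_t err = cudaGetLastError();   // error recorded by the most recent launch
    if (err != cudaSuccess) {
        fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
        return -1;
    }
    return 0;
}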
8660b62e5bb96d98fb4f6a0e89a7cd4510f25761.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "histo_MultiBlock.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *buffer = NULL; hipMalloc(&buffer, XSIZE*YSIZE); long size = XSIZE*YSIZE; unsigned int *histo = NULL; hipMalloc(&histo, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( histo_MultiBlock), dim3(gridBlock),dim3(threadBlock), 0, 0, buffer,size,histo); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( histo_MultiBlock), dim3(gridBlock),dim3(threadBlock), 0, 0, buffer,size,histo); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( histo_MultiBlock), dim3(gridBlock),dim3(threadBlock), 0, 0, buffer,size,histo); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
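The HIP benchmark above wraps 1000 kernel launches between two std::chrono::steady_clock readings; kernel launches are asynchronous and no hipDeviceSynchronize() is issued before the second reading, so the host-side interval is taken before the launched kernels are guaranteed to have finished. A device-event alternative is sketched below under the assumption that histo_MultiBlock and the grid/block/argument values are the ones set up in the benchmark (the kernel comes from the included histo_MultiBlock.cu); it is an illustrative variant, not part of the original file.

#include <hip/hip_runtime.h>

// Times 1000 launches with HIP events, which wait for device-side completion.
float time_histo_ms(dim3 gridBlock, dim3 threadBlock,
                    unsigned char* buffer, long size, unsigned int* histo)
{
    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    for (int i = 0; i < 1000; ++i) {
        hipLaunchKernelGGL(histo_MultiBlock, gridBlock, threadBlock, 0, 0, buffer, size, histo);
    }
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);              // block until the last kernel has finished
    float ms = 0.0f;
    hipEventElapsedTime(&ms, start, stop);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    return ms;
}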
8660b62e5bb96d98fb4f6a0e89a7cd4510f25761.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "histo_MultiBlock.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned char *buffer = NULL; cudaMalloc(&buffer, XSIZE*YSIZE); long size = XSIZE*YSIZE; unsigned int *histo = NULL; cudaMalloc(&histo, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); histo_MultiBlock<<<gridBlock,threadBlock>>>(buffer,size,histo); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { histo_MultiBlock<<<gridBlock,threadBlock>>>(buffer,size,histo); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { histo_MultiBlock<<<gridBlock,threadBlock>>>(buffer,size,histo); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
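Both the HIP and CUDA variants of this benchmark round XSIZE and YSIZE up with a while-loop until they divide evenly by the block dimensions before computing the grid. The same grid size can be obtained directly with the ceiling-division idiom that the filterActs file above already defines as DIVUP; the helper below is a small illustrative sketch, not code from either file.

#include <cuda_runtime.h>

#define DIVUP(x, y) (((x) + (y) - 1) / (y))

// One block per BLOCKX x BLOCKY tile, with a partial tile at the right/bottom edge.
dim3 make_grid(int xsize, int ysize, int blockx, int blocky)
{
    return dim3(DIVUP(xsize, blockx), DIVUP(ysize, blocky));
}

For example, make_grid(2024, 2024, 24, 24) yields dim3(85, 85), the same grid the rounding loop produces for the largest matrix size in the table above.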
d6fbf02813e99a6d9f389c987795167fc897f72d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <string.h> #include <cutil.h> #include <miscmath.h> #include <amgx_cusparse.h> #include <thrust/copy.h> #include <solvers/multicolor_dilu_solver.h> #include <solvers/block_common_solver.h> #include <gaussian_elimination.h> #include <basic_types.h> #include <util.h> #include <texture.h> #include <ld_functions.h> #include <matrix_io.h> #include <thrust/logical.h> #include <sm_utils.inl> #include <amgx_types/util.h> #include <algorithm> #define AMGX_ILU_COLORING namespace amgx { namespace multicolor_dilu_solver { enum { CTA_SIZE = 128, WARP_SIZE = 32 }; /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, int N, int CTA_SIZE, int WARP_SIZE, int NUM_WARP_ITERS_PER_BLOCK > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 12 ) #endif void DILU_setup_NxN_kernel_large( const int *__restrict A_rows, const int *__restrict A_cols, const int *__restrict A_diag, const Matrix_type *__restrict A_vals, Matrix_type *__restrict Einv, const int *sorted_rows_by_color, const int *row_colors, const int num_rows_per_color, const int current_color ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items computer per CTA. const int NUM_ITEMS_PER_CTA = NUM_WARPS_PER_CTA; // Number of items per grid. const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Shared memory to broadcast column IDs. __shared__ volatile int s_a_col_ids[CTA_SIZE]; __shared__ volatile int s_a_col_its[CTA_SIZE]; // Each thread keeps its own pointer. 
volatile int *my_s_a_col_ids = &s_a_col_ids[threadIdx.x - lane_id]; volatile int *my_s_a_col_its = &s_a_col_its[threadIdx.x - lane_id]; // Shared memory to store the matrices. __shared__ volatile Vector_type s_A_mtx[CTA_SIZE * NUM_WARP_ITERS_PER_BLOCK]; __shared__ volatile Vector_type s_B_mtx[CTA_SIZE * NUM_WARP_ITERS_PER_BLOCK]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. volatile Vector_type *my_s_A_mtx = &s_A_mtx[warp_id * NUM_WARP_ITERS_PER_BLOCK * WARP_SIZE]; volatile Vector_type *my_s_B_mtx = &s_B_mtx[warp_id * NUM_WARP_ITERS_PER_BLOCK * WARP_SIZE]; // Shared memory to store the index of the element Aji. __shared__ volatile int s_A_ji[NUM_WARPS_PER_CTA]; // Each thread keeps its own pointer. volatile int *my_s_A_ji = &s_A_ji[warp_id]; // Precomputing some stuff int idx[NUM_WARP_ITERS_PER_BLOCK]; int idy[NUM_WARP_ITERS_PER_BLOCK]; #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) { const int id = (WARP_SIZE * wb + lane_id) % NxN; idx[wb] = id / N; idy[wb] = id % N; } // Determine which NxN block the threads work with. int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id; // Iterate over the rows of the matrix. One warp per row. for ( ; utils::any( a_row_it < num_rows_per_color ) ; a_row_it += NUM_ITEMS_PER_GRID ) { int a_row_id = -1; if ( a_row_it < num_rows_per_color ) { a_row_id = sorted_rows_by_color[a_row_it]; } // Load the diagonal. Vector_type e_out[NUM_WARP_ITERS_PER_BLOCK]; #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) { e_out[wb] = (Vector_type)0.0; } #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) if ( a_row_id != -1 && (wb * WARP_SIZE + lane_id) < NxN) { e_out[wb] = A_vals[NxN * A_diag[a_row_id] + wb * WARP_SIZE + lane_id]; } // Skip the 1st iteration of the outer-loop (that loop runs on the host). if ( current_color != 0 ) { // Ranges of the rows. int a_col_begin(0), a_col_end(0); if ( a_row_id != -1 ) { a_col_begin = A_rows[a_row_id ]; a_col_end = A_rows[a_row_id + 1]; } // Iterate over the elements in the columns. for ( ; a_col_begin < a_col_end ; a_col_begin += NxN ) { // Each thread loads a single element. If !is_active, a_col_end == 0. int a_col_it = a_col_begin + lane_id; // The identifier of the column if the iterator is valid. int a_col_tmp = -1, a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_tmp = A_cols[a_col_it]; } if ( a_col_tmp != -1 && row_colors[a_col_tmp] < current_color ) { a_col_id = a_col_tmp; } // When the diagonal is stored inside the matrix, we have to reject it. We // could be using a template parameter but it's not needed since that // rejection is really cheap (a couple of extra cycles -- CMP+MOV). if ( a_col_id == a_row_id ) { a_col_id = -1; } // We partition valid and invalid column ids. Valid ones come first. int vote = utils::ballot( a_col_id != -1 ); int ones = __popc( vote ); int dest = __popc( vote & utils::lane_mask_lt() ); if ( a_col_id == -1 ) { dest = ones + lane_id - dest; } my_s_a_col_ids[dest] = a_col_id; my_s_a_col_its[dest] = a_col_it; // Temporary storage with zeros for OOB Vector_type my_A[NUM_WARP_ITERS_PER_BLOCK], my_B[NUM_WARP_ITERS_PER_BLOCK]; #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) { my_A[wb] = (Vector_type)0.0; my_B[wb] = (Vector_type)0.0; } // Threads collaborate to load the rows. for ( int k = 0 ; k < WARP_SIZE ; ++k ) { // Exchange column indices. const int uniform_a_col_id = my_s_a_col_ids[k]; // Early exit. if ( uniform_a_col_id == -1 ) { break; } // Load the iterator. 
const int uniform_a_col_it = my_s_a_col_its[k]; // Load the two matrices. #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) if ((wb * WARP_SIZE + lane_id) < NxN) { my_A[wb] = A_vals[NxN * uniform_a_col_it + wb * WARP_SIZE + lane_id]; my_B[wb] = Einv [NxN * uniform_a_col_id + wb * WARP_SIZE + lane_id]; } #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) { my_s_A_mtx[lane_id + wb * WARP_SIZE] = my_A[wb]; my_s_B_mtx[lane_id + wb * WARP_SIZE] = my_B[wb]; } // Compute the product of matrices. #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) { my_A[wb] = (Vector_type)0.0; #pragma unroll for ( int m = 0 ; m < N ; ++m ) { my_A[wb] += my_s_A_mtx[N * idx[wb] + m] * my_s_B_mtx[N * m + idy[wb]]; } } #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) if ((wb * WARP_SIZE + lane_id) < NxN) { my_s_A_mtx[lane_id + wb * WARP_SIZE] = my_A[wb]; } // We looking for columns in the two rows we're interested in. int b_col_it = A_rows[uniform_a_col_id ]; int b_col_end = A_rows[uniform_a_col_id + 1]; // Init the marker to -1. if ( lane_id == 0 ) { *my_s_A_ji = -1; } // Run the loop. b_col_it += lane_id; int shared_found = utils::ballot( lane_id == 0 && uniform_a_col_id == -1 ); do { bool found = b_col_it < b_col_end && A_cols[b_col_it] == a_row_id; if ( found ) { *my_s_A_ji = b_col_it; } shared_found = shared_found | utils::ballot(found); b_col_it += NxN; } while ( __popc( shared_found ) == 0 && utils::any( b_col_it < b_col_end ) ); // Load the blocks. const int w_aji = *my_s_A_ji; Vector_type my_C[NUM_WARP_ITERS_PER_BLOCK]; #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) { my_C[wb] = (Vector_type)0.0; if ( w_aji != -1 && (wb * WARP_SIZE + lane_id) < NxN) { my_C[wb] = A_vals[NxN * w_aji + wb * WARP_SIZE + lane_id]; } my_s_B_mtx[wb * WARP_SIZE + lane_id] = my_C[wb]; } // Update e_out. #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) { #pragma unroll for ( int m = 0 ; m < N ; ++m ) { e_out[wb] -= my_s_A_mtx[N * idx[wb] + m] * my_s_B_mtx[N * m + idy[wb]]; } } } } // a_col_begin < a_col_end } // current_color != 0 // Store e_out in A #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) { my_s_B_mtx[wb * WARP_SIZE + lane_id] = my_s_A_mtx[wb * WARP_SIZE + lane_id] = e_out[wb]; } // Invert the matrices. #pragma unroll for ( int row = 0 ; row < N ; ++row ) { Vector_type diag(0), diag_tmp = my_s_A_mtx[N * row + row]; if ( isNotCloseToZero(diag_tmp) ) { diag = Vector_type(1) / diag_tmp; } else { diag = Vector_type(1) / epsilon(diag_tmp); } if ( lane_id < N && lane_id != row) { my_s_A_mtx[N * row + lane_id] = my_s_B_mtx[N * row + lane_id] = my_s_B_mtx[N * row + lane_id] * diag; } #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) if ( idx[wb] != row && idy[wb] != row) { my_s_A_mtx[wb * WARP_SIZE + lane_id] = my_s_B_mtx[wb * WARP_SIZE + lane_id] - my_s_B_mtx[N * idx[wb] + row] * my_s_B_mtx[N * row + idy[wb]]; } if ( lane_id < N ) { Vector_type tmp = diag; if ( lane_id != row ) { tmp = -my_s_A_mtx[N * lane_id + row] * diag; } my_s_A_mtx[N * lane_id + row] = tmp; } #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) { my_s_B_mtx[wb * WARP_SIZE + lane_id] = my_s_A_mtx[wb * WARP_SIZE + lane_id]; } } // Store the results to Einv. 
if ( a_row_id != -1 ) #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) if (wb * WARP_SIZE + lane_id < NxN) { Einv[NxN * a_row_id + wb * WARP_SIZE + lane_id] = my_s_A_mtx[wb * WARP_SIZE + lane_id]; } } } template< typename Matrix_type, typename Vector_type, int N, int CTA_SIZE, int WARP_SIZE > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 12 ) #endif void DILU_setup_NxN_kernel( const int *__restrict A_rows, const int *__restrict A_cols, const int *__restrict A_diag, const Matrix_type *__restrict A_vals, Matrix_type *__restrict Einv, const int *sorted_rows_by_color, const int *row_colors, const int num_rows_per_color, const int current_color ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items per warp. const int NUM_ITEMS_PER_WARP = WARP_SIZE / NxN; // Upper-bound on the number of items per warp. const int NUM_ITEMS_PER_WARP_CEIL = (WARP_SIZE + NxN - 1) / NxN; // Number of items computer per CTA. const int NUM_ITEMS_PER_CTA = NUM_ITEMS_PER_WARP * NUM_WARPS_PER_CTA; // Number of items per grid. const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_div_NxN = lane_id / NxN; const int lane_id_mod_NxN = lane_id % NxN; // Useful index to compute matrix products. const int lane_id_mod_NxN_div_N = lane_id_mod_NxN / N; const int lane_id_mod_NxN_mod_N = lane_id_mod_NxN % N; // We need NxN to compute a NxN block. Encode a mask for the first block. int mask_tmp = utils::ballot( lane_id_div_NxN == 0 ); // Mask for ballots. We shift the mask with NxN active bits by the needed number of bits. const int mask_NxN = mask_tmp << (lane_id_div_NxN * __popc(mask_tmp)); // Shared memory to broadcast column IDs. __shared__ volatile int s_a_col_ids[CTA_SIZE]; __shared__ volatile int s_a_col_its[CTA_SIZE]; // Each thread keeps its own pointer. volatile int *my_s_a_col_ids = &s_a_col_ids[threadIdx.x - lane_id_mod_NxN]; volatile int *my_s_a_col_its = &s_a_col_its[threadIdx.x - lane_id_mod_NxN]; // Shared memory to store the matrices. __shared__ volatile Vector_type s_A_mtx[CTA_SIZE]; __shared__ volatile Vector_type s_B_mtx[CTA_SIZE]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. volatile Vector_type *my_s_A_mtx = &s_A_mtx[threadIdx.x - lane_id_mod_NxN]; volatile Vector_type *my_s_B_mtx = &s_B_mtx[threadIdx.x - lane_id_mod_NxN]; // Shared memory to store the index of the element Aji. __shared__ volatile int s_A_ji[NUM_WARPS_PER_CTA * NUM_ITEMS_PER_WARP_CEIL]; // Each thread keeps its own pointer. volatile int *my_s_A_ji = &s_A_ji[warp_id * NUM_ITEMS_PER_WARP_CEIL + lane_id_div_NxN]; // Determine which NxN block the threads work with. int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id * NUM_ITEMS_PER_WARP + lane_id_div_NxN; // Iterate over the rows of the matrix. One warp per row. for ( ; utils::any( a_row_it < num_rows_per_color ) ; a_row_it += NUM_ITEMS_PER_GRID ) { // Is the thread active? For example, for 5x5 only the first 25 threads are active per warp. // At compile time, the compiler will see is_active == true for 2x2 (since NxN & (NxN-1) evals // to false ; that's the common trick to determine if a number is a power of 2). 
int is_active = true; if ( NxN & (NxN - 1) ) { is_active = lane_id_div_NxN < NUM_ITEMS_PER_WARP; } int a_row_id = -1; if ( is_active && a_row_it < num_rows_per_color ) { a_row_id = sorted_rows_by_color[a_row_it]; } // Load the diagonal. Vector_type e_out(0); if ( a_row_id != -1 ) { e_out = A_vals[NxN * A_diag[a_row_id] + lane_id_mod_NxN]; } // Skip the 1st iteration of the outer-loop (that loop runs on the host). if ( current_color != 0 ) { // Ranges of the rows. int a_col_begin(0), a_col_end(0); if ( a_row_id != -1 ) { a_col_begin = A_rows[a_row_id ]; a_col_end = A_rows[a_row_id + 1]; } // Iterate over the elements in the columns. for ( ; a_col_begin < a_col_end ; a_col_begin += NxN ) { unsigned int active_mask = utils::activemask(); // Each thread loads a single element. If !is_active, a_col_end == 0. int a_col_it = a_col_begin + lane_id_mod_NxN; // The identifier of the column if the iterator is valid. int a_col_tmp = -1, a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_tmp = A_cols[a_col_it]; } if ( a_col_tmp != -1 && row_colors[a_col_tmp] < current_color ) { a_col_id = a_col_tmp; } // When the diagonal is stored inside the matrix, we have to reject it. We // could be using a template parameter but it's not needed since that // rejection is really cheap (a couple of extra cycles -- CMP+MOV). if ( a_col_id == a_row_id ) { a_col_id = -1; } // We partition valid and invalid column ids. Valid ones come first. int vote = utils::ballot( a_col_id != -1, active_mask ) & mask_NxN; int ones = __popc( vote ); int dest = __popc( vote & utils::lane_mask_lt() ); if ( a_col_id == -1 ) { dest = ones + lane_id_mod_NxN - dest; } my_s_a_col_ids[dest] = a_col_id; my_s_a_col_its[dest] = a_col_it; // Threads collaborate to load the rows. for ( int k = 0 ; k < NxN ; ++k ) { // Exchange column indices. const int uniform_a_col_id = my_s_a_col_ids[k]; // Early exit. if ( utils::all( uniform_a_col_id == -1, active_mask ) ) { break; } // Load the iterator. const int uniform_a_col_it = my_s_a_col_its[k]; // Load the two matrices. Vector_type my_A(0), my_B(0); if ( uniform_a_col_id != -1 ) { my_A = A_vals[NxN * uniform_a_col_it + lane_id_mod_NxN]; my_B = Einv [NxN * uniform_a_col_id + lane_id_mod_NxN]; } my_s_A_mtx[lane_id_mod_NxN] = my_A; my_s_B_mtx[lane_id_mod_NxN] = my_B; utils::syncwarp(active_mask); // Compute the product of matrices. Vector_type tmp(0); #pragma unroll for ( int m = 0 ; m < N ; ++m ) { tmp += my_s_A_mtx[N * lane_id_mod_NxN_div_N + m] * my_s_B_mtx[N * m + lane_id_mod_NxN_mod_N]; } my_s_A_mtx[lane_id_mod_NxN] = tmp; // We looking for columns in the two rows we're interested in. int b_col_it(0), b_col_end(0); if ( is_active && uniform_a_col_id != -1 ) { b_col_it = A_rows[uniform_a_col_id ]; b_col_end = A_rows[uniform_a_col_id + 1]; } // Init the marker to -1. if ( lane_id_mod_NxN == 0 ) { *my_s_A_ji = -1; } // Run the loop. b_col_it += lane_id_mod_NxN; int shared_found = utils::ballot( lane_id_mod_NxN == 0 && uniform_a_col_id == -1, active_mask ); do { bool found = b_col_it < b_col_end && A_cols[b_col_it] == a_row_id; if ( found ) { *my_s_A_ji = b_col_it; } shared_found = shared_found | utils::ballot(found, active_mask); b_col_it += NxN; } while ( __popc( shared_found ) < NUM_ITEMS_PER_WARP && utils::any( b_col_it < b_col_end, active_mask ) ); // Load the blocks. const int w_aji = *my_s_A_ji; Vector_type my_C(0); if ( w_aji != -1 ) { my_C = A_vals[NxN * w_aji + lane_id_mod_NxN]; } my_s_B_mtx[lane_id_mod_NxN] = my_C; // Update e_out. 
#pragma unroll for ( int m = 0 ; m < N ; ++m ) { e_out -= my_s_A_mtx[N * lane_id_mod_NxN_div_N + m] * my_s_B_mtx[N * m + lane_id_mod_NxN_mod_N]; } } } // a_col_begin < a_col_end } // current_color != 0 // Store e_out in A my_s_A_mtx[lane_id_mod_NxN] = e_out; // Invert the matrices. #pragma unroll for ( int row = 0 ; row < N ; ++row ) { Vector_type diag(0), diag_tmp = my_s_A_mtx[N * row + row]; if ( isNotCloseToZero(diag_tmp) ) { diag = Vector_type(1) / diag_tmp; } else { diag = Vector_type(1) / epsilon(diag_tmp); } if ( is_active && lane_id_mod_NxN_div_N == 0 && lane_id_mod_NxN_mod_N != row ) { my_s_A_mtx[N * row + lane_id_mod_NxN_mod_N] *= diag; } if ( is_active && lane_id_mod_NxN_div_N != row && lane_id_mod_NxN_mod_N != row ) { my_s_A_mtx[lane_id_mod_NxN] -= my_s_A_mtx[N * lane_id_mod_NxN_div_N + row] * my_s_A_mtx[N * row + lane_id_mod_NxN_mod_N]; } if ( is_active && lane_id_mod_NxN_div_N == 0 ) { Vector_type tmp = diag; if ( lane_id_mod_NxN_mod_N != row ) { tmp = -my_s_A_mtx[N * lane_id_mod_NxN_mod_N + row] * diag; } my_s_A_mtx[N * lane_id_mod_NxN_mod_N + row] = tmp; } } // Store the results to Einv. if ( a_row_id != -1 ) { Einv[NxN * a_row_id + lane_id_mod_NxN] = my_s_A_mtx[lane_id_mod_NxN]; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, int NUM_THREADS_PER_ROW, int CTA_SIZE, int WARP_SIZE > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 16 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 16 ) #endif void DILU_setup_1x1_kernel( const int *__restrict A_rows, const int *__restrict A_cols, const int *__restrict A_diag, const Matrix_type *__restrict A_vals, Matrix_type *__restrict Einv, const int *sorted_rows_by_color, const int *row_colors, const int num_rows_per_color, const int current_color ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Number of items per grid. const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_div_NTPR = lane_id / NUM_THREADS_PER_ROW; const int lane_id_mod_NTPR = lane_id % NUM_THREADS_PER_ROW; // Shared memory to broadcast column IDs. __shared__ int s_a_col_ids[CTA_SIZE]; // Each thread keeps its own pointer. int *my_s_a_col_ids = &s_a_col_ids[warp_id * WARP_SIZE]; // Shared memory to store the matrices. __shared__ int s_A_ji[CTA_SIZE]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. int *my_s_A_ji = &s_A_ji[warp_id * WARP_SIZE]; // Determine which NxN block the threads work with. int a_row_it = blockIdx.x * NUM_WARPS_PER_CTA + warp_id; // Iterate over the rows of the matrix. One warp per row. for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_WARPS_PER_GRID ) { int a_row_id = sorted_rows_by_color[a_row_it]; // Load the diagonal. Vector_type e_out(0); // Skip the 1st iteration of the outer-loop (that loop runs on the host). if ( current_color != 0 ) { // Ranges of the row. int a_col_begin = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // Iterate over the elements in the columns. for ( ; a_col_begin < a_col_end ; a_col_begin += WARP_SIZE ) { // Each thread loads a single element. int a_col_it = a_col_begin + lane_id; // The identifier of the column if the iterator is valid. 
int a_col_tmp = -1, a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_tmp = A_cols[a_col_it]; } if ( a_col_tmp != -1 && row_colors[a_col_tmp] < current_color ) { a_col_id = a_col_tmp; } // When the diagonal is stored inside the matrix, we have to reject it. We // could be using a template parameter but it's not needed since that // rejection is really cheap (a couple of extra cycles -- CMP+MOV). if ( a_col_id == a_row_id ) { a_col_id = -1; } // We partition valid and invalid column ids. Valid ones come first. int vote = utils::ballot( a_col_id != -1 ); int ones = __popc( vote ); int dest = __popc( vote & utils::lane_mask_lt() ); if ( a_col_id == -1 ) { dest = ones + lane_id - dest; } my_s_a_col_ids[dest] = a_col_id; // Reset A_jis. my_s_A_ji[lane_id] = -1; __syncwarp(); // Threads collaborate to load the rows. for ( int k = 0 ; k < ones ; k += WARP_SIZE / NUM_THREADS_PER_ROW ) { const int local_k = k + lane_id_div_NTPR; // Exchange column indices. int uniform_a_col_id = -1; if ( local_k < ones ) { uniform_a_col_id = my_s_a_col_ids[local_k]; } // We look for columns in the rows we're interested in. int b_col_it(0), b_col_end(0); if ( uniform_a_col_id != -1 ) { b_col_it = A_rows[uniform_a_col_id ]; b_col_end = A_rows[uniform_a_col_id + 1]; } // Run the loop. b_col_it += lane_id_mod_NTPR; int shared_found = utils::ballot( lane_id_mod_NTPR == 0 && uniform_a_col_id == -1 ); do { bool found = b_col_it < b_col_end && A_cols[b_col_it] == a_row_id; if ( found ) { my_s_A_ji[local_k] = b_col_it; } shared_found = shared_found | utils::ballot(found); b_col_it += NUM_THREADS_PER_ROW; } while ( __popc( shared_found ) < WARP_SIZE / NUM_THREADS_PER_ROW && utils::any( b_col_it < b_col_end ) ); } __syncwarp(); // Where to get my A_ji from (if any). int a_ji_it = my_s_A_ji[dest]; // Grab A_jis. Matrix_type a_ji(0); if ( a_ji_it != -1 ) { a_ji = A_vals[a_ji_it]; } // Update e_out. if ( a_col_id != -1 ) { e_out += a_ji * Einv[a_col_id] * A_vals[a_col_it]; } } // a_col_begin < a_col_end } // current_color != 0 // Reduce the e_outs in one value. #pragma unroll for ( int mask = WARP_SIZE / 2 ; mask > 0 ; mask >>= 1 ) { e_out += utils::shfl_xor( e_out, mask ); } // Store the result. if ( lane_id == 0 ) { Matrix_type res = A_vals[A_diag[a_row_id]] - e_out; if ( res != Matrix_type(0) ) { res = Matrix_type(1) / res; } Einv[a_row_id] = static_cast<Vector_type>(res); } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< int N, bool ROW_MAJOR, int WARP_SIZE, typename Value_type > static __device__ __forceinline__ Value_type reduce_distributed_vectors( Value_type x, int is_leader ) { if ( N & (N - 1) ) { #pragma unroll for ( int i = 1 ; i < N ; ++i ) { Value_type other_x = utils::shfl_down( x, ROW_MAJOR ? i : N * i ); if ( is_leader ) { x += other_x; } } } else { #pragma unroll for ( int i = 1 ; i < N ; i <<= 1 ) { x += utils::shfl_xor( x, ROW_MAJOR ? 
i : N * i ); } } return x; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, int N, int CTA_SIZE, int WARP_SIZE, bool ROW_MAJOR, bool HAS_EXTERNAL_DIAG > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 12 ) #endif void DILU_forward_NxN_kernel( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, const int *__restrict A_diag, const Vector_type *x, const Vector_type *b, Vector_type *__restrict delta, const int *__restrict sorted_rows_by_color, const int num_rows_per_color, const int current_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const ColoringType boundary_coloring, const int boundary_index ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items per warp. const int NUM_ITEMS_PER_WARP = WARP_SIZE / NxN; // Number of items computer per CTA. const int NUM_ITEMS_PER_CTA = NUM_ITEMS_PER_WARP * NUM_WARPS_PER_CTA; // Number of items per grid. const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_div_NxN = lane_id / NxN; const int lane_id_mod_NxN = lane_id % NxN; // Useful index to compute matrix products. const int lane_id_mod_NxN_div_N = lane_id_mod_NxN / N; const int lane_id_mod_NxN_mod_N = lane_id_mod_NxN % N; // We to get my data from when I use SHFL. const int shfl_offset = lane_id - lane_id_mod_NxN; // Shared memory needed to exchange X and delta. __shared__ volatile Vector_type s_mem[CTA_SIZE]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. volatile Vector_type *my_s_mem = &s_mem[threadIdx.x - lane_id_mod_NxN]; // Is the thread active? For example, for 5x5 only the first 25 threads are active per warp. // At compile time, the compiler will see is_active == true for 2x2 (since NxN & (NxN-1) evals // to false ; that's the common trick to determine if a number is a power of 2). int is_active = true; if ( NxN & (NxN - 1) ) { is_active = lane_id_div_NxN < NUM_ITEMS_PER_WARP; } // Determine which NxN block the threads work with. int a_row_it = num_rows_per_color; if ( is_active ) { a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id * NUM_ITEMS_PER_WARP + lane_id_div_NxN; } // Iterate over the rows of the matrix. One warp per row. for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ITEMS_PER_GRID ) { int a_row_id = sorted_rows_by_color[a_row_it]; // Load one block of B. Vector_type my_bmAx(0); if ( ROW_MAJOR ) { if ( lane_id_mod_NxN_mod_N == 0 ) { my_bmAx = __cachingLoad(&b[N * a_row_id + lane_id_mod_NxN_div_N]); } } else { if ( lane_id_mod_NxN_div_N == 0 ) { my_bmAx = b[N * a_row_id + lane_id_mod_NxN_mod_N]; } } // Don't do anything if X is zero. int a_col_begin = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // If the diagonal is stored separately, we have a special treatment. int a_col_max = a_col_end; if ( HAS_EXTERNAL_DIAG ) { ++a_col_max; } // Each warp load column indices of 32 nonzero blocks for ( ; utils::any( a_col_begin < a_col_max ) ; a_col_begin += NxN ) { // Each thread loads a single element. If !is_active, a_col_end == 0. int a_col_it = a_col_begin + lane_id_mod_NxN; // Get the ID of the column. 
int a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_id = A_cols[a_col_it]; } if ( HAS_EXTERNAL_DIAG && a_col_it == a_col_end ) { a_col_id = a_row_id; } // Determine if the color is valid. int a_col_is_valid = false; #ifdef AMGX_ILU_COLORING if ( a_col_id != -1 && current_color != 0 ) { if ( boundary_coloring == FIRST ) { a_col_is_valid = a_col_id >= boundary_index; } else { a_col_is_valid = a_col_id < boundary_index && row_colors[a_col_id] < current_color; } } #else if ( a_col_id != -1 && current_color != 0 ) { a_col_is_valid = row_colors[a_col_id] < current_color; } #endif // Count the number of active columns. // int vote = utils::ballot(aColId != -1); // The number of iterations. // int nCols = max( __popc( vote & 0x0000ffff ), __popc( vote & 0xffff0000 ) ); // Loop over columns. We compute 8 columns per iteration. for ( int k = 0 ; k < NxN ; k += N ) { int my_k = k + lane_id_mod_NxN_div_N; // Load N blocks of X. int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k ); int uniform_a_col_is_valid = utils::shfl( a_col_is_valid, shfl_offset + my_k ); Vector_type my_x(0); if ( uniform_a_col_id != -1 ) { my_x = __cachingLoad(&x[N * uniform_a_col_id + lane_id_mod_NxN_mod_N]); } if ( uniform_a_col_id != -1 && uniform_a_col_is_valid ) { my_x += delta[N * uniform_a_col_id + lane_id_mod_NxN_mod_N]; } my_s_mem[lane_id_mod_NxN] = my_x; // Load N blocks of A. #pragma unroll for ( int i = 0 ; i < N ; ++i ) { int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1; if ( uniform_a_col_tmp < a_col_end ) { uniform_a_col_it = uniform_a_col_tmp; } if ( HAS_EXTERNAL_DIAG && is_active && uniform_a_col_tmp == a_col_end ) { uniform_a_col_it = A_diag[a_row_id]; } Matrix_type my_val(0); if ( uniform_a_col_it != -1 ) { my_val = A_vals[NxN * uniform_a_col_it + lane_id_mod_NxN]; } if ( ROW_MAJOR ) { my_bmAx -= my_val * my_s_mem[N * i + lane_id_mod_NxN_mod_N]; } else { my_bmAx -= my_val * my_s_mem[N * i + lane_id_mod_NxN_div_N]; } } } // Loop over k } // Loop over aColIt // Load Einvs. Vector_type my_Einv = Einv[NxN * a_row_id + lane_id_mod_NxN]; // Reduce bmAx terms. int is_leader = lane_id_mod_NxN_div_N == 0; if ( ROW_MAJOR ) { is_leader = lane_id_mod_NxN_mod_N == 0; } my_bmAx = reduce_distributed_vectors<N, ROW_MAJOR, WARP_SIZE>( my_bmAx, is_leader ); // Update the shared terms. if ( ROW_MAJOR ) { if ( lane_id_mod_NxN_mod_N == 0 ) { my_s_mem[lane_id_mod_NxN_div_N] = my_bmAx; } } else { if ( lane_id_mod_NxN_div_N == 0 ) { my_s_mem[lane_id_mod_NxN_mod_N] = my_bmAx; } } // Update the diagonal term. if ( ROW_MAJOR ) { my_bmAx = my_Einv * my_s_mem[lane_id_mod_NxN_mod_N]; } else { my_bmAx = my_Einv * my_s_mem[lane_id_mod_NxN_div_N]; } // Reduce bmAx terms. my_bmAx = reduce_distributed_vectors<N, ROW_MAJOR, WARP_SIZE>( my_bmAx, is_leader ); // Store the results. 
if ( ROW_MAJOR ) { if ( lane_id_mod_NxN_mod_N == 0 ) { delta[N * a_row_id + lane_id_mod_NxN_div_N] = my_bmAx; } } else { if ( lane_id_mod_NxN_div_N == 0 ) { delta[N * a_row_id + lane_id_mod_NxN_mod_N] = my_bmAx; } } } } template< typename Matrix_type, typename Vector_type, int N, int CTA_SIZE, int WARP_SIZE, bool HAS_EXTERNAL_DIAG, int NUM_WARP_ITERS_PER_BLOCK > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 12 ) #endif void DILU_forward_NxN_kernel_large( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, const int *__restrict A_diag, const Vector_type *x, const Vector_type *b, Vector_type *__restrict delta, const int *__restrict sorted_rows_by_color, const int num_rows_per_color, const int current_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const ColoringType boundary_coloring, const int boundary_index ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of rows computed per CTA. const int NUM_ITEMS_PER_CTA = NUM_WARPS_PER_CTA; // Number of rows? per grid. const int NUM_ITEMS_PER_GRID = CTA_SIZE; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. // Useful index to compute matrix products. const int lane_id_div_N = lane_id / N; const int lane_id_mod_N = lane_id % N; // id of a lane inside the block const int blocks_per_warp = WARP_SIZE / N; // we process this cols per warp per row const int row_elems_per_warp = blocks_per_warp * N; // Shared to store bmAx __shared__ volatile Vector_type bmAx[CTA_SIZE * NUM_WARP_ITERS_PER_BLOCK]; volatile Vector_type *my_bmAx_s = &bmAx[warp_id * NUM_WARP_ITERS_PER_BLOCK * WARP_SIZE]; // Determine which NxN block the threads work with. int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id; // Iterate over the rows of the matrix. One warp per row. for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ITEMS_PER_GRID ) { int a_row_id = sorted_rows_by_color[a_row_it]; // Load one block of B. Vector_type my_bmAx(0); if ( lane_id < N ) { my_bmAx = __cachingLoad(&b[N * a_row_id + lane_id]); } #pragma unroll for (int i = 0; i < NUM_WARP_ITERS_PER_BLOCK; i++) { my_bmAx_s[WARP_SIZE * i + lane_id] = 0.0; } // Don't do anything if X is zero. int a_col_begin = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // If the diagonal is stored separately, we have a special treatment. int a_col_max = a_col_end; if ( HAS_EXTERNAL_DIAG ) { ++a_col_max; } // Each warp load column indices of 32 nonzero blocks for ( ; utils::any( a_col_begin < a_col_max ) ; a_col_begin += WARP_SIZE ) // NxN { // Each thread loads a single element. If !is_active, a_col_end == 0. int a_col_it = a_col_begin + lane_id; // Get the ID of the column. int a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_id = A_cols[a_col_it]; } if ( HAS_EXTERNAL_DIAG && a_col_it == a_col_end ) { a_col_id = a_row_id; } // Determine if the color is valid. int a_col_is_valid = false; #ifdef AMGX_ILU_COLORING if ( a_col_id != -1 && current_color != 0 ) { if ( boundary_coloring == FIRST ) { a_col_is_valid = a_col_id >= boundary_index; } else { a_col_is_valid = a_col_id < boundary_index && row_colors[a_col_id] < current_color; } } #else if ( a_col_id != -1 && current_color != 0 ) { a_col_is_valid = row_colors[a_col_id] < current_color; } #endif // Loop over columns. 
We compute blocks_per_warp columns per iteration. for ( int k = 0 ; k < WARP_SIZE ; k += blocks_per_warp ) { // id of the processed block by this thread int my_k = k + lane_id_div_N; // Load N blocks of X (if valid) int uniform_a_col_id = utils::shfl( a_col_id, my_k ); int uniform_a_col_is_valid = utils::shfl( a_col_is_valid, my_k ); Vector_type my_x(0); if ( uniform_a_col_id != -1 && lane_id < row_elems_per_warp) { my_x = __cachingLoad(&x[N * uniform_a_col_id + lane_id_mod_N]); } if ( uniform_a_col_id != -1 && uniform_a_col_is_valid && lane_id < row_elems_per_warp) { my_x += delta[N * uniform_a_col_id + lane_id_mod_N]; } //my_s_mem[lane_id] = my_x; #pragma unroll for ( int i = 0 ; i < blocks_per_warp ; ++i ) { // k-th batch of blocks, i-th block. each thread process a column/row of a_it = uniform_a_col_tmp int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1; // check if we are going out of bounds/color if ( uniform_a_col_tmp < a_col_end ) { uniform_a_col_it = uniform_a_col_tmp; } if ( HAS_EXTERNAL_DIAG && uniform_a_col_tmp == a_col_end ) { uniform_a_col_it = A_diag[a_row_id]; } // swipe with the whole warp if (uniform_a_col_it != -1) { int block_inside_id = lane_id; #pragma unroll for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++) { Matrix_type my_val(0); if ( uniform_a_col_it != -1 && block_inside_id < NxN) { my_val = A_vals[NxN * uniform_a_col_it + block_inside_id]; } my_bmAx_s[block_inside_id] -= my_val * utils::shfl(my_x, N * i + block_inside_id % N); // MOD IS SLOW! block_inside_id += WARP_SIZE; } } } } // Loop over k } // Loop over aColIt // Load Einvs. Vector_type my_Einv[NUM_WARP_ITERS_PER_BLOCK]; #pragma unroll for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++) { my_Einv[j] = 0.0; } #pragma unroll for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++) { if ((WARP_SIZE * j + lane_id) < NxN) { my_Einv[j] = Einv[NxN * a_row_id + WARP_SIZE * j + lane_id]; } } // Reduce bmAx terms. { #pragma unroll for ( int i = 0 ; i < N ; ++i ) { if ( lane_id < N ) { my_bmAx += my_bmAx_s[N * lane_id + i]; } } } // Update the diagonal term. int block_inside_id = lane_id; #pragma unroll for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++) { my_bmAx_s[block_inside_id] = my_Einv[j] * utils::shfl(my_bmAx, block_inside_id % N); block_inside_id += WARP_SIZE; } // Reduce bmAx terms. { my_bmAx = 0.0; #pragma unroll for ( int i = 0 ; i < N ; ++i ) { int idx = N * lane_id + i; if ( lane_id < N ) { my_bmAx += my_bmAx_s[idx]; } } } // Store the results. if ( lane_id < N ) { delta[N * a_row_id + lane_id] = my_bmAx; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, int CTA_SIZE, int WARP_SIZE, bool ROW_MAJOR, bool HAS_EXTERNAL_DIAG > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 12 ) #endif void DILU_forward_4x4_kernel( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, const int *__restrict A_diag, const Vector_type *x, const Vector_type *b, Vector_type *__restrict delta, const int *__restrict sorted_rows_by_color, const int num_rows_per_color, const int current_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const ColoringType boundary_coloring, const int boundary_index ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Number of items per warp. 
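// With a 32-thread warp the next constant evaluates to 2: each warp sweeps two rows at a
// time, one 16-thread half warp per 4x4 block row.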
const int NUM_ITEMS_PER_WARP = WARP_SIZE / 16; // Number of items computer per CTA. const int NUM_ITEMS_PER_CTA = NUM_ITEMS_PER_WARP * NUM_WARPS_PER_CTA; // Number of items per grid. const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_mod_16 = lane_id % 16; // Useful index to compute matrix products. const int lane_id_mod_16_div_4 = lane_id_mod_16 / 4; const int lane_id_mod_16_mod_4 = lane_id_mod_16 % 4; // We to get my data from when I use SHFL. const int shfl_offset = lane_id - lane_id_mod_16; // Shared memory needed to exchange X and delta. __shared__ volatile Vector_type s_mem[CTA_SIZE]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. volatile Vector_type *my_s_mem = &s_mem[threadIdx.x - lane_id_mod_16]; // Determine which 16 block the threads work with. int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + threadIdx.x / 16; // Iterate over the rows of the matrix. One warp per row. for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ITEMS_PER_GRID ) { int a_row_id = sorted_rows_by_color[a_row_it]; // Load one block of B. Vector_type my_bmAx(0); if ( ROW_MAJOR ) { if ( lane_id_mod_16_mod_4 == 0 ) { my_bmAx = __cachingLoad(&b[4 * a_row_id + lane_id_mod_16_div_4]); } } else { if ( lane_id_mod_16_div_4 == 0 ) { my_bmAx = b[4 * a_row_id + lane_id_mod_16_mod_4]; } } // Don't do anything if X is zero. int a_col_begin = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // If the diagonal is stored separately, we have a special treatment. int a_col_max = a_col_end; if ( HAS_EXTERNAL_DIAG ) { ++a_col_max; } // Each warp load column indices of 32 nonzero blocks for ( ; utils::any( a_col_begin < a_col_max ) ; a_col_begin += 16 ) { // Each thread loads a single element. If !is_active, a_col_end == 0. int a_col_it = a_col_begin + lane_id_mod_16; // Get the ID of the column. int a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_id = A_cols[a_col_it]; } if ( HAS_EXTERNAL_DIAG && a_col_it == a_col_end ) { a_col_id = a_row_id; } // Determine if the color is valid. int a_col_is_valid = false; #ifdef AMGX_ILU_COLORING if ( a_col_id != -1 && current_color != 0 ) { if ( boundary_coloring == FIRST ) { a_col_is_valid = a_col_id >= boundary_index; } else { a_col_is_valid = a_col_id < boundary_index && row_colors[a_col_id] < current_color; } } #else if ( a_col_id != -1 && current_color != 0 ) { a_col_is_valid = row_colors[a_col_id] < current_color; } #endif // Count the number of active columns. // int vote = utils::ballot(aColId != -1); // The number of iterations. // int nCols = max( __popc( vote & 0x0000ffff ), __popc( vote & 0xffff0000 ) ); // Loop over columns. We compute 8 columns per iteration. for ( int k = 0 ; k < 16 ; k += 4 ) { int my_k = k + lane_id_mod_16_div_4; // Load N blocks of X. int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k ); int uniform_a_col_is_valid = utils::shfl( a_col_is_valid, shfl_offset + my_k ); Vector_type my_x(0); if ( uniform_a_col_id != -1 ) { my_x = __cachingLoad(&x[4 * uniform_a_col_id + lane_id_mod_16_mod_4]); } if ( uniform_a_col_id != -1 && uniform_a_col_is_valid ) { my_x += delta[4 * uniform_a_col_id + lane_id_mod_16_mod_4]; } my_s_mem[lane_id_mod_16] = my_x; // Load N blocks of A. 
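// At this point the 16 lanes have staged x (plus delta for already-relaxed colors) for four
// columns in shared memory; the unrolled loop below lets each lane multiply its own entry of
// the corresponding 4x4 blocks of A against the staged values and accumulate into my_bmAx.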
#pragma unroll for ( int i = 0 ; i < 4 ; ++i ) { int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1; if ( uniform_a_col_tmp < a_col_end ) { uniform_a_col_it = uniform_a_col_tmp; } if ( HAS_EXTERNAL_DIAG && uniform_a_col_tmp == a_col_end ) { uniform_a_col_it = A_diag[a_row_id]; } Matrix_type my_val(0); if ( uniform_a_col_it != -1 ) { my_val = A_vals[16 * uniform_a_col_it + lane_id_mod_16]; } if ( ROW_MAJOR ) { my_bmAx -= my_val * my_s_mem[4 * i + lane_id_mod_16_mod_4]; } else { my_bmAx -= my_val * my_s_mem[4 * i + lane_id_mod_16_div_4]; } } } // Loop over k } // Loop over aColIt // Load Einvs. Vector_type my_Einv = Einv[16 * a_row_id + lane_id_mod_16]; // Reduce bmAx terms. int is_leader = lane_id_mod_16_div_4 == 0; if ( ROW_MAJOR ) { is_leader = lane_id_mod_16_mod_4 == 0; } my_bmAx = reduce_distributed_vectors<4, ROW_MAJOR, WARP_SIZE>( my_bmAx, is_leader ); // Update the shared terms. if ( ROW_MAJOR ) { if ( lane_id_mod_16_mod_4 == 0 ) { my_s_mem[lane_id_mod_16_div_4] = my_bmAx; } } else { if ( lane_id_mod_16_div_4 == 0 ) { my_s_mem[lane_id_mod_16_mod_4] = my_bmAx; } } // Update the diagonal term. if ( ROW_MAJOR ) { my_bmAx = my_Einv * my_s_mem[lane_id_mod_16_mod_4]; } else { my_bmAx = my_Einv * my_s_mem[lane_id_mod_16_div_4]; } // Reduce bmAx terms. my_bmAx = reduce_distributed_vectors<4, ROW_MAJOR, WARP_SIZE>( my_bmAx, is_leader ); // Store the results. if ( ROW_MAJOR ) { if ( lane_id_mod_16_mod_4 == 0 ) { delta[4 * a_row_id + lane_id_mod_16_div_4] = my_bmAx; } } else { if ( lane_id_mod_16_div_4 == 0 ) { delta[4 * a_row_id + lane_id_mod_16_mod_4] = my_bmAx; } } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, int CTA_SIZE, bool HAS_EXTERNAL_DIAG > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 12 ) #endif void DILU_forward_4x4_kernel_row_major_vec4( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, const int *__restrict A_diag, const Vector_type *x, const Vector_type *b, Vector_type *__restrict delta, const int *__restrict sorted_rows_by_color, const int num_rows_per_color, const int current_color, const int *__restrict row_colors, const Matrix_type *Einv, const ColoringType boundary_coloring, const int boundary_index ) { // Number of half warps per CTA. const int NUM_HALF_WARPS = CTA_SIZE / 16; // Coordinates of the thread. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Coordinates of the thread in the CTA. const int thread_id_div_16 = threadIdx.x / 16; const int thread_id_mod_16 = threadIdx.x % 16; // Useful constants. const int thread_id_mod_16_div_4 = thread_id_mod_16 / 4; const int thread_id_mod_16_mod_4 = thread_id_mod_16 % 4; const int shfl_offset = 16 * (lane_id / 16); // Shared memory needed to exchange X and delta. __shared__ volatile Vector_type s_mem[CTA_SIZE]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. volatile Vector_type *my_s_mem = &s_mem[16 * thread_id_div_16]; // The iterator over rows. int a_row_it = blockIdx.x * NUM_HALF_WARPS + thread_id_div_16; // Iterate over the rows of the matrix. One warp per row. 
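// In this vectorized variant each row is owned by a 16-thread half warp: a_row_it advances in
// units of half warps, and every thread fetches four consecutive values of a 4x4 block at
// once through utils::load_vec4 further down.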
for ( ; a_row_it < num_rows_per_color ; a_row_it += gridDim.x * NUM_HALF_WARPS ) { unsigned int active_mask = utils::activemask(); int a_row_id = sorted_rows_by_color[a_row_it]; // Load one block of B. Vector_type my_bmAx(0); if ( thread_id_mod_16_div_4 == 0 ) { my_bmAx = __cachingLoad(&b[4 * a_row_id + thread_id_mod_16_mod_4]); } // The range of the row. int a_col_begin = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // If it has an external diagonal, we need one more item to put the diag. int a_col_max = a_col_end; if ( HAS_EXTERNAL_DIAG ) { ++a_col_max; } // Each warp load column indices of 32 nonzero blocks for ( ; a_col_begin < a_col_max ; a_col_begin += 16 ) { unsigned int active_mask_inner = utils::activemask(); int a_col_it = a_col_begin + thread_id_mod_16; // Get the ID of the column. int a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_id = __cachingLoad(&A_cols[a_col_it]); } if ( HAS_EXTERNAL_DIAG && a_col_it == a_col_end ) { a_col_id = a_row_id; } // Determine if the color is valid. int a_col_is_valid = false; #ifdef AMGX_ILU_COLORING if ( a_col_id != -1 && current_color != 0 ) { if ( boundary_coloring == FIRST ) { a_col_is_valid = a_col_id >= boundary_index; } else { a_col_is_valid = a_col_id < boundary_index && __cachingLoad(&row_colors[a_col_id]) < current_color; } } #else if ( a_col_id != -1 && current_color != 0 ) { a_col_is_valid = row_colors[a_col_id] < current_color; } #endif // Loop over columns. We compute 8 columns per iteration. for ( int k = 0 ; k < 16 ; k += 4 ) { int my_k = k + thread_id_mod_16_div_4; // Load 8 blocks of X. int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k, warpSize, active_mask_inner ); int uniform_a_col_is_valid = utils::shfl( a_col_is_valid, shfl_offset + my_k, warpSize, active_mask_inner ); Vector_type my_x(0); if ( uniform_a_col_id != -1 ) { my_x = __cachingLoad(&x[4 * uniform_a_col_id + thread_id_mod_16_mod_4]); } if ( uniform_a_col_id != -1 && uniform_a_col_is_valid ) { my_x += delta[4 * uniform_a_col_id + thread_id_mod_16_mod_4]; } my_s_mem[thread_id_mod_16] = my_x; int uniform_a_col_tmp = a_col_begin + my_k, uniform_a_col_it = -1; if ( uniform_a_col_tmp < a_col_end ) { uniform_a_col_it = uniform_a_col_tmp; } if ( HAS_EXTERNAL_DIAG && uniform_a_col_tmp == a_col_end ) { uniform_a_col_it = A_diag[a_row_id]; } Matrix_type my_vals[4] = { Matrix_type(0) }; if ( uniform_a_col_it != -1 ) { utils::load_vec4( my_vals, &A_vals[16 * uniform_a_col_it + 4 * thread_id_mod_16_mod_4] ); } my_bmAx -= my_vals[0] * my_s_mem[4 * thread_id_mod_16_div_4 + 0]; my_bmAx -= my_vals[1] * my_s_mem[4 * thread_id_mod_16_div_4 + 1]; my_bmAx -= my_vals[2] * my_s_mem[4 * thread_id_mod_16_div_4 + 2]; my_bmAx -= my_vals[3] * my_s_mem[4 * thread_id_mod_16_div_4 + 3]; } } // Load Einvs. Matrix_type my_Einv = Einv[16 * a_row_id + thread_id_mod_16]; // Reduce bmAx terms. my_bmAx += utils::shfl_xor( my_bmAx, 4, warpSize, active_mask ); my_bmAx += utils::shfl_xor( my_bmAx, 8, warpSize, active_mask ); // Update the shared terms. if ( thread_id_mod_16_div_4 == 0 ) { my_s_mem[thread_id_mod_16_mod_4] = my_bmAx; } // Update the diagonal term. my_bmAx = my_Einv * my_s_mem[thread_id_mod_16_mod_4]; // Reduce bmAx terms. my_bmAx += utils::shfl_xor( my_bmAx, 1, warpSize, active_mask ); my_bmAx += utils::shfl_xor( my_bmAx, 2, warpSize, active_mask ); // Store the results. 
if ( thread_id_mod_16_mod_4 == 0 ) { delta[4 * a_row_id + thread_id_mod_16_div_4] = my_bmAx; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, int NUM_THREADS_PER_ROW, int CTA_SIZE, int WARP_SIZE, bool HAS_EXTERNAL_DIAG > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 12 ) #endif void DILU_forward_1x1_kernel( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, const int *__restrict A_diag, const Vector_type *x, const Vector_type *b, Vector_type *__restrict delta, const int *__restrict sorted_rows_by_color, const int num_rows_per_color, const int current_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const ColoringType boundary_coloring, const int boundary_index ) { // Number of items per CTA. const int NUM_ROWS_PER_CTA = CTA_SIZE / NUM_THREADS_PER_ROW; // Number of items per grid. const int NUM_ROWS_PER_GRID = gridDim.x * NUM_ROWS_PER_CTA; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_mod_NTPR = lane_id % NUM_THREADS_PER_ROW; // Determine which NxN block the threads work with. int a_row_it = blockIdx.x * NUM_ROWS_PER_CTA + (threadIdx.x / NUM_THREADS_PER_ROW); // Iterate over the rows of the matrix. One warp per row. for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ROWS_PER_GRID ) { int a_row_id = sorted_rows_by_color[a_row_it]; // Load one block of B. Vector_type my_bmAx = amgx::types::util<Vector_type>::get_zero(); if ( lane_id_mod_NTPR == 0 ) { my_bmAx = __cachingLoad(&b[a_row_id]); } // If it has an external diag. if ( HAS_EXTERNAL_DIAG && lane_id_mod_NTPR == 0 ) { my_bmAx -= A_vals[A_diag[a_row_id]] * x[a_row_id]; } // Don't do anything if X is zero. int a_col_it = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // If the diagonal is stored separately, we have a special treatment. //if( HAS_EXTERNAL_DIAG ) // ++a_col_end; // Each warp loads column indices of 32 nonzero blocks for ( a_col_it += lane_id_mod_NTPR ; utils::any( a_col_it < a_col_end ) ; a_col_it += NUM_THREADS_PER_ROW ) { // Get the ID of the column. int a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_id = A_cols[a_col_it]; } // Ignore the diagonal element since its color is smaller, and it has been accounted for above if (HAS_EXTERNAL_DIAG && a_col_id == a_row_id) { a_col_id = -1; } // Load x. Vector_type my_x(0); if ( a_col_id != -1 ) { my_x = __cachingLoad(&x[a_col_id]); } // Is it really a valid column (due to coloring). int valid = false; #ifdef AMGX_ILU_COLORING if ( a_col_id != -1 && current_color != 0 ) { if ( boundary_coloring == FIRST ) { valid = a_col_id >= boundary_index; } else { valid = a_col_id < boundary_index && row_colors[a_col_id] < current_color; } } #else if ( a_col_id != -1 && current_color != 0 ) { valid = row_colors[a_col_id] < current_color; } #endif // Load my x value. if ( valid ) { my_x += delta[a_col_id]; } // Load my item from A. Matrix_type my_val(0); if ( a_col_it < a_col_end ) { my_val = A_vals[a_col_it]; } // Update bmAx. my_bmAx -= my_val * my_x; } // Reduce bmAx terms. #pragma unroll for ( int mask = NUM_THREADS_PER_ROW / 2 ; mask > 0 ; mask >>= 1 ) { my_bmAx += utils::shfl_xor( my_bmAx, mask ); } // Store the results.
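// The XOR butterfly above runs for log2(NUM_THREADS_PER_ROW) steps (masks NTPR/2, ..., 2, 1);
// e.g. with 8 threads per row, lane k adds lane k^4, then k^2, then k^1. Afterwards every
// lane of the group holds the full dot product for the row, and the leader lane applies the
// scalar DILU pivot below: delta_i = Einv_i * ( b_i - sum_j a_ij * x_j - sum_{color(j) <
// color(i)} a_ij * delta_j ).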
if ( lane_id_mod_NTPR == 0 ) { delta[a_row_id] = Einv[a_row_id] * my_bmAx; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, typename WeightType, int N, int CTA_SIZE, int WARP_SIZE, bool ROW_MAJOR > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 12 ) #endif void DILU_backward_NxN_kernel( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, Vector_type *__restrict x, const WeightType weight, const int *__restrict sorted_rows_by_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const Vector_type *delta, Vector_type *__restrict Delta, const int num_rows_per_color, const int current_color, const ColoringType boundary_coloring, const int boundary_index ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items per warp. const int NUM_ITEMS_PER_WARP = WARP_SIZE / NxN; // Number of items computer per CTA. const int NUM_ITEMS_PER_CTA = NUM_ITEMS_PER_WARP * NUM_WARPS_PER_CTA; // Number of items per grid. const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_div_NxN = lane_id / NxN; const int lane_id_mod_NxN = lane_id % NxN; // Useful index to compute matrix products. const int lane_id_mod_NxN_div_N = lane_id_mod_NxN / N; const int lane_id_mod_NxN_mod_N = lane_id_mod_NxN % N; // We to get my data from when I use SHFL. const int shfl_offset = lane_id - lane_id_mod_NxN; // Shared memory needed to exchange X and delta. __shared__ volatile Vector_type s_mem[CTA_SIZE]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. volatile Vector_type *my_s_mem = &s_mem[threadIdx.x - lane_id_mod_NxN]; // Is the thread active? For example, for 5x5 only the first 25 threads are active per warp. // At compile time, the compiler will see is_active == true for 2x2 (since NxN & (NxN-1) evals // to false ; that's the common trick to determine if a number is a power of 2). int is_active = true; if ( NxN & (NxN - 1) ) { is_active = lane_id_div_NxN < NUM_ITEMS_PER_WARP; } // Determine which NxN block the threads work with. int a_row_it = num_rows_per_color; if ( is_active ) { a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id * NUM_ITEMS_PER_WARP + lane_id_div_NxN; } // Iterate over the rows of the matrix. One warp per row. for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ITEMS_PER_GRID ) { int a_row_id = sorted_rows_by_color[a_row_it]; // Load one block of B. Vector_type my_delta(0); // Don't do anything if X is zero. int a_col_begin = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // Each warp load column indices of 32 nonzero blocks for ( ; utils::any( a_col_begin < a_col_end ) ; a_col_begin += NxN ) { // Each thread loads a single element. If !is_active, a_col_end == 0. int a_col_it = a_col_begin + lane_id_mod_NxN; // Get the ID of the column. int a_col_tmp = -1, a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_tmp = A_cols[a_col_it]; } // Make sure the column is interesting. 
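// The backward sweep mirrors the forward one, but a column only contributes when its color is
// strictly greater than the current color, i.e. when its Delta correction has already been
// produced earlier in this backward pass over the colors.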
#ifdef AMGX_ILU_COLORING int valid = false; if ( a_col_tmp != -1 && current_color != 0 ) { if ( boundary_coloring == LAST ) { valid = a_col_tmp >= boundary_index; } else { valid = a_col_tmp < boundary_index && row_colors[a_col_tmp] > current_color; } } #else int valid = false; if ( a_col_tmp != -1 && row_colors[a_col_tmp] > current_color ) { valid = true; } #endif // Set the column id. if ( valid ) { a_col_id = a_col_tmp; } // Count the number of active columns. // int vote = utils::ballot(aColId != -1); // The number of iterations. // int nCols = max( __popc( vote & 0x0000ffff ), __popc( vote & 0xffff0000 ) ); // Loop over columns. We compute 8 columns per iteration. for ( int k = 0 ; k < NxN ; k += N ) { int my_k = k + lane_id_mod_NxN_div_N; // Load N blocks of X. int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k ); Vector_type my_x(0); if ( uniform_a_col_id != -1 ) { my_x = Delta[N * uniform_a_col_id + lane_id_mod_NxN_mod_N]; } my_s_mem[lane_id_mod_NxN] = my_x; // Load N blocks of A. #pragma unroll for ( int i = 0 ; i < N ; ++i ) { //if( uniform_a_col_id == -1 ) // break; int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1; if ( uniform_a_col_tmp < a_col_end ) { uniform_a_col_it = uniform_a_col_tmp; } Matrix_type my_val(0); if ( uniform_a_col_it != -1 ) { my_val = A_vals[NxN * uniform_a_col_it + lane_id_mod_NxN]; } if ( ROW_MAJOR ) { my_delta += my_val * my_s_mem[N * i + lane_id_mod_NxN_mod_N]; } else { my_delta += my_val * my_s_mem[N * i + lane_id_mod_NxN_div_N]; } } } // Loop over k } // Loop over aColIt // Load Einvs. Matrix_type my_Einv = Einv[NxN * a_row_id + lane_id_mod_NxN]; // Reduce bmAx terms. int is_leader = lane_id_mod_NxN_div_N == 0; if ( ROW_MAJOR ) { is_leader = lane_id_mod_NxN_mod_N == 0; } my_delta = reduce_distributed_vectors<N, ROW_MAJOR, WARP_SIZE>( my_delta, is_leader ); // Update the shared terms. if ( ROW_MAJOR ) { if ( lane_id_mod_NxN_mod_N == 0 ) { my_s_mem[lane_id_mod_NxN_div_N] = my_delta; } } else { if ( lane_id_mod_NxN_div_N == 0 ) { my_s_mem[lane_id_mod_NxN_mod_N] = my_delta; } } // Update the diagonal term. if ( ROW_MAJOR ) { my_delta = my_Einv * my_s_mem[lane_id_mod_NxN_mod_N]; } else { my_delta = my_Einv * my_s_mem[lane_id_mod_NxN_div_N]; } // Reduce bmAx terms. my_delta = reduce_distributed_vectors<N, ROW_MAJOR, WARP_SIZE>( my_delta, is_leader ); // Store the results. 
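// The store below finishes one block-row of the backward substitution:
//   Delta_i = delta_i - Einv_i * sum_{color(j) > color(i)} A_ij * Delta_j
//   x_i    += weight * Delta_i
// where weight is the relaxation_factor read in the solver constructor further down.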
if ( ROW_MAJOR ) { const int offset = N * a_row_id + lane_id_mod_NxN_div_N; Vector_type my_b(0), my_x(0); if ( lane_id_mod_NxN_mod_N == 0 ) { my_b = __cachingLoad(&delta[offset]); my_x = x [offset]; } my_delta = my_b - my_delta; if ( lane_id_mod_NxN_mod_N == 0 ) { x [offset] = my_x + weight * my_delta; Delta[offset] = my_delta; } } else { const int offset = N * a_row_id + lane_id_mod_NxN_mod_N; Vector_type my_b(0), my_x(0); if ( lane_id_mod_NxN_div_N == 0 ) { my_b = __cachingLoad(&delta[offset]); my_x = x [offset]; } my_delta = my_b - my_delta; if ( lane_id_mod_NxN_div_N == 0 ) { x [offset] = my_x + weight * my_delta; Delta[offset] = my_delta; } } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, typename WeightType, int N, int CTA_SIZE, int WARP_SIZE, bool ROW_MAJOR, int NUM_WARP_ITERS_PER_BLOCK > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 12 ) #endif void DILU_backward_NxN_kernel_large( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, Vector_type *__restrict x, const WeightType weight, const int *__restrict sorted_rows_by_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const Vector_type *delta, Vector_type *__restrict Delta, const int num_rows_per_color, const int current_color, const ColoringType boundary_coloring, const int boundary_index ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items computer per CTA. const int NUM_ITEMS_PER_CTA = NUM_WARPS_PER_CTA; // Number of items per grid. const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_div_N = lane_id / N; const int lane_id_mod_N = lane_id % N; // id of a lane inside the block const int blocks_per_warp = WARP_SIZE / N; // we process this cols per warp per row const int row_elems_per_warp = blocks_per_warp * N; // Shared to store t_delta __shared__ volatile Vector_type delta_s[CTA_SIZE * NUM_WARP_ITERS_PER_BLOCK]; volatile Vector_type *my_delta_s = &delta_s[warp_id * NUM_WARP_ITERS_PER_BLOCK * WARP_SIZE]; // Determine which NxN block the threads work with. int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id; // Iterate over the rows of the matrix. One warp per row. for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ITEMS_PER_GRID ) { int a_row_id = sorted_rows_by_color[a_row_it]; // Accumulator Vector_type my_delta(0); //Vector_type mAx[NUM_WARP_ITERS_PER_BLOCK]; #pragma unroll for (int i = 0; i < NUM_WARP_ITERS_PER_BLOCK; i++) { my_delta_s[WARP_SIZE * i + lane_id] = 0.0; } // Don't do anything if X is zero. int a_col_begin = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // Each warp load column indices of 32 nonzero blocks for ( ; utils::any( a_col_begin < a_col_end ) ; a_col_begin += WARP_SIZE ) { // Each thread loads a single element. If !is_active, a_col_end == 0. int a_col_it = a_col_begin + lane_id; // Get the ID of the column. 
int a_col_tmp = -1, a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_tmp = A_cols[a_col_it]; } // Make sure the column is interesting. #ifdef AMGX_ILU_COLORING int valid = false; if ( a_col_tmp != -1 && current_color != 0 ) { if ( boundary_coloring == LAST ) { valid = a_col_tmp >= boundary_index; } else { valid = a_col_tmp < boundary_index && row_colors[a_col_tmp] > current_color; } } #else int valid = false; if ( a_col_tmp != -1 && row_colors[a_col_tmp] > current_color ) { valid = true; } #endif // Set the column id. if ( valid ) { a_col_id = a_col_tmp; } // Loop over columns. We compute blocks_per_warp columns per iteration. for ( int k = 0 ; k < WARP_SIZE ; k += blocks_per_warp ) { // id of the processed block by this thread int my_k = k + lane_id_div_N; // Load N blocks of X (if valid) int uniform_a_col_id = utils::shfl( a_col_id, my_k ); Vector_type my_x(0); if ( uniform_a_col_id != -1 && lane_id < row_elems_per_warp) { my_x = Delta[N * uniform_a_col_id + lane_id_mod_N]; } // Load blocks of A. // for each block in a batch #pragma unroll for ( int i = 0 ; i < blocks_per_warp ; ++i ) { // k-th batch of blocks, i-th block. each thread process a column/row of a_it = uniform_a_col_tmp int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1; // check if we are going out of bounds/color if ( uniform_a_col_tmp < a_col_end ) { uniform_a_col_it = uniform_a_col_tmp; } // swipe with the whole warp if (uniform_a_col_it != -1) { int block_inside_id = lane_id; #pragma unroll for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++) { Matrix_type my_val(0); if ( uniform_a_col_it != -1 && block_inside_id < NxN) { my_val = A_vals[NxN * uniform_a_col_it + block_inside_id]; } my_delta_s[block_inside_id] -= my_val * utils::shfl(my_x, N * i + block_inside_id % N); //my_s_mem[N*i + block_inside_id % N]; // MOD IS SLOW! block_inside_id += WARP_SIZE; } } } } // Loop over k } // Loop over aColIt // Load Einvs. Vector_type my_Einv[NUM_WARP_ITERS_PER_BLOCK]; #pragma unroll for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++) { my_Einv[j] = 0.0; } #pragma unroll for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++) { if ((WARP_SIZE * j + lane_id) < NxN) { my_Einv[j] = Einv[NxN * a_row_id + WARP_SIZE * j + lane_id]; } } // Reduce bmAx terms. { #pragma unroll for ( int i = 0 ; i < N ; ++i ) { if ( lane_id < N ) { my_delta += my_delta_s[N * lane_id + i]; } } } // Update the diagonal term. if ( ROW_MAJOR ) { int block_inside_id = lane_id; #pragma unroll for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++) { my_delta_s[block_inside_id] = my_Einv[j] * utils::shfl(my_delta, block_inside_id % N); block_inside_id += WARP_SIZE; } } // Reduce bmAx terms. { my_delta = 0.0; #pragma unroll for ( int i = 0 ; i < N ; ++i ) { if ( lane_id < N ) { my_delta += my_delta_s[N * lane_id + i]; } } } // Store the results. 
if ( ROW_MAJOR ) { const int offset = N * a_row_id + lane_id; Vector_type my_b(0), my_x(0); if ( lane_id < N ) { my_b = __cachingLoad(&delta[offset]); my_x = x [offset]; } my_delta = my_b - my_delta; if ( lane_id < N ) { x [offset] = my_x + weight * my_delta; Delta[offset] = my_delta; } } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename IndexType, typename ValueTypeA, typename ValueTypeB, typename WeightType, int CTA_SIZE, bool ROW_MAJOR > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 16 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 16 ) #endif void DILU_backward_4x4_kernel( const IndexType *row_offsets, const IndexType *column_indices, const ValueTypeA *nonzero_values, ValueTypeB *x, const WeightType weight, const int *sorted_rows_by_color, const int *__restrict row_colors, const ValueTypeA *Einv, const ValueTypeB *delta, ValueTypeB *Delta, const int num_rows_per_color, const int current_color, const ColoringType boundary_coloring, const IndexType boundary_index) { const int nHalfWarps = CTA_SIZE / 16; // Number of half warps per CTA. const int laneId = utils::lane_id(); const int halfWarpId = threadIdx.x / 16; const int halfLaneId = threadIdx.x % 16; const int halfLaneId_div_4 = halfLaneId / 4; const int halfLaneId_mod_4 = halfLaneId % 4; const int upperHalf = 16 * (laneId / 16); // Shared memory needed to exchange X and delta. __shared__ volatile ValueTypeB s_mem[CTA_SIZE]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. volatile ValueTypeB *my_s_mem = &s_mem[16 * halfWarpId]; // Iterate over the rows of the matrix. One warp per two rows. for ( int aRowIt = blockIdx.x * nHalfWarps + halfWarpId ; aRowIt < num_rows_per_color ; aRowIt += gridDim.x * nHalfWarps ) { int aRowId = sorted_rows_by_color[aRowIt]; // Load one block of B. ValueTypeB my_delta(0); // The range of the rows. int aColBegin = row_offsets[aRowId ]; int aColEnd = row_offsets[aRowId + 1]; // Each warp load column indices of 16 nonzero blocks for ( ; aColBegin < aColEnd ; aColBegin += 16 ) { int aColIt = aColBegin + halfLaneId; // Get the ID of the column. int aColTmp = -1, aColId = -1; if ( aColIt < aColEnd ) { aColTmp = column_indices[aColIt]; } #ifdef AMGX_ILU_COLORING bool valid = (((aColTmp < boundary_index || boundary_coloring == SYNC_COLORS) && (row_colors[aColTmp] > current_color)) || (aColTmp >= boundary_index && boundary_coloring == LAST)); if ( aColTmp != -1 && valid ) { aColId = aColTmp; } #else if ( aColTmp != -1 && row_colors[aColTmp] > current_color ) { aColId = aColTmp; } #endif for ( int k = 0 ; k < 16 ; k += 4 ) { int my_k = k + halfLaneId_div_4; // Exchange column indices. int waColId = utils::shfl( aColId, upperHalf + my_k ); // Load 8 blocks of X if needed. ValueTypeB my_x(0); if ( waColId != -1 ) { my_x = Delta[4 * waColId + halfLaneId_mod_4]; } my_s_mem[halfLaneId] = my_x; // Load 8 blocks of A. #pragma unroll for ( int i = 0 ; i < 4 ; ++i ) { const int k_i = k + i; int w_aColTmp = aColBegin + k_i, w_aColIt = -1; if ( utils::shfl( aColId, upperHalf + k_i ) != -1 && w_aColTmp < aColEnd ) w_aColIt = w_aColTmp; ValueTypeA my_val(0); if ( w_aColIt != -1 ) { my_val = nonzero_values[16 * w_aColIt + halfLaneId]; } if ( ROW_MAJOR ) { my_delta += my_val * my_s_mem[4 * i + halfLaneId_mod_4]; } else { my_delta += my_val * my_s_mem[4 * i + halfLaneId_div_4]; } } } // Loop over k } // Loop over aColIt // Load EINV values. 
ValueTypeA my_Einv = Einv[16 * aRowId + halfLaneId]; // Reduce delta terms. if ( ROW_MAJOR ) { my_delta += utils::shfl_xor( my_delta, 1 ); my_delta += utils::shfl_xor( my_delta, 2 ); } else { my_delta += utils::shfl_xor( my_delta, 4 ); my_delta += utils::shfl_xor( my_delta, 8 ); } // Update the shared terms. if ( ROW_MAJOR ) { if ( halfLaneId_mod_4 == 0 ) { my_s_mem[halfLaneId_div_4] = my_delta; } } else { if ( halfLaneId_div_4 == 0 ) { my_s_mem[halfLaneId_mod_4] = my_delta; } } // Update the diagonal term. if ( ROW_MAJOR ) { my_delta = my_Einv * my_s_mem[halfLaneId_mod_4]; } else { my_delta = my_Einv * my_s_mem[halfLaneId_div_4]; } // Regroup results. if ( ROW_MAJOR ) { my_delta += utils::shfl_xor( my_delta, 1 ); my_delta += utils::shfl_xor( my_delta, 2 ); } else { my_delta += utils::shfl_xor( my_delta, 4 ); my_delta += utils::shfl_xor( my_delta, 8 ); } // Store the results. if ( ROW_MAJOR ) { int offset = 4 * aRowId + halfLaneId_div_4; ValueTypeB my_b(0), my_x(0); if ( halfLaneId_mod_4 == 0 ) { my_b = __cachingLoad(&delta[offset]); my_x = x[offset]; } my_delta = my_b - my_delta; if ( halfLaneId_mod_4 == 0 ) { x[offset] = my_x + weight * my_delta; Delta[offset] = my_delta; } } else { int offset = 4 * aRowId + halfLaneId_mod_4; ValueTypeB my_b(0), my_x(0); if ( halfLaneId_div_4 == 0 ) { my_b = __cachingLoad(&delta[offset]); my_x = x[offset]; } my_delta = my_b - my_delta; if ( halfLaneId_div_4 == 0 ) { x[offset] = my_x + weight * my_delta; Delta[offset] = my_delta; } } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, typename WeightType, int CTA_SIZE > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 16 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 16 ) #endif void DILU_backward_4x4_kernel_row_major_vec4( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, Vector_type *__restrict x, const WeightType weight, const int *__restrict sorted_rows_by_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const Vector_type *delta, Vector_type *__restrict Delta, const int num_rows_per_color, const int current_color, const ColoringType boundary_coloring, const int boundary_index ) { // Number of half warps per CTA. const int NUM_HALF_WARPS = CTA_SIZE / 16; // Coordinates of the thread. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Coordinates of the thread in the CTA. const int thread_id_div_16 = threadIdx.x / 16; const int thread_id_mod_16 = threadIdx.x % 16; // Useful constants. const int thread_id_mod_16_div_4 = thread_id_mod_16 / 4; const int thread_id_mod_16_mod_4 = thread_id_mod_16 % 4; const int shfl_offset = 16 * (lane_id / 16); // Shared memory needed to exchange X and delta. __shared__ volatile Vector_type s_mem[CTA_SIZE]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. volatile Vector_type *my_s_mem = &s_mem[16 * thread_id_div_16]; // The iterator over rows. int a_row_it = blockIdx.x * NUM_HALF_WARPS + thread_id_div_16; // Iterate over the rows of the matrix. One warp per row. for ( ; a_row_it < num_rows_per_color ; a_row_it += gridDim.x * NUM_HALF_WARPS ) { unsigned int active_mask = utils::activemask(); int a_row_id = sorted_rows_by_color[a_row_it]; // Load one block of B. Vector_type my_delta(0); // Don't do anything if X is zero. 
int a_col_begin = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // Each warp load column indices of 32 nonzero blocks for ( ; a_col_begin < a_col_end ; a_col_begin += 16 ) { unsigned int active_mask_inner = utils::activemask(); int a_col_it = a_col_begin + thread_id_mod_16; // Get the ID of the column. int a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_id = __cachingLoad(&A_cols[a_col_it]); } #ifdef AMGX_ILU_COLORING int valid = false; if ( a_col_id != -1 && current_color != 0 ) { if ( boundary_coloring == LAST ) { valid = a_col_id >= boundary_index; } else { valid = a_col_id < boundary_index && __cachingLoad(&row_colors[a_col_id]) > current_color; } } #else int valid = false; if ( a_col_id != -1 && row_colors[a_col_id] > current_color ) { valid = true; } #endif // Set the column id. if ( !valid ) { a_col_id = -1; } // Loop over columns. We compute 8 columns per iteration. #pragma unroll 2 for ( int k = 0 ; k < 16 ; k += 4 ) { int my_k = k + thread_id_mod_16_div_4; // Load 8 blocks of X. int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k, warpSize, active_mask_inner ); Vector_type my_Delta(0); if ( uniform_a_col_id != -1 ) { my_Delta = Delta[4 * uniform_a_col_id + thread_id_mod_16_mod_4]; } my_s_mem[thread_id_mod_16] = my_Delta; int uniform_a_col_it = a_col_begin + my_k; if ( uniform_a_col_id == -1 || uniform_a_col_it >= a_col_end ) { uniform_a_col_it = -1; } Matrix_type my_vals[4] = { Matrix_type(0) }; if ( uniform_a_col_it != -1 ) { utils::load_vec4( my_vals, &A_vals[16 * uniform_a_col_it + 4 * thread_id_mod_16_mod_4] ); } my_delta += my_vals[0] * my_s_mem[4 * thread_id_mod_16_div_4 + 0]; my_delta += my_vals[1] * my_s_mem[4 * thread_id_mod_16_div_4 + 1]; my_delta += my_vals[2] * my_s_mem[4 * thread_id_mod_16_div_4 + 2]; my_delta += my_vals[3] * my_s_mem[4 * thread_id_mod_16_div_4 + 3]; } // Loop over k } // Loop over aColIt // Load EINV values. Matrix_type my_Einv = Einv[16 * a_row_id + thread_id_mod_16]; // Reduce delta terms. my_delta += utils::shfl_xor( my_delta, 4, warpSize, active_mask ); my_delta += utils::shfl_xor( my_delta, 8, warpSize, active_mask ); // Update the shared terms. if ( thread_id_mod_16_div_4 == 0 ) { my_s_mem[thread_id_mod_16_mod_4] = my_delta; } // Update the diagonal term. my_delta = my_Einv * my_s_mem[thread_id_mod_16_mod_4]; // Regroup results. my_delta += utils::shfl_xor( my_delta, 1, warpSize, active_mask ); my_delta += utils::shfl_xor( my_delta, 2, warpSize, active_mask ); // Store the results. 
int offset = 4 * a_row_id + thread_id_mod_16_div_4; Vector_type my_b(0), my_x(0); if ( thread_id_mod_16_mod_4 == 0 ) { my_b = __cachingLoad(&delta[offset]); my_x = x [offset]; } my_delta = my_b - my_delta; if ( thread_id_mod_16_mod_4 == 0 ) { x [offset] = my_x + weight * my_delta; Delta[offset] = my_delta; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, typename WeightType, int NUM_THREADS_PER_ROW, int CTA_SIZE, int WARP_SIZE > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 12 ) #endif void DILU_backward_1x1_kernel( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, Vector_type *__restrict x, const WeightType weight, const int *__restrict sorted_rows_by_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const Vector_type *delta, Vector_type *__restrict Delta, const int num_rows_per_color, const int current_color, const ColoringType boundary_coloring, const int boundary_index ) { // Number of items per CTA. const int NUM_ROWS_PER_CTA = CTA_SIZE / NUM_THREADS_PER_ROW; // Number of items per grid. const int NUM_ROWS_PER_GRID = gridDim.x * NUM_ROWS_PER_CTA; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_mod_NTPR = lane_id % NUM_THREADS_PER_ROW; // Determine which NxN block the threads work with. int a_row_it = blockIdx.x * NUM_ROWS_PER_CTA + (threadIdx.x / NUM_THREADS_PER_ROW); // Iterate over the rows of the matrix. One warp per row. for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ROWS_PER_GRID ) { int a_row_id = sorted_rows_by_color[a_row_it]; // Load one block of B. Vector_type my_delta(0); // Don't do anything if X is zero. int a_col_it = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // Each warp load column indices of 32 nonzero blocks for ( a_col_it += lane_id_mod_NTPR ; utils::any( a_col_it < a_col_end ) ; a_col_it += NUM_THREADS_PER_ROW ) { // Get the ID of the column. int a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_id = A_cols[a_col_it]; } // Is it really a valid column (due to coloring). int valid = false; #ifdef AMGX_ILU_COLORING if ( a_col_id != -1 && current_color != 0 ) { //if( boundary_coloring == LAST ) // valid = a_col_id >= boundary_index; //else // valid = a_col_id < boundary_index && row_colors[a_col_id] > current_color; valid = (((a_col_id < boundary_index || boundary_coloring == SYNC_COLORS) && (row_colors[a_col_id] > current_color)) || (a_col_id >= boundary_index && boundary_coloring == LAST)); } #else //if( a_col_id != -1 && current_color != 0 ) if ( a_col_id != -1 ) { valid = row_colors[a_col_id] > current_color; } #endif // Load my Delta value. Vector_type my_Delta(0); if ( valid ) { my_Delta = Delta[a_col_id]; } // Load my item from A. Matrix_type my_val(0); if ( valid ) { my_val = A_vals[a_col_it]; } // Update bmAx. my_delta += my_val * my_Delta; } // Reduce bmAx terms. #pragma unroll for ( int mask = NUM_THREADS_PER_ROW / 2 ; mask > 0 ; mask >>= 1 ) { my_delta += utils::shfl_xor( my_delta, mask ); } // Store the results. 
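// Scalar (1x1) version of the same update: Delta_i = delta_i - Einv_i * sum_{color(j) >
// color(i)} a_ij * Delta_j, followed by x_i += weight * Delta_i. Here Einv_i is a plain
// scalar, presumably the reciprocal of the DILU pivot of row i.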
if ( lane_id_mod_NTPR == 0 ) { Vector_type my_x = __cachingLoad(&delta[a_row_id]) - Einv[a_row_id] * my_delta; x [a_row_id] += weight * my_x; Delta[a_row_id] = my_x; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, typename WeightType, int N, int CTA_SIZE > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 16 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 16 ) #endif void DILU_backward_NxN_kernel_skip( Vector_type *__restrict x, const WeightType weight, const int *__restrict sorted_rows_by_color, const Vector_type *delta, Vector_type *__restrict Delta, const int num_rows_per_color ) { const int NUM_ITEMS_PER_CTA = CTA_SIZE / N; // Number of updated block items per CTA const int ITEM_ID = threadIdx.x / N; const int ITEM_BLOCK_OFFSET = threadIdx.x % N; const int is_active = ITEM_ID < NUM_ITEMS_PER_CTA; // The first row. int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + ITEM_ID; // Iterate over the rows of the matrix. One warp per two rows. for ( ; a_row_it < num_rows_per_color ; a_row_it += gridDim.x * NUM_ITEMS_PER_CTA ) { if ( is_active ) { int a_row_id = sorted_rows_by_color[a_row_it]; const int idx = N * a_row_id + ITEM_BLOCK_OFFSET; Vector_type my_b = __cachingLoad(&delta[idx]); Vector_type my_x = x[idx]; x[idx] = my_x + weight * my_b; Delta[idx] = my_b; } } } // ---------- // Methods // ---------- template< typename Matrix_type, typename Vector_type, int N > void DILU_forward_NxN_dispatch( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, const int *__restrict A_diag, const Vector_type *x, const Vector_type *b, Vector_type *__restrict delta, const int *__restrict sorted_rows_by_color, const int num_rows_per_color, const int current_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const ColoringType boundary_coloring, const int boundary_index, const int row_major, const int has_external_diag ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items per warp. const int NUM_ROWS_PER_WARP = ::max(WARP_SIZE / NxN, 1); // Number of items computer per CTA. const int NUM_ROWS_PER_CTA = NUM_ROWS_PER_WARP * NUM_WARPS_PER_CTA; // The number of threads to launch. const int grid_size = ::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA ); // Branch to the correct kernel call. int code = 2 * (row_major ? 1 : 0) + (has_external_diag ? 1 : 0); switch ( code ) { case 0: // Column-major, no external diagonal. hipLaunchKernelGGL(( DILU_forward_NxN_kernel<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, false, false>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0, A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index ); break; case 1: // Column-major, external diagonal. hipLaunchKernelGGL(( DILU_forward_NxN_kernel<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, false, true>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0, A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index ); break; case 2: // Row-major, no external diagonal. 
hipLaunchKernelGGL(( DILU_forward_NxN_kernel<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, true, false>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0, A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index ); break; case 3: // Row-major, external diagonal. hipLaunchKernelGGL(( DILU_forward_NxN_kernel<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, true, true>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0, A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index ); break; default: FatalError( "Internal error", AMGX_ERR_NOT_IMPLEMENTED ); } cudaCheckError(); } template< typename Matrix_type, typename Vector_type, int N > void DILU_forward_NxN_dispatch_large( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, const int *__restrict A_diag, const Vector_type *x, const Vector_type *b, Vector_type *__restrict delta, const int *__restrict sorted_rows_by_color, const int num_rows_per_color, const int current_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const ColoringType boundary_coloring, const int boundary_index, const int row_major, const int has_external_diag ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items computer per CTA. const int NUM_ROWS_PER_CTA = NUM_WARPS_PER_CTA; // Each warp is going to sweep through bloock this many times const int NUM_WARP_ITERS_PER_BLOCK = ((NxN - 1) / WARP_SIZE) + 1; // The number of threads to launch. const int grid_size = ::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA ); // Branch to the correct kernel call. if (!row_major) { FatalError("COL MAJOR is not supported for this large block_size", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } switch ( has_external_diag ) { case 0: // Row-major, no external diagonal. hipLaunchKernelGGL(( DILU_forward_NxN_kernel_large<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, false, NUM_WARP_ITERS_PER_BLOCK>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0, A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index ); break; case 1: // Row-major, external diagonal. 
hipLaunchKernelGGL(( DILU_forward_NxN_kernel_large<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, true, NUM_WARP_ITERS_PER_BLOCK>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0, A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index ); break; default: FatalError( "Internal error", AMGX_ERR_NOT_IMPLEMENTED ); } cudaCheckError(); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type > void DILU_forward_NxN_dispatch( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, const int *__restrict A_diag, const Vector_type *x, const Vector_type *b, Vector_type *__restrict delta, const int *__restrict sorted_rows_by_color, const int num_rows_per_color, const int current_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const ColoringType boundary_coloring, const int boundary_index, const int block_size, const int row_major, const int has_external_diag ) { switch ( block_size ) { case 1: { const int NUM_THREADS_PER_ROW = 8; // Number of items computer per CTA. const int NUM_ROWS_PER_CTA = CTA_SIZE / NUM_THREADS_PER_ROW; // The number of threads to launch. const int grid_size = ::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA ); if ( has_external_diag ) { hipLaunchKernelGGL(( DILU_forward_1x1_kernel<Matrix_type, Vector_type, NUM_THREADS_PER_ROW, CTA_SIZE, WARP_SIZE, true>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0, A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index ); } else { hipLaunchKernelGGL(( DILU_forward_1x1_kernel<Matrix_type, Vector_type, NUM_THREADS_PER_ROW, CTA_SIZE, WARP_SIZE, false>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0, A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index ); } cudaCheckError(); } break; case 2: DILU_forward_NxN_dispatch<Matrix_type, Vector_type, 2>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index, row_major, has_external_diag ); break; case 3: DILU_forward_NxN_dispatch<Matrix_type, Vector_type, 3>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index, row_major, has_external_diag ); break; case 4: if ( row_major ) { // Number of items computer per CTA. const int NUM_ROWS_PER_CTA = CTA_SIZE / 16; // The number of threads to launch. 
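// Ceil-divide the rows of this color by the rows handled per CTA and cap the grid at 4096
// blocks; leftover rows are still covered because the kernels walk rows with a grid-stride
// loop. For example (hypothetical numbers), 100000 rows at 16 rows per CTA would request
// min(4096, 6250) = 4096 CTAs, with each CTA slot then looping over one or two rows.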
const int grid_size = ::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA ); if ( has_external_diag ) //DILU_forward_4x4_kernel<Matrix_type, Vector_type, CTA_SIZE, WARP_SIZE, true, true><<<grid_size, CTA_SIZE>>>( hipLaunchKernelGGL(( DILU_forward_4x4_kernel_row_major_vec4<Matrix_type, Vector_type, CTA_SIZE, true>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0, A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index ); else hipLaunchKernelGGL(( DILU_forward_4x4_kernel_row_major_vec4<Matrix_type, Vector_type, CTA_SIZE, false>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0, A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index ); cudaCheckError(); } else DILU_forward_NxN_dispatch<Matrix_type, Vector_type, 4>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index, row_major, has_external_diag ); break; case 5: DILU_forward_NxN_dispatch<Matrix_type, Vector_type, 5>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index, row_major, has_external_diag ); break; case 8: DILU_forward_NxN_dispatch_large<Matrix_type, Vector_type, 8>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index, row_major, has_external_diag ); break; case 10: DILU_forward_NxN_dispatch_large<Matrix_type, Vector_type, 10>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index, row_major, has_external_diag ); break; default: FatalError( "Internal error", AMGX_ERR_NOT_IMPLEMENTED ); } cudaCheckError(); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, typename WeightType, int N > void DILU_backward_NxN_dispatch( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, Vector_type *__restrict x, const WeightType weight, const int *__restrict sorted_rows_by_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const Vector_type *delta, Vector_type *__restrict Delta, const int num_rows_per_color, const int current_color, const ColoringType boundary_coloring, const int boundary_index, const int row_major ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items per warp. const int NUM_ROWS_PER_WARP = ::max(WARP_SIZE / NxN, 1); // Number of items computer per CTA. const int NUM_ROWS_PER_CTA = NUM_ROWS_PER_WARP * NUM_WARPS_PER_CTA; // The number of threads to launch. const int grid_size = ::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA ); // Branch to the correct kernel call. 
if ( row_major ) { hipLaunchKernelGGL(( DILU_backward_NxN_kernel<Matrix_type, Vector_type, WeightType, N, CTA_SIZE, WARP_SIZE, true>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0, A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index ); } else { hipLaunchKernelGGL(( DILU_backward_NxN_kernel<Matrix_type, Vector_type, WeightType, N, CTA_SIZE, WARP_SIZE, false>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0, A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index ); } cudaCheckError(); } template< typename Matrix_type, typename Vector_type, typename WeightType, int N, int NUM_WARP_ITERS_PER_BLOCK > void DILU_backward_NxN_dispatch_large( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, Vector_type *__restrict x, const WeightType weight, const int *__restrict sorted_rows_by_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const Vector_type *delta, Vector_type *__restrict Delta, const int num_rows_per_color, const int current_color, const ColoringType boundary_coloring, const int boundary_index, const int row_major ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Number of items computer per CTA. const int NUM_ROWS_PER_CTA = NUM_WARPS_PER_CTA; // The number of threads to launch. const int grid_size = ::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA ); // Branch to the correct kernel call. if ( row_major ) { hipLaunchKernelGGL(( DILU_backward_NxN_kernel_large<Matrix_type, Vector_type, WeightType, N, CTA_SIZE, WARP_SIZE, true, NUM_WARP_ITERS_PER_BLOCK>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0, A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index ); } else { FatalError("col major is not supported for this blocksize in multicolor DILU solver", AMGX_ERR_NOT_IMPLEMENTED); } cudaCheckError(); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, typename WeightType> void DILU_backward_NxN_dispatch( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, Vector_type *__restrict x, const WeightType weight, const int *__restrict sorted_rows_by_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const Vector_type *delta, Vector_type *__restrict Delta, const int num_rows_per_color, const int current_color, const ColoringType boundary_coloring, const int boundary_index, const int block_size, const int row_major ) { switch ( block_size ) { case 1: { const int NUM_THREADS_PER_ROW = 8; // Number of items computer per CTA. const int NUM_ROWS_PER_CTA = CTA_SIZE / NUM_THREADS_PER_ROW; // The number of threads to launch. 
const int grid_size = ::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA ); hipLaunchKernelGGL(( DILU_backward_1x1_kernel<Matrix_type, Vector_type, WeightType, NUM_THREADS_PER_ROW, CTA_SIZE, WARP_SIZE>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0, A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index ); cudaCheckError(); } break; case 2: DILU_backward_NxN_dispatch<Matrix_type, Vector_type, WeightType, 2>( A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index, row_major ); break; case 3: DILU_backward_NxN_dispatch<Matrix_type, Vector_type, WeightType, 3>( A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index, row_major ); break; case 4: //if( false ) if ( row_major ) { // Number of items computer per CTA. const int NUM_ROWS_PER_CTA = CTA_SIZE / 16; // The number of threads to launch. const int grid_size = ::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA ); //DILU_backward_NxN_kernel<Matrix_type, Vector_type, 4, CTA_SIZE, WARP_SIZE, true><<<grid_size, CTA_SIZE>>>( hipLaunchKernelGGL(( DILU_backward_4x4_kernel_row_major_vec4<Matrix_type, Vector_type, WeightType, CTA_SIZE>) , dim3(grid_size), dim3(CTA_SIZE), 0, 0, A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index ); cudaCheckError(); } else DILU_backward_NxN_dispatch<Matrix_type, Vector_type, WeightType, 4>( A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index, row_major ); break; case 5: DILU_backward_NxN_dispatch<Matrix_type, Vector_type, WeightType, 5>( A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index, row_major ); break; case 8: DILU_backward_NxN_dispatch_large<Matrix_type, Vector_type, WeightType, 8, 2>( A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index, row_major ); break; case 10: DILU_backward_NxN_dispatch_large<Matrix_type, Vector_type, WeightType, 10, 4>( A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index, row_major ); break; default: FatalError( "Internal error", AMGX_ERR_NOT_IMPLEMENTED ); } cudaCheckError(); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< class T_Config > MulticolorDILUSolver_Base<T_Config>::MulticolorDILUSolver_Base( AMG_Config &cfg, const std::string &cfg_scope, ThreadManager *tmng ) : Solver<T_Config>( cfg, cfg_scope, tmng ) { this->weight = cfg.AMG_Config::template getParameter<double>("relaxation_factor", cfg_scope); this->m_reorder_cols_by_color_desired = (cfg.AMG_Config::template getParameter<int>("reorder_cols_by_color", cfg_scope) != 0); this->m_insert_diagonal_desired = (cfg.AMG_Config::template getParameter<int>("insert_diag_while_reordering", cfg_scope) != 0); this->m_boundary_coloring = cfg.AMG_Config::template 
getParameter<ColoringType>("boundary_coloring", cfg_scope);
    this->always_obey_coloring = 0;

    if (weight == 0)
    {
        weight = 1.;
        amgx_printf("Warning, setting weight to 1 instead of estimating largest_eigen_value in Multicolor DILU smoother\n");
    }
}

// Destructor
template<class T_Config>
MulticolorDILUSolver_Base<T_Config>::~MulticolorDILUSolver_Base()
{
    Einv.clear();
    Einv.shrink_to_fit();
}

template<class T_Config>
void MulticolorDILUSolver_Base<T_Config>::computeEinv(Matrix<T_Config> &A)
{
    ViewType oldView = A.currentView();
    A.setView(this->m_explicit_A->getViewExterior());

    if ( A.get_block_dimx() != A.get_block_dimy() )
    {
        FatalError("DILU implemented only for squared blocks", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
    }

    if ( A.get_block_dimx() > 32) // actually much less than 32 due to register file limitations, but...
    {
        FatalError("DILU implemented only for squared blocks of size <= 32", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
    }

    computeEinv_NxN( A, A.get_block_dimx() );
    A.setView(oldView);
}

template< class T_Config >
void MulticolorDILUSolver_Base<T_Config>::printSolverParameters() const
{
    std::cout << "relaxation_factor = " << this->weight << std::endl;
}

// Solver setup
template< class T_Config >
void MulticolorDILUSolver_Base<T_Config>::solver_setup(bool reuse_matrix_structure)
{
    m_explicit_A = dynamic_cast<Matrix<T_Config>*>(this->m_A);

    if (!this->m_explicit_A)
    {
        FatalError("MulticolorDILUSolver only works with explicit matrices", AMGX_ERR_INTERNAL);
    }

    int N = this->m_explicit_A->get_num_cols() * this->m_explicit_A->get_block_dimy();

    if (this->m_explicit_A->getColoringLevel() < 1)
    {
        FatalError("Matrix must be colored to use multicolor dilu solver. Try setting: coloring_level=1 in the configuration file", AMGX_ERR_NOT_IMPLEMENTED);
    }

    m_delta.resize(N);
    m_Delta.resize(N);
    m_delta.set_block_dimy(this->m_explicit_A->get_block_dimy());
    m_Delta.set_block_dimy(this->m_explicit_A->get_block_dimy());
    m_delta.set_block_dimx(1);
    m_Delta.set_block_dimx(1);

    if ( this->m_explicit_A->getBlockFormat() != ROW_MAJOR )
    {
        FatalError("Multicolor DILU solver only supports row major format for the blocks", AMGX_ERR_CONFIGURATION);
    }

    computeEinv( *this->m_explicit_A );
}

//
template< class T_Config >
void MulticolorDILUSolver_Base<T_Config>::solve_init( VVector &b, VVector &x, bool xIsZero )
{
}

// Solve one iteration
template<class T_Config>
bool MulticolorDILUSolver_Base<T_Config>::solve_iteration( VVector &b, VVector &x, bool xIsZero )
{
    if ( this->m_explicit_A->get_block_dimx() != this->m_explicit_A->get_block_dimy() )
    {
        FatalError("DILU implemented only for squared blocks", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE);
    }

    if ( this->m_explicit_A->get_block_dimx() > 32) // actually much less than 32 due to register file limitations, but...
{ FatalError("DILU implemented only for squared blocks of size <= 32", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } if (xIsZero) { x.dirtybit = 0; } if (!this->m_explicit_A->is_matrix_singleGPU()) { this->m_explicit_A->manager->exchange_halo_async(x, x.tag); this->m_explicit_A->manager->exchange_halo_async(b, b.tag); } if (this->m_explicit_A->getViewExterior() == this->m_explicit_A->getViewInterior()) { if (!this->m_explicit_A->is_matrix_singleGPU()) { this->m_explicit_A->manager->exchange_halo_wait(x, x.tag); this->m_explicit_A->manager->exchange_halo_wait(b, b.tag); } } ViewType oldView = this->m_explicit_A->currentView(); ViewType flags; bool latencyHiding = true; if (this->m_explicit_A->is_matrix_singleGPU() || (x.dirtybit == 0 && b.dirtybit == 0)) { latencyHiding = false; this->m_explicit_A->setViewExterior(); flags = (ViewType)(this->m_explicit_A->getViewExterior()); } else { flags = (ViewType)(this->m_explicit_A->getViewInterior()); this->m_explicit_A->setViewInterior(); } if (xIsZero) { thrust::fill(x.begin(), x.end(), types::util<ValueTypeB>::get_zero()); cudaCheckError(); } this->smooth_NxN(*this->m_explicit_A, b, x, flags); if (latencyHiding) { if (!this->m_explicit_A->is_matrix_singleGPU()) { this->m_explicit_A->manager->exchange_halo_wait(x, x.tag); this->m_explicit_A->manager->exchange_halo_wait(b, b.tag); } this->m_explicit_A->setViewExterior(); flags = (ViewType)(~(this->m_explicit_A->getViewInterior()) & this->m_explicit_A->getViewExterior()); if (flags != 0) { this->smooth_NxN(*this->m_explicit_A, b, x, flags); } } x.dirtybit = 1; this->m_explicit_A->setView(oldView); return (this->converged(b, x)); } template<class T_Config> void MulticolorDILUSolver_Base<T_Config>::solve_finalize( VVector &b, VVector &x ) {} /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void MulticolorDILUSolver<TemplateConfig<AMGX_host, V, M, I> >::computeEinv_NxN(const Matrix_h &A, const int bsize) { FatalError("Multicolor DILU smoother not implemented for host format, exiting", AMGX_ERR_NOT_SUPPORTED_TARGET); } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void MulticolorDILUSolver<TemplateConfig<AMGX_host, V, M, I> >::smooth_NxN( const Matrix_h &A, VVector &b, VVector &x, ViewType separation_flag ) { FatalError("Haven't implemented Multicolor DILU smoother for host format", AMGX_ERR_NOT_SUPPORTED_TARGET); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > MulticolorDILUSolver<TemplateConfig<AMGX_device, V, M, I> >::MulticolorDILUSolver( AMG_Config &cfg, const std::string &cfg_scope, ThreadManager *tmng ) : MulticolorDILUSolver_Base<TemplateConfig<AMGX_device, V, M, I> >( cfg, cfg_scope, tmng ) { int device = 0; hipGetDevice( &device ); hipDeviceProp_t properties; hipGetDeviceProperties( &properties, device ); m_is_kepler = properties.major >= 3; } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void MulticolorDILUSolver<TemplateConfig<AMGX_device, V, M, I> >::computeEinv_NxN(const Matrix_d &A, const int bsize) { const int bsize_sq = bsize * bsize; this->Einv.resize( A.get_num_cols()*bsize_sq, 0.0 ); // sol::prof_start(); for ( int i = 0, num_colors = A.getMatrixColoring().getNumColors() ; i < num_colors ; ++i ) { const int color_offset = 
A.getMatrixColoring().getOffsetsRowsPerColor()[i]; const int num_rows_per_color = A.getMatrixColoring().getOffsetsRowsPerColor()[i + 1] - color_offset; if ( num_rows_per_color == 0 ) { continue; } const int CTA_SIZE = 128; const int NUM_WARPS_PER_CTA = CTA_SIZE / 32; int ROWS_PER_WARP = 1; if ( bsize_sq > 1 && bsize_sq < 6 ) { ROWS_PER_WARP = 32 / bsize_sq; } const int ROWS_PER_CTA = ROWS_PER_WARP * NUM_WARPS_PER_CTA; const int GRID_SIZE = ::min( 4096, (num_rows_per_color + ROWS_PER_CTA - 1) / ROWS_PER_CTA ); hipStream_t stream = thrust::global_thread_handle::get_stream(); switch ( bsize ) { case 1: hipLaunchKernelGGL(( DILU_setup_1x1_kernel<ValueTypeA, ValueTypeB, 8, CTA_SIZE, 32>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, stream, A.row_offsets.raw(), A.col_indices.raw(), A.diag.raw(), A.values.raw(), this->Einv.raw(), A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, A.getMatrixColoring().getRowColors().raw(), num_rows_per_color, i ); break; case 2: hipLaunchKernelGGL(( DILU_setup_NxN_kernel<ValueTypeA, ValueTypeB, 2, CTA_SIZE, 32>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, stream, A.row_offsets.raw(), A.col_indices.raw(), A.diag.raw(), A.values.raw(), this->Einv.raw(), A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, A.getMatrixColoring().getRowColors().raw(), num_rows_per_color, i ); break; case 3: hipLaunchKernelGGL(( DILU_setup_NxN_kernel<ValueTypeA, ValueTypeB, 3, CTA_SIZE, 32>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, stream, A.row_offsets.raw(), A.col_indices.raw(), A.diag.raw(), A.values.raw(), this->Einv.raw(), A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, A.getMatrixColoring().getRowColors().raw(), num_rows_per_color, i ); break; case 4: hipLaunchKernelGGL(( DILU_setup_NxN_kernel<ValueTypeA, ValueTypeB, 4, CTA_SIZE, 32>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, stream, A.row_offsets.raw(), A.col_indices.raw(), A.diag.raw(), A.values.raw(), this->Einv.raw(), A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, A.getMatrixColoring().getRowColors().raw(), num_rows_per_color, i ); break; case 5: hipLaunchKernelGGL(( DILU_setup_NxN_kernel<ValueTypeA, ValueTypeB, 5, CTA_SIZE, 32>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, stream, A.row_offsets.raw(), A.col_indices.raw(), A.diag.raw(), A.values.raw(), this->Einv.raw(), A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, A.getMatrixColoring().getRowColors().raw(), num_rows_per_color, i ); break; case 8: hipLaunchKernelGGL(( DILU_setup_NxN_kernel_large<ValueTypeA, ValueTypeB, 8, CTA_SIZE, 32, 2>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, stream, A.row_offsets.raw(), A.col_indices.raw(), A.diag.raw(), A.values.raw(), this->Einv.raw(), A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, A.getMatrixColoring().getRowColors().raw(), num_rows_per_color, i ); break; case 10: hipLaunchKernelGGL(( DILU_setup_NxN_kernel_large<ValueTypeA, ValueTypeB, 10, CTA_SIZE, 32, 4>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, stream, A.row_offsets.raw(), A.col_indices.raw(), A.diag.raw(), A.values.raw(), this->Einv.raw(), A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, A.getMatrixColoring().getRowColors().raw(), num_rows_per_color, i ); break; default: FatalError( "Multicolor-DILU Setup: block size was not enabled in the code, contact AMGX developers.", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE ); } cudaCheckError(); } } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void MulticolorDILUSolver<TemplateConfig<AMGX_device, V, M, I> >::smooth_NxN( const 
Matrix_d &A, VVector &b, VVector &x, ViewType separation_flag ) { AMGX_CPU_PROFILER( "MulticolorDILUSolver::smooth_NxN " ); int offset = 0, separation = 0; A.getOffsetAndSizeForView(INTERIOR, &offset, &separation); // Only have separation=num interior rows if we are only working on the interior // and the boundary coloring is FIRST or LAST, otherwise set separation offset to // total number of rows if ( separation_flag != this->m_explicit_A->getViewInterior() || this->m_explicit_A->getViewExterior() == this->m_explicit_A->getViewInterior() || this->m_boundary_coloring != LAST && this->m_boundary_coloring != FIRST ) { separation = A.row_offsets.size() - 1; } else { amgx_printf("separation active\n"); } // -------------------- // Forward Sweep // -------------------- const int num_colors = this->m_explicit_A->getMatrixColoring().getNumColors(); for ( int i = 0 ; i < num_colors ; ++i ) { int color_offset(0); if ( separation_flag & INTERIOR ) { color_offset = A.getMatrixColoring().getOffsetsRowsPerColor()[i]; } else { color_offset = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i]; } int num_rows_per_color(0); if ( separation_flag == this->m_explicit_A->getViewInterior() ) { num_rows_per_color = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i]; } else { num_rows_per_color = A.getMatrixColoring().getOffsetsRowsPerColor()[i + 1]; } num_rows_per_color -= color_offset; if ( num_rows_per_color == 0 ) { continue; } int boundary_index = separation; if ( this->m_boundary_coloring == SYNC_COLORS ) { boundary_index = A.get_num_rows(); } DILU_forward_NxN_dispatch( A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), A.diag.raw(), x.raw(), b.raw(), this->m_delta.raw(), A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, num_rows_per_color, i, A.getMatrixColoring().getRowColors().raw(), this->Einv.raw(), this->m_boundary_coloring, boundary_index, A.get_block_dimy(), A.getBlockFormat() == ROW_MAJOR, A.hasProps(DIAG) ); cudaCheckError(); } // -------------------- // Backward Sweep // -------------------- for ( int i = num_colors - 1 ; i >= 0 ; --i ) { int color_offset(0); if ( separation_flag & INTERIOR ) { color_offset = A.getMatrixColoring().getOffsetsRowsPerColor()[i]; } else { color_offset = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i]; } int num_rows_per_color(0); if ( separation_flag == this->m_explicit_A->getViewInterior() ) { num_rows_per_color = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i]; } else { num_rows_per_color = A.getMatrixColoring().getOffsetsRowsPerColor()[i + 1]; } num_rows_per_color -= color_offset; if ( num_rows_per_color == 0 ) { continue; } if ( i == num_colors - 1 ) { const int NUM_ROWS_PER_CTA = CTA_SIZE / A.get_block_dimy(); const int GRID_SIZE = ::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA ); switch ( A.get_block_dimy() ) { case 1: hipLaunchKernelGGL(( DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 1, CTA_SIZE>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0, x.raw(), this->weight, A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, this->m_delta.raw(), this->m_Delta.raw(), num_rows_per_color ); break; case 2: hipLaunchKernelGGL(( DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 2, CTA_SIZE>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0, x.raw(), this->weight, A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, this->m_delta.raw(), this->m_Delta.raw(), num_rows_per_color ); break; case 3: hipLaunchKernelGGL(( 
DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 3, CTA_SIZE>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0, x.raw(), this->weight, A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, this->m_delta.raw(), this->m_Delta.raw(), num_rows_per_color ); break; case 4: hipLaunchKernelGGL(( DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 4, CTA_SIZE>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0, x.raw(), this->weight, A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, this->m_delta.raw(), this->m_Delta.raw(), num_rows_per_color ); break; case 5: hipLaunchKernelGGL(( DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 5, CTA_SIZE>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0, x.raw(), this->weight, A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, this->m_delta.raw(), this->m_Delta.raw(), num_rows_per_color ); break; case 8: hipLaunchKernelGGL(( DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 8, CTA_SIZE>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0, x.raw(), this->weight, A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, this->m_delta.raw(), this->m_Delta.raw(), num_rows_per_color ); break; case 10: hipLaunchKernelGGL(( DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 10, CTA_SIZE>) , dim3(GRID_SIZE), dim3(CTA_SIZE), 0, 0, x.raw(), this->weight, A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, this->m_delta.raw(), this->m_Delta.raw(), num_rows_per_color ); break; } cudaCheckError(); } else { DILU_backward_NxN_dispatch( A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), x.raw(), this->weight, A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, A.getMatrixColoring().getRowColors().raw(), this->Einv.raw(), this->m_delta.raw(), this->m_Delta.raw(), num_rows_per_color, i, this->m_boundary_coloring, separation, A.get_block_dimy(), A.getBlockFormat() == ROW_MAJOR ); cudaCheckError(); } } } /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class MulticolorDILUSolver_Base<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) // AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class MulticolorDILUSolver<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) // AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } } // namespace amgx
d6fbf02813e99a6d9f389c987795167fc897f72d.cu
/* Copyright (c) 2011-2017, NVIDIA CORPORATION. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *  * Neither the name of NVIDIA CORPORATION nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>
#include <cutil.h>
#include <miscmath.h>
#include <amgx_cusparse.h>
#include <thrust/copy.h>
#include <solvers/multicolor_dilu_solver.h>
#include <solvers/block_common_solver.h>
#include <gaussian_elimination.h>
#include <basic_types.h>
#include <util.h>
#include <texture.h>
#include <ld_functions.h>
#include <matrix_io.h>
#include <thrust/logical.h>
#include <sm_utils.inl>
#include <amgx_types/util.h>
#include <algorithm>

#define AMGX_ILU_COLORING

namespace amgx
{
namespace multicolor_dilu_solver
{

enum { CTA_SIZE = 128, WARP_SIZE = 32 };

///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////

template< typename Matrix_type, typename Vector_type, int N, int CTA_SIZE, int WARP_SIZE, int NUM_WARP_ITERS_PER_BLOCK >
__global__
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700
__launch_bounds__( CTA_SIZE, 12 )
#elif defined(__CUDA_ARCH__)
__launch_bounds__( CTA_SIZE, 12 )
#endif
void DILU_setup_NxN_kernel_large( const int *__restrict A_rows,
                                  const int *__restrict A_cols,
                                  const int *__restrict A_diag,
                                  const Matrix_type *__restrict A_vals,
                                  Matrix_type *__restrict Einv,
                                  const int *sorted_rows_by_color,
                                  const int *row_colors,
                                  const int num_rows_per_color,
                                  const int current_color )
{
    const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE;
    // Squared N.
    const int NxN = N * N;
    // Number of items computed per CTA.
    const int NUM_ITEMS_PER_CTA = NUM_WARPS_PER_CTA;
    // Number of items per grid.
    const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA;
    // The coordinates of the thread inside the CTA/warp.
    const int warp_id = utils::warp_id();
    const int lane_id = utils::lane_id();
    // Shared memory to broadcast column IDs.
    __shared__ volatile int s_a_col_ids[CTA_SIZE];
    __shared__ volatile int s_a_col_its[CTA_SIZE];
    // Each thread keeps its own pointer.
    volatile int *my_s_a_col_ids = &s_a_col_ids[threadIdx.x - lane_id];
    volatile int *my_s_a_col_its = &s_a_col_its[threadIdx.x - lane_id];
    // Shared memory to store the matrices.
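    // (per-warp staging: s_A_mtx receives the current A block and s_B_mtx the matching Einv block while the warp forms their product)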
__shared__ volatile Vector_type s_A_mtx[CTA_SIZE * NUM_WARP_ITERS_PER_BLOCK]; __shared__ volatile Vector_type s_B_mtx[CTA_SIZE * NUM_WARP_ITERS_PER_BLOCK]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. volatile Vector_type *my_s_A_mtx = &s_A_mtx[warp_id * NUM_WARP_ITERS_PER_BLOCK * WARP_SIZE]; volatile Vector_type *my_s_B_mtx = &s_B_mtx[warp_id * NUM_WARP_ITERS_PER_BLOCK * WARP_SIZE]; // Shared memory to store the index of the element Aji. __shared__ volatile int s_A_ji[NUM_WARPS_PER_CTA]; // Each thread keeps its own pointer. volatile int *my_s_A_ji = &s_A_ji[warp_id]; // Precomputing some stuff int idx[NUM_WARP_ITERS_PER_BLOCK]; int idy[NUM_WARP_ITERS_PER_BLOCK]; #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) { const int id = (WARP_SIZE * wb + lane_id) % NxN; idx[wb] = id / N; idy[wb] = id % N; } // Determine which NxN block the threads work with. int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id; // Iterate over the rows of the matrix. One warp per row. for ( ; utils::any( a_row_it < num_rows_per_color ) ; a_row_it += NUM_ITEMS_PER_GRID ) { int a_row_id = -1; if ( a_row_it < num_rows_per_color ) { a_row_id = sorted_rows_by_color[a_row_it]; } // Load the diagonal. Vector_type e_out[NUM_WARP_ITERS_PER_BLOCK]; #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) { e_out[wb] = (Vector_type)0.0; } #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) if ( a_row_id != -1 && (wb * WARP_SIZE + lane_id) < NxN) { e_out[wb] = A_vals[NxN * A_diag[a_row_id] + wb * WARP_SIZE + lane_id]; } // Skip the 1st iteration of the outer-loop (that loop runs on the host). if ( current_color != 0 ) { // Ranges of the rows. int a_col_begin(0), a_col_end(0); if ( a_row_id != -1 ) { a_col_begin = A_rows[a_row_id ]; a_col_end = A_rows[a_row_id + 1]; } // Iterate over the elements in the columns. for ( ; a_col_begin < a_col_end ; a_col_begin += NxN ) { // Each thread loads a single element. If !is_active, a_col_end == 0. int a_col_it = a_col_begin + lane_id; // The identifier of the column if the iterator is valid. int a_col_tmp = -1, a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_tmp = A_cols[a_col_it]; } if ( a_col_tmp != -1 && row_colors[a_col_tmp] < current_color ) { a_col_id = a_col_tmp; } // When the diagonal is stored inside the matrix, we have to reject it. We // could be using a template parameter but it's not needed since that // rejection is really cheap (a couple of extra cycles -- CMP+MOV). if ( a_col_id == a_row_id ) { a_col_id = -1; } // We partition valid and invalid column ids. Valid ones come first. int vote = utils::ballot( a_col_id != -1 ); int ones = __popc( vote ); int dest = __popc( vote & utils::lane_mask_lt() ); if ( a_col_id == -1 ) { dest = ones + lane_id - dest; } my_s_a_col_ids[dest] = a_col_id; my_s_a_col_its[dest] = a_col_it; // Temporary storage with zeros for OOB Vector_type my_A[NUM_WARP_ITERS_PER_BLOCK], my_B[NUM_WARP_ITERS_PER_BLOCK]; #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) { my_A[wb] = (Vector_type)0.0; my_B[wb] = (Vector_type)0.0; } // Threads collaborate to load the rows. for ( int k = 0 ; k < WARP_SIZE ; ++k ) { // Exchange column indices. const int uniform_a_col_id = my_s_a_col_ids[k]; // Early exit. if ( uniform_a_col_id == -1 ) { break; } // Load the iterator. const int uniform_a_col_it = my_s_a_col_its[k]; // Load the two matrices. 
#pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) if ((wb * WARP_SIZE + lane_id) < NxN) { my_A[wb] = A_vals[NxN * uniform_a_col_it + wb * WARP_SIZE + lane_id]; my_B[wb] = Einv [NxN * uniform_a_col_id + wb * WARP_SIZE + lane_id]; } #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) { my_s_A_mtx[lane_id + wb * WARP_SIZE] = my_A[wb]; my_s_B_mtx[lane_id + wb * WARP_SIZE] = my_B[wb]; } // Compute the product of matrices. #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) { my_A[wb] = (Vector_type)0.0; #pragma unroll for ( int m = 0 ; m < N ; ++m ) { my_A[wb] += my_s_A_mtx[N * idx[wb] + m] * my_s_B_mtx[N * m + idy[wb]]; } } #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) if ((wb * WARP_SIZE + lane_id) < NxN) { my_s_A_mtx[lane_id + wb * WARP_SIZE] = my_A[wb]; } // We looking for columns in the two rows we're interested in. int b_col_it = A_rows[uniform_a_col_id ]; int b_col_end = A_rows[uniform_a_col_id + 1]; // Init the marker to -1. if ( lane_id == 0 ) { *my_s_A_ji = -1; } // Run the loop. b_col_it += lane_id; int shared_found = utils::ballot( lane_id == 0 && uniform_a_col_id == -1 ); do { bool found = b_col_it < b_col_end && A_cols[b_col_it] == a_row_id; if ( found ) { *my_s_A_ji = b_col_it; } shared_found = shared_found | utils::ballot(found); b_col_it += NxN; } while ( __popc( shared_found ) == 0 && utils::any( b_col_it < b_col_end ) ); // Load the blocks. const int w_aji = *my_s_A_ji; Vector_type my_C[NUM_WARP_ITERS_PER_BLOCK]; #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) { my_C[wb] = (Vector_type)0.0; if ( w_aji != -1 && (wb * WARP_SIZE + lane_id) < NxN) { my_C[wb] = A_vals[NxN * w_aji + wb * WARP_SIZE + lane_id]; } my_s_B_mtx[wb * WARP_SIZE + lane_id] = my_C[wb]; } // Update e_out. #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) { #pragma unroll for ( int m = 0 ; m < N ; ++m ) { e_out[wb] -= my_s_A_mtx[N * idx[wb] + m] * my_s_B_mtx[N * m + idy[wb]]; } } } } // a_col_begin < a_col_end } // current_color != 0 // Store e_out in A #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) { my_s_B_mtx[wb * WARP_SIZE + lane_id] = my_s_A_mtx[wb * WARP_SIZE + lane_id] = e_out[wb]; } // Invert the matrices. #pragma unroll for ( int row = 0 ; row < N ; ++row ) { Vector_type diag(0), diag_tmp = my_s_A_mtx[N * row + row]; if ( isNotCloseToZero(diag_tmp) ) { diag = Vector_type(1) / diag_tmp; } else { diag = Vector_type(1) / epsilon(diag_tmp); } if ( lane_id < N && lane_id != row) { my_s_A_mtx[N * row + lane_id] = my_s_B_mtx[N * row + lane_id] = my_s_B_mtx[N * row + lane_id] * diag; } #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) if ( idx[wb] != row && idy[wb] != row) { my_s_A_mtx[wb * WARP_SIZE + lane_id] = my_s_B_mtx[wb * WARP_SIZE + lane_id] - my_s_B_mtx[N * idx[wb] + row] * my_s_B_mtx[N * row + idy[wb]]; } if ( lane_id < N ) { Vector_type tmp = diag; if ( lane_id != row ) { tmp = -my_s_A_mtx[N * lane_id + row] * diag; } my_s_A_mtx[N * lane_id + row] = tmp; } #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) { my_s_B_mtx[wb * WARP_SIZE + lane_id] = my_s_A_mtx[wb * WARP_SIZE + lane_id]; } } // Store the results to Einv. 
if ( a_row_id != -1 ) #pragma unroll for (int wb = 0; wb < NUM_WARP_ITERS_PER_BLOCK; wb++) if (wb * WARP_SIZE + lane_id < NxN) { Einv[NxN * a_row_id + wb * WARP_SIZE + lane_id] = my_s_A_mtx[wb * WARP_SIZE + lane_id]; } } } template< typename Matrix_type, typename Vector_type, int N, int CTA_SIZE, int WARP_SIZE > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 12 ) #endif void DILU_setup_NxN_kernel( const int *__restrict A_rows, const int *__restrict A_cols, const int *__restrict A_diag, const Matrix_type *__restrict A_vals, Matrix_type *__restrict Einv, const int *sorted_rows_by_color, const int *row_colors, const int num_rows_per_color, const int current_color ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items per warp. const int NUM_ITEMS_PER_WARP = WARP_SIZE / NxN; // Upper-bound on the number of items per warp. const int NUM_ITEMS_PER_WARP_CEIL = (WARP_SIZE + NxN - 1) / NxN; // Number of items computer per CTA. const int NUM_ITEMS_PER_CTA = NUM_ITEMS_PER_WARP * NUM_WARPS_PER_CTA; // Number of items per grid. const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_div_NxN = lane_id / NxN; const int lane_id_mod_NxN = lane_id % NxN; // Useful index to compute matrix products. const int lane_id_mod_NxN_div_N = lane_id_mod_NxN / N; const int lane_id_mod_NxN_mod_N = lane_id_mod_NxN % N; // We need NxN to compute a NxN block. Encode a mask for the first block. int mask_tmp = utils::ballot( lane_id_div_NxN == 0 ); // Mask for ballots. We shift the mask with NxN active bits by the needed number of bits. const int mask_NxN = mask_tmp << (lane_id_div_NxN * __popc(mask_tmp)); // Shared memory to broadcast column IDs. __shared__ volatile int s_a_col_ids[CTA_SIZE]; __shared__ volatile int s_a_col_its[CTA_SIZE]; // Each thread keeps its own pointer. volatile int *my_s_a_col_ids = &s_a_col_ids[threadIdx.x - lane_id_mod_NxN]; volatile int *my_s_a_col_its = &s_a_col_its[threadIdx.x - lane_id_mod_NxN]; // Shared memory to store the matrices. __shared__ volatile Vector_type s_A_mtx[CTA_SIZE]; __shared__ volatile Vector_type s_B_mtx[CTA_SIZE]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. volatile Vector_type *my_s_A_mtx = &s_A_mtx[threadIdx.x - lane_id_mod_NxN]; volatile Vector_type *my_s_B_mtx = &s_B_mtx[threadIdx.x - lane_id_mod_NxN]; // Shared memory to store the index of the element Aji. __shared__ volatile int s_A_ji[NUM_WARPS_PER_CTA * NUM_ITEMS_PER_WARP_CEIL]; // Each thread keeps its own pointer. volatile int *my_s_A_ji = &s_A_ji[warp_id * NUM_ITEMS_PER_WARP_CEIL + lane_id_div_NxN]; // Determine which NxN block the threads work with. int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id * NUM_ITEMS_PER_WARP + lane_id_div_NxN; // Iterate over the rows of the matrix. One warp per row. for ( ; utils::any( a_row_it < num_rows_per_color ) ; a_row_it += NUM_ITEMS_PER_GRID ) { // Is the thread active? For example, for 5x5 only the first 25 threads are active per warp. // At compile time, the compiler will see is_active == true for 2x2 (since NxN & (NxN-1) evals // to false ; that's the common trick to determine if a number is a power of 2). 
int is_active = true; if ( NxN & (NxN - 1) ) { is_active = lane_id_div_NxN < NUM_ITEMS_PER_WARP; } int a_row_id = -1; if ( is_active && a_row_it < num_rows_per_color ) { a_row_id = sorted_rows_by_color[a_row_it]; } // Load the diagonal. Vector_type e_out(0); if ( a_row_id != -1 ) { e_out = A_vals[NxN * A_diag[a_row_id] + lane_id_mod_NxN]; } // Skip the 1st iteration of the outer-loop (that loop runs on the host). if ( current_color != 0 ) { // Ranges of the rows. int a_col_begin(0), a_col_end(0); if ( a_row_id != -1 ) { a_col_begin = A_rows[a_row_id ]; a_col_end = A_rows[a_row_id + 1]; } // Iterate over the elements in the columns. for ( ; a_col_begin < a_col_end ; a_col_begin += NxN ) { unsigned int active_mask = utils::activemask(); // Each thread loads a single element. If !is_active, a_col_end == 0. int a_col_it = a_col_begin + lane_id_mod_NxN; // The identifier of the column if the iterator is valid. int a_col_tmp = -1, a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_tmp = A_cols[a_col_it]; } if ( a_col_tmp != -1 && row_colors[a_col_tmp] < current_color ) { a_col_id = a_col_tmp; } // When the diagonal is stored inside the matrix, we have to reject it. We // could be using a template parameter but it's not needed since that // rejection is really cheap (a couple of extra cycles -- CMP+MOV). if ( a_col_id == a_row_id ) { a_col_id = -1; } // We partition valid and invalid column ids. Valid ones come first. int vote = utils::ballot( a_col_id != -1, active_mask ) & mask_NxN; int ones = __popc( vote ); int dest = __popc( vote & utils::lane_mask_lt() ); if ( a_col_id == -1 ) { dest = ones + lane_id_mod_NxN - dest; } my_s_a_col_ids[dest] = a_col_id; my_s_a_col_its[dest] = a_col_it; // Threads collaborate to load the rows. for ( int k = 0 ; k < NxN ; ++k ) { // Exchange column indices. const int uniform_a_col_id = my_s_a_col_ids[k]; // Early exit. if ( utils::all( uniform_a_col_id == -1, active_mask ) ) { break; } // Load the iterator. const int uniform_a_col_it = my_s_a_col_its[k]; // Load the two matrices. Vector_type my_A(0), my_B(0); if ( uniform_a_col_id != -1 ) { my_A = A_vals[NxN * uniform_a_col_it + lane_id_mod_NxN]; my_B = Einv [NxN * uniform_a_col_id + lane_id_mod_NxN]; } my_s_A_mtx[lane_id_mod_NxN] = my_A; my_s_B_mtx[lane_id_mod_NxN] = my_B; utils::syncwarp(active_mask); // Compute the product of matrices. Vector_type tmp(0); #pragma unroll for ( int m = 0 ; m < N ; ++m ) { tmp += my_s_A_mtx[N * lane_id_mod_NxN_div_N + m] * my_s_B_mtx[N * m + lane_id_mod_NxN_mod_N]; } my_s_A_mtx[lane_id_mod_NxN] = tmp; // We looking for columns in the two rows we're interested in. int b_col_it(0), b_col_end(0); if ( is_active && uniform_a_col_id != -1 ) { b_col_it = A_rows[uniform_a_col_id ]; b_col_end = A_rows[uniform_a_col_id + 1]; } // Init the marker to -1. if ( lane_id_mod_NxN == 0 ) { *my_s_A_ji = -1; } // Run the loop. b_col_it += lane_id_mod_NxN; int shared_found = utils::ballot( lane_id_mod_NxN == 0 && uniform_a_col_id == -1, active_mask ); do { bool found = b_col_it < b_col_end && A_cols[b_col_it] == a_row_id; if ( found ) { *my_s_A_ji = b_col_it; } shared_found = shared_found | utils::ballot(found, active_mask); b_col_it += NxN; } while ( __popc( shared_found ) < NUM_ITEMS_PER_WARP && utils::any( b_col_it < b_col_end, active_mask ) ); // Load the blocks. const int w_aji = *my_s_A_ji; Vector_type my_C(0); if ( w_aji != -1 ) { my_C = A_vals[NxN * w_aji + lane_id_mod_NxN]; } my_s_B_mtx[lane_id_mod_NxN] = my_C; // Update e_out. 
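                    // e_out -= (A_ij * Einv_jj) * A_ji, accumulated over the already-factored (lower-colored) neighbours j of row i.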
#pragma unroll for ( int m = 0 ; m < N ; ++m ) { e_out -= my_s_A_mtx[N * lane_id_mod_NxN_div_N + m] * my_s_B_mtx[N * m + lane_id_mod_NxN_mod_N]; } } } // a_col_begin < a_col_end } // current_color != 0 // Store e_out in A my_s_A_mtx[lane_id_mod_NxN] = e_out; // Invert the matrices. #pragma unroll for ( int row = 0 ; row < N ; ++row ) { Vector_type diag(0), diag_tmp = my_s_A_mtx[N * row + row]; if ( isNotCloseToZero(diag_tmp) ) { diag = Vector_type(1) / diag_tmp; } else { diag = Vector_type(1) / epsilon(diag_tmp); } if ( is_active && lane_id_mod_NxN_div_N == 0 && lane_id_mod_NxN_mod_N != row ) { my_s_A_mtx[N * row + lane_id_mod_NxN_mod_N] *= diag; } if ( is_active && lane_id_mod_NxN_div_N != row && lane_id_mod_NxN_mod_N != row ) { my_s_A_mtx[lane_id_mod_NxN] -= my_s_A_mtx[N * lane_id_mod_NxN_div_N + row] * my_s_A_mtx[N * row + lane_id_mod_NxN_mod_N]; } if ( is_active && lane_id_mod_NxN_div_N == 0 ) { Vector_type tmp = diag; if ( lane_id_mod_NxN_mod_N != row ) { tmp = -my_s_A_mtx[N * lane_id_mod_NxN_mod_N + row] * diag; } my_s_A_mtx[N * lane_id_mod_NxN_mod_N + row] = tmp; } } // Store the results to Einv. if ( a_row_id != -1 ) { Einv[NxN * a_row_id + lane_id_mod_NxN] = my_s_A_mtx[lane_id_mod_NxN]; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, int NUM_THREADS_PER_ROW, int CTA_SIZE, int WARP_SIZE > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 16 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 16 ) #endif void DILU_setup_1x1_kernel( const int *__restrict A_rows, const int *__restrict A_cols, const int *__restrict A_diag, const Matrix_type *__restrict A_vals, Matrix_type *__restrict Einv, const int *sorted_rows_by_color, const int *row_colors, const int num_rows_per_color, const int current_color ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Number of items per grid. const int NUM_WARPS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_div_NTPR = lane_id / NUM_THREADS_PER_ROW; const int lane_id_mod_NTPR = lane_id % NUM_THREADS_PER_ROW; // Shared memory to broadcast column IDs. __shared__ int s_a_col_ids[CTA_SIZE]; // Each thread keeps its own pointer. int *my_s_a_col_ids = &s_a_col_ids[warp_id * WARP_SIZE]; // Shared memory to store the matrices. __shared__ int s_A_ji[CTA_SIZE]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. int *my_s_A_ji = &s_A_ji[warp_id * WARP_SIZE]; // Determine which NxN block the threads work with. int a_row_it = blockIdx.x * NUM_WARPS_PER_CTA + warp_id; // Iterate over the rows of the matrix. One warp per row. for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_WARPS_PER_GRID ) { int a_row_id = sorted_rows_by_color[a_row_it]; // Load the diagonal. Vector_type e_out(0); // Skip the 1st iteration of the outer-loop (that loop runs on the host). if ( current_color != 0 ) { // Ranges of the row. int a_col_begin = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // Iterate over the elements in the columns. for ( ; a_col_begin < a_col_end ; a_col_begin += WARP_SIZE ) { // Each thread loads a single element. int a_col_it = a_col_begin + lane_id; // The identifier of the column if the iterator is valid. 
int a_col_tmp = -1, a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_tmp = A_cols[a_col_it]; } if ( a_col_tmp != -1 && row_colors[a_col_tmp] < current_color ) { a_col_id = a_col_tmp; } // When the diagonal is stored inside the matrix, we have to reject it. We // could be using a template parameter but it's not needed since that // rejection is really cheap (a couple of extra cycles -- CMP+MOV). if ( a_col_id == a_row_id ) { a_col_id = -1; } // We partition valid and invalid column ids. Valid ones come first. int vote = utils::ballot( a_col_id != -1 ); int ones = __popc( vote ); int dest = __popc( vote & utils::lane_mask_lt() ); if ( a_col_id == -1 ) { dest = ones + lane_id - dest; } my_s_a_col_ids[dest] = a_col_id; // Reset A_jis. my_s_A_ji[lane_id] = -1; __syncwarp(); // Threads collaborate to load the rows. for ( int k = 0 ; k < ones ; k += WARP_SIZE / NUM_THREADS_PER_ROW ) { const int local_k = k + lane_id_div_NTPR; // Exchange column indices. int uniform_a_col_id = -1; if ( local_k < ones ) { uniform_a_col_id = my_s_a_col_ids[local_k]; } // We look for columns in the rows we're interested in. int b_col_it(0), b_col_end(0); if ( uniform_a_col_id != -1 ) { b_col_it = A_rows[uniform_a_col_id ]; b_col_end = A_rows[uniform_a_col_id + 1]; } // Run the loop. b_col_it += lane_id_mod_NTPR; int shared_found = utils::ballot( lane_id_mod_NTPR == 0 && uniform_a_col_id == -1 ); do { bool found = b_col_it < b_col_end && A_cols[b_col_it] == a_row_id; if ( found ) { my_s_A_ji[local_k] = b_col_it; } shared_found = shared_found | utils::ballot(found); b_col_it += NUM_THREADS_PER_ROW; } while ( __popc( shared_found ) < WARP_SIZE / NUM_THREADS_PER_ROW && utils::any( b_col_it < b_col_end ) ); } __syncwarp(); // Where to get my A_ji from (if any). int a_ji_it = my_s_A_ji[dest]; // Grab A_jis. Matrix_type a_ji(0); if ( a_ji_it != -1 ) { a_ji = A_vals[a_ji_it]; } // Update e_out. if ( a_col_id != -1 ) { e_out += a_ji * Einv[a_col_id] * A_vals[a_col_it]; } } // a_col_begin < a_col_end } // current_color != 0 // Reduce the e_outs in one value. #pragma unroll for ( int mask = WARP_SIZE / 2 ; mask > 0 ; mask >>= 1 ) { e_out += utils::shfl_xor( e_out, mask ); } // Store the result. if ( lane_id == 0 ) { Matrix_type res = A_vals[A_diag[a_row_id]] - e_out; if ( res != Matrix_type(0) ) { res = Matrix_type(1) / res; } Einv[a_row_id] = static_cast<Vector_type>(res); } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< int N, bool ROW_MAJOR, int WARP_SIZE, typename Value_type > static __device__ __forceinline__ Value_type reduce_distributed_vectors( Value_type x, int is_leader ) { if ( N & (N - 1) ) { #pragma unroll for ( int i = 1 ; i < N ; ++i ) { Value_type other_x = utils::shfl_down( x, ROW_MAJOR ? i : N * i ); if ( is_leader ) { x += other_x; } } } else { #pragma unroll for ( int i = 1 ; i < N ; i <<= 1 ) { x += utils::shfl_xor( x, ROW_MAJOR ? 
i : N * i ); } } return x; } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, int N, int CTA_SIZE, int WARP_SIZE, bool ROW_MAJOR, bool HAS_EXTERNAL_DIAG > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 12 ) #endif void DILU_forward_NxN_kernel( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, const int *__restrict A_diag, const Vector_type *x, const Vector_type *b, Vector_type *__restrict delta, const int *__restrict sorted_rows_by_color, const int num_rows_per_color, const int current_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const ColoringType boundary_coloring, const int boundary_index ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items per warp. const int NUM_ITEMS_PER_WARP = WARP_SIZE / NxN; // Number of items computer per CTA. const int NUM_ITEMS_PER_CTA = NUM_ITEMS_PER_WARP * NUM_WARPS_PER_CTA; // Number of items per grid. const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_div_NxN = lane_id / NxN; const int lane_id_mod_NxN = lane_id % NxN; // Useful index to compute matrix products. const int lane_id_mod_NxN_div_N = lane_id_mod_NxN / N; const int lane_id_mod_NxN_mod_N = lane_id_mod_NxN % N; // We to get my data from when I use SHFL. const int shfl_offset = lane_id - lane_id_mod_NxN; // Shared memory needed to exchange X and delta. __shared__ volatile Vector_type s_mem[CTA_SIZE]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. volatile Vector_type *my_s_mem = &s_mem[threadIdx.x - lane_id_mod_NxN]; // Is the thread active? For example, for 5x5 only the first 25 threads are active per warp. // At compile time, the compiler will see is_active == true for 2x2 (since NxN & (NxN-1) evals // to false ; that's the common trick to determine if a number is a power of 2). int is_active = true; if ( NxN & (NxN - 1) ) { is_active = lane_id_div_NxN < NUM_ITEMS_PER_WARP; } // Determine which NxN block the threads work with. int a_row_it = num_rows_per_color; if ( is_active ) { a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id * NUM_ITEMS_PER_WARP + lane_id_div_NxN; } // Iterate over the rows of the matrix. One warp per row. for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ITEMS_PER_GRID ) { int a_row_id = sorted_rows_by_color[a_row_it]; // Load one block of B. Vector_type my_bmAx(0); if ( ROW_MAJOR ) { if ( lane_id_mod_NxN_mod_N == 0 ) { my_bmAx = __cachingLoad(&b[N * a_row_id + lane_id_mod_NxN_div_N]); } } else { if ( lane_id_mod_NxN_div_N == 0 ) { my_bmAx = b[N * a_row_id + lane_id_mod_NxN_mod_N]; } } // Don't do anything if X is zero. int a_col_begin = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // If the diagonal is stored separately, we have a special treatment. int a_col_max = a_col_end; if ( HAS_EXTERNAL_DIAG ) { ++a_col_max; } // Each warp load column indices of 32 nonzero blocks for ( ; utils::any( a_col_begin < a_col_max ) ; a_col_begin += NxN ) { // Each thread loads a single element. If !is_active, a_col_end == 0. int a_col_it = a_col_begin + lane_id_mod_NxN; // Get the ID of the column. 
int a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_id = A_cols[a_col_it]; } if ( HAS_EXTERNAL_DIAG && a_col_it == a_col_end ) { a_col_id = a_row_id; } // Determine if the color is valid. int a_col_is_valid = false; #ifdef AMGX_ILU_COLORING if ( a_col_id != -1 && current_color != 0 ) { if ( boundary_coloring == FIRST ) { a_col_is_valid = a_col_id >= boundary_index; } else { a_col_is_valid = a_col_id < boundary_index && row_colors[a_col_id] < current_color; } } #else if ( a_col_id != -1 && current_color != 0 ) { a_col_is_valid = row_colors[a_col_id] < current_color; } #endif // Count the number of active columns. // int vote = utils::ballot(aColId != -1); // The number of iterations. // int nCols = max( __popc( vote & 0x0000ffff ), __popc( vote & 0xffff0000 ) ); // Loop over columns. We compute 8 columns per iteration. for ( int k = 0 ; k < NxN ; k += N ) { int my_k = k + lane_id_mod_NxN_div_N; // Load N blocks of X. int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k ); int uniform_a_col_is_valid = utils::shfl( a_col_is_valid, shfl_offset + my_k ); Vector_type my_x(0); if ( uniform_a_col_id != -1 ) { my_x = __cachingLoad(&x[N * uniform_a_col_id + lane_id_mod_NxN_mod_N]); } if ( uniform_a_col_id != -1 && uniform_a_col_is_valid ) { my_x += delta[N * uniform_a_col_id + lane_id_mod_NxN_mod_N]; } my_s_mem[lane_id_mod_NxN] = my_x; // Load N blocks of A. #pragma unroll for ( int i = 0 ; i < N ; ++i ) { int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1; if ( uniform_a_col_tmp < a_col_end ) { uniform_a_col_it = uniform_a_col_tmp; } if ( HAS_EXTERNAL_DIAG && is_active && uniform_a_col_tmp == a_col_end ) { uniform_a_col_it = A_diag[a_row_id]; } Matrix_type my_val(0); if ( uniform_a_col_it != -1 ) { my_val = A_vals[NxN * uniform_a_col_it + lane_id_mod_NxN]; } if ( ROW_MAJOR ) { my_bmAx -= my_val * my_s_mem[N * i + lane_id_mod_NxN_mod_N]; } else { my_bmAx -= my_val * my_s_mem[N * i + lane_id_mod_NxN_div_N]; } } } // Loop over k } // Loop over aColIt // Load Einvs. Vector_type my_Einv = Einv[NxN * a_row_id + lane_id_mod_NxN]; // Reduce bmAx terms. int is_leader = lane_id_mod_NxN_div_N == 0; if ( ROW_MAJOR ) { is_leader = lane_id_mod_NxN_mod_N == 0; } my_bmAx = reduce_distributed_vectors<N, ROW_MAJOR, WARP_SIZE>( my_bmAx, is_leader ); // Update the shared terms. if ( ROW_MAJOR ) { if ( lane_id_mod_NxN_mod_N == 0 ) { my_s_mem[lane_id_mod_NxN_div_N] = my_bmAx; } } else { if ( lane_id_mod_NxN_div_N == 0 ) { my_s_mem[lane_id_mod_NxN_mod_N] = my_bmAx; } } // Update the diagonal term. if ( ROW_MAJOR ) { my_bmAx = my_Einv * my_s_mem[lane_id_mod_NxN_mod_N]; } else { my_bmAx = my_Einv * my_s_mem[lane_id_mod_NxN_div_N]; } // Reduce bmAx terms. my_bmAx = reduce_distributed_vectors<N, ROW_MAJOR, WARP_SIZE>( my_bmAx, is_leader ); // Store the results. 
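        // (my_bmAx is the forward-sweep result for this block row; it is written to delta and later consumed by the backward sweep)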
if ( ROW_MAJOR ) { if ( lane_id_mod_NxN_mod_N == 0 ) { delta[N * a_row_id + lane_id_mod_NxN_div_N] = my_bmAx; } } else { if ( lane_id_mod_NxN_div_N == 0 ) { delta[N * a_row_id + lane_id_mod_NxN_mod_N] = my_bmAx; } } } } template< typename Matrix_type, typename Vector_type, int N, int CTA_SIZE, int WARP_SIZE, bool HAS_EXTERNAL_DIAG, int NUM_WARP_ITERS_PER_BLOCK > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 12 ) #endif void DILU_forward_NxN_kernel_large( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, const int *__restrict A_diag, const Vector_type *x, const Vector_type *b, Vector_type *__restrict delta, const int *__restrict sorted_rows_by_color, const int num_rows_per_color, const int current_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const ColoringType boundary_coloring, const int boundary_index ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of rows computed per CTA. const int NUM_ITEMS_PER_CTA = NUM_WARPS_PER_CTA; // Number of rows? per grid. const int NUM_ITEMS_PER_GRID = CTA_SIZE; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. // Useful index to compute matrix products. const int lane_id_div_N = lane_id / N; const int lane_id_mod_N = lane_id % N; // id of a lane inside the block const int blocks_per_warp = WARP_SIZE / N; // we process this cols per warp per row const int row_elems_per_warp = blocks_per_warp * N; // Shared to store bmAx __shared__ volatile Vector_type bmAx[CTA_SIZE * NUM_WARP_ITERS_PER_BLOCK]; volatile Vector_type *my_bmAx_s = &bmAx[warp_id * NUM_WARP_ITERS_PER_BLOCK * WARP_SIZE]; // Determine which NxN block the threads work with. int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id; // Iterate over the rows of the matrix. One warp per row. for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ITEMS_PER_GRID ) { int a_row_id = sorted_rows_by_color[a_row_it]; // Load one block of B. Vector_type my_bmAx(0); if ( lane_id < N ) { my_bmAx = __cachingLoad(&b[N * a_row_id + lane_id]); } #pragma unroll for (int i = 0; i < NUM_WARP_ITERS_PER_BLOCK; i++) { my_bmAx_s[WARP_SIZE * i + lane_id] = 0.0; } // Don't do anything if X is zero. int a_col_begin = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // If the diagonal is stored separately, we have a special treatment. int a_col_max = a_col_end; if ( HAS_EXTERNAL_DIAG ) { ++a_col_max; } // Each warp load column indices of 32 nonzero blocks for ( ; utils::any( a_col_begin < a_col_max ) ; a_col_begin += WARP_SIZE ) // NxN { // Each thread loads a single element. If !is_active, a_col_end == 0. int a_col_it = a_col_begin + lane_id; // Get the ID of the column. int a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_id = A_cols[a_col_it]; } if ( HAS_EXTERNAL_DIAG && a_col_it == a_col_end ) { a_col_id = a_row_id; } // Determine if the color is valid. int a_col_is_valid = false; #ifdef AMGX_ILU_COLORING if ( a_col_id != -1 && current_color != 0 ) { if ( boundary_coloring == FIRST ) { a_col_is_valid = a_col_id >= boundary_index; } else { a_col_is_valid = a_col_id < boundary_index && row_colors[a_col_id] < current_color; } } #else if ( a_col_id != -1 && current_color != 0 ) { a_col_is_valid = row_colors[a_col_id] < current_color; } #endif // Loop over columns. 
We compute blocks_per_warp columns per iteration. for ( int k = 0 ; k < WARP_SIZE ; k += blocks_per_warp ) { // id of the processed block by this thread int my_k = k + lane_id_div_N; // Load N blocks of X (if valid) int uniform_a_col_id = utils::shfl( a_col_id, my_k ); int uniform_a_col_is_valid = utils::shfl( a_col_is_valid, my_k ); Vector_type my_x(0); if ( uniform_a_col_id != -1 && lane_id < row_elems_per_warp) { my_x = __cachingLoad(&x[N * uniform_a_col_id + lane_id_mod_N]); } if ( uniform_a_col_id != -1 && uniform_a_col_is_valid && lane_id < row_elems_per_warp) { my_x += delta[N * uniform_a_col_id + lane_id_mod_N]; } //my_s_mem[lane_id] = my_x; #pragma unroll for ( int i = 0 ; i < blocks_per_warp ; ++i ) { // k-th batch of blocks, i-th block. each thread process a column/row of a_it = uniform_a_col_tmp int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1; // check if we are going out of bounds/color if ( uniform_a_col_tmp < a_col_end ) { uniform_a_col_it = uniform_a_col_tmp; } if ( HAS_EXTERNAL_DIAG && uniform_a_col_tmp == a_col_end ) { uniform_a_col_it = A_diag[a_row_id]; } // swipe with the whole warp if (uniform_a_col_it != -1) { int block_inside_id = lane_id; #pragma unroll for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++) { Matrix_type my_val(0); if ( uniform_a_col_it != -1 && block_inside_id < NxN) { my_val = A_vals[NxN * uniform_a_col_it + block_inside_id]; } my_bmAx_s[block_inside_id] -= my_val * utils::shfl(my_x, N * i + block_inside_id % N); // MOD IS SLOW! block_inside_id += WARP_SIZE; } } } } // Loop over k } // Loop over aColIt // Load Einvs. Vector_type my_Einv[NUM_WARP_ITERS_PER_BLOCK]; #pragma unroll for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++) { my_Einv[j] = 0.0; } #pragma unroll for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++) { if ((WARP_SIZE * j + lane_id) < NxN) { my_Einv[j] = Einv[NxN * a_row_id + WARP_SIZE * j + lane_id]; } } // Reduce bmAx terms. { #pragma unroll for ( int i = 0 ; i < N ; ++i ) { if ( lane_id < N ) { my_bmAx += my_bmAx_s[N * lane_id + i]; } } } // Update the diagonal term. int block_inside_id = lane_id; #pragma unroll for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++) { my_bmAx_s[block_inside_id] = my_Einv[j] * utils::shfl(my_bmAx, block_inside_id % N); block_inside_id += WARP_SIZE; } // Reduce bmAx terms. { my_bmAx = 0.0; #pragma unroll for ( int i = 0 ; i < N ; ++i ) { int idx = N * lane_id + i; if ( lane_id < N ) { my_bmAx += my_bmAx_s[idx]; } } } // Store the results. if ( lane_id < N ) { delta[N * a_row_id + lane_id] = my_bmAx; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, int CTA_SIZE, int WARP_SIZE, bool ROW_MAJOR, bool HAS_EXTERNAL_DIAG > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 12 ) #endif void DILU_forward_4x4_kernel( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, const int *__restrict A_diag, const Vector_type *x, const Vector_type *b, Vector_type *__restrict delta, const int *__restrict sorted_rows_by_color, const int num_rows_per_color, const int current_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const ColoringType boundary_coloring, const int boundary_index ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Number of items per warp. 
const int NUM_ITEMS_PER_WARP = WARP_SIZE / 16; // Number of items computer per CTA. const int NUM_ITEMS_PER_CTA = NUM_ITEMS_PER_WARP * NUM_WARPS_PER_CTA; // Number of items per grid. const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_mod_16 = lane_id % 16; // Useful index to compute matrix products. const int lane_id_mod_16_div_4 = lane_id_mod_16 / 4; const int lane_id_mod_16_mod_4 = lane_id_mod_16 % 4; // We to get my data from when I use SHFL. const int shfl_offset = lane_id - lane_id_mod_16; // Shared memory needed to exchange X and delta. __shared__ volatile Vector_type s_mem[CTA_SIZE]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. volatile Vector_type *my_s_mem = &s_mem[threadIdx.x - lane_id_mod_16]; // Determine which 16 block the threads work with. int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + threadIdx.x / 16; // Iterate over the rows of the matrix. One warp per row. for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ITEMS_PER_GRID ) { int a_row_id = sorted_rows_by_color[a_row_it]; // Load one block of B. Vector_type my_bmAx(0); if ( ROW_MAJOR ) { if ( lane_id_mod_16_mod_4 == 0 ) { my_bmAx = __cachingLoad(&b[4 * a_row_id + lane_id_mod_16_div_4]); } } else { if ( lane_id_mod_16_div_4 == 0 ) { my_bmAx = b[4 * a_row_id + lane_id_mod_16_mod_4]; } } // Don't do anything if X is zero. int a_col_begin = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // If the diagonal is stored separately, we have a special treatment. int a_col_max = a_col_end; if ( HAS_EXTERNAL_DIAG ) { ++a_col_max; } // Each warp load column indices of 32 nonzero blocks for ( ; utils::any( a_col_begin < a_col_max ) ; a_col_begin += 16 ) { // Each thread loads a single element. If !is_active, a_col_end == 0. int a_col_it = a_col_begin + lane_id_mod_16; // Get the ID of the column. int a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_id = A_cols[a_col_it]; } if ( HAS_EXTERNAL_DIAG && a_col_it == a_col_end ) { a_col_id = a_row_id; } // Determine if the color is valid. int a_col_is_valid = false; #ifdef AMGX_ILU_COLORING if ( a_col_id != -1 && current_color != 0 ) { if ( boundary_coloring == FIRST ) { a_col_is_valid = a_col_id >= boundary_index; } else { a_col_is_valid = a_col_id < boundary_index && row_colors[a_col_id] < current_color; } } #else if ( a_col_id != -1 && current_color != 0 ) { a_col_is_valid = row_colors[a_col_id] < current_color; } #endif // Count the number of active columns. // int vote = utils::ballot(aColId != -1); // The number of iterations. // int nCols = max( __popc( vote & 0x0000ffff ), __popc( vote & 0xffff0000 ) ); // Loop over columns. We compute 8 columns per iteration. for ( int k = 0 ; k < 16 ; k += 4 ) { int my_k = k + lane_id_mod_16_div_4; // Load N blocks of X. int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k ); int uniform_a_col_is_valid = utils::shfl( a_col_is_valid, shfl_offset + my_k ); Vector_type my_x(0); if ( uniform_a_col_id != -1 ) { my_x = __cachingLoad(&x[4 * uniform_a_col_id + lane_id_mod_16_mod_4]); } if ( uniform_a_col_id != -1 && uniform_a_col_is_valid ) { my_x += delta[4 * uniform_a_col_id + lane_id_mod_16_mod_4]; } my_s_mem[lane_id_mod_16] = my_x; // Load N blocks of A. 
#pragma unroll for ( int i = 0 ; i < 4 ; ++i ) { int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1; if ( uniform_a_col_tmp < a_col_end ) { uniform_a_col_it = uniform_a_col_tmp; } if ( HAS_EXTERNAL_DIAG && uniform_a_col_tmp == a_col_end ) { uniform_a_col_it = A_diag[a_row_id]; } Matrix_type my_val(0); if ( uniform_a_col_it != -1 ) { my_val = A_vals[16 * uniform_a_col_it + lane_id_mod_16]; } if ( ROW_MAJOR ) { my_bmAx -= my_val * my_s_mem[4 * i + lane_id_mod_16_mod_4]; } else { my_bmAx -= my_val * my_s_mem[4 * i + lane_id_mod_16_div_4]; } } } // Loop over k } // Loop over aColIt // Load Einvs. Vector_type my_Einv = Einv[16 * a_row_id + lane_id_mod_16]; // Reduce bmAx terms. int is_leader = lane_id_mod_16_div_4 == 0; if ( ROW_MAJOR ) { is_leader = lane_id_mod_16_mod_4 == 0; } my_bmAx = reduce_distributed_vectors<4, ROW_MAJOR, WARP_SIZE>( my_bmAx, is_leader ); // Update the shared terms. if ( ROW_MAJOR ) { if ( lane_id_mod_16_mod_4 == 0 ) { my_s_mem[lane_id_mod_16_div_4] = my_bmAx; } } else { if ( lane_id_mod_16_div_4 == 0 ) { my_s_mem[lane_id_mod_16_mod_4] = my_bmAx; } } // Update the diagonal term. if ( ROW_MAJOR ) { my_bmAx = my_Einv * my_s_mem[lane_id_mod_16_mod_4]; } else { my_bmAx = my_Einv * my_s_mem[lane_id_mod_16_div_4]; } // Reduce bmAx terms. my_bmAx = reduce_distributed_vectors<4, ROW_MAJOR, WARP_SIZE>( my_bmAx, is_leader ); // Store the results. if ( ROW_MAJOR ) { if ( lane_id_mod_16_mod_4 == 0 ) { delta[4 * a_row_id + lane_id_mod_16_div_4] = my_bmAx; } } else { if ( lane_id_mod_16_div_4 == 0 ) { delta[4 * a_row_id + lane_id_mod_16_mod_4] = my_bmAx; } } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, int CTA_SIZE, bool HAS_EXTERNAL_DIAG > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 12 ) #endif void DILU_forward_4x4_kernel_row_major_vec4( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, const int *__restrict A_diag, const Vector_type *x, const Vector_type *b, Vector_type *__restrict delta, const int *__restrict sorted_rows_by_color, const int num_rows_per_color, const int current_color, const int *__restrict row_colors, const Matrix_type *Einv, const ColoringType boundary_coloring, const int boundary_index ) { // Number of half warps per CTA. const int NUM_HALF_WARPS = CTA_SIZE / 16; // Coordinates of the thread. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Coordinates of the thread in the CTA. const int thread_id_div_16 = threadIdx.x / 16; const int thread_id_mod_16 = threadIdx.x % 16; // Useful constants. const int thread_id_mod_16_div_4 = thread_id_mod_16 / 4; const int thread_id_mod_16_mod_4 = thread_id_mod_16 % 4; const int shfl_offset = 16 * (lane_id / 16); // Shared memory needed to exchange X and delta. __shared__ volatile Vector_type s_mem[CTA_SIZE]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. volatile Vector_type *my_s_mem = &s_mem[16 * thread_id_div_16]; // The iterator over rows. int a_row_it = blockIdx.x * NUM_HALF_WARPS + thread_id_div_16; // Iterate over the rows of the matrix. One warp per row. 
for ( ; a_row_it < num_rows_per_color ; a_row_it += gridDim.x * NUM_HALF_WARPS ) { unsigned int active_mask = utils::activemask(); int a_row_id = sorted_rows_by_color[a_row_it]; // Load one block of B. Vector_type my_bmAx(0); if ( thread_id_mod_16_div_4 == 0 ) { my_bmAx = __cachingLoad(&b[4 * a_row_id + thread_id_mod_16_mod_4]); } // The range of the row. int a_col_begin = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // If it has an external diagonal, we need one more item to put the diag. int a_col_max = a_col_end; if ( HAS_EXTERNAL_DIAG ) { ++a_col_max; } // Each warp load column indices of 32 nonzero blocks for ( ; a_col_begin < a_col_max ; a_col_begin += 16 ) { unsigned int active_mask_inner = utils::activemask(); int a_col_it = a_col_begin + thread_id_mod_16; // Get the ID of the column. int a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_id = __cachingLoad(&A_cols[a_col_it]); } if ( HAS_EXTERNAL_DIAG && a_col_it == a_col_end ) { a_col_id = a_row_id; } // Determine if the color is valid. int a_col_is_valid = false; #ifdef AMGX_ILU_COLORING if ( a_col_id != -1 && current_color != 0 ) { if ( boundary_coloring == FIRST ) { a_col_is_valid = a_col_id >= boundary_index; } else { a_col_is_valid = a_col_id < boundary_index && __cachingLoad(&row_colors[a_col_id]) < current_color; } } #else if ( a_col_id != -1 && current_color != 0 ) { a_col_is_valid = row_colors[a_col_id] < current_color; } #endif // Loop over columns. We compute 8 columns per iteration. for ( int k = 0 ; k < 16 ; k += 4 ) { int my_k = k + thread_id_mod_16_div_4; // Load 8 blocks of X. int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k, warpSize, active_mask_inner ); int uniform_a_col_is_valid = utils::shfl( a_col_is_valid, shfl_offset + my_k, warpSize, active_mask_inner ); Vector_type my_x(0); if ( uniform_a_col_id != -1 ) { my_x = __cachingLoad(&x[4 * uniform_a_col_id + thread_id_mod_16_mod_4]); } if ( uniform_a_col_id != -1 && uniform_a_col_is_valid ) { my_x += delta[4 * uniform_a_col_id + thread_id_mod_16_mod_4]; } my_s_mem[thread_id_mod_16] = my_x; int uniform_a_col_tmp = a_col_begin + my_k, uniform_a_col_it = -1; if ( uniform_a_col_tmp < a_col_end ) { uniform_a_col_it = uniform_a_col_tmp; } if ( HAS_EXTERNAL_DIAG && uniform_a_col_tmp == a_col_end ) { uniform_a_col_it = A_diag[a_row_id]; } Matrix_type my_vals[4] = { Matrix_type(0) }; if ( uniform_a_col_it != -1 ) { utils::load_vec4( my_vals, &A_vals[16 * uniform_a_col_it + 4 * thread_id_mod_16_mod_4] ); } my_bmAx -= my_vals[0] * my_s_mem[4 * thread_id_mod_16_div_4 + 0]; my_bmAx -= my_vals[1] * my_s_mem[4 * thread_id_mod_16_div_4 + 1]; my_bmAx -= my_vals[2] * my_s_mem[4 * thread_id_mod_16_div_4 + 2]; my_bmAx -= my_vals[3] * my_s_mem[4 * thread_id_mod_16_div_4 + 3]; } } // Load Einvs. Matrix_type my_Einv = Einv[16 * a_row_id + thread_id_mod_16]; // Reduce bmAx terms. my_bmAx += utils::shfl_xor( my_bmAx, 4, warpSize, active_mask ); my_bmAx += utils::shfl_xor( my_bmAx, 8, warpSize, active_mask ); // Update the shared terms. if ( thread_id_mod_16_div_4 == 0 ) { my_s_mem[thread_id_mod_16_mod_4] = my_bmAx; } // Update the diagonal term. my_bmAx = my_Einv * my_s_mem[thread_id_mod_16_mod_4]; // Reduce bmAx terms. my_bmAx += utils::shfl_xor( my_bmAx, 1, warpSize, active_mask ); my_bmAx += utils::shfl_xor( my_bmAx, 2, warpSize, active_mask ); // Store the results. 
if ( thread_id_mod_16_mod_4 == 0 ) { delta[4 * a_row_id + thread_id_mod_16_div_4] = my_bmAx; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, int NUM_THREADS_PER_ROW, int CTA_SIZE, int WARP_SIZE, bool HAS_EXTERNAL_DIAG > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 12 ) #endif void DILU_forward_1x1_kernel( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, const int *__restrict A_diag, const Vector_type *x, const Vector_type *b, Vector_type *__restrict delta, const int *__restrict sorted_rows_by_color, const int num_rows_per_color, const int current_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const ColoringType boundary_coloring, const int boundary_index ) { // Number of items per CTA. const int NUM_ROWS_PER_CTA = CTA_SIZE / NUM_THREADS_PER_ROW; // Number of items per grid. const int NUM_ROWS_PER_GRID = gridDim.x * NUM_ROWS_PER_CTA; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_mod_NTPR = lane_id % NUM_THREADS_PER_ROW; // Determine which NxN block the threads work with. int a_row_it = blockIdx.x * NUM_ROWS_PER_CTA + (threadIdx.x / NUM_THREADS_PER_ROW); // Iterate over the rows of the matrix. One warp per row. for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ROWS_PER_GRID ) { int a_row_id = sorted_rows_by_color[a_row_it]; // Load one block of B. Vector_type my_bmAx = amgx::types::util<Vector_type>::get_zero(); if ( lane_id_mod_NTPR == 0 ) { my_bmAx = __cachingLoad(&b[a_row_id]); } // If it has an external diag. if ( HAS_EXTERNAL_DIAG && lane_id_mod_NTPR == 0 ) { my_bmAx -= A_vals[A_diag[a_row_id]] * x[a_row_id]; } // Don't do anything if X is zero. int a_col_it = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // If the diagonal is stored separately, we have a special treatment. //if( HAS_EXTERNAL_DIAG ) // ++a_col_end; // Each warp load column indices of 32 nonzero blocks for ( a_col_it += lane_id_mod_NTPR ; utils::any( a_col_it < a_col_end ) ; a_col_it += NUM_THREADS_PER_ROW ) { // Get the ID of the column. int a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_id = A_cols[a_col_it]; } // Ignore the diagonal element since its color is smaller, and it has already been accounted for above if (HAS_EXTERNAL_DIAG && a_col_id == a_row_id) { a_col_id = -1; } // Load x. Vector_type my_x(0); if ( a_col_id != -1 ) { my_x = __cachingLoad(&x[a_col_id]); } // Is it really a valid column (due to coloring). int valid = false; #ifdef AMGX_ILU_COLORING if ( a_col_id != -1 && current_color != 0 ) { if ( boundary_coloring == FIRST ) { valid = a_col_id >= boundary_index; } else { valid = a_col_id < boundary_index && row_colors[a_col_id] < current_color; } } #else if ( a_col_id != -1 && current_color != 0 ) { valid = row_colors[a_col_id] < current_color; } #endif // Load my x value. if ( valid ) { my_x += delta[a_col_id]; } // Load my item from A. Matrix_type my_val(0); if ( a_col_it < a_col_end ) { my_val = A_vals[a_col_it]; } // Update bmAx. my_bmAx -= my_val * my_x; } // Reduce bmAx terms. #pragma unroll for ( int mask = NUM_THREADS_PER_ROW / 2 ; mask > 0 ; mask >>= 1 ) { my_bmAx += utils::shfl_xor( my_bmAx, mask ); } // Store the results. 
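// After the shfl_xor butterfly above, every lane of the NUM_THREADS_PER_ROW group holds the complete row sum, so only the leader lane (lane_id_mod_NTPR == 0) needs to apply Einv and write delta.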
if ( lane_id_mod_NTPR == 0 ) { delta[a_row_id] = Einv[a_row_id] * my_bmAx; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, typename WeightType, int N, int CTA_SIZE, int WARP_SIZE, bool ROW_MAJOR > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 12 ) #endif void DILU_backward_NxN_kernel( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, Vector_type *__restrict x, const WeightType weight, const int *__restrict sorted_rows_by_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const Vector_type *delta, Vector_type *__restrict Delta, const int num_rows_per_color, const int current_color, const ColoringType boundary_coloring, const int boundary_index ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items per warp. const int NUM_ITEMS_PER_WARP = WARP_SIZE / NxN; // Number of items computer per CTA. const int NUM_ITEMS_PER_CTA = NUM_ITEMS_PER_WARP * NUM_WARPS_PER_CTA; // Number of items per grid. const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_ITEMS_PER_CTA; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_div_NxN = lane_id / NxN; const int lane_id_mod_NxN = lane_id % NxN; // Useful index to compute matrix products. const int lane_id_mod_NxN_div_N = lane_id_mod_NxN / N; const int lane_id_mod_NxN_mod_N = lane_id_mod_NxN % N; // We to get my data from when I use SHFL. const int shfl_offset = lane_id - lane_id_mod_NxN; // Shared memory needed to exchange X and delta. __shared__ volatile Vector_type s_mem[CTA_SIZE]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. volatile Vector_type *my_s_mem = &s_mem[threadIdx.x - lane_id_mod_NxN]; // Is the thread active? For example, for 5x5 only the first 25 threads are active per warp. // At compile time, the compiler will see is_active == true for 2x2 (since NxN & (NxN-1) evals // to false ; that's the common trick to determine if a number is a power of 2). int is_active = true; if ( NxN & (NxN - 1) ) { is_active = lane_id_div_NxN < NUM_ITEMS_PER_WARP; } // Determine which NxN block the threads work with. int a_row_it = num_rows_per_color; if ( is_active ) { a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id * NUM_ITEMS_PER_WARP + lane_id_div_NxN; } // Iterate over the rows of the matrix. One warp per row. for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ITEMS_PER_GRID ) { int a_row_id = sorted_rows_by_color[a_row_it]; // Load one block of B. Vector_type my_delta(0); // Don't do anything if X is zero. int a_col_begin = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // Each warp load column indices of 32 nonzero blocks for ( ; utils::any( a_col_begin < a_col_end ) ; a_col_begin += NxN ) { // Each thread loads a single element. If !is_active, a_col_end == 0. int a_col_it = a_col_begin + lane_id_mod_NxN; // Get the ID of the column. int a_col_tmp = -1, a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_tmp = A_cols[a_col_it]; } // Make sure the column is interesting. 
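// In the backward sweep only neighbours whose Delta is already available contribute: interior columns with a strictly greater color (or boundary columns, depending on boundary_coloring). This mirrors the forward sweep, which only visits neighbours of strictly smaller colors.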
#ifdef AMGX_ILU_COLORING int valid = false; if ( a_col_tmp != -1 && current_color != 0 ) { if ( boundary_coloring == LAST ) { valid = a_col_tmp >= boundary_index; } else { valid = a_col_tmp < boundary_index && row_colors[a_col_tmp] > current_color; } } #else int valid = false; if ( a_col_tmp != -1 && row_colors[a_col_tmp] > current_color ) { valid = true; } #endif // Set the column id. if ( valid ) { a_col_id = a_col_tmp; } // Count the number of active columns. // int vote = utils::ballot(aColId != -1); // The number of iterations. // int nCols = max( __popc( vote & 0x0000ffff ), __popc( vote & 0xffff0000 ) ); // Loop over columns. We compute 8 columns per iteration. for ( int k = 0 ; k < NxN ; k += N ) { int my_k = k + lane_id_mod_NxN_div_N; // Load N blocks of X. int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k ); Vector_type my_x(0); if ( uniform_a_col_id != -1 ) { my_x = Delta[N * uniform_a_col_id + lane_id_mod_NxN_mod_N]; } my_s_mem[lane_id_mod_NxN] = my_x; // Load N blocks of A. #pragma unroll for ( int i = 0 ; i < N ; ++i ) { //if( uniform_a_col_id == -1 ) // break; int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1; if ( uniform_a_col_tmp < a_col_end ) { uniform_a_col_it = uniform_a_col_tmp; } Matrix_type my_val(0); if ( uniform_a_col_it != -1 ) { my_val = A_vals[NxN * uniform_a_col_it + lane_id_mod_NxN]; } if ( ROW_MAJOR ) { my_delta += my_val * my_s_mem[N * i + lane_id_mod_NxN_mod_N]; } else { my_delta += my_val * my_s_mem[N * i + lane_id_mod_NxN_div_N]; } } } // Loop over k } // Loop over aColIt // Load Einvs. Matrix_type my_Einv = Einv[NxN * a_row_id + lane_id_mod_NxN]; // Reduce bmAx terms. int is_leader = lane_id_mod_NxN_div_N == 0; if ( ROW_MAJOR ) { is_leader = lane_id_mod_NxN_mod_N == 0; } my_delta = reduce_distributed_vectors<N, ROW_MAJOR, WARP_SIZE>( my_delta, is_leader ); // Update the shared terms. if ( ROW_MAJOR ) { if ( lane_id_mod_NxN_mod_N == 0 ) { my_s_mem[lane_id_mod_NxN_div_N] = my_delta; } } else { if ( lane_id_mod_NxN_div_N == 0 ) { my_s_mem[lane_id_mod_NxN_mod_N] = my_delta; } } // Update the diagonal term. if ( ROW_MAJOR ) { my_delta = my_Einv * my_s_mem[lane_id_mod_NxN_mod_N]; } else { my_delta = my_Einv * my_s_mem[lane_id_mod_NxN_div_N]; } // Reduce bmAx terms. my_delta = reduce_distributed_vectors<N, ROW_MAJOR, WARP_SIZE>( my_delta, is_leader ); // Store the results. 
if ( ROW_MAJOR ) { const int offset = N * a_row_id + lane_id_mod_NxN_div_N; Vector_type my_b(0), my_x(0); if ( lane_id_mod_NxN_mod_N == 0 ) { my_b = __cachingLoad(&delta[offset]); my_x = x [offset]; } my_delta = my_b - my_delta; if ( lane_id_mod_NxN_mod_N == 0 ) { x [offset] = my_x + weight * my_delta; Delta[offset] = my_delta; } } else { const int offset = N * a_row_id + lane_id_mod_NxN_mod_N; Vector_type my_b(0), my_x(0); if ( lane_id_mod_NxN_div_N == 0 ) { my_b = __cachingLoad(&delta[offset]); my_x = x [offset]; } my_delta = my_b - my_delta; if ( lane_id_mod_NxN_div_N == 0 ) { x [offset] = my_x + weight * my_delta; Delta[offset] = my_delta; } } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, typename WeightType, int N, int CTA_SIZE, int WARP_SIZE, bool ROW_MAJOR, int NUM_WARP_ITERS_PER_BLOCK > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 12 ) #endif void DILU_backward_NxN_kernel_large( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, Vector_type *__restrict x, const WeightType weight, const int *__restrict sorted_rows_by_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const Vector_type *delta, Vector_type *__restrict Delta, const int num_rows_per_color, const int current_color, const ColoringType boundary_coloring, const int boundary_index ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items computer per CTA. const int NUM_ITEMS_PER_CTA = NUM_WARPS_PER_CTA; // Number of items per grid. const int NUM_ITEMS_PER_GRID = gridDim.x * NUM_WARPS_PER_CTA; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_div_N = lane_id / N; const int lane_id_mod_N = lane_id % N; // id of a lane inside the block const int blocks_per_warp = WARP_SIZE / N; // we process this cols per warp per row const int row_elems_per_warp = blocks_per_warp * N; // Shared to store t_delta __shared__ volatile Vector_type delta_s[CTA_SIZE * NUM_WARP_ITERS_PER_BLOCK]; volatile Vector_type *my_delta_s = &delta_s[warp_id * NUM_WARP_ITERS_PER_BLOCK * WARP_SIZE]; // Determine which NxN block the threads work with. int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + warp_id; // Iterate over the rows of the matrix. One warp per row. for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ITEMS_PER_GRID ) { int a_row_id = sorted_rows_by_color[a_row_it]; // Accumulator Vector_type my_delta(0); //Vector_type mAx[NUM_WARP_ITERS_PER_BLOCK]; #pragma unroll for (int i = 0; i < NUM_WARP_ITERS_PER_BLOCK; i++) { my_delta_s[WARP_SIZE * i + lane_id] = 0.0; } // Don't do anything if X is zero. int a_col_begin = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // Each warp load column indices of 32 nonzero blocks for ( ; utils::any( a_col_begin < a_col_end ) ; a_col_begin += WARP_SIZE ) { // Each thread loads a single element. If !is_active, a_col_end == 0. int a_col_it = a_col_begin + lane_id; // Get the ID of the column. 
int a_col_tmp = -1, a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_tmp = A_cols[a_col_it]; } // Make sure the column is interesting. #ifdef AMGX_ILU_COLORING int valid = false; if ( a_col_tmp != -1 && current_color != 0 ) { if ( boundary_coloring == LAST ) { valid = a_col_tmp >= boundary_index; } else { valid = a_col_tmp < boundary_index && row_colors[a_col_tmp] > current_color; } } #else int valid = false; if ( a_col_tmp != -1 && row_colors[a_col_tmp] > current_color ) { valid = true; } #endif // Set the column id. if ( valid ) { a_col_id = a_col_tmp; } // Loop over columns. We compute blocks_per_warp columns per iteration. for ( int k = 0 ; k < WARP_SIZE ; k += blocks_per_warp ) { // id of the processed block by this thread int my_k = k + lane_id_div_N; // Load N blocks of X (if valid) int uniform_a_col_id = utils::shfl( a_col_id, my_k ); Vector_type my_x(0); if ( uniform_a_col_id != -1 && lane_id < row_elems_per_warp) { my_x = Delta[N * uniform_a_col_id + lane_id_mod_N]; } // Load blocks of A. // for each block in a batch #pragma unroll for ( int i = 0 ; i < blocks_per_warp ; ++i ) { // k-th batch of blocks, i-th block. each thread process a column/row of a_it = uniform_a_col_tmp int uniform_a_col_tmp = a_col_begin + k + i, uniform_a_col_it = -1; // check if we are going out of bounds/color if ( uniform_a_col_tmp < a_col_end ) { uniform_a_col_it = uniform_a_col_tmp; } // swipe with the whole warp if (uniform_a_col_it != -1) { int block_inside_id = lane_id; #pragma unroll for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++) { Matrix_type my_val(0); if ( uniform_a_col_it != -1 && block_inside_id < NxN) { my_val = A_vals[NxN * uniform_a_col_it + block_inside_id]; } my_delta_s[block_inside_id] -= my_val * utils::shfl(my_x, N * i + block_inside_id % N); //my_s_mem[N*i + block_inside_id % N]; // MOD IS SLOW! block_inside_id += WARP_SIZE; } } } } // Loop over k } // Loop over aColIt // Load Einvs. Vector_type my_Einv[NUM_WARP_ITERS_PER_BLOCK]; #pragma unroll for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++) { my_Einv[j] = 0.0; } #pragma unroll for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++) { if ((WARP_SIZE * j + lane_id) < NxN) { my_Einv[j] = Einv[NxN * a_row_id + WARP_SIZE * j + lane_id]; } } // Reduce bmAx terms. { #pragma unroll for ( int i = 0 ; i < N ; ++i ) { if ( lane_id < N ) { my_delta += my_delta_s[N * lane_id + i]; } } } // Update the diagonal term. if ( ROW_MAJOR ) { int block_inside_id = lane_id; #pragma unroll for (int j = 0; j < NUM_WARP_ITERS_PER_BLOCK; j++) { my_delta_s[block_inside_id] = my_Einv[j] * utils::shfl(my_delta, block_inside_id % N); block_inside_id += WARP_SIZE; } } // Reduce bmAx terms. { my_delta = 0.0; #pragma unroll for ( int i = 0 ; i < N ; ++i ) { if ( lane_id < N ) { my_delta += my_delta_s[N * lane_id + i]; } } } // Store the results. 
if ( ROW_MAJOR ) { const int offset = N * a_row_id + lane_id; Vector_type my_b(0), my_x(0); if ( lane_id < N ) { my_b = __cachingLoad(&delta[offset]); my_x = x [offset]; } my_delta = my_b - my_delta; if ( lane_id < N ) { x [offset] = my_x + weight * my_delta; Delta[offset] = my_delta; } } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename IndexType, typename ValueTypeA, typename ValueTypeB, typename WeightType, int CTA_SIZE, bool ROW_MAJOR > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 16 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 16 ) #endif void DILU_backward_4x4_kernel( const IndexType *row_offsets, const IndexType *column_indices, const ValueTypeA *nonzero_values, ValueTypeB *x, const WeightType weight, const int *sorted_rows_by_color, const int *__restrict row_colors, const ValueTypeA *Einv, const ValueTypeB *delta, ValueTypeB *Delta, const int num_rows_per_color, const int current_color, const ColoringType boundary_coloring, const IndexType boundary_index) { const int nHalfWarps = CTA_SIZE / 16; // Number of half warps per CTA. const int laneId = utils::lane_id(); const int halfWarpId = threadIdx.x / 16; const int halfLaneId = threadIdx.x % 16; const int halfLaneId_div_4 = halfLaneId / 4; const int halfLaneId_mod_4 = halfLaneId % 4; const int upperHalf = 16 * (laneId / 16); // Shared memory needed to exchange X and delta. __shared__ volatile ValueTypeB s_mem[CTA_SIZE]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. volatile ValueTypeB *my_s_mem = &s_mem[16 * halfWarpId]; // Iterate over the rows of the matrix. One warp per two rows. for ( int aRowIt = blockIdx.x * nHalfWarps + halfWarpId ; aRowIt < num_rows_per_color ; aRowIt += gridDim.x * nHalfWarps ) { int aRowId = sorted_rows_by_color[aRowIt]; // Load one block of B. ValueTypeB my_delta(0); // The range of the rows. int aColBegin = row_offsets[aRowId ]; int aColEnd = row_offsets[aRowId + 1]; // Each warp load column indices of 16 nonzero blocks for ( ; aColBegin < aColEnd ; aColBegin += 16 ) { int aColIt = aColBegin + halfLaneId; // Get the ID of the column. int aColTmp = -1, aColId = -1; if ( aColIt < aColEnd ) { aColTmp = column_indices[aColIt]; } #ifdef AMGX_ILU_COLORING bool valid = (((aColTmp < boundary_index || boundary_coloring == SYNC_COLORS) && (row_colors[aColTmp] > current_color)) || (aColTmp >= boundary_index && boundary_coloring == LAST)); if ( aColTmp != -1 && valid ) { aColId = aColTmp; } #else if ( aColTmp != -1 && row_colors[aColTmp] > current_color ) { aColId = aColTmp; } #endif for ( int k = 0 ; k < 16 ; k += 4 ) { int my_k = k + halfLaneId_div_4; // Exchange column indices. int waColId = utils::shfl( aColId, upperHalf + my_k ); // Load 8 blocks of X if needed. ValueTypeB my_x(0); if ( waColId != -1 ) { my_x = Delta[4 * waColId + halfLaneId_mod_4]; } my_s_mem[halfLaneId] = my_x; // Load 8 blocks of A. #pragma unroll for ( int i = 0 ; i < 4 ; ++i ) { const int k_i = k + i; int w_aColTmp = aColBegin + k_i, w_aColIt = -1; if ( utils::shfl( aColId, upperHalf + k_i ) != -1 && w_aColTmp < aColEnd ) w_aColIt = w_aColTmp; ValueTypeA my_val(0); if ( w_aColIt != -1 ) { my_val = nonzero_values[16 * w_aColIt + halfLaneId]; } if ( ROW_MAJOR ) { my_delta += my_val * my_s_mem[4 * i + halfLaneId_mod_4]; } else { my_delta += my_val * my_s_mem[4 * i + halfLaneId_div_4]; } } } // Loop over k } // Loop over aColIt // Load EINV values. 
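// Each row stores its dense 4x4 Einv block as 16 contiguous values; each lane of the half-warp loads one entry.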
ValueTypeA my_Einv = Einv[16 * aRowId + halfLaneId]; // Reduce delta terms. if ( ROW_MAJOR ) { my_delta += utils::shfl_xor( my_delta, 1 ); my_delta += utils::shfl_xor( my_delta, 2 ); } else { my_delta += utils::shfl_xor( my_delta, 4 ); my_delta += utils::shfl_xor( my_delta, 8 ); } // Update the shared terms. if ( ROW_MAJOR ) { if ( halfLaneId_mod_4 == 0 ) { my_s_mem[halfLaneId_div_4] = my_delta; } } else { if ( halfLaneId_div_4 == 0 ) { my_s_mem[halfLaneId_mod_4] = my_delta; } } // Update the diagonal term. if ( ROW_MAJOR ) { my_delta = my_Einv * my_s_mem[halfLaneId_mod_4]; } else { my_delta = my_Einv * my_s_mem[halfLaneId_div_4]; } // Regroup results. if ( ROW_MAJOR ) { my_delta += utils::shfl_xor( my_delta, 1 ); my_delta += utils::shfl_xor( my_delta, 2 ); } else { my_delta += utils::shfl_xor( my_delta, 4 ); my_delta += utils::shfl_xor( my_delta, 8 ); } // Store the results. if ( ROW_MAJOR ) { int offset = 4 * aRowId + halfLaneId_div_4; ValueTypeB my_b(0), my_x(0); if ( halfLaneId_mod_4 == 0 ) { my_b = __cachingLoad(&delta[offset]); my_x = x[offset]; } my_delta = my_b - my_delta; if ( halfLaneId_mod_4 == 0 ) { x[offset] = my_x + weight * my_delta; Delta[offset] = my_delta; } } else { int offset = 4 * aRowId + halfLaneId_mod_4; ValueTypeB my_b(0), my_x(0); if ( halfLaneId_div_4 == 0 ) { my_b = __cachingLoad(&delta[offset]); my_x = x[offset]; } my_delta = my_b - my_delta; if ( halfLaneId_div_4 == 0 ) { x[offset] = my_x + weight * my_delta; Delta[offset] = my_delta; } } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, typename WeightType, int CTA_SIZE > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 16 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 16 ) #endif void DILU_backward_4x4_kernel_row_major_vec4( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, Vector_type *__restrict x, const WeightType weight, const int *__restrict sorted_rows_by_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const Vector_type *delta, Vector_type *__restrict Delta, const int num_rows_per_color, const int current_color, const ColoringType boundary_coloring, const int boundary_index ) { // Number of half warps per CTA. const int NUM_HALF_WARPS = CTA_SIZE / 16; // Coordinates of the thread. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Coordinates of the thread in the CTA. const int thread_id_div_16 = threadIdx.x / 16; const int thread_id_mod_16 = threadIdx.x % 16; // Useful constants. const int thread_id_mod_16_div_4 = thread_id_mod_16 / 4; const int thread_id_mod_16_mod_4 = thread_id_mod_16 % 4; const int shfl_offset = 16 * (lane_id / 16); // Shared memory needed to exchange X and delta. __shared__ volatile Vector_type s_mem[CTA_SIZE]; // Each thread keeps its own pointer to shared memory to avoid some extra computations. volatile Vector_type *my_s_mem = &s_mem[16 * thread_id_div_16]; // The iterator over rows. int a_row_it = blockIdx.x * NUM_HALF_WARPS + thread_id_div_16; // Iterate over the rows of the matrix. One warp per row. for ( ; a_row_it < num_rows_per_color ; a_row_it += gridDim.x * NUM_HALF_WARPS ) { unsigned int active_mask = utils::activemask(); int a_row_id = sorted_rows_by_color[a_row_it]; // Load one block of B. Vector_type my_delta(0); // Don't do anything if X is zero. 
int a_col_begin = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // Each warp load column indices of 32 nonzero blocks for ( ; a_col_begin < a_col_end ; a_col_begin += 16 ) { unsigned int active_mask_inner = utils::activemask(); int a_col_it = a_col_begin + thread_id_mod_16; // Get the ID of the column. int a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_id = __cachingLoad(&A_cols[a_col_it]); } #ifdef AMGX_ILU_COLORING int valid = false; if ( a_col_id != -1 && current_color != 0 ) { if ( boundary_coloring == LAST ) { valid = a_col_id >= boundary_index; } else { valid = a_col_id < boundary_index && __cachingLoad(&row_colors[a_col_id]) > current_color; } } #else int valid = false; if ( a_col_id != -1 && row_colors[a_col_id] > current_color ) { valid = true; } #endif // Set the column id. if ( !valid ) { a_col_id = -1; } // Loop over columns. We compute 8 columns per iteration. #pragma unroll 2 for ( int k = 0 ; k < 16 ; k += 4 ) { int my_k = k + thread_id_mod_16_div_4; // Load 8 blocks of X. int uniform_a_col_id = utils::shfl( a_col_id, shfl_offset + my_k, warpSize, active_mask_inner ); Vector_type my_Delta(0); if ( uniform_a_col_id != -1 ) { my_Delta = Delta[4 * uniform_a_col_id + thread_id_mod_16_mod_4]; } my_s_mem[thread_id_mod_16] = my_Delta; int uniform_a_col_it = a_col_begin + my_k; if ( uniform_a_col_id == -1 || uniform_a_col_it >= a_col_end ) { uniform_a_col_it = -1; } Matrix_type my_vals[4] = { Matrix_type(0) }; if ( uniform_a_col_it != -1 ) { utils::load_vec4( my_vals, &A_vals[16 * uniform_a_col_it + 4 * thread_id_mod_16_mod_4] ); } my_delta += my_vals[0] * my_s_mem[4 * thread_id_mod_16_div_4 + 0]; my_delta += my_vals[1] * my_s_mem[4 * thread_id_mod_16_div_4 + 1]; my_delta += my_vals[2] * my_s_mem[4 * thread_id_mod_16_div_4 + 2]; my_delta += my_vals[3] * my_s_mem[4 * thread_id_mod_16_div_4 + 3]; } // Loop over k } // Loop over aColIt // Load EINV values. Matrix_type my_Einv = Einv[16 * a_row_id + thread_id_mod_16]; // Reduce delta terms. my_delta += utils::shfl_xor( my_delta, 4, warpSize, active_mask ); my_delta += utils::shfl_xor( my_delta, 8, warpSize, active_mask ); // Update the shared terms. if ( thread_id_mod_16_div_4 == 0 ) { my_s_mem[thread_id_mod_16_mod_4] = my_delta; } // Update the diagonal term. my_delta = my_Einv * my_s_mem[thread_id_mod_16_mod_4]; // Regroup results. my_delta += utils::shfl_xor( my_delta, 1, warpSize, active_mask ); my_delta += utils::shfl_xor( my_delta, 2, warpSize, active_mask ); // Store the results. 
int offset = 4 * a_row_id + thread_id_mod_16_div_4; Vector_type my_b(0), my_x(0); if ( thread_id_mod_16_mod_4 == 0 ) { my_b = __cachingLoad(&delta[offset]); my_x = x [offset]; } my_delta = my_b - my_delta; if ( thread_id_mod_16_mod_4 == 0 ) { x [offset] = my_x + weight * my_delta; Delta[offset] = my_delta; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, typename WeightType, int NUM_THREADS_PER_ROW, int CTA_SIZE, int WARP_SIZE > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 12 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 12 ) #endif void DILU_backward_1x1_kernel( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, Vector_type *__restrict x, const WeightType weight, const int *__restrict sorted_rows_by_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const Vector_type *delta, Vector_type *__restrict Delta, const int num_rows_per_color, const int current_color, const ColoringType boundary_coloring, const int boundary_index ) { // Number of items per CTA. const int NUM_ROWS_PER_CTA = CTA_SIZE / NUM_THREADS_PER_ROW; // Number of items per grid. const int NUM_ROWS_PER_GRID = gridDim.x * NUM_ROWS_PER_CTA; // The coordinates of the thread inside the CTA/warp. const int warp_id = utils::warp_id(); const int lane_id = utils::lane_id(); // Constants. const int lane_id_mod_NTPR = lane_id % NUM_THREADS_PER_ROW; // Determine which NxN block the threads work with. int a_row_it = blockIdx.x * NUM_ROWS_PER_CTA + (threadIdx.x / NUM_THREADS_PER_ROW); // Iterate over the rows of the matrix. One warp per row. for ( ; a_row_it < num_rows_per_color ; a_row_it += NUM_ROWS_PER_GRID ) { int a_row_id = sorted_rows_by_color[a_row_it]; // Load one block of B. Vector_type my_delta(0); // Don't do anything if X is zero. int a_col_it = A_rows[a_row_id ]; int a_col_end = A_rows[a_row_id + 1]; // Each warp load column indices of 32 nonzero blocks for ( a_col_it += lane_id_mod_NTPR ; utils::any( a_col_it < a_col_end ) ; a_col_it += NUM_THREADS_PER_ROW ) { // Get the ID of the column. int a_col_id = -1; if ( a_col_it < a_col_end ) { a_col_id = A_cols[a_col_it]; } // Is it really a valid column (due to coloring). int valid = false; #ifdef AMGX_ILU_COLORING if ( a_col_id != -1 && current_color != 0 ) { //if( boundary_coloring == LAST ) // valid = a_col_id >= boundary_index; //else // valid = a_col_id < boundary_index && row_colors[a_col_id] > current_color; valid = (((a_col_id < boundary_index || boundary_coloring == SYNC_COLORS) && (row_colors[a_col_id] > current_color)) || (a_col_id >= boundary_index && boundary_coloring == LAST)); } #else //if( a_col_id != -1 && current_color != 0 ) if ( a_col_id != -1 ) { valid = row_colors[a_col_id] > current_color; } #endif // Load my Delta value. Vector_type my_Delta(0); if ( valid ) { my_Delta = Delta[a_col_id]; } // Load my item from A. Matrix_type my_val(0); if ( valid ) { my_val = A_vals[a_col_it]; } // Update bmAx. my_delta += my_val * my_Delta; } // Reduce bmAx terms. #pragma unroll for ( int mask = NUM_THREADS_PER_ROW / 2 ; mask > 0 ; mask >>= 1 ) { my_delta += utils::shfl_xor( my_delta, mask ); } // Store the results. 
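// Per-row update: Delta = delta - Einv * (sum of A * Delta over the higher-color neighbours accumulated above); x is then relaxed by weight * Delta, and Delta is saved so rows of smaller colors can reuse it later in this backward pass.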
if ( lane_id_mod_NTPR == 0 ) { Vector_type my_x = __cachingLoad(&delta[a_row_id]) - Einv[a_row_id] * my_delta; x [a_row_id] += weight * my_x; Delta[a_row_id] = my_x; } } } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, typename WeightType, int N, int CTA_SIZE > __global__ #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 700 __launch_bounds__( CTA_SIZE, 16 ) #elif defined(__CUDA_ARCH__) __launch_bounds__( CTA_SIZE, 16 ) #endif void DILU_backward_NxN_kernel_skip( Vector_type *__restrict x, const WeightType weight, const int *__restrict sorted_rows_by_color, const Vector_type *delta, Vector_type *__restrict Delta, const int num_rows_per_color ) { const int NUM_ITEMS_PER_CTA = CTA_SIZE / N; // Number of updated block items per CTA const int ITEM_ID = threadIdx.x / N; const int ITEM_BLOCK_OFFSET = threadIdx.x % N; const int is_active = ITEM_ID < NUM_ITEMS_PER_CTA; // The first row. int a_row_it = blockIdx.x * NUM_ITEMS_PER_CTA + ITEM_ID; // Iterate over the rows of the matrix. One warp per two rows. for ( ; a_row_it < num_rows_per_color ; a_row_it += gridDim.x * NUM_ITEMS_PER_CTA ) { if ( is_active ) { int a_row_id = sorted_rows_by_color[a_row_it]; const int idx = N * a_row_id + ITEM_BLOCK_OFFSET; Vector_type my_b = __cachingLoad(&delta[idx]); Vector_type my_x = x[idx]; x[idx] = my_x + weight * my_b; Delta[idx] = my_b; } } } // ---------- // Methods // ---------- template< typename Matrix_type, typename Vector_type, int N > void DILU_forward_NxN_dispatch( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, const int *__restrict A_diag, const Vector_type *x, const Vector_type *b, Vector_type *__restrict delta, const int *__restrict sorted_rows_by_color, const int num_rows_per_color, const int current_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const ColoringType boundary_coloring, const int boundary_index, const int row_major, const int has_external_diag ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items per warp. const int NUM_ROWS_PER_WARP = std::max(WARP_SIZE / NxN, 1); // Number of items computer per CTA. const int NUM_ROWS_PER_CTA = NUM_ROWS_PER_WARP * NUM_WARPS_PER_CTA; // The number of threads to launch. const int grid_size = std::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA ); // Branch to the correct kernel call. int code = 2 * (row_major ? 1 : 0) + (has_external_diag ? 1 : 0); switch ( code ) { case 0: // Column-major, no external diagonal. DILU_forward_NxN_kernel<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, false, false> <<< grid_size, CTA_SIZE>>>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index ); break; case 1: // Column-major, external diagonal. DILU_forward_NxN_kernel<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, false, true> <<< grid_size, CTA_SIZE>>>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index ); break; case 2: // Row-major, no external diagonal. 
DILU_forward_NxN_kernel<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, true, false> <<< grid_size, CTA_SIZE>>>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index ); break; case 3: // Row-major, external diagonal. DILU_forward_NxN_kernel<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, true, true> <<< grid_size, CTA_SIZE>>>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index ); break; default: FatalError( "Internal error", AMGX_ERR_NOT_IMPLEMENTED ); } cudaCheckError(); } template< typename Matrix_type, typename Vector_type, int N > void DILU_forward_NxN_dispatch_large( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, const int *__restrict A_diag, const Vector_type *x, const Vector_type *b, Vector_type *__restrict delta, const int *__restrict sorted_rows_by_color, const int num_rows_per_color, const int current_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const ColoringType boundary_coloring, const int boundary_index, const int row_major, const int has_external_diag ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items computer per CTA. const int NUM_ROWS_PER_CTA = NUM_WARPS_PER_CTA; // Each warp is going to sweep through bloock this many times const int NUM_WARP_ITERS_PER_BLOCK = ((NxN - 1) / WARP_SIZE) + 1; // The number of threads to launch. const int grid_size = std::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA ); // Branch to the correct kernel call. if (!row_major) { FatalError("COL MAJOR is not supported for this large block_size", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } switch ( has_external_diag ) { case 0: // Row-major, no external diagonal. DILU_forward_NxN_kernel_large<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, false, NUM_WARP_ITERS_PER_BLOCK> <<< grid_size, CTA_SIZE>>>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index ); break; case 1: // Row-major, external diagonal. DILU_forward_NxN_kernel_large<Matrix_type, Vector_type, N, CTA_SIZE, WARP_SIZE, true, NUM_WARP_ITERS_PER_BLOCK> <<< grid_size, CTA_SIZE>>>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index ); break; default: FatalError( "Internal error", AMGX_ERR_NOT_IMPLEMENTED ); } cudaCheckError(); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type > void DILU_forward_NxN_dispatch( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, const int *__restrict A_diag, const Vector_type *x, const Vector_type *b, Vector_type *__restrict delta, const int *__restrict sorted_rows_by_color, const int num_rows_per_color, const int current_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const ColoringType boundary_coloring, const int boundary_index, const int block_size, const int row_major, const int has_external_diag ) { switch ( block_size ) { case 1: { const int NUM_THREADS_PER_ROW = 8; // Number of items computer per CTA. 
const int NUM_ROWS_PER_CTA = CTA_SIZE / NUM_THREADS_PER_ROW; // The number of threads to launch. const int grid_size = std::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA ); if ( has_external_diag ) { DILU_forward_1x1_kernel<Matrix_type, Vector_type, NUM_THREADS_PER_ROW, CTA_SIZE, WARP_SIZE, true> <<< grid_size, CTA_SIZE>>>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index ); } else { DILU_forward_1x1_kernel<Matrix_type, Vector_type, NUM_THREADS_PER_ROW, CTA_SIZE, WARP_SIZE, false> <<< grid_size, CTA_SIZE>>>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index ); } cudaCheckError(); } break; case 2: DILU_forward_NxN_dispatch<Matrix_type, Vector_type, 2>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index, row_major, has_external_diag ); break; case 3: DILU_forward_NxN_dispatch<Matrix_type, Vector_type, 3>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index, row_major, has_external_diag ); break; case 4: if ( row_major ) { // Number of items computer per CTA. const int NUM_ROWS_PER_CTA = CTA_SIZE / 16; // The number of threads to launch. const int grid_size = std::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA ); if ( has_external_diag ) //DILU_forward_4x4_kernel<Matrix_type, Vector_type, CTA_SIZE, WARP_SIZE, true, true><<<grid_size, CTA_SIZE>>>( DILU_forward_4x4_kernel_row_major_vec4<Matrix_type, Vector_type, CTA_SIZE, true> <<< grid_size, CTA_SIZE>>>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index ); else DILU_forward_4x4_kernel_row_major_vec4<Matrix_type, Vector_type, CTA_SIZE, false> <<< grid_size, CTA_SIZE>>>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index ); cudaCheckError(); } else DILU_forward_NxN_dispatch<Matrix_type, Vector_type, 4>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index, row_major, has_external_diag ); break; case 5: DILU_forward_NxN_dispatch<Matrix_type, Vector_type, 5>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index, row_major, has_external_diag ); break; case 8: DILU_forward_NxN_dispatch_large<Matrix_type, Vector_type, 8>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index, row_major, has_external_diag ); break; case 10: DILU_forward_NxN_dispatch_large<Matrix_type, Vector_type, 10>( A_rows, A_cols, A_vals, A_diag, x, b, delta, sorted_rows_by_color, num_rows_per_color, current_color, row_colors, Einv, boundary_coloring, boundary_index, row_major, has_external_diag ); break; default: FatalError( "Internal error", AMGX_ERR_NOT_IMPLEMENTED ); } cudaCheckError(); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< 
typename Matrix_type, typename Vector_type, typename WeightType, int N > void DILU_backward_NxN_dispatch( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, Vector_type *__restrict x, const WeightType weight, const int *__restrict sorted_rows_by_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const Vector_type *delta, Vector_type *__restrict Delta, const int num_rows_per_color, const int current_color, const ColoringType boundary_coloring, const int boundary_index, const int row_major ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Squared N. const int NxN = N * N; // Number of items per warp. const int NUM_ROWS_PER_WARP = std::max(WARP_SIZE / NxN, 1); // Number of items computer per CTA. const int NUM_ROWS_PER_CTA = NUM_ROWS_PER_WARP * NUM_WARPS_PER_CTA; // The number of threads to launch. const int grid_size = std::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA ); // Branch to the correct kernel call. if ( row_major ) { DILU_backward_NxN_kernel<Matrix_type, Vector_type, WeightType, N, CTA_SIZE, WARP_SIZE, true> <<< grid_size, CTA_SIZE>>>( A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index ); } else { DILU_backward_NxN_kernel<Matrix_type, Vector_type, WeightType, N, CTA_SIZE, WARP_SIZE, false> <<< grid_size, CTA_SIZE>>>( A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index ); } cudaCheckError(); } template< typename Matrix_type, typename Vector_type, typename WeightType, int N, int NUM_WARP_ITERS_PER_BLOCK > void DILU_backward_NxN_dispatch_large( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, Vector_type *__restrict x, const WeightType weight, const int *__restrict sorted_rows_by_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const Vector_type *delta, Vector_type *__restrict Delta, const int num_rows_per_color, const int current_color, const ColoringType boundary_coloring, const int boundary_index, const int row_major ) { const int NUM_WARPS_PER_CTA = CTA_SIZE / WARP_SIZE; // Number of items computer per CTA. const int NUM_ROWS_PER_CTA = NUM_WARPS_PER_CTA; // The number of threads to launch. const int grid_size = std::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA ); // Branch to the correct kernel call. 
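// NUM_WARP_ITERS_PER_BLOCK is the number of WARP_SIZE-wide passes a warp needs to cover one NxN block, i.e. ceil(N*N / WARP_SIZE). For example, assuming the usual WARP_SIZE of 32: 8x8 blocks need ceil(64/32) = 2 passes and 10x10 blocks need ceil(100/32) = 4, matching the <8, 2> and <10, 4> instantiations used by the block-size dispatch below.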
if ( row_major ) { DILU_backward_NxN_kernel_large<Matrix_type, Vector_type, WeightType, N, CTA_SIZE, WARP_SIZE, true, NUM_WARP_ITERS_PER_BLOCK> <<< grid_size, CTA_SIZE>>>( A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index ); } else { FatalError("col major is not supported for this blocksize in multicolor DILU solver", AMGX_ERR_NOT_IMPLEMENTED); } cudaCheckError(); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< typename Matrix_type, typename Vector_type, typename WeightType> void DILU_backward_NxN_dispatch( const int *__restrict A_rows, const int *__restrict A_cols, const Matrix_type *__restrict A_vals, Vector_type *__restrict x, const WeightType weight, const int *__restrict sorted_rows_by_color, const int *__restrict row_colors, const Matrix_type *__restrict Einv, const Vector_type *delta, Vector_type *__restrict Delta, const int num_rows_per_color, const int current_color, const ColoringType boundary_coloring, const int boundary_index, const int block_size, const int row_major ) { switch ( block_size ) { case 1: { const int NUM_THREADS_PER_ROW = 8; // Number of items computer per CTA. const int NUM_ROWS_PER_CTA = CTA_SIZE / NUM_THREADS_PER_ROW; // The number of threads to launch. const int grid_size = std::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA ); DILU_backward_1x1_kernel<Matrix_type, Vector_type, WeightType, NUM_THREADS_PER_ROW, CTA_SIZE, WARP_SIZE> <<< grid_size, CTA_SIZE>>>( A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index ); cudaCheckError(); } break; case 2: DILU_backward_NxN_dispatch<Matrix_type, Vector_type, WeightType, 2>( A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index, row_major ); break; case 3: DILU_backward_NxN_dispatch<Matrix_type, Vector_type, WeightType, 3>( A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index, row_major ); break; case 4: //if( false ) if ( row_major ) { // Number of items computer per CTA. const int NUM_ROWS_PER_CTA = CTA_SIZE / 16; // The number of threads to launch. 
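// Worked example (assuming CTA_SIZE is 128, so NUM_ROWS_PER_CTA = 128 / 16 = 8): a color with 10000 rows requests (10000 + 7) / 8 = 1250 CTAs; std::min caps the grid at 4096 CTAs, and any rows beyond grid_size * NUM_ROWS_PER_CTA are handled by the grid-stride loop inside the kernel.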
const int grid_size = std::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA ); //DILU_backward_NxN_kernel<Matrix_type, Vector_type, 4, CTA_SIZE, WARP_SIZE, true><<<grid_size, CTA_SIZE>>>( DILU_backward_4x4_kernel_row_major_vec4<Matrix_type, Vector_type, WeightType, CTA_SIZE> <<< grid_size, CTA_SIZE>>>( A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index ); cudaCheckError(); } else DILU_backward_NxN_dispatch<Matrix_type, Vector_type, WeightType, 4>( A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index, row_major ); break; case 5: DILU_backward_NxN_dispatch<Matrix_type, Vector_type, WeightType, 5>( A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index, row_major ); break; case 8: DILU_backward_NxN_dispatch_large<Matrix_type, Vector_type, WeightType, 8, 2>( A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index, row_major ); break; case 10: DILU_backward_NxN_dispatch_large<Matrix_type, Vector_type, WeightType, 10, 4>( A_rows, A_cols, A_vals, x, weight, sorted_rows_by_color, row_colors, Einv, delta, Delta, num_rows_per_color, current_color, boundary_coloring, boundary_index, row_major ); break; default: FatalError( "Internal error", AMGX_ERR_NOT_IMPLEMENTED ); } cudaCheckError(); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< class T_Config > MulticolorDILUSolver_Base<T_Config>::MulticolorDILUSolver_Base( AMG_Config &cfg, const std::string &cfg_scope, ThreadManager *tmng ) : Solver<T_Config>( cfg, cfg_scope, tmng ) { this->weight = cfg.AMG_Config::template getParameter<double>("relaxation_factor", cfg_scope); this->m_reorder_cols_by_color_desired = (cfg.AMG_Config::template getParameter<int>("reorder_cols_by_color", cfg_scope) != 0); this->m_insert_diagonal_desired = (cfg.AMG_Config::template getParameter<int>("insert_diag_while_reordering", cfg_scope) != 0); this->m_boundary_coloring = cfg.AMG_Config::template getParameter<ColoringType>("boundary_coloring", cfg_scope); this->always_obey_coloring = 0; if (weight == 0) { weight = 1.; amgx_printf("Warning, setting weight to 1 instead of estimating largest_eigen_value in Multicolor DILU smoother\n"); } } // Destructor template<class T_Config> MulticolorDILUSolver_Base<T_Config>::~MulticolorDILUSolver_Base() { Einv.clear(); Einv.shrink_to_fit(); } template<class T_Config> void MulticolorDILUSolver_Base<T_Config>::computeEinv(Matrix<T_Config> &A) { ViewType oldView = A.currentView(); A.setView(this->m_explicit_A->getViewExterior()); if ( A.get_block_dimx() != A.get_block_dimy() ) { FatalError("DILU implemented only for squared blocks", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } if ( A.get_block_dimx() > 32) // actually much less than 32 due to register file limitations, but... 
{ FatalError("DILU implemented only for squared blocks of size <= 32", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } computeEinv_NxN( A, A.get_block_dimx() ); A.setView(oldView); } template< class T_Config > void MulticolorDILUSolver_Base<T_Config>::printSolverParameters() const { std::cout << "relaxation_factor = " << this->weight << std::endl; } // Solver setup template< class T_Config > void MulticolorDILUSolver_Base<T_Config>::solver_setup(bool reuse_matrix_structure) { m_explicit_A = dynamic_cast<Matrix<T_Config>*>(this->m_A); if (!this->m_explicit_A) { FatalError("MulticolorDILUSolver only works with explicit matrices", AMGX_ERR_INTERNAL); } int N = this->m_explicit_A->get_num_cols() * this->m_explicit_A->get_block_dimy(); if (this->m_explicit_A->getColoringLevel() < 1) { FatalError("Matrix must be colored to use multicolor dilu solver. Try setting: coloring_level=1 in the configuration file", AMGX_ERR_NOT_IMPLEMENTED); } m_delta.resize(N); m_Delta.resize(N); m_delta.set_block_dimy(this->m_explicit_A->get_block_dimy()); m_Delta.set_block_dimy(this->m_explicit_A->get_block_dimy()); m_delta.set_block_dimx(1); m_Delta.set_block_dimx(1); if ( this->m_explicit_A->getBlockFormat() != ROW_MAJOR ) { FatalError("Multicolor DILU solver only supports row major format for the blocks", AMGX_ERR_CONFIGURATION); } computeEinv( *this->m_explicit_A ); } // template< class T_Config > void MulticolorDILUSolver_Base<T_Config>::solve_init( VVector &b, VVector &x, bool xIsZero ) { } // Solve one iteration template<class T_Config> bool MulticolorDILUSolver_Base<T_Config>::solve_iteration( VVector &b, VVector &x, bool xIsZero ) { if ( this->m_explicit_A->get_block_dimx() != this->m_explicit_A->get_block_dimy() ) { FatalError("DILU implemented only for squared blocks", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } if ( this->m_explicit_A->get_block_dimx() > 32) // actually much more less than 32 doe to register file limitations, but... 
{ FatalError("DILU implemented only for squared blocks of size <= 32", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE); } if (xIsZero) { x.dirtybit = 0; } if (!this->m_explicit_A->is_matrix_singleGPU()) { this->m_explicit_A->manager->exchange_halo_async(x, x.tag); this->m_explicit_A->manager->exchange_halo_async(b, b.tag); } if (this->m_explicit_A->getViewExterior() == this->m_explicit_A->getViewInterior()) { if (!this->m_explicit_A->is_matrix_singleGPU()) { this->m_explicit_A->manager->exchange_halo_wait(x, x.tag); this->m_explicit_A->manager->exchange_halo_wait(b, b.tag); } } ViewType oldView = this->m_explicit_A->currentView(); ViewType flags; bool latencyHiding = true; if (this->m_explicit_A->is_matrix_singleGPU() || (x.dirtybit == 0 && b.dirtybit == 0)) { latencyHiding = false; this->m_explicit_A->setViewExterior(); flags = (ViewType)(this->m_explicit_A->getViewExterior()); } else { flags = (ViewType)(this->m_explicit_A->getViewInterior()); this->m_explicit_A->setViewInterior(); } if (xIsZero) { thrust::fill(x.begin(), x.end(), types::util<ValueTypeB>::get_zero()); cudaCheckError(); } this->smooth_NxN(*this->m_explicit_A, b, x, flags); if (latencyHiding) { if (!this->m_explicit_A->is_matrix_singleGPU()) { this->m_explicit_A->manager->exchange_halo_wait(x, x.tag); this->m_explicit_A->manager->exchange_halo_wait(b, b.tag); } this->m_explicit_A->setViewExterior(); flags = (ViewType)(~(this->m_explicit_A->getViewInterior()) & this->m_explicit_A->getViewExterior()); if (flags != 0) { this->smooth_NxN(*this->m_explicit_A, b, x, flags); } } x.dirtybit = 1; this->m_explicit_A->setView(oldView); return (this->converged(b, x)); } template<class T_Config> void MulticolorDILUSolver_Base<T_Config>::solve_finalize( VVector &b, VVector &x ) {} /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void MulticolorDILUSolver<TemplateConfig<AMGX_host, V, M, I> >::computeEinv_NxN(const Matrix_h &A, const int bsize) { FatalError("Multicolor DILU smoother not implemented for host format, exiting", AMGX_ERR_NOT_SUPPORTED_TARGET); } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void MulticolorDILUSolver<TemplateConfig<AMGX_host, V, M, I> >::smooth_NxN( const Matrix_h &A, VVector &b, VVector &x, ViewType separation_flag ) { FatalError("Haven't implemented Multicolor DILU smoother for host format", AMGX_ERR_NOT_SUPPORTED_TARGET); } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > MulticolorDILUSolver<TemplateConfig<AMGX_device, V, M, I> >::MulticolorDILUSolver( AMG_Config &cfg, const std::string &cfg_scope, ThreadManager *tmng ) : MulticolorDILUSolver_Base<TemplateConfig<AMGX_device, V, M, I> >( cfg, cfg_scope, tmng ) { int device = 0; cudaGetDevice( &device ); cudaDeviceProp properties; cudaGetDeviceProperties( &properties, device ); m_is_kepler = properties.major >= 3; } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void MulticolorDILUSolver<TemplateConfig<AMGX_device, V, M, I> >::computeEinv_NxN(const Matrix_d &A, const int bsize) { const int bsize_sq = bsize * bsize; this->Einv.resize( A.get_num_cols()*bsize_sq, 0.0 ); // sol::prof_start(); for ( int i = 0, num_colors = A.getMatrixColoring().getNumColors() ; i < num_colors ; ++i ) { const int color_offset = 
A.getMatrixColoring().getOffsetsRowsPerColor()[i]; const int num_rows_per_color = A.getMatrixColoring().getOffsetsRowsPerColor()[i + 1] - color_offset; if ( num_rows_per_color == 0 ) { continue; } const int CTA_SIZE = 128; const int NUM_WARPS_PER_CTA = CTA_SIZE / 32; int ROWS_PER_WARP = 1; if ( bsize_sq > 1 && bsize_sq < 6 ) { ROWS_PER_WARP = 32 / bsize_sq; } const int ROWS_PER_CTA = ROWS_PER_WARP * NUM_WARPS_PER_CTA; const int GRID_SIZE = std::min( 4096, (num_rows_per_color + ROWS_PER_CTA - 1) / ROWS_PER_CTA ); cudaStream_t stream = thrust::global_thread_handle::get_stream(); switch ( bsize ) { case 1: DILU_setup_1x1_kernel<ValueTypeA, ValueTypeB, 8, CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE, 0, stream>>>( A.row_offsets.raw(), A.col_indices.raw(), A.diag.raw(), A.values.raw(), this->Einv.raw(), A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, A.getMatrixColoring().getRowColors().raw(), num_rows_per_color, i ); break; case 2: DILU_setup_NxN_kernel<ValueTypeA, ValueTypeB, 2, CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE, 0, stream>>>( A.row_offsets.raw(), A.col_indices.raw(), A.diag.raw(), A.values.raw(), this->Einv.raw(), A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, A.getMatrixColoring().getRowColors().raw(), num_rows_per_color, i ); break; case 3: DILU_setup_NxN_kernel<ValueTypeA, ValueTypeB, 3, CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE, 0, stream>>>( A.row_offsets.raw(), A.col_indices.raw(), A.diag.raw(), A.values.raw(), this->Einv.raw(), A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, A.getMatrixColoring().getRowColors().raw(), num_rows_per_color, i ); break; case 4: DILU_setup_NxN_kernel<ValueTypeA, ValueTypeB, 4, CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE, 0, stream>>>( A.row_offsets.raw(), A.col_indices.raw(), A.diag.raw(), A.values.raw(), this->Einv.raw(), A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, A.getMatrixColoring().getRowColors().raw(), num_rows_per_color, i ); break; case 5: DILU_setup_NxN_kernel<ValueTypeA, ValueTypeB, 5, CTA_SIZE, 32> <<< GRID_SIZE, CTA_SIZE, 0, stream>>>( A.row_offsets.raw(), A.col_indices.raw(), A.diag.raw(), A.values.raw(), this->Einv.raw(), A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, A.getMatrixColoring().getRowColors().raw(), num_rows_per_color, i ); break; case 8: DILU_setup_NxN_kernel_large<ValueTypeA, ValueTypeB, 8, CTA_SIZE, 32, 2> <<< GRID_SIZE, CTA_SIZE, 0, stream>>>( A.row_offsets.raw(), A.col_indices.raw(), A.diag.raw(), A.values.raw(), this->Einv.raw(), A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, A.getMatrixColoring().getRowColors().raw(), num_rows_per_color, i ); break; case 10: DILU_setup_NxN_kernel_large<ValueTypeA, ValueTypeB, 10, CTA_SIZE, 32, 4> <<< GRID_SIZE, CTA_SIZE, 0, stream>>>( A.row_offsets.raw(), A.col_indices.raw(), A.diag.raw(), A.values.raw(), this->Einv.raw(), A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, A.getMatrixColoring().getRowColors().raw(), num_rows_per_color, i ); break; default: FatalError( "Multicolor-DILU Setup: block size was not enabled in the code, contact AMGX developers.", AMGX_ERR_NOT_SUPPORTED_BLOCKSIZE ); } cudaCheckError(); } } template< AMGX_VecPrecision V, AMGX_MatPrecision M, AMGX_IndPrecision I > void MulticolorDILUSolver<TemplateConfig<AMGX_device, V, M, I> >::smooth_NxN( const Matrix_d &A, VVector &b, VVector &x, ViewType separation_flag ) { AMGX_CPU_PROFILER( "MulticolorDILUSolver::smooth_NxN " ); int offset = 0, separation = 0; A.getOffsetAndSizeForView(INTERIOR, &offset, 
&separation); // Only have separation=num interior rows if we are only working on the interior // and the boundary coloring is FIRST or LAST, otherwise set separation offset to // total number of rows if ( separation_flag != this->m_explicit_A->getViewInterior() || this->m_explicit_A->getViewExterior() == this->m_explicit_A->getViewInterior() || this->m_boundary_coloring != LAST && this->m_boundary_coloring != FIRST ) { separation = A.row_offsets.size() - 1; } else { amgx_printf("separation active\n"); } // -------------------- // Forward Sweep // -------------------- const int num_colors = this->m_explicit_A->getMatrixColoring().getNumColors(); for ( int i = 0 ; i < num_colors ; ++i ) { int color_offset(0); if ( separation_flag & INTERIOR ) { color_offset = A.getMatrixColoring().getOffsetsRowsPerColor()[i]; } else { color_offset = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i]; } int num_rows_per_color(0); if ( separation_flag == this->m_explicit_A->getViewInterior() ) { num_rows_per_color = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i]; } else { num_rows_per_color = A.getMatrixColoring().getOffsetsRowsPerColor()[i + 1]; } num_rows_per_color -= color_offset; if ( num_rows_per_color == 0 ) { continue; } int boundary_index = separation; if ( this->m_boundary_coloring == SYNC_COLORS ) { boundary_index = A.get_num_rows(); } DILU_forward_NxN_dispatch( A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), A.diag.raw(), x.raw(), b.raw(), this->m_delta.raw(), A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, num_rows_per_color, i, A.getMatrixColoring().getRowColors().raw(), this->Einv.raw(), this->m_boundary_coloring, boundary_index, A.get_block_dimy(), A.getBlockFormat() == ROW_MAJOR, A.hasProps(DIAG) ); cudaCheckError(); } // -------------------- // Backward Sweep // -------------------- for ( int i = num_colors - 1 ; i >= 0 ; --i ) { int color_offset(0); if ( separation_flag & INTERIOR ) { color_offset = A.getMatrixColoring().getOffsetsRowsPerColor()[i]; } else { color_offset = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i]; } int num_rows_per_color(0); if ( separation_flag == this->m_explicit_A->getViewInterior() ) { num_rows_per_color = A.getMatrixColoring().getSeparationOffsetsRowsPerColor()[i]; } else { num_rows_per_color = A.getMatrixColoring().getOffsetsRowsPerColor()[i + 1]; } num_rows_per_color -= color_offset; if ( num_rows_per_color == 0 ) { continue; } if ( i == num_colors - 1 ) { const int NUM_ROWS_PER_CTA = CTA_SIZE / A.get_block_dimy(); const int GRID_SIZE = std::min( 4096, (num_rows_per_color + NUM_ROWS_PER_CTA - 1) / NUM_ROWS_PER_CTA ); switch ( A.get_block_dimy() ) { case 1: DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 1, CTA_SIZE> <<< GRID_SIZE, CTA_SIZE>>>( x.raw(), this->weight, A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, this->m_delta.raw(), this->m_Delta.raw(), num_rows_per_color ); break; case 2: DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 2, CTA_SIZE> <<< GRID_SIZE, CTA_SIZE>>>( x.raw(), this->weight, A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, this->m_delta.raw(), this->m_Delta.raw(), num_rows_per_color ); break; case 3: DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 3, CTA_SIZE> <<< GRID_SIZE, CTA_SIZE>>>( x.raw(), this->weight, A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, this->m_delta.raw(), this->m_Delta.raw(), num_rows_per_color ); break; case 4: 
DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 4, CTA_SIZE> <<< GRID_SIZE, CTA_SIZE>>>( x.raw(), this->weight, A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, this->m_delta.raw(), this->m_Delta.raw(), num_rows_per_color ); break; case 5: DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 5, CTA_SIZE> <<< GRID_SIZE, CTA_SIZE>>>( x.raw(), this->weight, A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, this->m_delta.raw(), this->m_Delta.raw(), num_rows_per_color ); break; case 8: DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 8, CTA_SIZE> <<< GRID_SIZE, CTA_SIZE>>>( x.raw(), this->weight, A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, this->m_delta.raw(), this->m_Delta.raw(), num_rows_per_color ); break; case 10: DILU_backward_NxN_kernel_skip<ValueTypeA, ValueTypeB, WeightType, 10, CTA_SIZE> <<< GRID_SIZE, CTA_SIZE>>>( x.raw(), this->weight, A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, this->m_delta.raw(), this->m_Delta.raw(), num_rows_per_color ); break; } cudaCheckError(); } else { DILU_backward_NxN_dispatch( A.row_offsets.raw(), A.col_indices.raw(), A.values.raw(), x.raw(), this->weight, A.getMatrixColoring().getSortedRowsByColor().raw() + color_offset, A.getMatrixColoring().getRowColors().raw(), this->Einv.raw(), this->m_delta.raw(), this->m_Delta.raw(), num_rows_per_color, i, this->m_boundary_coloring, separation, A.get_block_dimy(), A.getBlockFormat() == ROW_MAJOR ); cudaCheckError(); } } } /**************************************** * Explict instantiations ***************************************/ #define AMGX_CASE_LINE(CASE) template class MulticolorDILUSolver_Base<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) // AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE #define AMGX_CASE_LINE(CASE) template class MulticolorDILUSolver<TemplateMode<CASE>::Type>; AMGX_FORALL_BUILDS(AMGX_CASE_LINE) // AMGX_FORCOMPLEX_BUILDS(AMGX_CASE_LINE) #undef AMGX_CASE_LINE } } // namespace amgx
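//
// The dispatch functions above all follow the same pattern: pick the kernel
// specialization matching the block size, size a grid that covers the rows of the
// current color, and launch. As a rough illustration of that per-color sweep structure
// for the scalar (1x1 block) case only -- not the exact DILU update implemented by the
// kernels above, which also handle NxN blocks, boundary/halo coloring and
// warp-cooperative loads -- a simplified sketch could look like this (all names are
// illustrative):
template< typename Value_type >
__global__ void simple_color_sweep_1x1( const int *row_offsets,
                                        const int *col_indices,
                                        const Value_type *values,
                                        const Value_type *diag_inv,
                                        const Value_type *b,
                                        const Value_type *x,
                                        const int *sorted_rows_by_color,
                                        const int *row_colors,
                                        int num_rows_per_color,
                                        int current_color,
                                        Value_type *delta )
{
    int item = blockIdx.x * blockDim.x + threadIdx.x;

    if ( item >= num_rows_per_color )
    {
        return;
    }

    int row = sorted_rows_by_color[item];
    Value_type r = b[row];

    for ( int p = row_offsets[row]; p < row_offsets[row + 1]; ++p )
    {
        int col = col_indices[p];
        Value_type x_col = x[col];

        // Rows of colors already processed in this sweep contribute their update too.
        if ( row_colors[col] < current_color )
        {
            x_col = x_col + delta[col];
        }

        r = r - values[p] * x_col;
    }

    // r is now the residual of this row against the (partially updated) iterate;
    // scale it by a precomputed inverse diagonal, analogous to the Einv factor
    // used by the kernels above.
    delta[row] = diag_inv[row] * r;
}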
6e5e31a03812c0a22d99820815044bfa98074a00.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /***************************************** Emitting C Generated Code *******************************************/ #include <string.h> #include <stdlib.h> #include "cuda_header.h" #include <stdio.h> #include <stdint.h> #include <stdbool.h> /************* Functions **************/ __global__ void x13(int* x14, int* x15, int x16, int x17) { // Cuda Coalesced Transpose // arg0: 2D Input Matrix (n x m) // arg1: 2D Output Transposed Matrix (m x n) // arg2: number of rows for input matrix // arg3: number of columns for input matrix // kernel launch config <<dim3((TILE_DIM * m - 1) / TILE_DIM, (TILE_DIM * n - 1) / TILE_DIM), dim3(TILE_DIM, BLOCK_ROWS)>> // TILE_DIM = 32, BLOCK_ROWS = 8 __shared__ int x18[1056]; int x19 = blockIdx.x * 32 + threadIdx.x; int x20 = blockIdx.y * 32 + threadIdx.y; int x21 = 0; while (x21 < 32) { int x22 = x21; if (x19 < x17 && x20 < x16) x18[33 * (threadIdx.y + x22) + threadIdx.x] = x14[x20 * x17 + x19]; x20 = x20 + 8; x21 = x21 + 8; } __syncthreads(); x19 = blockIdx.y * 32 + threadIdx.x; x20 = blockIdx.x * 32 + threadIdx.y; int x23 = 0; while (x23 < 32) { int x24 = x23; if (x19 < x16 && x20 < x17) x15[x20 * x16 + x19] = x18[33 * threadIdx.x + (threadIdx.y + x24)]; x20 = x20 + 8; x23 = x23 + 8; } } /**************** Snippet ****************/ void Snippet(int x0) { int* x1 = (int*)malloc(11928 * sizeof(int)); int* x2 = (int*)malloc(11928 * sizeof(int)); int* x3 = (int*)malloc(11928 * sizeof(int)); int x4 = 0; while (x4 != 11928) { int x5 = x4; x1[x5] = x5; x4 = x4 + 1; } int x6 = 0; while (x6 != 56) { int x7 = x6; int x8 = 0; int x9 = x7 * 213; while (x8 != 213) { int x10 = x8; x3[x9 + x10] = x1[x10 * 56 + x7]; x8 = x8 + 1; } x6 = x6 + 1; } int* x11 = (int*)malloc(0 * sizeof(int)); CUDA_CALL(hipMalloc(&x11, (size_t)(11928 * sizeof(int)))); int* x12 = (int*)malloc(0 * sizeof(int)); CUDA_CALL(hipMalloc(&x12, (size_t)(11928 * sizeof(int)))); CUDA_CALL(hipMemcpy(x11, x1, (size_t)(11928 * sizeof(int)), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( x13), dim3(dim3(2, 7, 1)), dim3(dim3(32, 8, 1)), 0, 0, x11, x12, 213, 56); CUDA_CALL(hipMemcpy(x2, x12, (size_t)(11928 * sizeof(int)), hipMemcpyDeviceToHost)); int x25 = 0; while (x25 != 11928) { int x26 = x25; if (x3[x26] != x2[x26]) { printf("Transpose Incorrect!\n"); fflush(stdout); fflush(stderr); exit(1); } x25 = x25 + 1; } printf("Transpose Correct\n"); } /***************************************** End of C Generated Code *******************************************/ int main(int argc, char *argv[]) { if (argc != 2) { printf("usage: %s <arg>\n", argv[0]); return 0; } Snippet(atoi(argv[1])); return 0; }
6e5e31a03812c0a22d99820815044bfa98074a00.cu
/***************************************** Emitting C Generated Code *******************************************/ #include <string.h> #include <stdlib.h> #include "cuda_header.h" #include <stdio.h> #include <stdint.h> #include <stdbool.h> /************* Functions **************/ __global__ void x13(int* x14, int* x15, int x16, int x17) { // Cuda Coalesced Transpose // arg0: 2D Input Matrix (n x m) // arg1: 2D Output Transposed Matrix (m x n) // arg2: number of rows for input matrix // arg3: number of columns for input matrix // kernel launch config <<dim3((TILE_DIM * m - 1) / TILE_DIM, (TILE_DIM * n - 1) / TILE_DIM), dim3(TILE_DIM, BLOCK_ROWS)>> // TILE_DIM = 32, BLOCK_ROWS = 8 __shared__ int x18[1056]; int x19 = blockIdx.x * 32 + threadIdx.x; int x20 = blockIdx.y * 32 + threadIdx.y; int x21 = 0; while (x21 < 32) { int x22 = x21; if (x19 < x17 && x20 < x16) x18[33 * (threadIdx.y + x22) + threadIdx.x] = x14[x20 * x17 + x19]; x20 = x20 + 8; x21 = x21 + 8; } __syncthreads(); x19 = blockIdx.y * 32 + threadIdx.x; x20 = blockIdx.x * 32 + threadIdx.y; int x23 = 0; while (x23 < 32) { int x24 = x23; if (x19 < x16 && x20 < x17) x15[x20 * x16 + x19] = x18[33 * threadIdx.x + (threadIdx.y + x24)]; x20 = x20 + 8; x23 = x23 + 8; } } /**************** Snippet ****************/ void Snippet(int x0) { int* x1 = (int*)malloc(11928 * sizeof(int)); int* x2 = (int*)malloc(11928 * sizeof(int)); int* x3 = (int*)malloc(11928 * sizeof(int)); int x4 = 0; while (x4 != 11928) { int x5 = x4; x1[x5] = x5; x4 = x4 + 1; } int x6 = 0; while (x6 != 56) { int x7 = x6; int x8 = 0; int x9 = x7 * 213; while (x8 != 213) { int x10 = x8; x3[x9 + x10] = x1[x10 * 56 + x7]; x8 = x8 + 1; } x6 = x6 + 1; } int* x11 = (int*)malloc(0 * sizeof(int)); CUDA_CALL(cudaMalloc(&x11, (size_t)(11928 * sizeof(int)))); int* x12 = (int*)malloc(0 * sizeof(int)); CUDA_CALL(cudaMalloc(&x12, (size_t)(11928 * sizeof(int)))); CUDA_CALL(cudaMemcpy(x11, x1, (size_t)(11928 * sizeof(int)), cudaMemcpyHostToDevice)); x13<<<dim3(2, 7, 1), dim3(32, 8, 1)>>>(x11, x12, 213, 56); CUDA_CALL(cudaMemcpy(x2, x12, (size_t)(11928 * sizeof(int)), cudaMemcpyDeviceToHost)); int x25 = 0; while (x25 != 11928) { int x26 = x25; if (x3[x26] != x2[x26]) { printf("Transpose Incorrect!\n"); fflush(stdout); fflush(stderr); exit(1); } x25 = x25 + 1; } printf("Transpose Correct\n"); } /***************************************** End of C Generated Code *******************************************/ int main(int argc, char *argv[]) { if (argc != 2) { printf("usage: %s <arg>\n", argv[0]); return 0; } Snippet(atoi(argv[1])); return 0; }
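/* A small host-side note on the launch configuration used in Snippet() above: the
   kernel works on 32x32 tiles (TILE_DIM = 32) staged through shared memory, and each
   tile is stored with a stride of 33 ints (33 * 32 = 1056) so that the column-wise
   reads during the transposed write fall into different shared-memory banks. The grid
   therefore needs one block per 32x32 tile of the n x m input. A minimal sketch of
   that computation (the helper name is illustrative, not part of the generated code): */
static inline dim3 transpose_grid(int n, int m) {
  const int TILE_DIM = 32;
  // round up so partial tiles at the right/bottom edges are still covered
  return dim3((m + TILE_DIM - 1) / TILE_DIM, (n + TILE_DIM - 1) / TILE_DIM, 1);
}
/* For the 213 x 56 matrix used in Snippet(), transpose_grid(213, 56) gives
   dim3(2, 7, 1), matching the hard-coded launch configuration above. */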
project_3(1).hip
// !!! This is a file automatically generated by hipify!!! /* The data set that we are using has 4 attributes but we are using only 2 attributes. Those 2 attributes are 1)Study Time 2) Exam Performance. These 2 attributes will be used to calculate the student's "KnowledgeLevel" KnowledgeLevel can be High or Low in our program but in the data set "KnowledgeLevel" has High, Low and Middle. We will represent High as 1 and low as 0. For now we will consider Middle as High so it will be 1. */ #include<stdio.h> #include<math.h> #include<string.h> #include<stdlib.h> #include <hip/hip_runtime.h> void readDataset(); /*<--- This function reads data set into below arrays.*/ void displayDataset();/*<--- This function will display our data set*/ __global__ void startClustering(float*,float*,float*,int,int,int,int,int,int,int,int,int,int,struct cluster*, struct cluster*); void remapSeedValues(); void displayClusters(); int highCluster = 1, lowCluster = 1; float study_time[260]/*<--- attribute number: 1*/, examPerformance[260]/*<--- attribute number: 2*/, targetAnswers[260]/*<---- real knowledge level: 3*/; float ourAnswer[258]/*<--- this will store our answer for knowledge level 1 = High, 0 = Low*/; /*sample seed value to create 2 clusters or knowledge levels*/ /*These seed values will change during different iterations*/ float svLX = 0.2, svLY = 0.2, svHX = 0.78, svHY = 0.78; float psvLX, psvLY, psvHX, psvHY; float seedDistance = 0.0; /*examPerformance is on Y-axis and study_time is on X-axis*/ struct cluster { float study_time; float exam_performance; float target_value; }cluster_one[260], cluster_two[250]; int cluster_one_index = 0, cluster_two_index = 0; __global__ void startClustering(float *study_time, float *examPerformance, float *targetAnswers,int N, int svHX, int svHY, int svLX, int svLY, int seedDistance, int lowCluster, int highCluster, int cluster_one_index, int cluster_two_index, struct cluster *cluster_one, struct cluster *cluster_two) { int idx = blockIdx.x * blockDim.x + threadIdx.x; float distance_1, distance_2; distance_1 = (svHX-study_time[idx]) + (svHY-examPerformance[idx]); distance_2 = (svLX-study_time[idx]) + (svLY-examPerformance[idx]); if((distance_1 <= seedDistance) && (highCluster==1)) { cluster_one[idx].study_time = study_time[idx]; cluster_one[idx].exam_performance = examPerformance[idx]; cluster_one[idx].target_value = targetAnswers[idx]; } else if((distance_2 <= seedDistance) && (lowCluster==1)) { cluster_two[idx].study_time = study_time[idx]; cluster_two[idx].exam_performance = examPerformance[idx]; cluster_two[idx].target_value = targetAnswers[idx]; } } float calculateDistanceSerial(float x1, float x2, float y1,float y2) { return sqrt(((x2-x1)*(x2-x1))+((y2-y1)*(y2-y1))); } void startParallelProcessing() { float *sT, *eP, *tA; struct cluster *c1, *c2; int N = 260; size_t size = N * sizeof(float); hipMalloc((void **) &sT, size); hipMalloc((void **) &eP, size); hipMalloc((void **) &tA, size); hipMemcpy(sT, study_time, size, hipMemcpyHostToDevice); hipMemcpy(eP, examPerformance, size, hipMemcpyHostToDevice); hipMemcpy(tA, targetAnswers, size, hipMemcpyHostToDevice); size = N * sizeof(struct cluster); hipMalloc((void **) &c1, size); hipMalloc((void **) &c2, size); hipMemcpy(c1, cluster_one, size, hipMemcpyHostToDevice); hipMemcpy(c2, cluster_two, size, hipMemcpyHostToDevice); int block_size = 1; int n_blocks = N/block_size + (N%block_size == 0 ? 
0:1);
    hipLaunchKernelGGL(( startClustering) , dim3(n_blocks), dim3(block_size) , 0, 0, sT, eP, tA, N, svHX, svHY, svLX, svLY, seedDistance, lowCluster, highCluster, cluster_one_index, cluster_two_index, c1, c2);
    remapSeedValues();
    if(psvHX == svHX && psvHY == svHY)
    {
        printf("\nHigh Knowledge Cluster Seed Value Achieved Successfully.\n");
        highCluster = 0;
    }
    if(psvLX == svLX && psvLY == svLY)
    {
        printf("\nLow Knowledge Cluster Seed Value Achieved Successfully.\n");
        lowCluster = 0;
    }
    if(lowCluster==1 || highCluster==1)
    {
        hipLaunchKernelGGL(( startClustering) , dim3(n_blocks), dim3(block_size) , 0, 0, sT, eP, tA, N, svHX, svHY, svLX, svLY, seedDistance, lowCluster, highCluster, cluster_one_index, cluster_two_index, c1, c2);
    }
    hipMemcpy(cluster_one, c1, sizeof(struct cluster)*N, hipMemcpyDeviceToHost);
    hipMemcpy(cluster_two, c2, sizeof(struct cluster)*N, hipMemcpyDeviceToHost);
    hipFree(sT);
    hipFree(eP);
    hipFree(tA);
    hipFree(c1);
    hipFree(c2);
}

int main()
{
    //freopen("output.txt","w",stdout);
    readDataset();
    psvLX = svLX;
    psvLY = svLY;
    psvHX = svHX;
    psvHY = svHY;
    seedDistance = calculateDistanceSerial(svLX,svHX,svLY,svHY);
    displayDataset();
    startParallelProcessing();
    displayClusters();
    return 0;
}

void remapSeedValues()
{
    int counter, count = 0;
    float StudyTime = 0, ExamPerformance = 0;
    psvHX = svHX;
    psvHY = svHY;
    psvLX = svLX;
    psvLY = svLY;
    for(counter=2;counter<260;counter++)
    {
        if(cluster_one[counter].study_time==0 && cluster_one[counter].exam_performance==0)
        {
            break;
        }
        count++;
        StudyTime = cluster_one[counter].study_time + StudyTime;
        ExamPerformance = cluster_one[counter].exam_performance + ExamPerformance;
    }
    svHX = StudyTime/count;
    svHY = ExamPerformance/count;
    ExamPerformance = StudyTime = 0;
    count = 0;
    for(counter=2;counter<260;counter++)
    {
        if(cluster_two[counter].study_time==0 && cluster_two[counter].exam_performance==0)
        {
            break;
        }
        count++;
        StudyTime = cluster_two[counter].study_time + StudyTime;
        ExamPerformance = cluster_two[counter].exam_performance + ExamPerformance;
    }
    svLX = StudyTime/count;
    svLY = ExamPerformance/count;
    printf("\nPrevious Low X: %f\tNew Low X: %f\nPrevious Low Y: %f\tNew Low Y: %f\n",psvLX,svLX,psvLY,svLY);
    printf("\nPrevious High X: %f\tNew High X: %f\nPrevious High Y: %f\tNew High Y: %f\n",psvHX,svHX,psvHY,svHY);
    seedDistance = calculateDistanceSerial(svLX,svHX,svLY,svHY);
    /*count = 0;
    for(counter=2;counter<260;counter++)
    {
        if(cluster_three[counter].study_time==0 && cluster_three[counter].examPerformance==0)
        {
            break;
        }
        count++;
        StudyTime = cluster_one[counter].study_time + StudyTime;
        ExamPerformance = cluster_one[counter].examPerformance + ExamPerformance;
    }*/
}

void readDataset()
{
    FILE *ptr_file;
    char buf[1000];
    ptr_file =fopen("input.txt","r");
    if (!ptr_file)
    {
        printf("Requested Input File Not Found :(");
        return;
    }
    int studyTimeIndex = 0, examPerformanceIndex = 0, targetAnswersIndex = 0;
    while (fgets(buf,1000, ptr_file)!=NULL)
    {
        int counter;
        char *p = strtok(buf,"\t");
        for(counter = 0;counter<=5 && p!=NULL;counter++)
        {
            if(p!=NULL)
            {
                switch(counter)
                {
                    case 0:
                        study_time[studyTimeIndex++] = atof(p);
                        break;
                    case 4:
                        examPerformance[examPerformanceIndex++] = atof(p);
                        break;
                    case 5:
                        if(strcmp(p,"High")==0 || strcmp(p,"Middle")==0)
                        {
                            targetAnswers[targetAnswersIndex++] = 1;
                        }
                        else
                        {
                            targetAnswers[targetAnswersIndex++] = 0;
                        }
                        break;
                    default:
                        break;
                }
            }
            p = strtok(NULL,"\t");
        }
    }
    fclose(ptr_file);
}

void displayDataset()
{
    printf("\n\nDisplaying Dataset Entries\n\nStudy Time\tExam Performance\tKnowledge Level\n\n");
    int counter = 0;
    for(counter = 0;counter<259;counter++)
    {
printf("%f\t%f\t\t%f\n",study_time[counter],examPerformance[counter],targetAnswers[counter]); } printf("\n\nEnd of Displaying Dataset\n\n"); } void displayClusters() { int counter, total_ones = 0, total_zeroes = 0, total = 0; printf("\n\nDisplaying Cluster of Users with High KnowledgeLevel: \nStudy Time\tExam Performance\tTarget Cluster (1: High, 0: Low)\n\n"); for(counter = 2;counter<260;counter++) { if(cluster_one[counter].study_time==0 && cluster_one[counter].exam_performance==0) { break; } if(cluster_one[counter].target_value==1.0) { total_ones++; } if(cluster_one[counter].target_value==0.0) { total_zeroes++; } total++; printf("%f\t%f\t\t%f\n",cluster_one[counter].study_time, cluster_one[counter].exam_performance, cluster_one[counter].target_value); } printf("\n\nTotal Entries: %d\tLow Entried: %d\tHigh Entries: %d\n",total,total_zeroes,total_ones); printf("-------------------------------------------------------------------------------"); printf("\n\nDisplaying Cluster of Users with Low KnowledgeLevel: \nStudy Time\tExam Performance\tTarget Cluster (1: High, 0: Low)\n\n"); total = total_zeroes = total_ones = 0; for(counter = 0;counter<260;counter++) { if(cluster_two[counter].study_time==0 && cluster_two[counter].exam_performance==0) { break; } if(cluster_two[counter].target_value==1.0) { total_ones++; } if(cluster_two[counter].target_value==0.0) { total_zeroes++; } total++; printf("%f\t%f\t\t%f\n",cluster_two[counter].study_time, cluster_two[counter].exam_performance, cluster_two[counter].target_value); } printf("\n\nTotal Entries: %d\tLow Entried: %d\tHigh Entries: %d\n",total,total_zeroes,total_ones); printf("--------------------------------------------------------------------------------"); }
project_3(1).cu
/* The data set that we are using has 4 attributes but we are using only 2 attributes. Those 2 attributes are 1)Study Time 2) Exam Performance. These 2 attributes will be used to calculate the student's "KnowledgeLevel" KnowledgeLevel can be High or Low in our program but in the data set "KnowledgeLevel" has High, Low and Middle. We will represent High as 1 and low as 0. For now we will consider Middle as High so it will be 1. */ #include<stdio.h> #include<math.h> #include<string.h> #include<stdlib.h> #include <cuda.h> void readDataset(); /*<--- This function reads data set into below arrays.*/ void displayDataset();/*<--- This function will display our data set*/ __global__ void startClustering(float*,float*,float*,int,int,int,int,int,int,int,int,int,int,struct cluster*, struct cluster*); void remapSeedValues(); void displayClusters(); int highCluster = 1, lowCluster = 1; float study_time[260]/*<--- attribute number: 1*/, examPerformance[260]/*<--- attribute number: 2*/, targetAnswers[260]/*<---- real knowledge level: 3*/; float ourAnswer[258]/*<--- this will store our answer for knowledge level 1 = High, 0 = Low*/; /*sample seed value to create 2 clusters or knowledge levels*/ /*These seed values will change during different iterations*/ float svLX = 0.2, svLY = 0.2, svHX = 0.78, svHY = 0.78; float psvLX, psvLY, psvHX, psvHY; float seedDistance = 0.0; /*examPerformance is on Y-axis and study_time is on X-axis*/ struct cluster { float study_time; float exam_performance; float target_value; }cluster_one[260], cluster_two[250]; int cluster_one_index = 0, cluster_two_index = 0; __global__ void startClustering(float *study_time, float *examPerformance, float *targetAnswers,int N, int svHX, int svHY, int svLX, int svLY, int seedDistance, int lowCluster, int highCluster, int cluster_one_index, int cluster_two_index, struct cluster *cluster_one, struct cluster *cluster_two) { int idx = blockIdx.x * blockDim.x + threadIdx.x; float distance_1, distance_2; distance_1 = (svHX-study_time[idx]) + (svHY-examPerformance[idx]); distance_2 = (svLX-study_time[idx]) + (svLY-examPerformance[idx]); if((distance_1 <= seedDistance) && (highCluster==1)) { cluster_one[idx].study_time = study_time[idx]; cluster_one[idx].exam_performance = examPerformance[idx]; cluster_one[idx].target_value = targetAnswers[idx]; } else if((distance_2 <= seedDistance) && (lowCluster==1)) { cluster_two[idx].study_time = study_time[idx]; cluster_two[idx].exam_performance = examPerformance[idx]; cluster_two[idx].target_value = targetAnswers[idx]; } } float calculateDistanceSerial(float x1, float x2, float y1,float y2) { return sqrt(((x2-x1)*(x2-x1))+((y2-y1)*(y2-y1))); } void startParallelProcessing() { float *sT, *eP, *tA; struct cluster *c1, *c2; int N = 260; size_t size = N * sizeof(float); cudaMalloc((void **) &sT, size); cudaMalloc((void **) &eP, size); cudaMalloc((void **) &tA, size); cudaMemcpy(sT, study_time, size, cudaMemcpyHostToDevice); cudaMemcpy(eP, examPerformance, size, cudaMemcpyHostToDevice); cudaMemcpy(tA, targetAnswers, size, cudaMemcpyHostToDevice); size = N * sizeof(struct cluster); cudaMalloc((void **) &c1, size); cudaMalloc((void **) &c2, size); cudaMemcpy(c1, cluster_one, size, cudaMemcpyHostToDevice); cudaMemcpy(c2, cluster_two, size, cudaMemcpyHostToDevice); int block_size = 1; int n_blocks = N/block_size + (N%block_size == 0 ? 
0:1);
    startClustering <<< n_blocks, block_size >>> (sT, eP, tA, N, svHX, svHY, svLX, svLY, seedDistance, lowCluster, highCluster, cluster_one_index, cluster_two_index, c1, c2);
    remapSeedValues();
    if(psvHX == svHX && psvHY == svHY)
    {
        printf("\nHigh Knowledge Cluster Seed Value Achieved Successfully.\n");
        highCluster = 0;
    }
    if(psvLX == svLX && psvLY == svLY)
    {
        printf("\nLow Knowledge Cluster Seed Value Achieved Successfully.\n");
        lowCluster = 0;
    }
    if(lowCluster==1 || highCluster==1)
    {
        startClustering <<< n_blocks, block_size >>> (sT, eP, tA, N, svHX, svHY, svLX, svLY, seedDistance, lowCluster, highCluster, cluster_one_index, cluster_two_index, c1, c2);
    }
    cudaMemcpy(cluster_one, c1, sizeof(struct cluster)*N, cudaMemcpyDeviceToHost);
    cudaMemcpy(cluster_two, c2, sizeof(struct cluster)*N, cudaMemcpyDeviceToHost);
    cudaFree(sT);
    cudaFree(eP);
    cudaFree(tA);
    cudaFree(c1);
    cudaFree(c2);
}

int main()
{
    //freopen("output.txt","w",stdout);
    readDataset();
    psvLX = svLX;
    psvLY = svLY;
    psvHX = svHX;
    psvHY = svHY;
    seedDistance = calculateDistanceSerial(svLX,svHX,svLY,svHY);
    displayDataset();
    startParallelProcessing();
    displayClusters();
    return 0;
}

void remapSeedValues()
{
    int counter, count = 0;
    float StudyTime = 0, ExamPerformance = 0;
    psvHX = svHX;
    psvHY = svHY;
    psvLX = svLX;
    psvLY = svLY;
    for(counter=2;counter<260;counter++)
    {
        if(cluster_one[counter].study_time==0 && cluster_one[counter].exam_performance==0)
        {
            break;
        }
        count++;
        StudyTime = cluster_one[counter].study_time + StudyTime;
        ExamPerformance = cluster_one[counter].exam_performance + ExamPerformance;
    }
    svHX = StudyTime/count;
    svHY = ExamPerformance/count;
    ExamPerformance = StudyTime = 0;
    count = 0;
    for(counter=2;counter<260;counter++)
    {
        if(cluster_two[counter].study_time==0 && cluster_two[counter].exam_performance==0)
        {
            break;
        }
        count++;
        StudyTime = cluster_two[counter].study_time + StudyTime;
        ExamPerformance = cluster_two[counter].exam_performance + ExamPerformance;
    }
    svLX = StudyTime/count;
    svLY = ExamPerformance/count;
    printf("\nPrevious Low X: %f\tNew Low X: %f\nPrevious Low Y: %f\tNew Low Y: %f\n",psvLX,svLX,psvLY,svLY);
    printf("\nPrevious High X: %f\tNew High X: %f\nPrevious High Y: %f\tNew High Y: %f\n",psvHX,svHX,psvHY,svHY);
    seedDistance = calculateDistanceSerial(svLX,svHX,svLY,svHY);
    /*count = 0;
    for(counter=2;counter<260;counter++)
    {
        if(cluster_three[counter].study_time==0 && cluster_three[counter].examPerformance==0)
        {
            break;
        }
        count++;
        StudyTime = cluster_one[counter].study_time + StudyTime;
        ExamPerformance = cluster_one[counter].examPerformance + ExamPerformance;
    }*/
}

void readDataset()
{
    FILE *ptr_file;
    char buf[1000];
    ptr_file =fopen("input.txt","r");
    if (!ptr_file)
    {
        printf("Requested Input File Not Found :(");
        return;
    }
    int studyTimeIndex = 0, examPerformanceIndex = 0, targetAnswersIndex = 0;
    while (fgets(buf,1000, ptr_file)!=NULL)
    {
        int counter;
        char *p = strtok(buf,"\t");
        for(counter = 0;counter<=5 && p!=NULL;counter++)
        {
            if(p!=NULL)
            {
                switch(counter)
                {
                    case 0:
                        study_time[studyTimeIndex++] = atof(p);
                        break;
                    case 4:
                        examPerformance[examPerformanceIndex++] = atof(p);
                        break;
                    case 5:
                        if(strcmp(p,"High")==0 || strcmp(p,"Middle")==0)
                        {
                            targetAnswers[targetAnswersIndex++] = 1;
                        }
                        else
                        {
                            targetAnswers[targetAnswersIndex++] = 0;
                        }
                        break;
                    default:
                        break;
                }
            }
            p = strtok(NULL,"\t");
        }
    }
    fclose(ptr_file);
}

void displayDataset()
{
    printf("\n\nDisplaying Dataset Entries\n\nStudy Time\tExam Performance\tKnowledge Level\n\n");
    int counter = 0;
    for(counter = 0;counter<259;counter++)
    {
printf("%f\t%f\t\t%f\n",study_time[counter],examPerformance[counter],targetAnswers[counter]); } printf("\n\nEnd of Displaying Dataset\n\n"); } void displayClusters() { int counter, total_ones = 0, total_zeroes = 0, total = 0; printf("\n\nDisplaying Cluster of Users with High KnowledgeLevel: \nStudy Time\tExam Performance\tTarget Cluster (1: High, 0: Low)\n\n"); for(counter = 2;counter<260;counter++) { if(cluster_one[counter].study_time==0 && cluster_one[counter].exam_performance==0) { break; } if(cluster_one[counter].target_value==1.0) { total_ones++; } if(cluster_one[counter].target_value==0.0) { total_zeroes++; } total++; printf("%f\t%f\t\t%f\n",cluster_one[counter].study_time, cluster_one[counter].exam_performance, cluster_one[counter].target_value); } printf("\n\nTotal Entries: %d\tLow Entried: %d\tHigh Entries: %d\n",total,total_zeroes,total_ones); printf("-------------------------------------------------------------------------------"); printf("\n\nDisplaying Cluster of Users with Low KnowledgeLevel: \nStudy Time\tExam Performance\tTarget Cluster (1: High, 0: Low)\n\n"); total = total_zeroes = total_ones = 0; for(counter = 0;counter<260;counter++) { if(cluster_two[counter].study_time==0 && cluster_two[counter].exam_performance==0) { break; } if(cluster_two[counter].target_value==1.0) { total_ones++; } if(cluster_two[counter].target_value==0.0) { total_zeroes++; } total++; printf("%f\t%f\t\t%f\n",cluster_two[counter].study_time, cluster_two[counter].exam_performance, cluster_two[counter].target_value); } printf("\n\nTotal Entries: %d\tLow Entried: %d\tHigh Entries: %d\n",total,total_zeroes,total_ones); printf("--------------------------------------------------------------------------------"); }
30c3dceb048a1966a9c92cfa108c98532591d043.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <image/image.h> #include <image/tga.h> #include "../../CudaBench.h" #define TILE_SIZE 128 #define PROCESSED_SIZE 32 #define MAX_RADIUS ((TILE_SIZE - PROCESSED_SIZE - 1) / 2) __global__ void erosion2(uint8_t *inData, uint8_t *outData, int radiusX, int radiusY, int width, int height) { __shared__ uint8_t localData[TILE_SIZE * TILE_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int gx = blockIdx.x * blockDim.x; int gy = blockIdx.y * blockDim.y; localData[TILE_SIZE * (radiusY + ty) + radiusX + tx] = inData[width * (gy + ty) + gx + tx]; int x1 = tx, y1 = ty, x2 = tx, y2 = ty; if (tx == 0) { x1 = max(0, gx - radiusX) - gx; } else if (tx == blockDim.x - 1) { x2 = min(width - 1 - gx, blockDim.x + radiusX - 1); } if (ty == 0) { y1 = max(0, gy - radiusY) - gy; } else if (ty == blockDim.y - 1) { y2 = min(height - 1 - gy, blockDim.y + radiusY - 1); } __syncthreads(); for (int y = y1; y <= y2; ++y) { for (int x = x1; x <= x2; ++x) { localData[TILE_SIZE * (radiusY + y) + radiusX + x] = inData[width * (gy + y) + gx + x]; } } __syncthreads(); x1 = tx - radiusX; x2 = tx + radiusX; y1 = ty - radiusY; y2 = ty + radiusY; if (gx + x1 < 0) { x1 = 0; } else if (gx + x2 >= width) { x2 = width - gx - 1; } if (gy + y1 < 0) { y1 = 0; } else if (gy + y2 >= height) { y2 = height - gy - 1; } uint8_t minimum = 255; for (int y = y1; y <= y2; ++y) { for (int x = x1; x <= x2; ++x) { minimum = min(minimum, localData[TILE_SIZE * (radiusY + y) + radiusX + x]); } } outData[width * (gy + ty) + gx + tx] = minimum; } __global__ void erosion(uint8_t *inData, uint8_t *outData, int radiusX, int radiusY, int width, int height) { int gx = blockIdx.x * blockDim.x + threadIdx.x; int gy = blockIdx.y * blockDim.y + threadIdx.y; int x1 = gx - radiusX; int x2 = gx + radiusX; int y1 = gy - radiusY; int y2 = gy + radiusY; if (x1 < 0) { x1 = 0; } else if (x2 >= width) { x2 = width - 1; } if (y1 < 0) { y1 = 0; } else if (y2 >= height) { y2 = height - 1; } uint8_t minimum = 255; for (int y = y1; y <= y2; ++y) { for (int x = x1; x <= x2; ++x) { minimum = min(minimum, inData[width * y + x]); } } outData[width * gy + gx] = minimum; } int main(int argc, char *argv[]) { if (argc != 5) { printf("Invalid number of arguments.\n"); return 1; } Image *input_image; Image *output_image; int error; if ((error = TGA_readImage(argv[1], &input_image)) != 0) { printf("Error when opening image: %d\n", error); return 1; } int rx = atoi(argv[2]); int ry = atoi(argv[3]); if (rx < 0 || rx >= MAX_RADIUS || ry < 0 || ry >= MAX_RADIUS) { printf("Invalid radius value.\n"); Image_delete(input_image); return 1; } if ((error = Image_copy(input_image, &output_image)) != 0) { printf("Error when copying image: %d\n", error); Image_delete(input_image); return 1; } CudaBench allBench, sendBench, retrieveBench, kernelBench; allBench = CudaBench_new(); sendBench = CudaBench_new(); retrieveBench = CudaBench_new(); kernelBench = CudaBench_new(); int c, size; uint8_t *c_inData, *c_outData; size = input_image->width * input_image->height * sizeof(uint8_t); dim3 threadsPerBlock(PROCESSED_SIZE, PROCESSED_SIZE, 1); dim3 blocks(ceil(input_image->width / PROCESSED_SIZE), ceil(input_image->height / PROCESSED_SIZE), 1); CudaBench_start(allBench); hipMalloc(&c_inData, size); hipMalloc(&c_outData, size); for (c = 0; c < input_image->channels; ++c) { CudaBench_start(sendBench); hipMemcpy(c_inData, input_image->data[c], size, 
hipMemcpyHostToDevice); CudaBench_end(sendBench); CudaBench_start(kernelBench); hipLaunchKernelGGL(( erosion), dim3(blocks), dim3(threadsPerBlock), 0, 0, c_inData, c_outData, rx, 0, input_image->width, input_image->height); hipLaunchKernelGGL(( erosion), dim3(blocks), dim3(threadsPerBlock), 0, 0, c_outData, c_inData, 0, ry, input_image->width, input_image->height); CudaBench_end(kernelBench); CudaBench_start(retrieveBench); hipMemcpy(output_image->data[c], c_inData, size, hipMemcpyDeviceToHost); CudaBench_end(retrieveBench); } hipFree(c_inData); hipFree(c_outData); CudaBench_end(allBench); hipEventSynchronize(allBench.end); float timeAll, timeSend, timeKernel, timeRetrieve; timeAll = CudaBench_elapsedTime(allBench); timeSend = CudaBench_elapsedTime(sendBench); timeRetrieve = CudaBench_elapsedTime(retrieveBench); timeKernel = CudaBench_elapsedTime(kernelBench); printf("All: %f ms\nSend: %f ms\nRetrieve: %f ms\nKernel: %f ms\n", timeAll, timeSend, timeRetrieve, timeKernel); CubaBench_delete(allBench); CubaBench_delete(sendBench); CubaBench_delete(retrieveBench); CubaBench_delete(kernelBench); if ((error = TGA_writeImage(argv[4], output_image)) != 0) { printf("Error when writing image: %d\n", error); } Image_delete(input_image); Image_delete(output_image); hipDeviceReset(); return 0; }
30c3dceb048a1966a9c92cfa108c98532591d043.cu
#include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <image/image.h> #include <image/tga.h> #include "../../CudaBench.h" #define TILE_SIZE 128 #define PROCESSED_SIZE 32 #define MAX_RADIUS ((TILE_SIZE - PROCESSED_SIZE - 1) / 2) __global__ void erosion2(uint8_t *inData, uint8_t *outData, int radiusX, int radiusY, int width, int height) { __shared__ uint8_t localData[TILE_SIZE * TILE_SIZE]; int tx = threadIdx.x; int ty = threadIdx.y; int gx = blockIdx.x * blockDim.x; int gy = blockIdx.y * blockDim.y; localData[TILE_SIZE * (radiusY + ty) + radiusX + tx] = inData[width * (gy + ty) + gx + tx]; int x1 = tx, y1 = ty, x2 = tx, y2 = ty; if (tx == 0) { x1 = max(0, gx - radiusX) - gx; } else if (tx == blockDim.x - 1) { x2 = min(width - 1 - gx, blockDim.x + radiusX - 1); } if (ty == 0) { y1 = max(0, gy - radiusY) - gy; } else if (ty == blockDim.y - 1) { y2 = min(height - 1 - gy, blockDim.y + radiusY - 1); } __syncthreads(); for (int y = y1; y <= y2; ++y) { for (int x = x1; x <= x2; ++x) { localData[TILE_SIZE * (radiusY + y) + radiusX + x] = inData[width * (gy + y) + gx + x]; } } __syncthreads(); x1 = tx - radiusX; x2 = tx + radiusX; y1 = ty - radiusY; y2 = ty + radiusY; if (gx + x1 < 0) { x1 = 0; } else if (gx + x2 >= width) { x2 = width - gx - 1; } if (gy + y1 < 0) { y1 = 0; } else if (gy + y2 >= height) { y2 = height - gy - 1; } uint8_t minimum = 255; for (int y = y1; y <= y2; ++y) { for (int x = x1; x <= x2; ++x) { minimum = min(minimum, localData[TILE_SIZE * (radiusY + y) + radiusX + x]); } } outData[width * (gy + ty) + gx + tx] = minimum; } __global__ void erosion(uint8_t *inData, uint8_t *outData, int radiusX, int radiusY, int width, int height) { int gx = blockIdx.x * blockDim.x + threadIdx.x; int gy = blockIdx.y * blockDim.y + threadIdx.y; int x1 = gx - radiusX; int x2 = gx + radiusX; int y1 = gy - radiusY; int y2 = gy + radiusY; if (x1 < 0) { x1 = 0; } else if (x2 >= width) { x2 = width - 1; } if (y1 < 0) { y1 = 0; } else if (y2 >= height) { y2 = height - 1; } uint8_t minimum = 255; for (int y = y1; y <= y2; ++y) { for (int x = x1; x <= x2; ++x) { minimum = min(minimum, inData[width * y + x]); } } outData[width * gy + gx] = minimum; } int main(int argc, char *argv[]) { if (argc != 5) { printf("Invalid number of arguments.\n"); return 1; } Image *input_image; Image *output_image; int error; if ((error = TGA_readImage(argv[1], &input_image)) != 0) { printf("Error when opening image: %d\n", error); return 1; } int rx = atoi(argv[2]); int ry = atoi(argv[3]); if (rx < 0 || rx >= MAX_RADIUS || ry < 0 || ry >= MAX_RADIUS) { printf("Invalid radius value.\n"); Image_delete(input_image); return 1; } if ((error = Image_copy(input_image, &output_image)) != 0) { printf("Error when copying image: %d\n", error); Image_delete(input_image); return 1; } CudaBench allBench, sendBench, retrieveBench, kernelBench; allBench = CudaBench_new(); sendBench = CudaBench_new(); retrieveBench = CudaBench_new(); kernelBench = CudaBench_new(); int c, size; uint8_t *c_inData, *c_outData; size = input_image->width * input_image->height * sizeof(uint8_t); dim3 threadsPerBlock(PROCESSED_SIZE, PROCESSED_SIZE, 1); dim3 blocks(ceil(input_image->width / PROCESSED_SIZE), ceil(input_image->height / PROCESSED_SIZE), 1); CudaBench_start(allBench); cudaMalloc(&c_inData, size); cudaMalloc(&c_outData, size); for (c = 0; c < input_image->channels; ++c) { CudaBench_start(sendBench); cudaMemcpy(c_inData, input_image->data[c], size, cudaMemcpyHostToDevice); CudaBench_end(sendBench); CudaBench_start(kernelBench); 
erosion<<<blocks, threadsPerBlock>>>(c_inData, c_outData, rx, 0, input_image->width, input_image->height); erosion<<<blocks, threadsPerBlock>>>(c_outData, c_inData, 0, ry, input_image->width, input_image->height); CudaBench_end(kernelBench); CudaBench_start(retrieveBench); cudaMemcpy(output_image->data[c], c_inData, size, cudaMemcpyDeviceToHost); CudaBench_end(retrieveBench); } cudaFree(c_inData); cudaFree(c_outData); CudaBench_end(allBench); cudaEventSynchronize(allBench.end); float timeAll, timeSend, timeKernel, timeRetrieve; timeAll = CudaBench_elapsedTime(allBench); timeSend = CudaBench_elapsedTime(sendBench); timeRetrieve = CudaBench_elapsedTime(retrieveBench); timeKernel = CudaBench_elapsedTime(kernelBench); printf("All: %f ms\nSend: %f ms\nRetrieve: %f ms\nKernel: %f ms\n", timeAll, timeSend, timeRetrieve, timeKernel); CubaBench_delete(allBench); CubaBench_delete(sendBench); CubaBench_delete(retrieveBench); CubaBench_delete(kernelBench); if ((error = TGA_writeImage(argv[4], output_image)) != 0) { printf("Error when writing image: %d\n", error); } Image_delete(input_image); Image_delete(output_image); cudaDeviceReset(); return 0; }
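/* The two kernel launches above perform a separable erosion: the minimum over a
   (2*rx+1) x (2*ry+1) rectangle equals a horizontal min pass followed by a vertical
   min pass, which is why the second launch reuses the output of the first as its
   input. A minimal single-pixel reference in plain C, using the same border clamping
   as the kernel (the helper name is illustrative only): */
static uint8_t eroded_pixel_ref(const uint8_t *img, int width, int height,
                                int px, int py, int rx, int ry)
{
    int x1 = px - rx < 0 ? 0 : px - rx;
    int x2 = px + rx >= width ? width - 1 : px + rx;
    int y1 = py - ry < 0 ? 0 : py - ry;
    int y2 = py + ry >= height ? height - 1 : py + ry;

    uint8_t minimum = 255;

    for (int y = y1; y <= y2; ++y) {
        for (int x = x1; x <= x2; ++x) {
            if (img[width * y + x] < minimum) {
                minimum = img[width * y + x];
            }
        }
    }

    return minimum;
}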
eb91c2b9d0da59e3ac20c3a68e16d94088ca51e4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #ifdef USE_ROCM #include "thrust/device_vector.h" #endif #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea_im2col.hpp" #include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { #ifdef USE_ROCM template<typename Dtype> __global__ void kernel_channel_max(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype maxval = -FLT_MAX; for (int c = 0; c < channels; ++c) { maxval = max(data[(n * channels + c) * spatial_dim + s], maxval); } out[index] = maxval; } } template<typename Dtype> __global__ void kernel_channel_subtract(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_max, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] -= channel_max[n * spatial_dim + s]; } } template<typename Dtype> __global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, count) { out[index] = exp(data[index]); } } template<typename Dtype> __global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* channel_sum) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype sum = 0; for (int c = 0; c < channels; ++c) { sum += data[(n * channels + c) * spatial_dim + s]; } channel_sum[index] = sum; } } template<typename Dtype> __global__ void kernel_channel_div(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_sum, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] /= channel_sum[n * spatial_dim + s]; } } template<typename Dtype> __global__ void kernel_channel_dot(const int num, const int channels, const int spatial_dim, const Dtype* data_1, const Dtype* data_2, Dtype* channel_dot) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype dot = 0; for (int c = 0; c < channels; ++c) { dot += (data_1[(n * channels + c) * spatial_dim + s] * data_2[(n * channels + c) * spatial_dim + s]); } channel_dot[index] = dot; } } #endif template<typename Dtype> void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); int count = bottom[0]->count(); int num = bottom[0]->num(); int channels = bottom[0]->channels(); int spatial_dim = bottom[0]->height() * bottom[0]->width(); if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM // CUDA backend code caffe_copy(count, bottom_data, top_data); // We need to subtract the max to avoid numerical issues, compute the exp, // and then normalize. 
// compute max // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_max<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(num * spatial_dim), CAFFE_CUDA_NUM_THREADS)(num, channels, spatial_dim, top_data, scale_data); // subtract // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_subtract<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)(count, num, channels, spatial_dim, scale_data, top_data); // exponentiate // NOLINT_NEXT_LINE(whitespace/operators) kernel_exp<Dtype> CUDA_KERNEL( CAFFE_GET_BLOCKS(num * channels * spatial_dim), CAFFE_CUDA_NUM_THREADS)(num * channels * spatial_dim, top_data, top_data); // sum after exp // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_sum<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(num * spatial_dim), CAFFE_CUDA_NUM_THREADS)(num, channels, spatial_dim, top_data, scale_data); // divide // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_div<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)(count, num, channels, spatial_dim, scale_data, top_data); #endif } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_->id()); greentea_copy<Dtype>(count, (cl_mem)bottom_data, 0, (cl_mem)top_data, 0, &ctx); viennacl::ocl::kernel &oclk_channel_max = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_max")); viennacl::ocl::enqueue( oclk_channel_max(num, channels, spatial_dim, WrapHandle((cl_mem)top_data, &ctx), WrapHandle((cl_mem)scale_data, &ctx)), ctx.get_queue()); viennacl::ocl::kernel &oclk_channel_subtract = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_subtract")); viennacl::ocl::enqueue( oclk_channel_subtract(count, num, channels, spatial_dim, WrapHandle((cl_mem)scale_data, &ctx), WrapHandle((cl_mem)top_data, &ctx)), ctx.get_queue()); viennacl::ocl::kernel &oclk_exp = program.get_kernel( CL_KERNEL_SELECT("kernel_exp")); viennacl::ocl::enqueue( oclk_exp(num * channels * spatial_dim, WrapHandle((cl_mem)top_data, &ctx), WrapHandle((cl_mem)top_data, &ctx)), ctx.get_queue()); viennacl::ocl::kernel &oclk_channel_sum = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_sum")); viennacl::ocl::enqueue( oclk_channel_sum(num, channels, spatial_dim, WrapHandle((cl_mem)top_data, &ctx), WrapHandle((cl_mem)scale_data, &ctx)), ctx.get_queue()); viennacl::ocl::kernel &oclk_channel_div = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_div")); viennacl::ocl::enqueue( oclk_channel_div(count, num, channels, spatial_dim, WrapHandle((cl_mem)scale_data, &ctx), WrapHandle((cl_mem)top_data, &ctx)), ctx.get_queue()); #endif } } template<typename Dtype> void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int count = top[0]->count(); int num = top[0]->num(); int channels = top[0]->channels(); int spatial_dim = top[0]->height() * top[0]->width(); const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); Dtype* scale_data = scale_.mutable_gpu_data(); if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM caffe_copy(top[0]->count(), top_diff, bottom_diff); // Compute inner1d(top_diff, top_data) and // subtract them from the bottom diff. 
// NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_dot<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(num * spatial_dim), CAFFE_CUDA_NUM_THREADS)(num, channels, spatial_dim, top_diff, top_data, scale_data); // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_subtract<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)(count, num, channels, spatial_dim, scale_data, bottom_diff); // elementwise multiplication caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff); #endif } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_->id()); greentea_copy<Dtype>(top[0]->count(), (cl_mem)top_diff, 0, (cl_mem)bottom_diff, 0, &ctx); viennacl::ocl::kernel &oclk_channel_dot = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_dot")); viennacl::ocl::enqueue( oclk_channel_dot(num, channels, spatial_dim, WrapHandle((cl_mem)top_diff, &ctx), WrapHandle((cl_mem)top_data, &ctx), WrapHandle((cl_mem)scale_data, &ctx)), ctx.get_queue()); viennacl::ocl::kernel &oclk_channel_subtract = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_subtract")); viennacl::ocl::enqueue( oclk_channel_subtract(count, num, channels, spatial_dim, WrapHandle((cl_mem)scale_data, &ctx), WrapHandle((cl_mem)bottom_diff, &ctx)), ctx.get_queue()); greentea_gpu_mul<Dtype>(this->device_context_->id(), top[0]->count(), (cl_mem)bottom_diff, 0, (cl_mem)top_data, 0, (cl_mem)bottom_diff, 0); #endif } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer); } // namespace caffe
eb91c2b9d0da59e3ac20c3a68e16d94088ca51e4.cu
#include <algorithm> #include <cfloat> #include <vector> #ifdef USE_CUDA #include "thrust/device_vector.h" #endif #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea_im2col.hpp" #include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { #ifdef USE_CUDA template<typename Dtype> __global__ void kernel_channel_max(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype maxval = -FLT_MAX; for (int c = 0; c < channels; ++c) { maxval = max(data[(n * channels + c) * spatial_dim + s], maxval); } out[index] = maxval; } } template<typename Dtype> __global__ void kernel_channel_subtract(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_max, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] -= channel_max[n * spatial_dim + s]; } } template<typename Dtype> __global__ void kernel_exp(const int count, const Dtype* data, Dtype* out) { CUDA_KERNEL_LOOP(index, count) { out[index] = exp(data[index]); } } template<typename Dtype> __global__ void kernel_channel_sum(const int num, const int channels, const int spatial_dim, const Dtype* data, Dtype* channel_sum) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype sum = 0; for (int c = 0; c < channels; ++c) { sum += data[(n * channels + c) * spatial_dim + s]; } channel_sum[index] = sum; } } template<typename Dtype> __global__ void kernel_channel_div(const int count, const int num, const int channels, const int spatial_dim, const Dtype* channel_sum, Dtype* data) { CUDA_KERNEL_LOOP(index, count) { int n = index / channels / spatial_dim; int s = index % spatial_dim; data[index] /= channel_sum[n * spatial_dim + s]; } } template<typename Dtype> __global__ void kernel_channel_dot(const int num, const int channels, const int spatial_dim, const Dtype* data_1, const Dtype* data_2, Dtype* channel_dot) { CUDA_KERNEL_LOOP(index, num * spatial_dim) { int n = index / spatial_dim; int s = index % spatial_dim; Dtype dot = 0; for (int c = 0; c < channels; ++c) { dot += (data_1[(n * channels + c) * spatial_dim + s] * data_2[(n * channels + c) * spatial_dim + s]); } channel_dot[index] = dot; } } #endif template<typename Dtype> void SoftmaxLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); int count = bottom[0]->count(); int num = bottom[0]->num(); int channels = bottom[0]->channels(); int spatial_dim = bottom[0]->height() * bottom[0]->width(); if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // CUDA backend code caffe_copy(count, bottom_data, top_data); // We need to subtract the max to avoid numerical issues, compute the exp, // and then normalize. 
// compute max // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_max<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(num * spatial_dim), CAFFE_CUDA_NUM_THREADS)(num, channels, spatial_dim, top_data, scale_data); // subtract // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_subtract<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)(count, num, channels, spatial_dim, scale_data, top_data); // exponentiate // NOLINT_NEXT_LINE(whitespace/operators) kernel_exp<Dtype> CUDA_KERNEL( CAFFE_GET_BLOCKS(num * channels * spatial_dim), CAFFE_CUDA_NUM_THREADS)(num * channels * spatial_dim, top_data, top_data); // sum after exp // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_sum<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(num * spatial_dim), CAFFE_CUDA_NUM_THREADS)(num, channels, spatial_dim, top_data, scale_data); // divide // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_div<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)(count, num, channels, spatial_dim, scale_data, top_data); #endif } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_->id()); greentea_copy<Dtype>(count, (cl_mem)bottom_data, 0, (cl_mem)top_data, 0, &ctx); viennacl::ocl::kernel &oclk_channel_max = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_max")); viennacl::ocl::enqueue( oclk_channel_max(num, channels, spatial_dim, WrapHandle((cl_mem)top_data, &ctx), WrapHandle((cl_mem)scale_data, &ctx)), ctx.get_queue()); viennacl::ocl::kernel &oclk_channel_subtract = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_subtract")); viennacl::ocl::enqueue( oclk_channel_subtract(count, num, channels, spatial_dim, WrapHandle((cl_mem)scale_data, &ctx), WrapHandle((cl_mem)top_data, &ctx)), ctx.get_queue()); viennacl::ocl::kernel &oclk_exp = program.get_kernel( CL_KERNEL_SELECT("kernel_exp")); viennacl::ocl::enqueue( oclk_exp(num * channels * spatial_dim, WrapHandle((cl_mem)top_data, &ctx), WrapHandle((cl_mem)top_data, &ctx)), ctx.get_queue()); viennacl::ocl::kernel &oclk_channel_sum = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_sum")); viennacl::ocl::enqueue( oclk_channel_sum(num, channels, spatial_dim, WrapHandle((cl_mem)top_data, &ctx), WrapHandle((cl_mem)scale_data, &ctx)), ctx.get_queue()); viennacl::ocl::kernel &oclk_channel_div = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_div")); viennacl::ocl::enqueue( oclk_channel_div(count, num, channels, spatial_dim, WrapHandle((cl_mem)scale_data, &ctx), WrapHandle((cl_mem)top_data, &ctx)), ctx.get_queue()); #endif } } template<typename Dtype> void SoftmaxLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int count = top[0]->count(); int num = top[0]->num(); int channels = top[0]->channels(); int spatial_dim = top[0]->height() * top[0]->width(); const Dtype* top_diff = top[0]->gpu_diff(); const Dtype* top_data = top[0]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); Dtype* scale_data = scale_.mutable_gpu_data(); if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA caffe_copy(top[0]->count(), top_diff, bottom_diff); // Compute inner1d(top_diff, top_data) and // subtract them from the bottom diff. 
// NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_dot<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(num * spatial_dim), CAFFE_CUDA_NUM_THREADS)(num, channels, spatial_dim, top_diff, top_data, scale_data); // NOLINT_NEXT_LINE(whitespace/operators) kernel_channel_subtract<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)(count, num, channels, spatial_dim, scale_data, bottom_diff); // elementwise multiplication caffe_gpu_mul<Dtype>(top[0]->count(), bottom_diff, top_data, bottom_diff); #endif } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_->id()); greentea_copy<Dtype>(top[0]->count(), (cl_mem)top_diff, 0, (cl_mem)bottom_diff, 0, &ctx); viennacl::ocl::kernel &oclk_channel_dot = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_dot")); viennacl::ocl::enqueue( oclk_channel_dot(num, channels, spatial_dim, WrapHandle((cl_mem)top_diff, &ctx), WrapHandle((cl_mem)top_data, &ctx), WrapHandle((cl_mem)scale_data, &ctx)), ctx.get_queue()); viennacl::ocl::kernel &oclk_channel_subtract = program.get_kernel( CL_KERNEL_SELECT("kernel_channel_subtract")); viennacl::ocl::enqueue( oclk_channel_subtract(count, num, channels, spatial_dim, WrapHandle((cl_mem)scale_data, &ctx), WrapHandle((cl_mem)bottom_diff, &ctx)), ctx.get_queue()); greentea_gpu_mul<Dtype>(this->device_context_->id(), top[0]->count(), (cl_mem)bottom_diff, 0, (cl_mem)top_data, 0, (cl_mem)bottom_diff, 0); #endif } } INSTANTIATE_LAYER_GPU_FUNCS(SoftmaxLayer); } // namespace caffe
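// A minimal standalone sketch (illustrative names, not taken from the files above) of
// the numerically stable softmax pattern those kernels implement: subtract the
// per-row max, exponentiate, then divide by the per-row sum. One thread handles one
// row here, so this only shows the arithmetic, not the channel-parallel layout used above.
#include <cfloat>
#include <cuda_runtime.h>

__global__ void softmax_rows(const float* in, float* out, int rows, int cols) {
  int r = blockIdx.x * blockDim.x + threadIdx.x;
  if (r >= rows) return;
  float maxval = -FLT_MAX;
  for (int c = 0; c < cols; ++c) maxval = fmaxf(in[r * cols + c], maxval);  // row max for stability
  float sum = 0.f;
  for (int c = 0; c < cols; ++c) {
    float e = expf(in[r * cols + c] - maxval);  // exp of the shifted value
    out[r * cols + c] = e;
    sum += e;
  }
  for (int c = 0; c < cols; ++c) out[r * cols + c] /= sum;  // normalize
}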
6c7b19ad3cb2645481f055301976ad43adfe4c53.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //Based on the work of Andrew Krepps #include <stdio.h> #include <stdlib.h> #define ARRAY_SIZE N #define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int) * (ARRAY_SIZE)) #define FIRST_ASCII_SYMBOL 65 //65=A #define LAST_ASCII_SYMBOL 122 //122 = z #define SHIFT 4 #define LETTER_RANGE LAST_ASCII_SYMBOL - FIRST_ASCII_SYMBOL ////////////////////////OPERATIONS////////////////////////////////////////////// //ADD=1 __global__ void add(int * array1,int * array2,int * array3) { const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x; array3[i]=array1[i]+array2[i]; } //SUBTRACT=2 __global__ void subtract(int * array1,int * array2,int * array3) { const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x; array3[i]=array1[i]-array2[i]; } //MULTIPLY=3 __global__ void multiply(int * array1,int * array2,int * array3) { const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x; array3[i]=array1[i]*array2[i]; } //MOD=4 __global__ void mod(int * array1,int * array2,int * array3) { const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x; array3[i]=array1[i]%array2[i]; } //Caesar Cipher = 5 __global__ void cipherEncrypt(int *message, int* cipher_key, int* encodedMsg) { const unsigned int z = (blockIdx.x * blockDim.x) + threadIdx.x; //shift all letter values to zero char zeroed_char = message[z] - FIRST_ASCII_SYMBOL; //make the cipher key cipher_key[z] = ((zeroed_char + SHIFT) % LETTER_RANGE)+ FIRST_ASCII_SYMBOL; char cipher_char = (char) cipher_key[z]+ FIRST_ASCII_SYMBOL; //change back to ascii and store in encodedMsg encodedMsg[z] = (int) zeroed_char +SHIFT+ FIRST_ASCII_SYMBOL; } //////////////////////////GPU FUNCTION////////////////////////////////// void main_sub(int N, int BLOCK_SIZE, int NUM_BLOCKS, int whichOperation, int pinnable) { printf("/////NUM THREADS:%i\t BLOCK SIZE:%i \t",N,BLOCK_SIZE); //create timing hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); ///////////////DECLARE PAGABLE MEMORY///////////////// int *h_pagable1; //pagable int *h_pagable2; int *h_pagable3; //PAGEABLE MEMORY /* Declare statically 3 arrays of ARRAY_SIZE (N) each */ h_pagable1 = (int*)malloc(ARRAY_SIZE*sizeof(int)); h_pagable2 = (int*)malloc(ARRAY_SIZE*sizeof(int)); h_pagable3 = (int*)malloc(ARRAY_SIZE*sizeof(int)); /////////////////FILL ARRAYS//////////////// // fill the arrays with values described by module3; //comment out for arrays of //txt //using cipher if (whichOperation == 5) { for(int k=0; k < ARRAY_SIZE; k++) { h_pagable1[k] = (char)(k + 64);//just fill with alphabet h_pagable2[k] = 0; //cipher starts with all 0s } } //doing operation else{ for(int i = 0; i < N; i++) { h_pagable1[i] = i; h_pagable2[i] = (rand()%4); //Check that array1 and array 2 inputs are correct //printf("ARRAY1 at %i\nARRAY2 at %i\n\n", h_pagable1[i], h_pagable[i]); } } ///////////////DECLARE DEVICE MEMORY///////////////// int *d_1; //device memory int *d_2; int *d_3; hipMalloc((void**)&d_1, ARRAY_SIZE_IN_BYTES); // device hipMalloc((void**)&d_2, ARRAY_SIZE_IN_BYTES); hipMalloc((void**)&d_3, ARRAY_SIZE_IN_BYTES); ///////////////DECLARE PINNED MEMORY && COPY DATA FROM CPU TO GPU///////// int *h_pinnable1; //pinnable memory int *h_pinnable2; int *h_pinnable3; //USING PINNABLE MEMORY if (pinnable ==1) { printf("Memory type: Pinned\t"); hipHostMalloc((void**)&h_pinnable1, ARRAY_SIZE_IN_BYTES); // host pinned hipHostMalloc((void**)&h_pinnable2, ARRAY_SIZE_IN_BYTES); hipHostMalloc((void**)&h_pinnable3, 
ARRAY_SIZE_IN_BYTES); //hipMemcpy( array1, gpu_block1, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost ) memcpy( h_pinnable1, h_pagable1, ARRAY_SIZE_IN_BYTES ); memcpy( h_pinnable2, h_pagable2, ARRAY_SIZE_IN_BYTES ); memcpy( h_pinnable3, h_pagable3, ARRAY_SIZE_IN_BYTES ); hipMemcpy( d_1, h_pinnable1, ARRAY_SIZE_IN_BYTES, hipMemcpyHostToDevice ); hipMemcpy( d_2, h_pinnable2, ARRAY_SIZE_IN_BYTES, hipMemcpyHostToDevice ); hipMemcpy( d_3, h_pinnable3, ARRAY_SIZE_IN_BYTES, hipMemcpyHostToDevice ); } ////USING ONLY PAGABLE MEMORY else{ printf("Memory type: Pagable\t"); hipMemcpy( d_1, h_pagable1, ARRAY_SIZE_IN_BYTES, hipMemcpyHostToDevice ); hipMemcpy( d_2, h_pagable2, ARRAY_SIZE_IN_BYTES, hipMemcpyHostToDevice ); hipMemcpy( d_3, h_pagable3, ARRAY_SIZE_IN_BYTES, hipMemcpyHostToDevice ); } ///////////////////EXECUTE KERNEL//////////////////////////////// hipEventRecord(start); switch(whichOperation) { //ADD case 1 : printf("Operation: ADD///////////\n"); hipLaunchKernelGGL(( add), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_1,d_2,d_3); break; //SUBTRACT case 2 : printf("Operation: SUBTRACT///////////\n"); hipLaunchKernelGGL(( subtract), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_1,d_2,d_3); break; //MULTIPLY case 3 : printf("Operation: MUTIPLY///////////\n"); hipLaunchKernelGGL(( multiply), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_1,d_2,d_3); break; //MOD case 4 : printf("Operation: MOD///////////\n"); hipLaunchKernelGGL(( mod), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_1,d_2,d_3); break; //caesar cipher case 5 : printf("Operation:///////////\n"); hipLaunchKernelGGL(( cipherEncrypt), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, 0, d_1,d_2,d_3); break; } ///////////////COPY BACK DATA FROM GPU TO CPU//////////////////////// hipEventRecord(stop); hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); printf("Time elapsed: %f\n", milliseconds); //////////////////// hipMemcpy( h_pagable1, d_1, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost ); hipMemcpy( h_pagable2, d_2, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost ); hipMemcpy( h_pagable3, d_3, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost ); ///////////////PRINT RESULTS//////////////////////////////////////// /* Iterate through the arrays and print */ for(int i = 0; i < ARRAY_SIZE; i++) { if (whichOperation ==5) { char ogLetter = (char) h_pagable1[i]; char cipherLetter = (char) h_pagable2[i]+FIRST_ASCII_SYMBOL; printf("\n\nOG Letter int was: %i\nOG Letter char was: %c\nCipher int is: %i\nEncoded int is:%i\nEncoded char is now: %c\n", h_pagable1[i], ogLetter, h_pagable2[i], h_pagable3[i], cipherLetter); } else{ printf("Index %i:\t %i\n", i, h_pagable3[i]); } } ////////////////FREE MEMORY/////////////////////////////////////// /* Free the arrays on the GPU as now we're done with them */ hipFree(d_1); hipFree(d_2); hipFree(d_3); hipHostFree(h_pinnable1); hipHostFree(h_pinnable2); hipHostFree(h_pinnable3); free(h_pagable1); free(h_pagable2); free(h_pagable3); } //////////////////////////MAIN/////////////////////////////////// int main(int argc, char** argv) { // read command line arguments int totalThreads = (1 << 20); int blockSize = 256; int operationNum = 0; int pinnable = 0; //total threads if (argc >= 2) { totalThreads = atoi(argv[1]); } //block size if (argc >= 3) { blockSize = atoi(argv[2]); } //using pinned memory? 
if (argc >= 4) { pinnable = atoi(argv[3]); } //operation/kernel execution number if (argc >= 5) { operationNum = atoi(argv[4]); } int numBlocks = totalThreads/blockSize; // validate command line arguments if (totalThreads % blockSize != 0) { ++numBlocks; totalThreads = numBlocks*blockSize; printf("Warning: Total thread count is not evenly divisible by the block size\n"); printf("The total number of threads will be rounded up to %d\n", totalThreads); } //int N, int BLOCK_SIZE, int NUM_BLOCKS, int whichOperation, int pinnable main_sub(totalThreads,blockSize,numBlocks, operationNum, pinnable); }
6c7b19ad3cb2645481f055301976ad43adfe4c53.cu
//Based on the work of Andrew Krepps #include <stdio.h> #include <stdlib.h> #define ARRAY_SIZE N #define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int) * (ARRAY_SIZE)) #define FIRST_ASCII_SYMBOL 65 //65=A #define LAST_ASCII_SYMBOL 122 //122 = z #define SHIFT 4 #define LETTER_RANGE LAST_ASCII_SYMBOL - FIRST_ASCII_SYMBOL ////////////////////////OPERATIONS////////////////////////////////////////////// //ADD=1 __global__ void add(int * array1,int * array2,int * array3) { const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x; array3[i]=array1[i]+array2[i]; } //SUBTRACT=2 __global__ void subtract(int * array1,int * array2,int * array3) { const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x; array3[i]=array1[i]-array2[i]; } //MULTIPLY=3 __global__ void multiply(int * array1,int * array2,int * array3) { const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x; array3[i]=array1[i]*array2[i]; } //MOD=4 __global__ void mod(int * array1,int * array2,int * array3) { const unsigned int i = (blockIdx.x * blockDim.x) + threadIdx.x; array3[i]=array1[i]%array2[i]; } //Caesar Cipher = 5 __global__ void cipherEncrypt(int *message, int* cipher_key, int* encodedMsg) { const unsigned int z = (blockIdx.x * blockDim.x) + threadIdx.x; //shift all letter values to zero char zeroed_char = message[z] - FIRST_ASCII_SYMBOL; //make the cipher key cipher_key[z] = ((zeroed_char + SHIFT) % LETTER_RANGE)+ FIRST_ASCII_SYMBOL; char cipher_char = (char) cipher_key[z]+ FIRST_ASCII_SYMBOL; //change back to ascii and store in encodedMsg encodedMsg[z] = (int) zeroed_char +SHIFT+ FIRST_ASCII_SYMBOL; } //////////////////////////GPU FUNCTION////////////////////////////////// void main_sub(int N, int BLOCK_SIZE, int NUM_BLOCKS, int whichOperation, int pinnable) { printf("/////NUM THREADS:%i\t BLOCK SIZE:%i \t",N,BLOCK_SIZE); //create timing cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); ///////////////DECLARE PAGABLE MEMORY///////////////// int *h_pagable1; //pagable int *h_pagable2; int *h_pagable3; //PAGEABLE MEMORY /* Declare statically 3 arrays of ARRAY_SIZE (N) each */ h_pagable1 = (int*)malloc(ARRAY_SIZE*sizeof(int)); h_pagable2 = (int*)malloc(ARRAY_SIZE*sizeof(int)); h_pagable3 = (int*)malloc(ARRAY_SIZE*sizeof(int)); /////////////////FILL ARRAYS//////////////// // fill the arrays with values described by module3; //comment out for arrays of //txt //using cipher if (whichOperation == 5) { for(int k=0; k < ARRAY_SIZE; k++) { h_pagable1[k] = (char)(k + 64);//just fill with alphabet h_pagable2[k] = 0; //cipher starts with all 0s } } //doing operation else{ for(int i = 0; i < N; i++) { h_pagable1[i] = i; h_pagable2[i] = (rand()%4); //Check that array1 and array 2 inputs are correct //printf("ARRAY1 at %i\nARRAY2 at %i\n\n", h_pagable1[i], h_pagable[i]); } } ///////////////DECLARE DEVICE MEMORY///////////////// int *d_1; //device memory int *d_2; int *d_3; cudaMalloc((void**)&d_1, ARRAY_SIZE_IN_BYTES); // device cudaMalloc((void**)&d_2, ARRAY_SIZE_IN_BYTES); cudaMalloc((void**)&d_3, ARRAY_SIZE_IN_BYTES); ///////////////DECLARE PINNED MEMORY && COPY DATA FROM CPU TO GPU///////// int *h_pinnable1; //pinnable memory int *h_pinnable2; int *h_pinnable3; //USING PINNABLE MEMORY if (pinnable ==1) { printf("Memory type: Pinned\t"); cudaMallocHost((void**)&h_pinnable1, ARRAY_SIZE_IN_BYTES); // host pinned cudaMallocHost((void**)&h_pinnable2, ARRAY_SIZE_IN_BYTES); cudaMallocHost((void**)&h_pinnable3, ARRAY_SIZE_IN_BYTES); //cudaMemcpy( array1, gpu_block1, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost 
) memcpy( h_pinnable1, h_pagable1, ARRAY_SIZE_IN_BYTES ); memcpy( h_pinnable2, h_pagable2, ARRAY_SIZE_IN_BYTES ); memcpy( h_pinnable3, h_pagable3, ARRAY_SIZE_IN_BYTES ); cudaMemcpy( d_1, h_pinnable1, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice ); cudaMemcpy( d_2, h_pinnable2, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice ); cudaMemcpy( d_3, h_pinnable3, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice ); } ////USING ONLY PAGABLE MEMORY else{ printf("Memory type: Pagable\t"); cudaMemcpy( d_1, h_pagable1, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice ); cudaMemcpy( d_2, h_pagable2, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice ); cudaMemcpy( d_3, h_pagable3, ARRAY_SIZE_IN_BYTES, cudaMemcpyHostToDevice ); } ///////////////////EXECUTE KERNEL//////////////////////////////// cudaEventRecord(start); switch(whichOperation) { //ADD case 1 : printf("Operation: ADD///////////\n"); add<<<NUM_BLOCKS, BLOCK_SIZE>>>(d_1,d_2,d_3); break; //SUBTRACT case 2 : printf("Operation: SUBTRACT///////////\n"); subtract<<<NUM_BLOCKS, BLOCK_SIZE>>>(d_1,d_2,d_3); break; //MULTIPLY case 3 : printf("Operation: MUTIPLY///////////\n"); multiply<<<NUM_BLOCKS, BLOCK_SIZE>>>(d_1,d_2,d_3); break; //MOD case 4 : printf("Operation: MOD///////////\n"); mod<<<NUM_BLOCKS, BLOCK_SIZE>>>(d_1,d_2,d_3); break; //caesar cipher case 5 : printf("Operation:///////////\n"); cipherEncrypt<<<NUM_BLOCKS, BLOCK_SIZE>>>(d_1,d_2,d_3); break; } ///////////////COPY BACK DATA FROM GPU TO CPU//////////////////////// cudaEventRecord(stop); cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); printf("Time elapsed: %f\n", milliseconds); //////////////////// cudaMemcpy( h_pagable1, d_1, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost ); cudaMemcpy( h_pagable2, d_2, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost ); cudaMemcpy( h_pagable3, d_3, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost ); ///////////////PRINT RESULTS//////////////////////////////////////// /* Iterate through the arrays and print */ for(int i = 0; i < ARRAY_SIZE; i++) { if (whichOperation ==5) { char ogLetter = (char) h_pagable1[i]; char cipherLetter = (char) h_pagable2[i]+FIRST_ASCII_SYMBOL; printf("\n\nOG Letter int was: %i\nOG Letter char was: %c\nCipher int is: %i\nEncoded int is:%i\nEncoded char is now: %c\n", h_pagable1[i], ogLetter, h_pagable2[i], h_pagable3[i], cipherLetter); } else{ printf("Index %i:\t %i\n", i, h_pagable3[i]); } } ////////////////FREE MEMORY/////////////////////////////////////// /* Free the arrays on the GPU as now we're done with them */ cudaFree(d_1); cudaFree(d_2); cudaFree(d_3); cudaFreeHost(h_pinnable1); cudaFreeHost(h_pinnable2); cudaFreeHost(h_pinnable3); free(h_pagable1); free(h_pagable2); free(h_pagable3); } //////////////////////////MAIN/////////////////////////////////// int main(int argc, char** argv) { // read command line arguments int totalThreads = (1 << 20); int blockSize = 256; int operationNum = 0; int pinnable = 0; //total threads if (argc >= 2) { totalThreads = atoi(argv[1]); } //block size if (argc >= 3) { blockSize = atoi(argv[2]); } //using pinned memory? 
if (argc >= 4) { pinnable = atoi(argv[3]); } //operation/kernel execution number if (argc >= 5) { operationNum = atoi(argv[4]); } int numBlocks = totalThreads/blockSize; // validate command line arguments if (totalThreads % blockSize != 0) { ++numBlocks; totalThreads = numBlocks*blockSize; printf("Warning: Total thread count is not evenly divisible by the block size\n"); printf("The total number of threads will be rounded up to %d\n", totalThreads); } //int N, int BLOCK_SIZE, int NUM_BLOCKS, int whichOperation, int pinnable main_sub(totalThreads,blockSize,numBlocks, operationNum, pinnable); }
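// A minimal sketch of the pinned-memory transfer pattern used in the pair above, with
// the HIP calls that hipify substituted shown as trailing comments. The kernel and
// variable names here are illustrative placeholders, not part of the files.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale(int* data, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= 2;
}

int main() {
  const int n = 1 << 10;
  const size_t bytes = n * sizeof(int);
  int *h = nullptr, *d = nullptr;
  cudaMallocHost(&h, bytes);                        // HIP: hipHostMalloc
  for (int i = 0; i < n; ++i) h[i] = i;
  cudaMalloc(&d, bytes);                            // HIP: hipMalloc
  cudaMemcpy(d, h, bytes, cudaMemcpyHostToDevice);  // HIP: hipMemcpy(..., hipMemcpyHostToDevice)
  scale<<<(n + 255) / 256, 256>>>(d, n);            // HIP: hipLaunchKernelGGL(scale, dim3(...), dim3(256), 0, 0, d, n)
  cudaMemcpy(h, d, bytes, cudaMemcpyDeviceToHost);  // HIP: hipMemcpy(..., hipMemcpyDeviceToHost)
  printf("h[3] = %d\n", h[3]);                      // expect 6
  cudaFree(d);                                      // HIP: hipFree
  cudaFreeHost(h);                                  // HIP: hipHostFree
  return 0;
}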
dcc4095546f40379b1c0b5d2b370ccfa9fa4af96.hip
// !!! This is a file automatically generated by hipify!!!
#include "../common/common.h"
#include <stdio.h>
#include <hip/hip_runtime.h>

/*
 * A simple example of nested kernel launches from the GPU. Each thread displays
 * its information when execution begins, and also diagnostics when the next
 * lowest nesting layer completes.
 */

__global__ void nestedHelloWorld(int const iSize, int iDepth, int maxDepth)
{
    int tid = threadIdx.x;
    printf("Recursion=%d: Hello World from thread %d block %d\n", iDepth, tid, blockIdx.x);

    // condition to stop recursive execution
    if (iSize == 1 || iDepth >= maxDepth) return;

    // reduce block size to half
    int nthreads = iSize >> 1;

    // thread 0 launches child grid recursively
    if(tid == 0 && nthreads > 0)
    {
        // nestedHelloWorld<<<1, nthreads>>>(nthreads, ++iDepth, maxDepth); // GT 740 doesn't supported
        printf("-------> nested execution depth: %d\n", iDepth);
    }
}

int main(int argc, char **argv)
{
    int size = 8;
    int blocksize = 8;   // initial block size
    int igrid = 1;

    if(argc > 1)
    {
        igrid = atoi(argv[1]);
        size = igrid * blocksize;
    }

    dim3 block (blocksize, 1);
    dim3 grid ((size + block.x - 1) / block.x, 1);
    printf("%s Execution Configuration: grid %d block %d\n", argv[0], grid.x, block.x);

    hipLaunchKernelGGL(( nestedHelloWorld), dim3(grid), dim3(block), 0, 0, block.x, 0, 2);

    CHECK(hipGetLastError());
    CHECK(hipDeviceReset());
    return 0;
}
dcc4095546f40379b1c0b5d2b370ccfa9fa4af96.cu
#include "../common/common.h" #include <stdio.h> #include <cuda_runtime.h> /* * A simple example of nested kernel launches from the GPU. Each thread displays * its information when execution begins, and also diagnostics when the next * lowest nesting layer completes. */ __global__ void nestedHelloWorld(int const iSize, int iDepth, int maxDepth) { int tid = threadIdx.x; printf("Recursion=%d: Hello World from thread %d block %d\n", iDepth, tid, blockIdx.x); // condition to stop recursive execution if (iSize == 1 || iDepth >= maxDepth) return; // reduce block size to half int nthreads = iSize >> 1; // thread 0 launches child grid recursively if(tid == 0 && nthreads > 0) { // nestedHelloWorld<<<1, nthreads>>>(nthreads, ++iDepth, maxDepth); // GT 740 doesn't supported printf("-------> nested execution depth: %d\n", iDepth); } } int main(int argc, char **argv) { int size = 8; int blocksize = 8; // initial block size int igrid = 1; if(argc > 1) { igrid = atoi(argv[1]); size = igrid * blocksize; } dim3 block (blocksize, 1); dim3 grid ((size + block.x - 1) / block.x, 1); printf("%s Execution Configuration: grid %d block %d\n", argv[0], grid.x, block.x); nestedHelloWorld<<<grid, block>>>(block.x, 0, 2); CHECK(cudaGetLastError()); CHECK(cudaDeviceReset()); return 0; }
c0a9ac8dd39063127b01c7b36cb445303f984b54.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // // Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. // NVIDIA/apex is licensed under the // BSD 3 - Clause "New" or "Revised" License // /* Modifications Copyright (c) Microsoft. */ #ifdef _WIN32 #pragma warning(disable : 4244) #endif #include "orttraining/training_ops/cuda/nn/layer_norm_impl.h" #include "core/providers/cuda/cu_inc/common.cuh" namespace onnxruntime { namespace cuda { using namespace onnxruntime::cuda; namespace { // This is the un-specialized struct. Note that we prevent instantiation of this // struct by putting an undefined symbol in the function body so it won't compile. // template <typename T> // struct SharedMemory // { // // Ensure that we won't compile any un-specialized types // __device__ T *getPointer() // { // extern __device__ void error(void); // error(); // return NULL; // } // }; // https://github.com/NVIDIA/apex/issues/246 template <typename T> struct SharedMemory; template <> struct SharedMemory<float> { __device__ float* getPointer() { extern __shared__ float s_float[]; return s_float; } }; template <> struct SharedMemory<double> { __device__ double* getPointer() { extern __shared__ double s_double[]; return s_double; } }; } // namespace template <typename T, typename U, bool use_mean, bool simplified> __device__ void cuLoadWriteStridedInputs( const int i1_block, const int thr_load_row_off, const int thr_load_col_off, const int i2_off, const int row_stride, U* warp_buf1, U* warp_buf2, const T* input, const T* output, const T* dout, const int i1_end, const int n2, const T* __restrict__ gamma, const T* __restrict__ beta, const U* __restrict__ mean, const U* __restrict__ invvar) { int i1 = i1_block + thr_load_row_off; if (i1 < i1_end) { U curr_mean = (use_mean && !simplified) ? mean[i1] : U(0); U curr_invvar = use_mean ? 
invvar[i1] : U(0); for (int k = 0; k < blockDim.y; ++k) { int i2 = i2_off + k; int load_idx = i1 * n2 + i2; int write_idx = thr_load_row_off * row_stride + thr_load_col_off + k; if (i2 < n2) { U curr_dout = static_cast<U>(dout[load_idx]); warp_buf1[write_idx] = curr_dout; if (use_mean) { U curr_input = static_cast<U>(input[load_idx]); warp_buf2[write_idx] = curr_dout * (curr_input - curr_mean) * curr_invvar; } else { U curr_gamma = static_cast<U>(gamma[i2]); U curr_beta = static_cast<U>(beta[i2]); U curr_output = static_cast<U>(output[load_idx]); warp_buf2[write_idx] = curr_dout * (curr_output - curr_beta) / curr_gamma; } } else { warp_buf1[write_idx] = U(0); warp_buf2[write_idx] = U(0); } } } else { for (int k = 0; k < blockDim.y; ++k) { int write_idx = thr_load_row_off * row_stride + thr_load_col_off + k; warp_buf1[write_idx] = U(0); warp_buf2[write_idx] = U(0); } } } template <typename T, typename U, bool use_mean, bool simplified> __device__ void cuLoadAddStridedInputs( const int i1_block, const int thr_load_row_off, const int thr_load_col_off, const int i2_off, const int row_stride, U* warp_buf1, U* warp_buf2, const T* input, const T* output, const T* dout, const int i1_end, const int n2, const T* __restrict__ gamma, const T* __restrict__ beta, const U* __restrict__ mean, const U* __restrict__ invvar) { int i1 = i1_block + thr_load_row_off; if (i1 < i1_end) { U curr_mean = (use_mean && !simplified) ? mean[i1] : U(0); U curr_invvar = use_mean ? invvar[i1] : U(0); for (int k = 0; k < blockDim.y; ++k) { int i2 = i2_off + k; int load_idx = i1 * n2 + i2; int write_idx = thr_load_row_off * row_stride + thr_load_col_off + k; if (i2 < n2) { U curr_dout = static_cast<U>(dout[load_idx]); warp_buf1[write_idx] += curr_dout; if (use_mean) { U curr_input = static_cast<U>(input[load_idx]); warp_buf2[write_idx] += curr_dout * (curr_input - curr_mean) * curr_invvar; } else { U curr_gamma = static_cast<U>(gamma[i2]); U curr_beta = static_cast<U>(beta[i2]); U curr_output = static_cast<U>(output[load_idx]); warp_buf2[write_idx] += curr_dout * (curr_output - curr_beta) / curr_gamma; } } } } } template <typename T, typename U, bool use_mean, bool simplified> __global__ void cuComputePartGradGammaBeta( const T* __restrict__ dout, const T* __restrict__ input, const T* __restrict__ output, const T* __restrict__ gamma, const T* __restrict__ beta, const U* __restrict__ mean, const U* __restrict__ invvar, const int n1, const int n2, U* part_grad_gamma, U* part_grad_beta) { const int numsegs_n1 = (n1 + blockDim.y * blockDim.y - 1) / (blockDim.y * blockDim.y); const int segs_per_block = (numsegs_n1 + gridDim.y - 1) / gridDim.y; const int i1_beg = blockIdx.y * segs_per_block * blockDim.y * blockDim.y; const int i1_beg_plus_one = (blockIdx.y + 1) * segs_per_block * blockDim.y * blockDim.y; const int i1_end = i1_beg_plus_one < n1 ? 
i1_beg_plus_one : n1; const int row_stride = blockDim.x + 1; const int thr_load_col_off = (threadIdx.x * blockDim.y) & (blockDim.x - 1); const int thr_load_row_off = (threadIdx.x * blockDim.y) / blockDim.x + threadIdx.y * blockDim.y; const int i2_off = blockIdx.x * blockDim.x + thr_load_col_off; SharedMemory<U> shared; U* buf = shared.getPointer(); // buf has at least blockDim.x * blockDim.y * blockDim.y + (blockDim.y - 1)*(blockDim.x/blockDim.y) elements U* warp_buf1 = (U*)buf; U* warp_buf2 = warp_buf1 + blockDim.y * blockDim.y * row_stride; // compute partial sums from strided inputs // do this to increase number of loads in flight cuLoadWriteStridedInputs<T, U, use_mean, simplified>(i1_beg, thr_load_row_off, thr_load_col_off, i2_off, row_stride, warp_buf1, warp_buf2, input, output, dout, i1_end, n2, gamma, beta, mean, invvar); for (int i1_block = i1_beg + blockDim.y * blockDim.y; i1_block < i1_end; i1_block += blockDim.y * blockDim.y) { cuLoadAddStridedInputs<T, U, use_mean, simplified>(i1_block, thr_load_row_off, thr_load_col_off, i2_off, row_stride, warp_buf1, warp_buf2, input, output, dout, i1_end, n2, gamma, beta, mean, invvar); } __syncthreads(); // inter-warp reductions // sum within each warp U acc1 = U(0); U acc2 = U(0); for (int k = 0; k < blockDim.y; ++k) { int row1 = threadIdx.y + k * blockDim.y; int idx1 = row1 * row_stride + threadIdx.x; acc1 += warp_buf1[idx1]; acc2 += warp_buf2[idx1]; } warp_buf1[threadIdx.y * row_stride + threadIdx.x] = acc1; warp_buf2[threadIdx.y * row_stride + threadIdx.x] = acc2; __syncthreads(); // sum all warps for (int offset = blockDim.y / 2; offset > 1; offset /= 2) { if (threadIdx.y < offset) { int row1 = threadIdx.y; int row2 = threadIdx.y + offset; int idx1 = row1 * row_stride + threadIdx.x; int idx2 = row2 * row_stride + threadIdx.x; warp_buf1[idx1] += warp_buf1[idx2]; warp_buf2[idx1] += warp_buf2[idx2]; } __syncthreads(); } int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (threadIdx.y == 0 && i2 < n2) { int row1 = threadIdx.y; int row2 = threadIdx.y + 1; int idx1 = row1 * row_stride + threadIdx.x; int idx2 = row2 * row_stride + threadIdx.x; part_grad_beta[blockIdx.y * n2 + i2] = warp_buf1[idx1] + warp_buf1[idx2]; part_grad_gamma[blockIdx.y * n2 + i2] = warp_buf2[idx1] + warp_buf2[idx2]; } } template <typename T, typename U, bool simplified> __global__ void cuComputeGradGammaBeta( const U* part_grad_gamma, const U* part_grad_beta, const int part_size, const int n1, const int n2, T* grad_gamma, T* grad_beta) { // sum partial gradients for gamma and beta SharedMemory<U> shared; U* buf = shared.getPointer(); int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (i2 < n2) { // each warp does sequential reductions until reduced part_size is num_warps int num_warp_reductions = part_size / blockDim.y; U sum_gamma = U(0); U sum_beta = U(0); const U* part_grad_gamma_ptr = part_grad_gamma + threadIdx.y * num_warp_reductions * n2 + i2; const U* part_grad_beta_ptr = part_grad_beta + threadIdx.y * num_warp_reductions * n2 + i2; for (int warp_offset = 0; warp_offset < num_warp_reductions; ++warp_offset) { sum_gamma += part_grad_gamma_ptr[warp_offset * n2]; sum_beta += part_grad_beta_ptr[warp_offset * n2]; } // inter-warp reductions const int nbsize3 = blockDim.x * blockDim.y / 2; for (int offset = blockDim.y / 2; offset >= 1; offset /= 2) { // top half write to shared memory if (threadIdx.y >= offset && threadIdx.y < 2 * offset) { const int write_idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x; buf[write_idx] = sum_gamma; buf[write_idx + nbsize3] = 
sum_beta; } __syncthreads(); // bottom half sums if (threadIdx.y < offset) { const int read_idx = threadIdx.y * blockDim.x + threadIdx.x; sum_gamma += buf[read_idx]; sum_beta += buf[read_idx + nbsize3]; } __syncthreads(); } // write out fully summed gradients if (threadIdx.y == 0) { grad_gamma[i2] = sum_gamma; if (!simplified) { grad_beta[i2] = sum_beta; } } } } template <typename T, typename U, bool use_mean, bool use_gamma, bool simplified> __global__ void cuComputeGradInput( const T* __restrict__ dout, const T* __restrict__ input, const T* __restrict__ output, const T* gamma, const T* beta, const U* __restrict__ mean, const U* __restrict__ invvar, const int n1, const int n2, T* grad_input) { for (int i1 = blockIdx.y; i1 < n1; i1 += gridDim.y) { U sum_loss1 = U(0); U sum_loss2 = U(0); const U c_mean = (use_mean && !simplified) ? mean[i1] : U(0); const U c_invvar = invvar[i1]; const T* k_input = use_mean ? input + i1 * n2 : nullptr; const T* k_output = use_mean ? nullptr: output + i1 * n2; const T* k_dout = dout + i1 * n2; const int numx = blockDim.x * blockDim.y; const int thrx = threadIdx.x + threadIdx.y * blockDim.x; if (use_gamma) { #ifndef __HIP_PLATFORM_HCC__ int l = 4 * thrx; for (; l + 3 < n2; l += 4 * numx) { for (int k = 0; k < 4; ++k) { const U c_loss = static_cast<U>(k_dout[l + k]); sum_loss1 += c_loss * U(gamma[l + k]); if (use_mean) { const U c_h = static_cast<U>(k_input[l + k]); sum_loss2 += c_loss * U(gamma[l + k]) * (c_h - c_mean) * c_invvar; } else { const U c_output = static_cast<U>(k_output[l + k]); sum_loss2 += c_loss * (c_output - U(beta[l + k])); } } } for (; l < n2; ++l) { const U c_loss = static_cast<U>(k_dout[l]); sum_loss1 += c_loss * U(gamma[l]); if (use_mean) { const U c_h = static_cast<U>(k_input[l]); sum_loss2 += c_loss * U(gamma[l]) * (c_h - c_mean) * c_invvar; } else { const U c_output = static_cast<U>(k_output[l]); sum_loss2 += c_loss * (c_output - U(beta[l])); } } #else // Optimization for ROCm MI100 for( int l = 0; l < n2 ; l += numx) { int idx = l + thrx; T gamma_idx = (idx<n2)?gamma[ idx ]:T(0); const U c_loss = static_cast<U>( (idx<n2)?k_dout[ idx ]:T(0) ); sum_loss1 += c_loss * U( gamma_idx ); if (use_mean) { const U c_h = static_cast<U>( (idx<n2)?k_input[ idx ]:T(0) ); sum_loss2 += c_loss * U(gamma_idx) * (c_h - c_mean) * c_invvar; } else { const U c_output = static_cast<U>( (idx<n2)?k_output[idx]:T(0) ); sum_loss2 += c_loss * (c_output - U( (idx<n2)?beta[idx]:T(0) )); } } #endif } else { #ifndef __HIP_PLATFORM_HCC__ int l = 4 * thrx; for (; l + 3 < n2; l += 4 * numx) { for (int k = 0; k < 4; ++k) { const U c_loss = static_cast<U>(k_dout[l + k]); sum_loss1 += c_loss; if (use_mean) { const U c_h = static_cast<U>(k_input[l + k]); sum_loss2 += c_loss * (c_h - c_mean) * c_invvar; } else { const U c_output = static_cast<U>(k_output[l + k]); sum_loss2 += c_loss * c_output; } } } for (; l < n2; ++l) { const U c_loss = static_cast<U>(k_dout[l]); sum_loss1 += c_loss; if (use_mean) { const U c_h = static_cast<U>(k_input[l]); sum_loss2 += c_loss * (c_h - c_mean) * c_invvar; } else { const U c_output = static_cast<U>(k_output[l]); sum_loss2 += c_loss * c_output; } } #else // Optimization for ROCm MI100 for( int l = 0; l < n2 ; l += numx) { int idx = l + thrx; const U c_loss = static_cast<U>((idx<n2)?k_dout[idx]:T(0)); sum_loss1 += c_loss; if (use_mean) { const U c_h = static_cast<U>((idx<n2)?k_input[idx]:T(0)); sum_loss2 += c_loss * (c_h - c_mean) * c_invvar; } else { const U c_output = static_cast<U>((idx<n2)?k_output[idx]:T(0)); sum_loss2 += c_loss * 
c_output; } } #endif } // intra-warp reductions for (int mask = blockDim.x / 2; mask > 0; mask /= 2) { sum_loss1 += WARP_SHFL_XOR(sum_loss1, mask); sum_loss2 += WARP_SHFL_XOR(sum_loss2, mask); } // inter-warp reductions if (blockDim.y > 1) { SharedMemory<U> shared; U* buf = shared.getPointer(); for (int offset = blockDim.y / 2; offset > 0; offset /= 2) { // upper half of warps write to shared if (threadIdx.y >= offset && threadIdx.y < 2 * offset) { const int wrt_i = (threadIdx.y - offset) * blockDim.x + threadIdx.x; buf[2 * wrt_i] = sum_loss1; buf[2 * wrt_i + 1] = sum_loss2; } __syncthreads(); // lower half merges if (threadIdx.y < offset) { const int read_i = threadIdx.y * blockDim.x + threadIdx.x; sum_loss1 += buf[2 * read_i]; sum_loss2 += buf[2 * read_i + 1]; } __syncthreads(); } if (threadIdx.y == 0) { buf[2 * threadIdx.x] = sum_loss1; buf[2 * threadIdx.x + 1] = sum_loss2; } __syncthreads(); if (threadIdx.y != 0) { sum_loss1 = buf[2 * threadIdx.x]; sum_loss2 = buf[2 * threadIdx.x + 1]; } } // all threads now have the two sums over l // U sum_loss2 = X_mean_difference_over_std_var in cpu kernel U fH = (U)n2; U term1 = (U(1) / fH) * c_invvar; T* k_grad_input = grad_input + i1 * n2; if (use_gamma) { for (int l = thrx; l < n2; l += numx) { const U c_loss = static_cast<U>(k_dout[l]); U f_grad_input = fH * c_loss * U(gamma[l]); if (!simplified) { f_grad_input -= sum_loss1; } if (use_mean) { const U c_h = static_cast<U>(k_input[l]); f_grad_input -= (c_h - c_mean) * c_invvar * sum_loss2; } else { const U c_output = static_cast<U>(k_output[l]); f_grad_input -= (c_output - U(beta[l])) / U(gamma[l]) * sum_loss2; } f_grad_input *= term1; k_grad_input[l] = static_cast<T>(f_grad_input); } } else { for (int l = thrx; l < n2; l += numx) { const U c_loss = static_cast<U>(k_dout[l]); U f_grad_input = fH * c_loss; if (!simplified) { f_grad_input -= sum_loss1; } if (use_mean) { const U c_h = static_cast<U>(k_input[l]); f_grad_input -= (c_h - c_mean) * c_invvar * sum_loss2; } else { const U c_output = static_cast<U>(k_output[l]); f_grad_input -= c_output * sum_loss2; } f_grad_input *= term1; k_grad_input[l] = static_cast<T>(f_grad_input); } } } } template <typename T, typename U, bool simplified> void HostLayerNormGradient( const hipDeviceProp_t& prop, hipStream_t stream, const T* dout, const T* input, const T* output, const T* gamma, const T* beta, const U* mean, const U* invvar, int64_t n1, int64_t n2, T* grad_input, T* grad_gamma, T* grad_beta, U* part_grad_gamma, U* part_grad_beta, const int part_size) { const int warp_size = prop.warpSize; ORT_ENFORCE(warp_size == GPU_WARP_SIZE); const dim3 threads2(warp_size, 4, 1); const dim3 blocks2((n2 + threads2.x - 1) / threads2.x, part_size, 1); const int nshared2_a = 2 * sizeof(U) * threads2.y * threads2.y * (threads2.x + 1); const int nshared2_b = threads2.x * threads2.y * sizeof(U); const int nshared2 = nshared2_a > nshared2_b ? 
nshared2_a : nshared2_b; if (mean == nullptr && !simplified) { // use_mean == false, simplified == false -> Inverted Layer Norm hipLaunchKernelGGL(( cuComputePartGradGammaBeta<T, U, false, false>), dim3(blocks2), dim3(threads2), nshared2, stream, dout, input, output, gamma, beta, mean, invvar, n1, n2, part_grad_gamma, part_grad_beta); } else { // use_mean == true, simplified == false -> Layer Norm // use_mean == true, simplified == true -> Simplified Layer Norm hipLaunchKernelGGL(( cuComputePartGradGammaBeta<T, U, true, simplified>), dim3(blocks2), dim3(threads2), nshared2, stream, dout, input, output, gamma, beta, mean, invvar, n1, n2, part_grad_gamma, part_grad_beta); } const dim3 threads3(warp_size, 8, 1); const dim3 blocks3((n2 + threads2.x - 1) / threads2.x, 1, 1); const int nshared3 = threads3.x * threads3.y * sizeof(U); hipLaunchKernelGGL(( cuComputeGradGammaBeta<T, U, simplified>), dim3(blocks3), dim3(threads3), nshared3, stream, part_grad_gamma, part_grad_beta, part_size, n1, n2, grad_gamma, grad_beta); // compute grad_input const uint64_t maxGridY = prop.maxGridSize[1]; const dim3 blocks1(1, std::min<unsigned int>(static_cast<unsigned int>(n1), static_cast<unsigned int>(maxGridY)), 1); dim3 threads1(warp_size, 4, 1); #ifdef __HIP_PLATFORM_HCC__ // Optimization for ROCm MI100 threads1.y = 2; #endif int nshared = threads1.y > 1 ? threads1.y * threads1.x * sizeof(U) : 0; if (mean == nullptr && !simplified) { if (gamma == nullptr) { hipLaunchKernelGGL(( cuComputeGradInput<T, U, false, false, false>), dim3(blocks1), dim3(threads1), nshared, stream, dout, input, output, gamma, beta, mean, invvar, n1, n2, grad_input); } else { hipLaunchKernelGGL(( cuComputeGradInput<T, U, false, true, false>), dim3(blocks1), dim3(threads1), nshared, stream, dout, input, output, gamma, beta, mean, invvar, n1, n2, grad_input); } } else { if (gamma == nullptr) { hipLaunchKernelGGL(( cuComputeGradInput<T, U, true, false, simplified>), dim3(blocks1), dim3(threads1), nshared, stream, dout, input, output, gamma, beta, mean, invvar, n1, n2, grad_input); } else { hipLaunchKernelGGL(( cuComputeGradInput<T, U, true, true, simplified>), dim3(blocks1), dim3(threads1), nshared, stream, dout, input, output, gamma, beta, mean, invvar, n1, n2, grad_input); } } } #define LAYERNORMGRAD_IMPL(T, U, simplified) \ template void HostLayerNormGradient<T, U, simplified>(const hipDeviceProp_t& prop, hipStream_t stream, const T* dout, const T* input, const T* output, \ const T* gamma, const T* beta, const U* mean, const U* invvar, int64_t n1, int64_t n2, \ T* grad_input, T* grad_gamma, T* grad_beta, U* part_grad_gamma, U* part_grad_beta, const int part_size); LAYERNORMGRAD_IMPL(float, float, true) LAYERNORMGRAD_IMPL(double, double, true) LAYERNORMGRAD_IMPL(half, float, true) LAYERNORMGRAD_IMPL(float, float, false) LAYERNORMGRAD_IMPL(double, double, false) LAYERNORMGRAD_IMPL(half, float, false) #if TORCH_HIP_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) LAYERNORMGRAD_IMPL(nv_bfloat16, float, true) LAYERNORMGRAD_IMPL(nv_bfloat16, float, false) #endif } // namespace cuda } // namespace onnxruntime
c0a9ac8dd39063127b01c7b36cb445303f984b54.cu
/** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // // Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved. // NVIDIA/apex is licensed under the // BSD 3 - Clause "New" or "Revised" License // /* Modifications Copyright (c) Microsoft. */ #ifdef _WIN32 #pragma warning(disable : 4244) #endif #include "orttraining/training_ops/cuda/nn/layer_norm_impl.h" #include "core/providers/cuda/cu_inc/common.cuh" namespace onnxruntime { namespace cuda { using namespace onnxruntime::cuda; namespace { // This is the un-specialized struct. Note that we prevent instantiation of this // struct by putting an undefined symbol in the function body so it won't compile. // template <typename T> // struct SharedMemory // { // // Ensure that we won't compile any un-specialized types // __device__ T *getPointer() // { // extern __device__ void error(void); // error(); // return NULL; // } // }; // https://github.com/NVIDIA/apex/issues/246 template <typename T> struct SharedMemory; template <> struct SharedMemory<float> { __device__ float* getPointer() { extern __shared__ float s_float[]; return s_float; } }; template <> struct SharedMemory<double> { __device__ double* getPointer() { extern __shared__ double s_double[]; return s_double; } }; } // namespace template <typename T, typename U, bool use_mean, bool simplified> __device__ void cuLoadWriteStridedInputs( const int i1_block, const int thr_load_row_off, const int thr_load_col_off, const int i2_off, const int row_stride, U* warp_buf1, U* warp_buf2, const T* input, const T* output, const T* dout, const int i1_end, const int n2, const T* __restrict__ gamma, const T* __restrict__ beta, const U* __restrict__ mean, const U* __restrict__ invvar) { int i1 = i1_block + thr_load_row_off; if (i1 < i1_end) { U curr_mean = (use_mean && !simplified) ? mean[i1] : U(0); U curr_invvar = use_mean ? 
invvar[i1] : U(0); for (int k = 0; k < blockDim.y; ++k) { int i2 = i2_off + k; int load_idx = i1 * n2 + i2; int write_idx = thr_load_row_off * row_stride + thr_load_col_off + k; if (i2 < n2) { U curr_dout = static_cast<U>(dout[load_idx]); warp_buf1[write_idx] = curr_dout; if (use_mean) { U curr_input = static_cast<U>(input[load_idx]); warp_buf2[write_idx] = curr_dout * (curr_input - curr_mean) * curr_invvar; } else { U curr_gamma = static_cast<U>(gamma[i2]); U curr_beta = static_cast<U>(beta[i2]); U curr_output = static_cast<U>(output[load_idx]); warp_buf2[write_idx] = curr_dout * (curr_output - curr_beta) / curr_gamma; } } else { warp_buf1[write_idx] = U(0); warp_buf2[write_idx] = U(0); } } } else { for (int k = 0; k < blockDim.y; ++k) { int write_idx = thr_load_row_off * row_stride + thr_load_col_off + k; warp_buf1[write_idx] = U(0); warp_buf2[write_idx] = U(0); } } } template <typename T, typename U, bool use_mean, bool simplified> __device__ void cuLoadAddStridedInputs( const int i1_block, const int thr_load_row_off, const int thr_load_col_off, const int i2_off, const int row_stride, U* warp_buf1, U* warp_buf2, const T* input, const T* output, const T* dout, const int i1_end, const int n2, const T* __restrict__ gamma, const T* __restrict__ beta, const U* __restrict__ mean, const U* __restrict__ invvar) { int i1 = i1_block + thr_load_row_off; if (i1 < i1_end) { U curr_mean = (use_mean && !simplified) ? mean[i1] : U(0); U curr_invvar = use_mean ? invvar[i1] : U(0); for (int k = 0; k < blockDim.y; ++k) { int i2 = i2_off + k; int load_idx = i1 * n2 + i2; int write_idx = thr_load_row_off * row_stride + thr_load_col_off + k; if (i2 < n2) { U curr_dout = static_cast<U>(dout[load_idx]); warp_buf1[write_idx] += curr_dout; if (use_mean) { U curr_input = static_cast<U>(input[load_idx]); warp_buf2[write_idx] += curr_dout * (curr_input - curr_mean) * curr_invvar; } else { U curr_gamma = static_cast<U>(gamma[i2]); U curr_beta = static_cast<U>(beta[i2]); U curr_output = static_cast<U>(output[load_idx]); warp_buf2[write_idx] += curr_dout * (curr_output - curr_beta) / curr_gamma; } } } } } template <typename T, typename U, bool use_mean, bool simplified> __global__ void cuComputePartGradGammaBeta( const T* __restrict__ dout, const T* __restrict__ input, const T* __restrict__ output, const T* __restrict__ gamma, const T* __restrict__ beta, const U* __restrict__ mean, const U* __restrict__ invvar, const int n1, const int n2, U* part_grad_gamma, U* part_grad_beta) { const int numsegs_n1 = (n1 + blockDim.y * blockDim.y - 1) / (blockDim.y * blockDim.y); const int segs_per_block = (numsegs_n1 + gridDim.y - 1) / gridDim.y; const int i1_beg = blockIdx.y * segs_per_block * blockDim.y * blockDim.y; const int i1_beg_plus_one = (blockIdx.y + 1) * segs_per_block * blockDim.y * blockDim.y; const int i1_end = i1_beg_plus_one < n1 ? 
i1_beg_plus_one : n1; const int row_stride = blockDim.x + 1; const int thr_load_col_off = (threadIdx.x * blockDim.y) & (blockDim.x - 1); const int thr_load_row_off = (threadIdx.x * blockDim.y) / blockDim.x + threadIdx.y * blockDim.y; const int i2_off = blockIdx.x * blockDim.x + thr_load_col_off; SharedMemory<U> shared; U* buf = shared.getPointer(); // buf has at least blockDim.x * blockDim.y * blockDim.y + (blockDim.y - 1)*(blockDim.x/blockDim.y) elements U* warp_buf1 = (U*)buf; U* warp_buf2 = warp_buf1 + blockDim.y * blockDim.y * row_stride; // compute partial sums from strided inputs // do this to increase number of loads in flight cuLoadWriteStridedInputs<T, U, use_mean, simplified>(i1_beg, thr_load_row_off, thr_load_col_off, i2_off, row_stride, warp_buf1, warp_buf2, input, output, dout, i1_end, n2, gamma, beta, mean, invvar); for (int i1_block = i1_beg + blockDim.y * blockDim.y; i1_block < i1_end; i1_block += blockDim.y * blockDim.y) { cuLoadAddStridedInputs<T, U, use_mean, simplified>(i1_block, thr_load_row_off, thr_load_col_off, i2_off, row_stride, warp_buf1, warp_buf2, input, output, dout, i1_end, n2, gamma, beta, mean, invvar); } __syncthreads(); // inter-warp reductions // sum within each warp U acc1 = U(0); U acc2 = U(0); for (int k = 0; k < blockDim.y; ++k) { int row1 = threadIdx.y + k * blockDim.y; int idx1 = row1 * row_stride + threadIdx.x; acc1 += warp_buf1[idx1]; acc2 += warp_buf2[idx1]; } warp_buf1[threadIdx.y * row_stride + threadIdx.x] = acc1; warp_buf2[threadIdx.y * row_stride + threadIdx.x] = acc2; __syncthreads(); // sum all warps for (int offset = blockDim.y / 2; offset > 1; offset /= 2) { if (threadIdx.y < offset) { int row1 = threadIdx.y; int row2 = threadIdx.y + offset; int idx1 = row1 * row_stride + threadIdx.x; int idx2 = row2 * row_stride + threadIdx.x; warp_buf1[idx1] += warp_buf1[idx2]; warp_buf2[idx1] += warp_buf2[idx2]; } __syncthreads(); } int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (threadIdx.y == 0 && i2 < n2) { int row1 = threadIdx.y; int row2 = threadIdx.y + 1; int idx1 = row1 * row_stride + threadIdx.x; int idx2 = row2 * row_stride + threadIdx.x; part_grad_beta[blockIdx.y * n2 + i2] = warp_buf1[idx1] + warp_buf1[idx2]; part_grad_gamma[blockIdx.y * n2 + i2] = warp_buf2[idx1] + warp_buf2[idx2]; } } template <typename T, typename U, bool simplified> __global__ void cuComputeGradGammaBeta( const U* part_grad_gamma, const U* part_grad_beta, const int part_size, const int n1, const int n2, T* grad_gamma, T* grad_beta) { // sum partial gradients for gamma and beta SharedMemory<U> shared; U* buf = shared.getPointer(); int i2 = blockIdx.x * blockDim.x + threadIdx.x; if (i2 < n2) { // each warp does sequential reductions until reduced part_size is num_warps int num_warp_reductions = part_size / blockDim.y; U sum_gamma = U(0); U sum_beta = U(0); const U* part_grad_gamma_ptr = part_grad_gamma + threadIdx.y * num_warp_reductions * n2 + i2; const U* part_grad_beta_ptr = part_grad_beta + threadIdx.y * num_warp_reductions * n2 + i2; for (int warp_offset = 0; warp_offset < num_warp_reductions; ++warp_offset) { sum_gamma += part_grad_gamma_ptr[warp_offset * n2]; sum_beta += part_grad_beta_ptr[warp_offset * n2]; } // inter-warp reductions const int nbsize3 = blockDim.x * blockDim.y / 2; for (int offset = blockDim.y / 2; offset >= 1; offset /= 2) { // top half write to shared memory if (threadIdx.y >= offset && threadIdx.y < 2 * offset) { const int write_idx = (threadIdx.y - offset) * blockDim.x + threadIdx.x; buf[write_idx] = sum_gamma; buf[write_idx + nbsize3] = 
sum_beta; } __syncthreads(); // bottom half sums if (threadIdx.y < offset) { const int read_idx = threadIdx.y * blockDim.x + threadIdx.x; sum_gamma += buf[read_idx]; sum_beta += buf[read_idx + nbsize3]; } __syncthreads(); } // write out fully summed gradients if (threadIdx.y == 0) { grad_gamma[i2] = sum_gamma; if (!simplified) { grad_beta[i2] = sum_beta; } } } } template <typename T, typename U, bool use_mean, bool use_gamma, bool simplified> __global__ void cuComputeGradInput( const T* __restrict__ dout, const T* __restrict__ input, const T* __restrict__ output, const T* gamma, const T* beta, const U* __restrict__ mean, const U* __restrict__ invvar, const int n1, const int n2, T* grad_input) { for (int i1 = blockIdx.y; i1 < n1; i1 += gridDim.y) { U sum_loss1 = U(0); U sum_loss2 = U(0); const U c_mean = (use_mean && !simplified) ? mean[i1] : U(0); const U c_invvar = invvar[i1]; const T* k_input = use_mean ? input + i1 * n2 : nullptr; const T* k_output = use_mean ? nullptr: output + i1 * n2; const T* k_dout = dout + i1 * n2; const int numx = blockDim.x * blockDim.y; const int thrx = threadIdx.x + threadIdx.y * blockDim.x; if (use_gamma) { #ifndef __HIP_PLATFORM_HCC__ int l = 4 * thrx; for (; l + 3 < n2; l += 4 * numx) { for (int k = 0; k < 4; ++k) { const U c_loss = static_cast<U>(k_dout[l + k]); sum_loss1 += c_loss * U(gamma[l + k]); if (use_mean) { const U c_h = static_cast<U>(k_input[l + k]); sum_loss2 += c_loss * U(gamma[l + k]) * (c_h - c_mean) * c_invvar; } else { const U c_output = static_cast<U>(k_output[l + k]); sum_loss2 += c_loss * (c_output - U(beta[l + k])); } } } for (; l < n2; ++l) { const U c_loss = static_cast<U>(k_dout[l]); sum_loss1 += c_loss * U(gamma[l]); if (use_mean) { const U c_h = static_cast<U>(k_input[l]); sum_loss2 += c_loss * U(gamma[l]) * (c_h - c_mean) * c_invvar; } else { const U c_output = static_cast<U>(k_output[l]); sum_loss2 += c_loss * (c_output - U(beta[l])); } } #else // Optimization for ROCm MI100 for( int l = 0; l < n2 ; l += numx) { int idx = l + thrx; T gamma_idx = (idx<n2)?gamma[ idx ]:T(0); const U c_loss = static_cast<U>( (idx<n2)?k_dout[ idx ]:T(0) ); sum_loss1 += c_loss * U( gamma_idx ); if (use_mean) { const U c_h = static_cast<U>( (idx<n2)?k_input[ idx ]:T(0) ); sum_loss2 += c_loss * U(gamma_idx) * (c_h - c_mean) * c_invvar; } else { const U c_output = static_cast<U>( (idx<n2)?k_output[idx]:T(0) ); sum_loss2 += c_loss * (c_output - U( (idx<n2)?beta[idx]:T(0) )); } } #endif } else { #ifndef __HIP_PLATFORM_HCC__ int l = 4 * thrx; for (; l + 3 < n2; l += 4 * numx) { for (int k = 0; k < 4; ++k) { const U c_loss = static_cast<U>(k_dout[l + k]); sum_loss1 += c_loss; if (use_mean) { const U c_h = static_cast<U>(k_input[l + k]); sum_loss2 += c_loss * (c_h - c_mean) * c_invvar; } else { const U c_output = static_cast<U>(k_output[l + k]); sum_loss2 += c_loss * c_output; } } } for (; l < n2; ++l) { const U c_loss = static_cast<U>(k_dout[l]); sum_loss1 += c_loss; if (use_mean) { const U c_h = static_cast<U>(k_input[l]); sum_loss2 += c_loss * (c_h - c_mean) * c_invvar; } else { const U c_output = static_cast<U>(k_output[l]); sum_loss2 += c_loss * c_output; } } #else // Optimization for ROCm MI100 for( int l = 0; l < n2 ; l += numx) { int idx = l + thrx; const U c_loss = static_cast<U>((idx<n2)?k_dout[idx]:T(0)); sum_loss1 += c_loss; if (use_mean) { const U c_h = static_cast<U>((idx<n2)?k_input[idx]:T(0)); sum_loss2 += c_loss * (c_h - c_mean) * c_invvar; } else { const U c_output = static_cast<U>((idx<n2)?k_output[idx]:T(0)); sum_loss2 += c_loss * 
c_output; } } #endif } // intra-warp reductions for (int mask = blockDim.x / 2; mask > 0; mask /= 2) { sum_loss1 += WARP_SHFL_XOR(sum_loss1, mask); sum_loss2 += WARP_SHFL_XOR(sum_loss2, mask); } // inter-warp reductions if (blockDim.y > 1) { SharedMemory<U> shared; U* buf = shared.getPointer(); for (int offset = blockDim.y / 2; offset > 0; offset /= 2) { // upper half of warps write to shared if (threadIdx.y >= offset && threadIdx.y < 2 * offset) { const int wrt_i = (threadIdx.y - offset) * blockDim.x + threadIdx.x; buf[2 * wrt_i] = sum_loss1; buf[2 * wrt_i + 1] = sum_loss2; } __syncthreads(); // lower half merges if (threadIdx.y < offset) { const int read_i = threadIdx.y * blockDim.x + threadIdx.x; sum_loss1 += buf[2 * read_i]; sum_loss2 += buf[2 * read_i + 1]; } __syncthreads(); } if (threadIdx.y == 0) { buf[2 * threadIdx.x] = sum_loss1; buf[2 * threadIdx.x + 1] = sum_loss2; } __syncthreads(); if (threadIdx.y != 0) { sum_loss1 = buf[2 * threadIdx.x]; sum_loss2 = buf[2 * threadIdx.x + 1]; } } // all threads now have the two sums over l // U sum_loss2 = X_mean_difference_over_std_var in cpu kernel U fH = (U)n2; U term1 = (U(1) / fH) * c_invvar; T* k_grad_input = grad_input + i1 * n2; if (use_gamma) { for (int l = thrx; l < n2; l += numx) { const U c_loss = static_cast<U>(k_dout[l]); U f_grad_input = fH * c_loss * U(gamma[l]); if (!simplified) { f_grad_input -= sum_loss1; } if (use_mean) { const U c_h = static_cast<U>(k_input[l]); f_grad_input -= (c_h - c_mean) * c_invvar * sum_loss2; } else { const U c_output = static_cast<U>(k_output[l]); f_grad_input -= (c_output - U(beta[l])) / U(gamma[l]) * sum_loss2; } f_grad_input *= term1; k_grad_input[l] = static_cast<T>(f_grad_input); } } else { for (int l = thrx; l < n2; l += numx) { const U c_loss = static_cast<U>(k_dout[l]); U f_grad_input = fH * c_loss; if (!simplified) { f_grad_input -= sum_loss1; } if (use_mean) { const U c_h = static_cast<U>(k_input[l]); f_grad_input -= (c_h - c_mean) * c_invvar * sum_loss2; } else { const U c_output = static_cast<U>(k_output[l]); f_grad_input -= c_output * sum_loss2; } f_grad_input *= term1; k_grad_input[l] = static_cast<T>(f_grad_input); } } } } template <typename T, typename U, bool simplified> void HostLayerNormGradient( const cudaDeviceProp& prop, cudaStream_t stream, const T* dout, const T* input, const T* output, const T* gamma, const T* beta, const U* mean, const U* invvar, int64_t n1, int64_t n2, T* grad_input, T* grad_gamma, T* grad_beta, U* part_grad_gamma, U* part_grad_beta, const int part_size) { const int warp_size = prop.warpSize; ORT_ENFORCE(warp_size == GPU_WARP_SIZE); const dim3 threads2(warp_size, 4, 1); const dim3 blocks2((n2 + threads2.x - 1) / threads2.x, part_size, 1); const int nshared2_a = 2 * sizeof(U) * threads2.y * threads2.y * (threads2.x + 1); const int nshared2_b = threads2.x * threads2.y * sizeof(U); const int nshared2 = nshared2_a > nshared2_b ? 
nshared2_a : nshared2_b; if (mean == nullptr && !simplified) { // use_mean == false, simplified == false -> Inverted Layer Norm cuComputePartGradGammaBeta<T, U, false, false><<<blocks2, threads2, nshared2, stream>>>( dout, input, output, gamma, beta, mean, invvar, n1, n2, part_grad_gamma, part_grad_beta); } else { // use_mean == true, simplified == false -> Layer Norm // use_mean == true, simplified == true -> Simplified Layer Norm cuComputePartGradGammaBeta<T, U, true, simplified><<<blocks2, threads2, nshared2, stream>>>( dout, input, output, gamma, beta, mean, invvar, n1, n2, part_grad_gamma, part_grad_beta); } const dim3 threads3(warp_size, 8, 1); const dim3 blocks3((n2 + threads2.x - 1) / threads2.x, 1, 1); const int nshared3 = threads3.x * threads3.y * sizeof(U); cuComputeGradGammaBeta<T, U, simplified><<<blocks3, threads3, nshared3, stream>>>( part_grad_gamma, part_grad_beta, part_size, n1, n2, grad_gamma, grad_beta); // compute grad_input const uint64_t maxGridY = prop.maxGridSize[1]; const dim3 blocks1(1, std::min<unsigned int>(static_cast<unsigned int>(n1), static_cast<unsigned int>(maxGridY)), 1); dim3 threads1(warp_size, 4, 1); #ifdef __HIP_PLATFORM_HCC__ // Optimization for ROCm MI100 threads1.y = 2; #endif int nshared = threads1.y > 1 ? threads1.y * threads1.x * sizeof(U) : 0; if (mean == nullptr && !simplified) { if (gamma == nullptr) { cuComputeGradInput<T, U, false, false, false><<<blocks1, threads1, nshared, stream>>>( dout, input, output, gamma, beta, mean, invvar, n1, n2, grad_input); } else { cuComputeGradInput<T, U, false, true, false><<<blocks1, threads1, nshared, stream>>>( dout, input, output, gamma, beta, mean, invvar, n1, n2, grad_input); } } else { if (gamma == nullptr) { cuComputeGradInput<T, U, true, false, simplified><<<blocks1, threads1, nshared, stream>>>( dout, input, output, gamma, beta, mean, invvar, n1, n2, grad_input); } else { cuComputeGradInput<T, U, true, true, simplified><<<blocks1, threads1, nshared, stream>>>( dout, input, output, gamma, beta, mean, invvar, n1, n2, grad_input); } } } #define LAYERNORMGRAD_IMPL(T, U, simplified) \ template void HostLayerNormGradient<T, U, simplified>(const cudaDeviceProp& prop, cudaStream_t stream, const T* dout, const T* input, const T* output, \ const T* gamma, const T* beta, const U* mean, const U* invvar, int64_t n1, int64_t n2, \ T* grad_input, T* grad_gamma, T* grad_beta, U* part_grad_gamma, U* part_grad_beta, const int part_size); LAYERNORMGRAD_IMPL(float, float, true) LAYERNORMGRAD_IMPL(double, double, true) LAYERNORMGRAD_IMPL(half, float, true) LAYERNORMGRAD_IMPL(float, float, false) LAYERNORMGRAD_IMPL(double, double, false) LAYERNORMGRAD_IMPL(half, float, false) #if CUDA_VERSION >= 11000 && (__CUDA_ARCH__ >= 800 || !defined(__CUDA_ARCH__)) LAYERNORMGRAD_IMPL(nv_bfloat16, float, true) LAYERNORMGRAD_IMPL(nv_bfloat16, float, false) #endif } // namespace cuda } // namespace onnxruntime
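// A minimal sketch of the intra-warp butterfly reduction that cuComputeGradInput above
// performs through the WARP_SHFL_XOR helper. Assumes a full 32-lane warp and the
// CUDA 9+ synchronized shuffle intrinsics; the function name is illustrative.
#include <cuda_runtime.h>

__device__ float warp_reduce_sum(float val) {
  // after log2(32) = 5 butterfly steps every lane holds the sum over the whole warp
  for (int mask = 16; mask > 0; mask >>= 1)
    val += __shfl_xor_sync(0xffffffffu, val, mask);
  return val;
}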
c9d4e37316e1f5a2655355456809083bb5c80249.hip
// !!! This is a file automatically generated by hipify!!! /* 1-bit BMMA code. Runs at 500TOPS for matrix size of 4096x4096x8192. Borrows largely from CUDA-SDK. By Boyuan */ #include <assert.h> #include <hip/hip_runtime.h> #include <mma.h> #include <stdio.h> #include <helper_cuda.h> #include <helper_functions.h> #define CHUNK_K 4 #define SKEW 1 #define WARPS_PER_BLOCK 8 #define WARP_SIZE 32 #define THREADS_PER_BLOCK WARP_SIZE * WARPS_PER_BLOCK #define CHUNK_LINE_BYTES CHUNK_K * sizeof(int4) #define WARP_COPY_BYTES WARP_SIZE * sizeof(int4) #define CHUNK_COPY_LINES_PER_WARP WARP_COPY_BYTES / CHUNK_LINE_BYTES #define CHUNK_COPY_LINE_LANES WARP_SIZE / CHUNK_COPY_LINES_PER_WARP #define BLOCK_ROW_WARPS 2 #define BLOCK_COL_WARPS 4 #define WARP_ROW_TILES 8 #define WARP_COL_TILES 4 #define BLOCK_ROW_TILES WARP_ROW_TILES * BLOCK_ROW_WARPS #define BLOCK_COL_TILES WARP_COL_TILES * BLOCK_COL_WARPS #define M 8 #define N 8 #define K 128 #define checkKernelErrors(expr) \ do { \ expr; \ \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \ hipGetErrorString(__err)); \ abort(); \ } \ } while (0) using namespace nvcuda; using namespace nvcuda::wmma::experimental; typedef union { int4 vec; int a[4]; } U4; // Assume that Kernel size is 3x3. // Assume CIN is 128. __global__ void compute_conv_imma(const int4 *W, const int4 *X, int *Output, int Height, int Width, int CIN, int COUT) { // GEMM Configuration int X_bit_offset = (Height+2) * (Width+2) * CIN/128; int W_bit_offset = 9*CIN*COUT/128; int X_ROW_BIT = (Width+2)*CIN/128; int W_ROW_BIT = 9*(CIN/128); // if (blockIdx.x == 0 && threadIdx.x == 0) { // // for(int i = 0; i<Height*Width*CIN/32*BIT; i++) { // // printf("X[%d]: %x\n", i, *((int*)X+i)); // // } // for(int i = 0; i<COUT*9*CIN/32; i++) { // printf("W[%d]: %x\n", i, *((int*)W+i)); // } // } extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here. wmma::fragment<wmma::accumulator, 8, 8, 128, int> c[WARP_COL_TILES][WARP_ROW_TILES]; wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES]; wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES]; // Warp and lane identification. const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_i = (block_pos/(COUT/128)) / (Width/8) * 4; const unsigned int block_j = (block_pos/(COUT/128)) % (Width/8) * 8; const unsigned int block_z = block_pos % (COUT/128) * 128; if (block_i >= Height) { break; } int image_starting_idx = block_i * (Width+2) * CIN/128 + block_j * CIN/128; for(int i=0; i < WARP_COL_TILES; i++) for(int j=0; j < WARP_ROW_TILES; j++) wmma::fill_fragment(c[i][j], 0); // On the K dimension, there are 9*CIN/128 element to solve. // This for loop computes [0,1,2,...,int(9*CIN/128/CHUNK_K)*CHUNK_K-1]. Next for loop computes [int(9*CIN/128/CHUNK_K)*CHUNK_K, ..., 9*CIN/128-1] // Go through the global K dimension by a fixed step at a time. 
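  // Worked example: with CIN = 128 the K dimension holds 9*CIN/128 = 9 int4
  // elements per bit-plane. The strip-mined loop below (step CHUNK_K = 4) runs
  // for tile_k = 0 and 4, covering k = 0..7; the tail loop further down starts
  // at int(9/4)*4 = 8 and picks up the remaining element k = 8.
  // NB: when 9*CIN/128 is an exact multiple of CHUNK_K (e.g. CIN = 512), the
  // "<" test here stops one full chunk early and the tail loop starts past the
  // end, so the last CHUNK_K elements appear to be visited by neither loop.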
#pragma unroll for (int tile_k = 0; tile_k+CHUNK_K < 9*CIN/128; tile_k += CHUNK_K) { int SHMEM_i = threadIdx.x/4; int bit_flag = SHMEM_i / 32; int SHMEM_offset = SHMEM_i % 32; int row = SHMEM_offset / 8; int col = SHMEM_offset % 8; int t = threadIdx.x % 4; int sub_row = (tile_k+t)/(3*CIN/128); int sub_col = (tile_k+t)%(3*CIN/128); int GL_idx = image_starting_idx + bit_flag*X_bit_offset + row*X_ROW_BIT + col*CIN/128 + sub_row*X_ROW_BIT + sub_col; // if (block_pos == 0 && tile_k ==0 && SHMEM_i == 1) { // printf("tile_k: %d, block_i: %d, block_j: %d, row: %d, col: %d, sub_row: %d, sub_col: %d, GL_idx: %d\n", tile_k, block_i, block_j, row, col, sub_row, sub_col, GL_idx); // printf("X[17]: %x %x %x %x\n", *((int*)X+ 4*17), *((int*)X+ 4*17+1), *((int*)X+ 4*17+2), *((int*)X+ 4*17+3)); // } shmem[SHMEM_i][t] = X[GL_idx]; SHMEM_i += 64; shmem[SHMEM_i][t] = X[GL_idx+2*X_bit_offset]; SHMEM_i += 64; int weight_load_idx = (block_z + threadIdx.x/4) * W_ROW_BIT + tile_k + t; shmem[SHMEM_i][t] = W[weight_load_idx]; SHMEM_i += 64; shmem[SHMEM_i][t] = W[weight_load_idx+64*W_ROW_BIT]; __syncthreads(); // if (block_pos == 6 && warpId == 0 && laneId == 0) { // for(int i = 0; i < 256; i++) { // for(int j = 0; j < 16; j++) { // int *tile_ptr = (int*)&shmem[0][0] + i*20 + j; // printf("tile_k: %d, i: %d, j: %d, val: %x\n", tile_k, i, j, *tile_ptr); // } // } // } // Compute a grid of C matrix tiles in each warp. #pragma unroll for (int k_step = 0; k_step < CHUNK_K; k_step++) { #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId / 2) * M * 4 + (i * M); const int4 *tile_ptr = &shmem[shmem_idx_a][k_step]; wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128); // if (block_pos == 0 && warpId == 0) { // // printf("tile_k: %d, k_step: %d, shmem_idx_a: %d\n", tile_k, k_step, shmem_idx_a); // for(int t = 0; t<a[i].num_elements; t++) { // printf("tile_k: %d, k_step: %d, a[%d].x[%d]: %x\n", tile_k, k_step, i, t, a[i].x[t]); // } // } #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be // reused against the other A matrix fragments. size_t shmem_idx_b = 128 + (WARP_ROW_TILES * N) * (warpId % 2) + (j * N); const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)]; wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128); // wmma::fill_fragment(b[j], 0x5); } // printf("ckpt4\n"); // if (block_pos == 0 && warpId == 0 && laneId == 0 && tile_k == 0) { // for(int t = 0; t<b[j].num_elements; t++) { // printf("b[%d].x[%d]: %x\n", j, t, b[j].x[t]); // } // } wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND); } } } __syncthreads(); // if (block_pos == 0 && warpId == 0) { // for(int t = 0; t<c[0][0].num_elements; t++) { // printf("tile_k: %d, c[0][0].x[%d]: %d\n", tile_k, t, c[0][0].x[t]); // } // } } // int *shmem_warp_tile_ptr = (int*)&shmem[0][0] + // (warpId / 2) * 128 * 32 + // (warpId % 2) * 64; // Will be used only when writing back D. May be moved outside the for loop. TODO. // // Store the D fragments to shared memory. 
// #pragma unroll // for (int i = 0; i < WARP_COL_TILES; i++) { // #pragma unroll // for (int j = 0; j < WARP_ROW_TILES; j++) { // int *tile_ptr = shmem_warp_tile_ptr + i*128*8 + j*8; // wmma::store_matrix_sync(tile_ptr, c[i][j], 128, wmma::mem_row_major); // } // } // __syncthreads(); // if (block_pos == 0 && warpId == 0 && laneId == 0) { // for(int i = 1; i < 2; i++) { // for(int j = 0; j < 2; j++) { // int *tile_ptr = (int*)&shmem[0][0] + i*128 + j; // printf("i: %d, j: %d, val: %d\n", i, j, *tile_ptr); // } // } // } #pragma unroll for (int tile_k = int(9*CIN/128/CHUNK_K)*CHUNK_K; tile_k < 9*CIN/128; tile_k++) { int SHMEM_i = threadIdx.x/4; int bit_flag = SHMEM_i / 32; int SHMEM_offset = SHMEM_i % 32; int row = SHMEM_offset / 8; int col = SHMEM_offset % 8; int t = threadIdx.x % 4; int sub_row = (tile_k)/(3*CIN/128); int sub_col = (tile_k)%(3*CIN/128); int GL_idx = image_starting_idx + bit_flag*X_bit_offset + row*X_ROW_BIT + col*CIN/128 + sub_row*X_ROW_BIT + sub_col; *((int*)&shmem[SHMEM_i][0] + t) = *((int*)&X[GL_idx] + t); SHMEM_i += 64; *((int*)&shmem[SHMEM_i][0] + t) = *((int*)&X[GL_idx+2*X_bit_offset] + t); SHMEM_i += 64; int weight_load_idx = (block_z + threadIdx.x/4) * W_ROW_BIT + tile_k; *((int*)&shmem[SHMEM_i][0] + t) = *((int*)&W[weight_load_idx] + t); SHMEM_i += 64; *((int*)&shmem[SHMEM_i][0] + t) = *((int*)&W[weight_load_idx+64*W_ROW_BIT] + t); __syncthreads(); // if (block_pos == 6 && warpId == 0 && laneId == 0) { // for(int i = 0; i < 256; i++) { // for(int j = 0; j < 4; j++) { // int *tile_ptr = (int*)&shmem[0][0] + i*20 + j; // printf("tile_k: %d, i: %d, j: %d, val: %x\n", tile_k, i, j, *tile_ptr); // } // } // } // Compute a grid of C matrix tiles in each warp. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId / 2) * M * 4 + (i * M); const int4 *tile_ptr = &shmem[shmem_idx_a][0]; wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128); #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be // reused against the other A matrix fragments. size_t shmem_idx_b = 128 + (WARP_ROW_TILES * N) * (warpId % 2) + (j * N); const int4 *tile_ptr = &shmem[shmem_idx_b][0]; wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128); } // printf("ckpt4\n"); wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND); } } __syncthreads(); } // if (block_pos == 0){ // && warpId == 4 && laneId == 0) { // for(int t = 0; t<c[0][0].num_elements; t++) { // printf("c[0][0].x[%d]: %d\n", t, c[0][0].x[t]); // } // } // This pointer is used to access the C and D matrix tiles this warp computes. int *shmem_warp_tile_ptr = (int*)&shmem[0][0] + (warpId / 2) * 128 * 32 + (warpId % 2) * 64; // Will be used only when writing back D. May be moved outside the for loop. TODO. // Store the D fragments to shared memory. 
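  // Epilogue layout: the accumulator tile stored to shared memory below is
  // 128 rows x 128 int columns, organised as four 32-row bands, one per
  // activation bit-plane (rows 0-31 = bit 0, ..., rows 96-127 = bit 3); each
  // band holds the popcount partial sums of this block's 4x8 spatial tile
  // against 128 output channels. The streaming loop that follows reads one row
  // from each band and recombines them as tmp0 + 2*tmp1 + 4*tmp2 + 8*tmp3,
  // i.e. weighting bit-plane b by 2^b (W_BIT == 1, so weights add no scaling).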
#pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { int *tile_ptr = shmem_warp_tile_ptr + i*128*8 + j*8; wmma::store_matrix_sync(tile_ptr, c[i][j], 128, wmma::mem_row_major); } } __syncthreads(); // if (block_pos == 6 && warpId == 0 && laneId == 0) { // for(int i = 0; i < 128; i++) { // for(int j = 0; j < 64; j++) { // int *tile_ptr = (int*)&shmem[0][0] + i*128 + j; // printf("i: %d, j: %d, val: %d\n", i, j, *tile_ptr); // } // } // } U4 tmp0; U4 tmp1; U4 tmp2; U4 tmp3; U4 val[4]; int *shmem_warp_stream_ptr = (int*)&shmem[0][0]+threadIdx.x/32*4*128 + (threadIdx.x%32)*4; #pragma unroll for(int i = 0; i < 4; i++) { tmp0.vec = *((int4*)shmem_warp_stream_ptr); tmp1.vec = *((int4*)shmem_warp_stream_ptr+32*32); tmp2.vec = *((int4*)shmem_warp_stream_ptr+64*32); tmp3.vec = *((int4*)shmem_warp_stream_ptr+96*32); val[i].a[0] = tmp0.a[0] + 2*tmp1.a[0] + 4*tmp2.a[0] + 8*tmp3.a[0]; val[i].a[1] = tmp0.a[1] + 2*tmp1.a[1] + 4*tmp2.a[1] + 8*tmp3.a[1]; val[i].a[2] = tmp0.a[2] + 2*tmp1.a[2] + 4*tmp2.a[2] + 8*tmp3.a[2]; val[i].a[3] = tmp0.a[3] + 2*tmp1.a[3] + 4*tmp2.a[3] + 8*tmp3.a[3]; shmem_warp_stream_ptr += 128; // if (block_pos == 6 && warpId == 0 && laneId == 0) { // printf("tmp0: %d %d %d %d\n", tmp0.a[0], tmp0.a[1], tmp0.a[2], tmp0.a[3]); // printf("tmp1: %d %d %d %d\n", tmp1.a[0], tmp1.a[1], tmp1.a[2], tmp1.a[3]); // printf("tmp2: %d %d %d %d\n", tmp2.a[0], tmp2.a[1], tmp2.a[2], tmp2.a[3]); // printf("tmp3: %d %d %d %d\n", tmp3.a[0], tmp3.a[1], tmp3.a[2], tmp3.a[3]); // printf("val[i]: %d %d %d %d \n", val[i].a[0], val[i].a[1], val[i].a[2], val[i].a[3]); // } } int SHMEM_row = threadIdx.x/32*4; int SHMEM_col = threadIdx.x%32; int Output_row = SHMEM_row/8; int Output_col = SHMEM_row%8; int* dst_gmem_warp_stream_ptr = Output + block_i*Width * COUT + block_j*COUT + block_z + Output_row*Width*COUT + Output_col*COUT + SHMEM_col*4; // if (block_pos == 0) { // printf("block_i: %d, block_j: %d, block_z: %d, threadIdx.x: %d, Output_row: %d, Output_col: %d, idx: %d\n", block_i, block_j, block_z, // threadIdx.x, Output_row, Output_col, // block_i * Width * COUT + block_j*COUT + block_z // + Output_row*Width*COUT + Output_col*COUT // + SHMEM_col*4); // } #pragma unroll for(int i=0; i<4; i++) { *(int4*)dst_gmem_warp_stream_ptr = val[i].vec; SHMEM_row += 1; Output_row = SHMEM_row/8; Output_col = SHMEM_row%8; dst_gmem_warp_stream_ptr = Output + block_i * Width * COUT + block_j*COUT + block_z + Output_row*Width*COUT + Output_col*COUT + SHMEM_col*4; } __syncthreads(); } } void init_matrices(int4 *X, int4 *W, int Height, int Width, int CIN, int COUT, int X_BIT, int W_BIT){ int *X_int = (int*) X; int *W_int = (int*) W; for(int b = 0; b<X_BIT; b++) { for(int i=0; i < Height+2; i++) { for(int j=0; j < Width+2; j++) { for(int k = 0; k < CIN/32; k++) { // X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = 0xFFFFFFFF; // X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = i; // X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = j; X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = rand(); } } } } for(int b=0; b<W_BIT; b++) { for(int i = 0; i < COUT; i++) { for(int j = 0; j < 9*CIN/32; j++) { // W_int[b*COUT*9*CIN/32+i*9*CIN/32+j] = 0xFFFFFFFF; W_int[b*COUT*9*CIN/32+i*9*CIN/32+j] = rand(); // W_int[b*COUT*9*CIN/32+i*9*CIN/32+j] = i; } } } } // int popcnt(int i) { // // Java: use int, and use >>> instead of >> // // C or C++: use int // i = i - ((i >> 1) & 0x55555555); 
// i = (i & 0x33333333) + ((i >> 2) & 0x33333333); // return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; // } int int_pow(int base, int exp) { int result = 1; while (exp) { if (exp % 2) result *= base; exp /= 2; base *= base; } return result; } void compute_ref(int4 *X, int4 *W, int *ref_C, int Height, int Width, int CIN, int COUT, int X_BIT, int W_BIT) { int *X_int = (int*) X; int *W_int = (int*) W; for (int co=0; co<COUT; co++) { for (int m = 0; m < Height; m++) { for (int n = 0; n < Width; n++) { int tmp = 0; for(int xb=0; xb<X_BIT; xb++) { int X_Multiplier = int_pow(2,xb); for(int wb=0; wb<W_BIT; wb++) { int W_Multiplier = int_pow(2,wb); for(int i=0; i<3; i++) { for(int j=0; j<3; j++) { for(int k_tile=0; k_tile<CIN/32; k_tile++) { int x_int = X_int[xb*(Height+2)*(Width+2)*CIN/32 + (m+i)*(Width+2)*CIN/32 + (n+j)*CIN/32 + k_tile]; int w_int = W_int[wb*COUT*9*CIN/32 + co*9*CIN/32 + i*3*CIN/32 + j*CIN/32 + k_tile]; for(int k=0; k<32; k++) { int mask = 1; int x_val = ((mask << k) & x_int) >> k; int w_val = ((mask << k) & w_int) >> k; tmp += X_Multiplier * W_Multiplier * x_val * w_val; } // if(m==0 && n==1 && co == 0) { // printf("xb: %d, i: %d, j: %d, k_tile: %d, x_int: %x, w_int: %x, tmp: %d, idx: %d\n", xb, i, j, k_tile, x_int, w_int, tmp, xb*Height*Width*CIN/32 + (m+i)*Width*CIN/32 + (n+j)*CIN/32 + k_tile); // } } } } } } ref_C[m*Width*COUT + n*COUT + co]= tmp; } } } } void validate_results(int *C, int* ref_C, int Height, int Width, int COUT) { printf("Checking computed result for correctness: \n"); bool correct = true; double eps = 1.e-6; // machine zero for(int i = 0; i < Height; i++) { for(int j = 0; j < Width; j++) { for(int co=0; co<COUT; co++) { int idx = i*Width*COUT+j*COUT+co; double dst = fabs(C[idx] - ref_C[idx]); double abs = fabs(C[idx]) * fabs(ref_C[idx]); double ref_err = dst / abs; if (ref_err > eps) { // printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps); printf("i: %d, j: %d, co: %d, C: %d, ref_C: %d\n", i, j, co, C[idx], ref_C[idx]); // printf("non equal\n"); correct = false; } } } } printf("%s\n", correct ? 
"Result = PASS" : "Result = FAIL"); } // #define verify_output int main(int argc, char **argv) { printf("Initializing...\n"); int dev = findCudaDevice(argc, (const char **)argv); hipDeviceProp_t deviceProp; checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev)); int Height = 32; int Width = 32; int X_BIT = 4; int W_BIT = 1; for(int CIN = 128; CIN <= 2048; CIN+=128) { // int CIN = 128; // int COUT = 128; int COUT = CIN; int4 *X = NULL; int4 *W = NULL; int *Output = NULL; checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * (Height+2) * (Width+2) * (CIN/128) * X_BIT)); checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * 9 * (CIN/128) * COUT * W_BIT)); checkCudaErrors(hipMalloc(reinterpret_cast<void **>(&Output), sizeof(int4) * Height * Width * COUT )); #ifdef verify_output printf("Preparing validation data for GPU...\n"); int4 *W_h = NULL; int4 *X_h = NULL; int *Output_h = NULL; X_h = (int4 *)malloc(sizeof(int4) * (Height+2) * (Width+2) * (CIN/128) * X_BIT); W_h = (int4 *)malloc(sizeof(int4) * 9 * (CIN/128) * COUT * W_BIT); Output_h = (int *)malloc(sizeof(int) * (Height+2) * (Width+2) * COUT); init_matrices(X_h, W_h, Height, Width, CIN, COUT, X_BIT, W_BIT); checkCudaErrors(hipMemcpy(X, X_h, sizeof(int4) * (Height+2) * (Width+2) * (CIN/128) * X_BIT, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(W, W_h, sizeof(int4) * 9 * (CIN/128) * COUT * W_BIT, hipMemcpyHostToDevice)); #endif int SHMEM_SZ = 65536; checkCudaErrors(hipFuncSetAttribute( compute_conv_imma, hipFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ)); // Run ours NUM_PROFILES times and record time. float bmma_ms_avg = 0.0f; int NUM_PROFILES = 1; for(int iter=0; iter<NUM_PROFILES; ++iter){ float bmma_ms = 0.0f; hipEvent_t bmma_start; hipEvent_t bmma_end; hipEventCreate(&bmma_start); hipEventCreate(&bmma_end); hipEventRecord(bmma_start); checkKernelErrors( hipLaunchKernelGGL(( (compute_conv_imma), dim3(deviceProp.multiProcessorCount), dim3(THREADS_PER_BLOCK), SHMEM_SZ, 0, W, X, Output, Height, Width, CIN, COUT))); hipEventRecord(bmma_end); hipEventSynchronize(bmma_end); hipEventElapsedTime(&bmma_ms, bmma_start, bmma_end); hipEventDestroy(bmma_start); hipEventDestroy(bmma_end); bmma_ms_avg += bmma_ms; } bmma_ms_avg = bmma_ms_avg/(double)NUM_PROFILES; printf("H: %d, W: %d, CIN: %d, COUT: %d, W_BIT: %d, X_BIT: %d\n", Height, Width, CIN, COUT, W_BIT, X_BIT); printf("Time: %f ms\n", bmma_ms_avg); printf("TOPS: %.2f\n\n", (((double)9 * CIN * Height * Width * COUT * 2)/(bmma_ms_avg/1000.)) / 1e12); } #ifdef verify_output printf("Validating results...\n"); checkCudaErrors(hipMemcpy(Output_h, Output, sizeof(int) * Height * Width * COUT, hipMemcpyDeviceToHost)); int *C_ref = (int *)malloc(sizeof(int) * Height * Width * COUT); /* Copmpute reference matrix on CPU */ compute_ref(X_h, W_h, C_ref, Height, Width, CIN, COUT, X_BIT, W_BIT); /* validation results */ validate_results(Output_h, C_ref, Height, Width, COUT); #endif // free(A_h); // free(B_h); // free(C_h); // checkCudaErrors(hipFree(reinterpret_cast<void *>(A))); // checkCudaErrors(hipFree(reinterpret_cast<void *>(B))); // checkCudaErrors(hipFree(reinterpret_cast<void *>(C))); return EXIT_SUCCESS; }
c9d4e37316e1f5a2655355456809083bb5c80249.cu
/* 1-bit BMMA code. Runs at 500TOPS for matrix size of 4096x4096x8192. Borrows largely from CUDA-SDK. By Boyuan */ #include <assert.h> #include <cuda.h> #include <mma.h> #include <stdio.h> #include <helper_cuda.h> #include <helper_functions.h> #define CHUNK_K 4 #define SKEW 1 #define WARPS_PER_BLOCK 8 #define WARP_SIZE 32 #define THREADS_PER_BLOCK WARP_SIZE * WARPS_PER_BLOCK #define CHUNK_LINE_BYTES CHUNK_K * sizeof(int4) #define WARP_COPY_BYTES WARP_SIZE * sizeof(int4) #define CHUNK_COPY_LINES_PER_WARP WARP_COPY_BYTES / CHUNK_LINE_BYTES #define CHUNK_COPY_LINE_LANES WARP_SIZE / CHUNK_COPY_LINES_PER_WARP #define BLOCK_ROW_WARPS 2 #define BLOCK_COL_WARPS 4 #define WARP_ROW_TILES 8 #define WARP_COL_TILES 4 #define BLOCK_ROW_TILES WARP_ROW_TILES * BLOCK_ROW_WARPS #define BLOCK_COL_TILES WARP_COL_TILES * BLOCK_COL_WARPS #define M 8 #define N 8 #define K 128 #define checkKernelErrors(expr) \ do { \ expr; \ \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ printf("Line %d: '%s' failed: %s\n", __LINE__, #expr, \ cudaGetErrorString(__err)); \ abort(); \ } \ } while (0) using namespace nvcuda; using namespace nvcuda::wmma::experimental; typedef union { int4 vec; int a[4]; } U4; // Assume that Kernel size is 3x3. // Assume CIN is 128. __global__ void compute_conv_imma(const int4 *W, const int4 *X, int *Output, int Height, int Width, int CIN, int COUT) { // GEMM Configuration int X_bit_offset = (Height+2) * (Width+2) * CIN/128; int W_bit_offset = 9*CIN*COUT/128; int X_ROW_BIT = (Width+2)*CIN/128; int W_ROW_BIT = 9*(CIN/128); // if (blockIdx.x == 0 && threadIdx.x == 0) { // // for(int i = 0; i<Height*Width*CIN/32*BIT; i++) { // // printf("X[%d]: %x\n", i, *((int*)X+i)); // // } // for(int i = 0; i<COUT*9*CIN/32; i++) { // printf("W[%d]: %x\n", i, *((int*)W+i)); // } // } extern __shared__ int4 shmem[][CHUNK_K+SKEW]; // TODO: Padding opportunity may exist here. wmma::fragment<wmma::accumulator, 8, 8, 128, int> c[WARP_COL_TILES][WARP_ROW_TILES]; wmma::fragment<wmma::matrix_a, M, N, K, precision::b1, wmma::row_major> a[WARP_COL_TILES]; wmma::fragment<wmma::matrix_b, M, N, K, precision::b1, wmma::col_major> b[WARP_ROW_TILES]; // Warp and lane identification. const unsigned int warpId = threadIdx.x / WARP_SIZE; const unsigned int laneId = threadIdx.x % WARP_SIZE; for (unsigned int block_pos = blockIdx.x;; block_pos += gridDim.x) { const unsigned int block_i = (block_pos/(COUT/128)) / (Width/8) * 4; const unsigned int block_j = (block_pos/(COUT/128)) % (Width/8) * 8; const unsigned int block_z = block_pos % (COUT/128) * 128; if (block_i >= Height) { break; } int image_starting_idx = block_i * (Width+2) * CIN/128 + block_j * CIN/128; for(int i=0; i < WARP_COL_TILES; i++) for(int j=0; j < WARP_ROW_TILES; j++) wmma::fill_fragment(c[i][j], 0); // On the K dimension, there are 9*CIN/128 element to solve. // This for loop computes [0,1,2,...,int(9*CIN/128/CHUNK_K)*CHUNK_K-1]. Next for loop computes [int(9*CIN/128/CHUNK_K)*CHUNK_K, ..., 9*CIN/128-1] // Go through the global K dimension by a fixed step at a time. 
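  // Worked example: with CIN = 128 the K dimension holds 9*CIN/128 = 9 int4
  // elements per bit-plane. The strip-mined loop below (step CHUNK_K = 4) runs
  // for tile_k = 0 and 4, covering k = 0..7; the tail loop further down starts
  // at int(9/4)*4 = 8 and picks up the remaining element k = 8.
  // NB: when 9*CIN/128 is an exact multiple of CHUNK_K (e.g. CIN = 512), the
  // "<" test here stops one full chunk early and the tail loop starts past the
  // end, so the last CHUNK_K elements appear to be visited by neither loop.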
#pragma unroll for (int tile_k = 0; tile_k+CHUNK_K < 9*CIN/128; tile_k += CHUNK_K) { int SHMEM_i = threadIdx.x/4; int bit_flag = SHMEM_i / 32; int SHMEM_offset = SHMEM_i % 32; int row = SHMEM_offset / 8; int col = SHMEM_offset % 8; int t = threadIdx.x % 4; int sub_row = (tile_k+t)/(3*CIN/128); int sub_col = (tile_k+t)%(3*CIN/128); int GL_idx = image_starting_idx + bit_flag*X_bit_offset + row*X_ROW_BIT + col*CIN/128 + sub_row*X_ROW_BIT + sub_col; // if (block_pos == 0 && tile_k ==0 && SHMEM_i == 1) { // printf("tile_k: %d, block_i: %d, block_j: %d, row: %d, col: %d, sub_row: %d, sub_col: %d, GL_idx: %d\n", tile_k, block_i, block_j, row, col, sub_row, sub_col, GL_idx); // printf("X[17]: %x %x %x %x\n", *((int*)X+ 4*17), *((int*)X+ 4*17+1), *((int*)X+ 4*17+2), *((int*)X+ 4*17+3)); // } shmem[SHMEM_i][t] = X[GL_idx]; SHMEM_i += 64; shmem[SHMEM_i][t] = X[GL_idx+2*X_bit_offset]; SHMEM_i += 64; int weight_load_idx = (block_z + threadIdx.x/4) * W_ROW_BIT + tile_k + t; shmem[SHMEM_i][t] = W[weight_load_idx]; SHMEM_i += 64; shmem[SHMEM_i][t] = W[weight_load_idx+64*W_ROW_BIT]; __syncthreads(); // if (block_pos == 6 && warpId == 0 && laneId == 0) { // for(int i = 0; i < 256; i++) { // for(int j = 0; j < 16; j++) { // int *tile_ptr = (int*)&shmem[0][0] + i*20 + j; // printf("tile_k: %d, i: %d, j: %d, val: %x\n", tile_k, i, j, *tile_ptr); // } // } // } // Compute a grid of C matrix tiles in each warp. #pragma unroll for (int k_step = 0; k_step < CHUNK_K; k_step++) { #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId / 2) * M * 4 + (i * M); const int4 *tile_ptr = &shmem[shmem_idx_a][k_step]; wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128); // if (block_pos == 0 && warpId == 0) { // // printf("tile_k: %d, k_step: %d, shmem_idx_a: %d\n", tile_k, k_step, shmem_idx_a); // for(int t = 0; t<a[i].num_elements; t++) { // printf("tile_k: %d, k_step: %d, a[%d].x[%d]: %x\n", tile_k, k_step, i, t, a[i].x[t]); // } // } #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be // reused against the other A matrix fragments. size_t shmem_idx_b = 128 + (WARP_ROW_TILES * N) * (warpId % 2) + (j * N); const int4 *tile_ptr = &shmem[shmem_idx_b][k_step * (K/128)]; wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128); // wmma::fill_fragment(b[j], 0x5); } // printf("ckpt4\n"); // if (block_pos == 0 && warpId == 0 && laneId == 0 && tile_k == 0) { // for(int t = 0; t<b[j].num_elements; t++) { // printf("b[%d].x[%d]: %x\n", j, t, b[j].x[t]); // } // } wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND); } } } __syncthreads(); // if (block_pos == 0 && warpId == 0) { // for(int t = 0; t<c[0][0].num_elements; t++) { // printf("tile_k: %d, c[0][0].x[%d]: %d\n", tile_k, t, c[0][0].x[t]); // } // } } // int *shmem_warp_tile_ptr = (int*)&shmem[0][0] + // (warpId / 2) * 128 * 32 + // (warpId % 2) * 64; // Will be used only when writing back D. May be moved outside the for loop. TODO. // // Store the D fragments to shared memory. 
// #pragma unroll // for (int i = 0; i < WARP_COL_TILES; i++) { // #pragma unroll // for (int j = 0; j < WARP_ROW_TILES; j++) { // int *tile_ptr = shmem_warp_tile_ptr + i*128*8 + j*8; // wmma::store_matrix_sync(tile_ptr, c[i][j], 128, wmma::mem_row_major); // } // } // __syncthreads(); // if (block_pos == 0 && warpId == 0 && laneId == 0) { // for(int i = 1; i < 2; i++) { // for(int j = 0; j < 2; j++) { // int *tile_ptr = (int*)&shmem[0][0] + i*128 + j; // printf("i: %d, j: %d, val: %d\n", i, j, *tile_ptr); // } // } // } #pragma unroll for (int tile_k = int(9*CIN/128/CHUNK_K)*CHUNK_K; tile_k < 9*CIN/128; tile_k++) { int SHMEM_i = threadIdx.x/4; int bit_flag = SHMEM_i / 32; int SHMEM_offset = SHMEM_i % 32; int row = SHMEM_offset / 8; int col = SHMEM_offset % 8; int t = threadIdx.x % 4; int sub_row = (tile_k)/(3*CIN/128); int sub_col = (tile_k)%(3*CIN/128); int GL_idx = image_starting_idx + bit_flag*X_bit_offset + row*X_ROW_BIT + col*CIN/128 + sub_row*X_ROW_BIT + sub_col; *((int*)&shmem[SHMEM_i][0] + t) = *((int*)&X[GL_idx] + t); SHMEM_i += 64; *((int*)&shmem[SHMEM_i][0] + t) = *((int*)&X[GL_idx+2*X_bit_offset] + t); SHMEM_i += 64; int weight_load_idx = (block_z + threadIdx.x/4) * W_ROW_BIT + tile_k; *((int*)&shmem[SHMEM_i][0] + t) = *((int*)&W[weight_load_idx] + t); SHMEM_i += 64; *((int*)&shmem[SHMEM_i][0] + t) = *((int*)&W[weight_load_idx+64*W_ROW_BIT] + t); __syncthreads(); // if (block_pos == 6 && warpId == 0 && laneId == 0) { // for(int i = 0; i < 256; i++) { // for(int j = 0; j < 4; j++) { // int *tile_ptr = (int*)&shmem[0][0] + i*20 + j; // printf("tile_k: %d, i: %d, j: %d, val: %x\n", tile_k, i, j, *tile_ptr); // } // } // } // Compute a grid of C matrix tiles in each warp. #pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { size_t shmem_idx_a = (warpId / 2) * M * 4 + (i * M); const int4 *tile_ptr = &shmem[shmem_idx_a][0]; wmma::load_matrix_sync(a[i], tile_ptr, (CHUNK_K + SKEW)*128); #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { if (i == 0) { // Load the B matrix fragment once, because it is going to be // reused against the other A matrix fragments. size_t shmem_idx_b = 128 + (WARP_ROW_TILES * N) * (warpId % 2) + (j * N); const int4 *tile_ptr = &shmem[shmem_idx_b][0]; wmma::load_matrix_sync(b[j], tile_ptr, (CHUNK_K + SKEW)*128); } // printf("ckpt4\n"); wmma::bmma_sync(c[i][j], a[i], b[j], c[i][j], bmmaBitOpAND); } } __syncthreads(); } // if (block_pos == 0){ // && warpId == 4 && laneId == 0) { // for(int t = 0; t<c[0][0].num_elements; t++) { // printf("c[0][0].x[%d]: %d\n", t, c[0][0].x[t]); // } // } // This pointer is used to access the C and D matrix tiles this warp computes. int *shmem_warp_tile_ptr = (int*)&shmem[0][0] + (warpId / 2) * 128 * 32 + (warpId % 2) * 64; // Will be used only when writing back D. May be moved outside the for loop. TODO. // Store the D fragments to shared memory. 
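  // Epilogue layout: the accumulator tile stored to shared memory below is
  // 128 rows x 128 int columns, organised as four 32-row bands, one per
  // activation bit-plane (rows 0-31 = bit 0, ..., rows 96-127 = bit 3); each
  // band holds the popcount partial sums of this block's 4x8 spatial tile
  // against 128 output channels. The streaming loop that follows reads one row
  // from each band and recombines them as tmp0 + 2*tmp1 + 4*tmp2 + 8*tmp3,
  // i.e. weighting bit-plane b by 2^b (W_BIT == 1, so weights add no scaling).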
#pragma unroll for (int i = 0; i < WARP_COL_TILES; i++) { #pragma unroll for (int j = 0; j < WARP_ROW_TILES; j++) { int *tile_ptr = shmem_warp_tile_ptr + i*128*8 + j*8; wmma::store_matrix_sync(tile_ptr, c[i][j], 128, wmma::mem_row_major); } } __syncthreads(); // if (block_pos == 6 && warpId == 0 && laneId == 0) { // for(int i = 0; i < 128; i++) { // for(int j = 0; j < 64; j++) { // int *tile_ptr = (int*)&shmem[0][0] + i*128 + j; // printf("i: %d, j: %d, val: %d\n", i, j, *tile_ptr); // } // } // } U4 tmp0; U4 tmp1; U4 tmp2; U4 tmp3; U4 val[4]; int *shmem_warp_stream_ptr = (int*)&shmem[0][0]+threadIdx.x/32*4*128 + (threadIdx.x%32)*4; #pragma unroll for(int i = 0; i < 4; i++) { tmp0.vec = *((int4*)shmem_warp_stream_ptr); tmp1.vec = *((int4*)shmem_warp_stream_ptr+32*32); tmp2.vec = *((int4*)shmem_warp_stream_ptr+64*32); tmp3.vec = *((int4*)shmem_warp_stream_ptr+96*32); val[i].a[0] = tmp0.a[0] + 2*tmp1.a[0] + 4*tmp2.a[0] + 8*tmp3.a[0]; val[i].a[1] = tmp0.a[1] + 2*tmp1.a[1] + 4*tmp2.a[1] + 8*tmp3.a[1]; val[i].a[2] = tmp0.a[2] + 2*tmp1.a[2] + 4*tmp2.a[2] + 8*tmp3.a[2]; val[i].a[3] = tmp0.a[3] + 2*tmp1.a[3] + 4*tmp2.a[3] + 8*tmp3.a[3]; shmem_warp_stream_ptr += 128; // if (block_pos == 6 && warpId == 0 && laneId == 0) { // printf("tmp0: %d %d %d %d\n", tmp0.a[0], tmp0.a[1], tmp0.a[2], tmp0.a[3]); // printf("tmp1: %d %d %d %d\n", tmp1.a[0], tmp1.a[1], tmp1.a[2], tmp1.a[3]); // printf("tmp2: %d %d %d %d\n", tmp2.a[0], tmp2.a[1], tmp2.a[2], tmp2.a[3]); // printf("tmp3: %d %d %d %d\n", tmp3.a[0], tmp3.a[1], tmp3.a[2], tmp3.a[3]); // printf("val[i]: %d %d %d %d \n", val[i].a[0], val[i].a[1], val[i].a[2], val[i].a[3]); // } } int SHMEM_row = threadIdx.x/32*4; int SHMEM_col = threadIdx.x%32; int Output_row = SHMEM_row/8; int Output_col = SHMEM_row%8; int* dst_gmem_warp_stream_ptr = Output + block_i*Width * COUT + block_j*COUT + block_z + Output_row*Width*COUT + Output_col*COUT + SHMEM_col*4; // if (block_pos == 0) { // printf("block_i: %d, block_j: %d, block_z: %d, threadIdx.x: %d, Output_row: %d, Output_col: %d, idx: %d\n", block_i, block_j, block_z, // threadIdx.x, Output_row, Output_col, // block_i * Width * COUT + block_j*COUT + block_z // + Output_row*Width*COUT + Output_col*COUT // + SHMEM_col*4); // } #pragma unroll for(int i=0; i<4; i++) { *(int4*)dst_gmem_warp_stream_ptr = val[i].vec; SHMEM_row += 1; Output_row = SHMEM_row/8; Output_col = SHMEM_row%8; dst_gmem_warp_stream_ptr = Output + block_i * Width * COUT + block_j*COUT + block_z + Output_row*Width*COUT + Output_col*COUT + SHMEM_col*4; } __syncthreads(); } } void init_matrices(int4 *X, int4 *W, int Height, int Width, int CIN, int COUT, int X_BIT, int W_BIT){ int *X_int = (int*) X; int *W_int = (int*) W; for(int b = 0; b<X_BIT; b++) { for(int i=0; i < Height+2; i++) { for(int j=0; j < Width+2; j++) { for(int k = 0; k < CIN/32; k++) { // X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = 0xFFFFFFFF; // X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = i; // X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = j; X_int[b*(Height+2)*(Width+2)*CIN/32 + i*(Width+2)*CIN/32 + j*CIN/32 + k] = rand(); } } } } for(int b=0; b<W_BIT; b++) { for(int i = 0; i < COUT; i++) { for(int j = 0; j < 9*CIN/32; j++) { // W_int[b*COUT*9*CIN/32+i*9*CIN/32+j] = 0xFFFFFFFF; W_int[b*COUT*9*CIN/32+i*9*CIN/32+j] = rand(); // W_int[b*COUT*9*CIN/32+i*9*CIN/32+j] = i; } } } } // int popcnt(int i) { // // Java: use int, and use >>> instead of >> // // C or C++: use int // i = i - ((i >> 1) & 0x55555555); 
// i = (i & 0x33333333) + ((i >> 2) & 0x33333333); // return (((i + (i >> 4)) & 0x0F0F0F0F) * 0x01010101) >> 24; // } int int_pow(int base, int exp) { int result = 1; while (exp) { if (exp % 2) result *= base; exp /= 2; base *= base; } return result; } void compute_ref(int4 *X, int4 *W, int *ref_C, int Height, int Width, int CIN, int COUT, int X_BIT, int W_BIT) { int *X_int = (int*) X; int *W_int = (int*) W; for (int co=0; co<COUT; co++) { for (int m = 0; m < Height; m++) { for (int n = 0; n < Width; n++) { int tmp = 0; for(int xb=0; xb<X_BIT; xb++) { int X_Multiplier = int_pow(2,xb); for(int wb=0; wb<W_BIT; wb++) { int W_Multiplier = int_pow(2,wb); for(int i=0; i<3; i++) { for(int j=0; j<3; j++) { for(int k_tile=0; k_tile<CIN/32; k_tile++) { int x_int = X_int[xb*(Height+2)*(Width+2)*CIN/32 + (m+i)*(Width+2)*CIN/32 + (n+j)*CIN/32 + k_tile]; int w_int = W_int[wb*COUT*9*CIN/32 + co*9*CIN/32 + i*3*CIN/32 + j*CIN/32 + k_tile]; for(int k=0; k<32; k++) { int mask = 1; int x_val = ((mask << k) & x_int) >> k; int w_val = ((mask << k) & w_int) >> k; tmp += X_Multiplier * W_Multiplier * x_val * w_val; } // if(m==0 && n==1 && co == 0) { // printf("xb: %d, i: %d, j: %d, k_tile: %d, x_int: %x, w_int: %x, tmp: %d, idx: %d\n", xb, i, j, k_tile, x_int, w_int, tmp, xb*Height*Width*CIN/32 + (m+i)*Width*CIN/32 + (n+j)*CIN/32 + k_tile); // } } } } } } ref_C[m*Width*COUT + n*COUT + co]= tmp; } } } } void validate_results(int *C, int* ref_C, int Height, int Width, int COUT) { printf("Checking computed result for correctness: \n"); bool correct = true; double eps = 1.e-6; // machine zero for(int i = 0; i < Height; i++) { for(int j = 0; j < Width; j++) { for(int co=0; co<COUT; co++) { int idx = i*Width*COUT+j*COUT+co; double dst = fabs(C[idx] - ref_C[idx]); double abs = fabs(C[idx]) * fabs(ref_C[idx]); double ref_err = dst / abs; if (ref_err > eps) { // printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n",, eps); printf("i: %d, j: %d, co: %d, C: %d, ref_C: %d\n", i, j, co, C[idx], ref_C[idx]); // printf("non equal\n"); correct = false; } } } } printf("%s\n", correct ? 
"Result = PASS" : "Result = FAIL"); } // #define verify_output int main(int argc, char **argv) { printf("Initializing...\n"); int dev = findCudaDevice(argc, (const char **)argv); cudaDeviceProp deviceProp; checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev)); int Height = 32; int Width = 32; int X_BIT = 4; int W_BIT = 1; for(int CIN = 128; CIN <= 2048; CIN+=128) { // int CIN = 128; // int COUT = 128; int COUT = CIN; int4 *X = NULL; int4 *W = NULL; int *Output = NULL; checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&X), sizeof(int4) * (Height+2) * (Width+2) * (CIN/128) * X_BIT)); checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&W), sizeof(int4) * 9 * (CIN/128) * COUT * W_BIT)); checkCudaErrors(cudaMalloc(reinterpret_cast<void **>(&Output), sizeof(int4) * Height * Width * COUT )); #ifdef verify_output printf("Preparing validation data for GPU...\n"); int4 *W_h = NULL; int4 *X_h = NULL; int *Output_h = NULL; X_h = (int4 *)malloc(sizeof(int4) * (Height+2) * (Width+2) * (CIN/128) * X_BIT); W_h = (int4 *)malloc(sizeof(int4) * 9 * (CIN/128) * COUT * W_BIT); Output_h = (int *)malloc(sizeof(int) * (Height+2) * (Width+2) * COUT); init_matrices(X_h, W_h, Height, Width, CIN, COUT, X_BIT, W_BIT); checkCudaErrors(cudaMemcpy(X, X_h, sizeof(int4) * (Height+2) * (Width+2) * (CIN/128) * X_BIT, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(W, W_h, sizeof(int4) * 9 * (CIN/128) * COUT * W_BIT, cudaMemcpyHostToDevice)); #endif int SHMEM_SZ = 65536; checkCudaErrors(cudaFuncSetAttribute( compute_conv_imma, cudaFuncAttributeMaxDynamicSharedMemorySize, SHMEM_SZ)); // Run ours NUM_PROFILES times and record time. float bmma_ms_avg = 0.0f; int NUM_PROFILES = 1; for(int iter=0; iter<NUM_PROFILES; ++iter){ float bmma_ms = 0.0f; cudaEvent_t bmma_start; cudaEvent_t bmma_end; cudaEventCreate(&bmma_start); cudaEventCreate(&bmma_end); cudaEventRecord(bmma_start); checkKernelErrors( (compute_conv_imma<<<deviceProp.multiProcessorCount, THREADS_PER_BLOCK, SHMEM_SZ>>>(W, X, Output, Height, Width, CIN, COUT))); cudaEventRecord(bmma_end); cudaEventSynchronize(bmma_end); cudaEventElapsedTime(&bmma_ms, bmma_start, bmma_end); cudaEventDestroy(bmma_start); cudaEventDestroy(bmma_end); bmma_ms_avg += bmma_ms; } bmma_ms_avg = bmma_ms_avg/(double)NUM_PROFILES; printf("H: %d, W: %d, CIN: %d, COUT: %d, W_BIT: %d, X_BIT: %d\n", Height, Width, CIN, COUT, W_BIT, X_BIT); printf("Time: %f ms\n", bmma_ms_avg); printf("TOPS: %.2f\n\n", (((double)9 * CIN * Height * Width * COUT * 2)/(bmma_ms_avg/1000.)) / 1e12); } #ifdef verify_output printf("Validating results...\n"); checkCudaErrors(cudaMemcpy(Output_h, Output, sizeof(int) * Height * Width * COUT, cudaMemcpyDeviceToHost)); int *C_ref = (int *)malloc(sizeof(int) * Height * Width * COUT); /* Copmpute reference matrix on CPU */ compute_ref(X_h, W_h, C_ref, Height, Width, CIN, COUT, X_BIT, W_BIT); /* validation results */ validate_results(Output_h, C_ref, Height, Width, COUT); #endif // free(A_h); // free(B_h); // free(C_h); // checkCudaErrors(cudaFree(reinterpret_cast<void *>(A))); // checkCudaErrors(cudaFree(reinterpret_cast<void *>(B))); // checkCudaErrors(cudaFree(reinterpret_cast<void *>(C))); return EXIT_SUCCESS; }
286887bb4a13ab2a7f31b1d7eeb5e62b2576bb6c.hip
// !!! This is a file automatically generated by hipify!!! #include <call_kernel.h> //xfail:BOOGIE_ERROR //--blockDim=1024 --gridDim=1 //null pointer access // ALTOUGH, IT WORKS #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #define N 2//4//8 __global__ void foo(int *H) { size_t tmp = (size_t)H; //type cast tmp += sizeof(int); int *G = (int *)tmp; G -= 1; //POSSIBLE NULL POINTER ACCESS G[threadIdx.x] = threadIdx.x; __syncthreads(); H[threadIdx.x] = G[threadIdx.x]; } int main() { int *a; int *dev_a; int size = N*sizeof(int); hipMalloc((void**)&dev_a, size); a = (int*)malloc(N*size); for (int i = 0; i < N; i++) a[i] = 1; hipMemcpy(dev_a,a,size, hipMemcpyHostToDevice); printf("a: "); for (int i = 0; i < N; i++) printf("%d ", a[i]); //foo<<<1,N>>>(dev_a); ESBMC_verify_kernel(foo, 1, N, dev_a); hipMemcpy(a,dev_a,size,hipMemcpyDeviceToHost); printf("\nFunction Results:\n "); for (int i = 0; i < N; i++) printf("%d ", a[i]); free(a); hipFree(dev_a); return 0; }
286887bb4a13ab2a7f31b1d7eeb5e62b2576bb6c.cu
#include <call_kernel.h> //xfail:BOOGIE_ERROR //--blockDim=1024 --gridDim=1 //null pointer access // ALTOUGH, IT WORKS #include <stdio.h> #include <cuda.h> #include <cuda_runtime_api.h> #define N 2//4//8 __global__ void foo(int *H) { size_t tmp = (size_t)H; //type cast tmp += sizeof(int); int *G = (int *)tmp; G -= 1; //POSSIBLE NULL POINTER ACCESS G[threadIdx.x] = threadIdx.x; __syncthreads(); H[threadIdx.x] = G[threadIdx.x]; } int main() { int *a; int *dev_a; int size = N*sizeof(int); cudaMalloc((void**)&dev_a, size); a = (int*)malloc(N*size); for (int i = 0; i < N; i++) a[i] = 1; cudaMemcpy(dev_a,a,size, cudaMemcpyHostToDevice); printf("a: "); for (int i = 0; i < N; i++) printf("%d ", a[i]); //foo<<<1,N>>>(dev_a); ESBMC_verify_kernel(foo, 1, N, dev_a); cudaMemcpy(a,dev_a,size,cudaMemcpyDeviceToHost); printf("\nFunction Results:\n "); for (int i = 0; i < N; i++) printf("%d ", a[i]); free(a); cudaFree(dev_a); return 0; }
07de61e4ed1952cf4156e2f721c67af939e7406d.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include <hip/hip_runtime.h> #define N 100 #define DIM 2 #define PamM 2e-11 #define S 0.5 char le_entrada(); char inicializa_parametros(); float *aloca_matriz(int, int); void cal_cond_robin(); char parametro_independentes(); char copia_dados_para_gpu(); void copia_dados_para_cpu(); void clear_mem(); //char calcula_pressao_velocidade(int, int, int, int, int); //char atualiza_mult_lagrange(int tid); static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) #define HANDLE_NULL( a ) {if (a == NULL) { \ printf( "Host memory failed in %s at line %d\n", \ __FILE__, __LINE__ ); \ exit( EXIT_FAILURE );}} //- - - - - - - - - - - - - - GLOBAIS - - - - - - - - - - - - - - // /* - - - - - - - Estruturas - - - - - - - */ typedef struct{ float *R, *L, *U, *D; float *R_old, *L_old, *U_old, *D_old; }ESTRUTURA_Q; typedef struct{ float *R, *L, *U, *D; float *R_old, *L_old, *U_old, *D_old; }ESTRUTURA_L; typedef struct{ float *R, *L, *U, *D; float *R_old, *L_old, *U_old, *D_old; }ESTRUTURA_B; typedef struct{ float *p, *p_old; }ESTRUTURA_PRESSAO; typedef struct{ float *perm, *font, *epsilon; }ESTRUTURA_MAT; /* - - - - - - - Fim das Estruturas - - - - - - - */ /* - - - - - - - Variaveis das Estruturas - - - - - - - */ ESTRUTURA_Q host_q, dev_q; ESTRUTURA_L host_l, dev_l; ESTRUTURA_B host_b, dev_b; ESTRUTURA_PRESSAO host_pressao, dev_pressao; ESTRUTURA_MAT host_mat, dev_mat; /* - - - - - - - Entradas Externas - - - - - - - */ int tam_mat_interna = 3, tam_mat_real = 3 + 2, max_interacoes = 1000, op_contorno = 1; float tam_regiao = 20000.00, erro_max = 1e-5, valor_contor = 2.00; float h = 20000.00 / 3; // ALTURA H = TAM_REGIAO / TAM_MAT_INTERNA //float *mat_perm = NULL, *mat_font = NULL, *mat_epsilon = NULL; //float *dev_mat_perm = NULL, *mat_font = NULL, *mat_epsilon = NULL; /* - - - - - - - Fim das Entradas Externas - - - - - - - */ /* - - - - - - - Fim das Variaveis das Estruturas - - - - - - - */ /* - - - - - - - Ponteiros para GPU - - - - - - - */ float *host_aux_1 = NULL, *dev_aux_1 = NULL, *dev_aux_2 = NULL, dev_erro = NULL, *dev_media = NULL; // float *dev_aux_1 = NULL, dev_erro = 0.0, dev_media = 0.0, dev_sum1 = 0.0, dev_sum2 = 0.0; // // float *dev_q.R = NULL, *dev_q.L = NULL, *dev_q.U = NULL, *dev_q.D = NULL; // float *dev_q.R_old = NULL, *dev_q.L_old = NULL, *dev_q.U_old = NULL, *dev_q.D_old = NULL; // // float *dev_l.R = NULL, *dev_l.L = NULL, *dev_l.U = NULL, *dev_l.D = NULL; // float *dev_l.R_old = NULL, *dev_l.L_old = NULL, *dev_l.U_old = NULL, *dev_l.D_old = NULL; // // float *dev_b.R = NULL, *dev_b.L = NULL, *dev_b.U = NULL, *dev_b.D = NULL; // float *dev_b.R_old = NULL, *dev_b.L_old = NULL, *dev_b.U_old = NULL, *dev_b.D_old = NULL; // // float *dev_pressao.p = NULL, *dev_pressao.p_old = NULL; // //- - - - - - - - - - - - - - FIM - GLOBAIS - - - - - - - - - - - - - - // __device__ char atualiza_mult_lagrange( int tid, ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b ){ int index_mem_central = 0, index_mem_down = 0, index_mem_uper = 0; int index_mem_left = 0, index_mem_right = 0; int offset = (blockDim.x * gridDim.x); // o kernel contem somente a quantidade de elementos internos // portanto 
a fronteira deve ser contata "+ 2" de cada lado index_mem_central = tid; index_mem_uper = index_mem_central - offset; // (offset -1) = comprimento do kernel index_mem_down = index_mem_central + offset; index_mem_left = index_mem_central - 1; index_mem_right = index_mem_central + 1; dev_l.U[index_mem_central] = dev_b.U[index_mem_central] * (dev_q.U[index_mem_central] + dev_q.D_old[index_mem_uper]) + dev_l.D_old[index_mem_uper]; dev_l.D[index_mem_central] = dev_b.D[index_mem_central] * (dev_q.D[index_mem_central] + dev_q.U_old[index_mem_down]) + dev_l.U_old[index_mem_down]; dev_l.R[index_mem_central] = dev_b.R[index_mem_central] * (dev_q.R[index_mem_central] + dev_q.L_old[index_mem_right]) + dev_l.L_old[index_mem_right]; dev_l.L[index_mem_central] = dev_b.L[index_mem_central] * (dev_q.L[index_mem_central] + dev_q.R_old[index_mem_left]) + dev_l.R_old[index_mem_left]; return 0; } __device__ char calcula_pressao_velocidade( int tid, int uper, int right, int down, int left, ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b, ESTRUTURA_PRESSAO dev_pressao, ESTRUTURA_MAT dev_mat ){ float auxU = 0.0, auxD = 0.0, auxR = 0.0, auxL = 0.0, DU = 0.0, DD = 0.0, DR = 0.0, DL = 0.0; int index_mem_central = 0, index_mem_down = 0, index_mem_uper = 0; int index_mem_left = 0, index_mem_right = 0; int offset = (blockDim.x * gridDim.x); // o kernel contem somente a quantidade de elementos internos // portanto a fronteira deve ser contata "+ 2" de cada lado index_mem_central = tid; index_mem_uper = index_mem_central - offset; index_mem_down = index_mem_central + offset; index_mem_left = index_mem_central - 1; index_mem_right = index_mem_central + 1; if(uper == 1){ auxU = dev_mat.epsilon[index_mem_central] / (1 + dev_b.U[index_mem_central] * dev_mat.epsilon[index_mem_central]); DU = auxU * (dev_b.U[index_mem_central] * dev_q.D_old[index_mem_uper] + dev_l.D_old[index_mem_uper]); } if(right == 1){ auxR = dev_mat.epsilon[index_mem_central] / (1 + dev_b.R[index_mem_central] * dev_mat.epsilon[index_mem_central]); DR = auxR * (dev_b.R[index_mem_central] * dev_q.L_old[index_mem_right] + dev_l.L_old[index_mem_right]); } if(down == 1){ auxD = dev_mat.epsilon[index_mem_central] / (1 + dev_b.D[index_mem_central] * dev_mat.epsilon[index_mem_central]); DD = auxD * (dev_b.D[index_mem_central] * dev_q.U_old[index_mem_down] + dev_l.U_old[index_mem_down]); } if(left == 1){ auxL = dev_mat.epsilon[index_mem_central] / (1 + dev_b.L[index_mem_central] * dev_mat.epsilon[index_mem_central]); DL = auxL * (dev_b.L[index_mem_central] * dev_q.R_old[index_mem_left] + dev_l.R_old[index_mem_left]); } dev_pressao.p[index_mem_central] = (dev_mat.font[index_mem_central] + DU + DR + DD + DL) / (auxU + auxR + auxD + auxL); dev_q.L[index_mem_central] = auxL * dev_pressao.p[index_mem_central] - DL; dev_q.R[index_mem_central] = auxR * dev_pressao.p[index_mem_central] - DR; dev_q.U[index_mem_central] = auxU * dev_pressao.p[index_mem_central] - DU; dev_q.D[index_mem_central] = auxD * dev_pressao.p[index_mem_central] - DD; return 0; } __global__ void reduce1(float *g_idata, float *g_odata, int n) { __shared__ float sdata[512]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = (i < n) ? 
g_idata[i] : 0; __syncthreads(); // do reduction in shared mem for(unsigned int s=1; s < blockDim.x; s *= 2) { int index = 2 * s * tid; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } __global__ void reducao(float *in){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int tid = x + y * blockDim.x * gridDim.x; int dimensao_x = blockDim.x * gridDim.x; int dimensao_y = blockDim.y * gridDim.y; int i = (dimensao_x * dimensao_y )/ 2; //efetuando a reduo while(i != 0){ if(tid < i) in[tid] += in[tid + i]; if(i % 2 == 1){ if(i>1) in[0] += in[i-1]; } __syncthreads(); i /= 2; } } __global__ void reducao2(float *in_1, float *in_2){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int tid = x + y * blockDim.x * gridDim.x; int dimensao_x = blockDim.x * gridDim.x; int dimensao_y = blockDim.y * gridDim.y; int i = (dimensao_x * dimensao_y )/ 2; while(i != 0){ if(tid < i) in_1[tid] += in_1[tid + i]; in_2[tid] += in_2[tid + i]; if(i % 2 == 1){ if(i>1) in_1[0] += in_1[i-1]; in_2[0] += in_2[i-1]; } __syncthreads(); i /= 2; } } __global__ void escoamento_monofasico( ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b, ESTRUTURA_PRESSAO dev_pressao, ESTRUTURA_MAT dev_mat ){ /*int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; a[offset] = offset;*/ /*vificar as condies de contorno*/ int flag_thread_centrais = 1; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; /*int offset = (blockDim.x * gridDim.x) + 1; // deslocamento para o tamanho da regio (tam_regiao = n + 2) */ int tid = x + y * blockDim.x * gridDim.x; //verificar esse deslocamento para n causar problema (somente na hora de armazenar utilizar o deslocamento) //int tid = (x + y * blockDim.x * gridDim.x) + offset; // tid fornece o indice do vetor int dimensao_x = blockDim.x * gridDim.x; int dimensao_y = blockDim.y * gridDim.y; int eq_tid_cant_sup_esq = dimensao_x + 1; int eq_tid_cant_sup_dir = dimensao_x + (dimensao_x - 2); // posio extremo sup direito int eq_tid_cant_inf_dir = (dimensao_x * dimensao_y) - (dimensao_x + 2); // posio extremo inf direito int eq_tid_cant_inf_esq = ((dimensao_x) * (dimensao_y - 2)) + 1; // posio extremo inf esquerdo // int offset = (blockDim.x * gridDim.x) + 1 + 2; // o kernel contem somente a quantidade de elementos internos // portanto a fronteira deve ser contata "+ 2" de cada lado int index_mem_central = tid; if(tid == eq_tid_cant_sup_esq){//canto superior esquerdo /*VERIFICAR AS CONDIES DE CONTORNO*/ /* * calcula_pressao_velocidade(); * * Param: ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b, ESTRUTURA_PRESSAO dev_pressao, ESTRUTURA_MAT dev_mat * */ calcula_pressao_velocidade( tid, 0, 1, 1, 0, dev_q, dev_l, dev_b, dev_pressao, dev_mat); /* * * atualiza_mult_lagrange(); * * param: int tid, ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b * */ atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if(tid == eq_tid_cant_sup_dir){//canto superior direito /*VERIFICAR AS CONDIES DE CONTORNO*/ calcula_pressao_velocidade( tid, 0, 0, 1, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if(tid == eq_tid_cant_inf_esq){//canto inferior esquerdo /*VERIFICAR AS CONDIES DE 
CONTORNO*/ calcula_pressao_velocidade( tid, 1, 1, 0, 0, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if(tid == eq_tid_cant_inf_dir){//canto inferior direito /*VERIFICAR AS CONDIES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 0, 0, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if((tid > eq_tid_cant_sup_esq) && (tid < eq_tid_cant_sup_dir)){//fronteira superior /*VERIFICAR AS CONDIES DE CONTORNO*/ calcula_pressao_velocidade( tid, 0, 1, 1, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if((tid > eq_tid_cant_sup_dir) && (tid < eq_tid_cant_inf_dir) && (tid % dimensao_x == dimensao_x - 2)){ //fronteira direita /*VERIFICAR AS CONDIES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 0, 1, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if((tid > eq_tid_cant_inf_esq) && (tid < eq_tid_cant_inf_dir)){ //fronteira inferior /*VERIFICAR AS CONDIES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 1, 0, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if((tid > eq_tid_cant_sup_esq) && (tid < eq_tid_cant_inf_dir) && (tid < eq_tid_cant_inf_esq) && (tid % dimensao_x == 1)){//fronteira esquerda /*VERIFICAR AS CONDIES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 1, 1, 0, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if(flag_thread_centrais && (tid % dimensao_x >= 2) && (tid % dimensao_x <= (dimensao_x - 3)) && (tid > eq_tid_cant_sup_dir) && (tid < eq_tid_cant_inf_esq) ){ /*VERIFICAR AS CONDIES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 1, 1, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); } //dev_media[tid] = dev_pressao.p[tid]; /* dev_media[0] = reducao(dev_media, 100); dev_media[0] = dev_media[0] / (dimensao_x * dimensao_y); dev_pressao.p[index_mem_central] -= dev_media[0]; dev_l.D[index_mem_central] -= dev_media[0]; dev_l.U[index_mem_central] -= dev_media[0]; dev_l.L[index_mem_central] -= dev_media[0]; dev_l.R[index_mem_central] -= dev_media[0];*/ //avaliando criterio de convergencia /*dev_aux_1[index_mem_central] = dev_pressao.p[index_mem_central] - dev_pressao.p_old[index_mem_central]; __syncthreads(); dev_aux_1[index_mem_central] = dev_aux_1[index_mem_central] * dev_aux_1[index_mem_central]; __syncthreads();*/ //reduo da primeira soma sum1 /*dev_sum1 = reducao(dev_aux_1, 100);*/ //reduo da segunda soma sum2 /*dev_aux_1[index_mem_central] = dev_pressao.p[index_mem_central] * dev_pressao.p[index_mem_central]; __syncthreads(); dev_sum2 = reducao(dev_aux_1, 100); dev_erro = sqrt(dev_sum1 / dev_sum2);*/ //DUVIDA PARA COMO O SINAL DO ERRO /*if (dev_erro > erro_max){ return; } dev_pressao.p_old[index_mem_central] = dev_pressao.p[index_mem_central]; dev_q.U_old[index_mem_central] = dev_q.U[index_mem_central]; dev_q.R_old[index_mem_central] = dev_q.R[index_mem_central]; dev_q.L_old[index_mem_central] = dev_q.L[index_mem_central]; dev_q.D_old[index_mem_central] = dev_q.D[index_mem_central]; dev_l.D_old[index_mem_central] = dev_l.D[index_mem_central]; dev_l.U_old[index_mem_central] = dev_l.U[index_mem_central]; dev_l.L_old[index_mem_central] = dev_l.L[index_mem_central]; 
dev_l.R_old[index_mem_central] = dev_l.R[index_mem_central]; i++; }*/ } __global__ void preapara_criterio_convergencia(float *dev_aux_1, float *dev_aux_2, float *dev_media, ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_PRESSAO dev_pressao, const float media){ // sum1 = 0.; // sum2 = 0.; // for (k=1; k<=n; k++) // for (j=1; j<=n; j++) // { // aux = p[j][k] - p_old[j][k]; // sum1 += aux*aux; // sum2 += p[j][k]*p[j][k]; // } // erro = sqrt(sum1/sum2); //float dev_sum1 = 0.0, dev_sum2 = 0.0; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int tid = x + y * blockDim.x * gridDim.x; int dimensao_x = blockDim.x * gridDim.x; int dimensao_y = blockDim.y * gridDim.y; // dimensao_x -= 2; // dimensao_y -= 2; //float media = (dev_media[0] + dev_media[1]) / ((dimensao_x-2) * (dimensao_y-2)); int canto_sup_dir = dimensao_x - 1; int canto_inf_dir = (dimensao_x * dimensao_y ) - 1; int canto_inf_esq = (dimensao_x * dimensao_y ) - (dimensao_x - 1); dev_pressao.p[tid] -= media; dev_l.D[tid] -= media; dev_l.U[tid] -= media; dev_l.L[tid] -= media; dev_l.R[tid] -= media; //despresando valores do contorno if((tid >= 0) && (tid <= canto_sup_dir)){ dev_pressao.p[tid] = 0.0; dev_l.D[tid] = 0.0; dev_l.U[tid] = 0.0; dev_l.L[tid] = 0.0; dev_l.R[tid] = 0.0; dev_aux_1[tid] = 0.0; dev_aux_2[tid] = 0.0; } if((tid > canto_sup_dir) && (tid <= canto_inf_esq) && (tid % dimensao_x == 0)){ dev_pressao.p[tid] = 0.0; dev_l.D[tid] = 0.0; dev_l.U[tid] = 0.0; dev_l.L[tid] = 0.0; dev_l.R[tid] = 0.0; dev_aux_1[tid] = 0.0; dev_aux_2[tid] = 0.0; } if((tid > canto_sup_dir) && (tid < canto_inf_esq) && (tid % dimensao_x == (dimensao_x -1))){ dev_pressao.p[tid] = 0.0; dev_l.D[tid] = 0.0; dev_l.U[tid] = 0.0; dev_l.L[tid] = 0.0; dev_l.R[tid] = 0.0; dev_aux_1[tid] = 0.0; dev_aux_2[tid] = 0.0; } if((tid >= canto_inf_esq) && (tid <= canto_inf_dir)){ dev_pressao.p[tid] = 0.0; dev_l.D[tid] = 0.0; dev_l.U[tid] = 0.0; dev_l.L[tid] = 0.0; dev_l.R[tid] = 0.0; dev_aux_1[tid] = 0.0; dev_aux_2[tid] = 0.0; } dev_aux_1[tid] = dev_pressao.p[tid] - dev_pressao.p_old[tid]; dev_aux_1[tid] = dev_aux_1[tid] * dev_aux_1[tid]; dev_aux_2[tid] = dev_pressao.p[tid] * dev_pressao.p[tid]; /*Media zero nas pressoes e multiplicadores de lagrange*/ // dev_pressao.p_old[tid] = dev_pressao.p[tid]; // dev_q.U_old[tid] = dev_q.U[tid]; // dev_q.R_old[tid] = dev_q.R[tid]; // dev_q.L_old[tid] = dev_q.L[tid]; // dev_q.D_old[tid] = dev_q.D[tid]; // // dev_l.D_old[tid] = dev_l.D[tid]; // dev_l.U_old[tid] = dev_l.U[tid]; // dev_l.L_old[tid] = dev_l.L[tid]; // dev_l.R_old[tid] = dev_l.R[tid]; } __global__ void teste( ESTRUTURA_PRESSAO dev_pressao, ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l ){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int tid = x + y * blockDim.x * gridDim.x; dev_pressao.p_old[tid] = dev_pressao.p[tid]; dev_q.U_old[tid] = dev_q.U[tid]; dev_q.R_old[tid] = dev_q.R[tid]; dev_q.L_old[tid] = dev_q.L[tid]; dev_q.D_old[tid] = dev_q.D[tid]; dev_l.D_old[tid] = dev_l.D[tid]; dev_l.U_old[tid] = dev_l.U[tid]; dev_l.L_old[tid] = dev_l.L[tid]; dev_l.R_old[tid] = dev_l.R[tid]; } int main(void){ le_entrada(); inicializa_parametros(); cal_cond_robin(); parametro_independentes(); copia_dados_para_gpu(); // dim3 block(comprimento/16 , altura/16); // dim3 thread(16, 16); dim3 block(2, 2); dim3 thread(5, 5); /* * escoamento_monofasico(); * * Param: ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b, ESTRUTURA_PRESSAO dev_pressao, ESTRUTURA_MAT dev_mat, float *dev_aux_1, const float erro_max * 
*/ int i = 0, j = 0; while (i < 10){ hipLaunchKernelGGL(( escoamento_monofasico), dim3(block), dim3(thread), 0, 0, dev_q, dev_l, dev_b, dev_pressao, dev_mat ); hipDeviceSynchronize(); HANDLE_ERROR( hipMemcpy( host_aux_1, dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); int k; int temp = tam_mat_real * tam_mat_real; float soma = 0.0, M = 0.0; for(k = 0; k < temp; k++){ soma = soma + host_aux_1[k]; } M = soma / (tam_mat_interna * tam_mat_interna); printf("\nvalor da media em serial = %f \t i = %d\n\n", M , i); int num_blocks, qtElementos; qtElementos = tam_mat_real*tam_mat_real; num_blocks = ((qtElementos) > 512) ? (qtElementos/512) + 1 : 1; hipLaunchKernelGGL(( reduce1), dim3(num_blocks), dim3(512), 0, 0, dev_pressao.p, dev_media, qtElementos ); HANDLE_ERROR( hipMemcpy( host_aux_1, dev_media, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); printf("\n\n\t\t\t\tdev media:\n"); for(k = 0; k < tam_mat_real; k ++){ for(j = 0; j < tam_mat_real; j++) printf("%16.6E ", host_aux_1[k*tam_mat_real + j]); printf("\n"); } soma = 0.0; for(k = 0; k < num_blocks; k++) soma = soma + host_aux_1[k]; M = soma / (tam_mat_interna * tam_mat_interna); printf("valor da media em cuda = %f \t i = %d", M , i); HANDLE_ERROR( hipMemcpy( host_aux_1, dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); printf("\n\n\t\t\t\tpressao:\n"); for(k = 0; k < tam_mat_real; k ++){ for(j = 0; j < tam_mat_real; j++) printf("%16.6E ", host_aux_1[k*tam_mat_real + j]); printf("\n"); } hipLaunchKernelGGL(( preapara_criterio_convergencia), dim3(block), dim3(thread), 0, 0, dev_aux_1, dev_aux_2, dev_media, dev_q, dev_l, dev_pressao, M); hipLaunchKernelGGL(( teste), dim3(block), dim3(thread), 0, 0, dev_pressao, dev_q, dev_l ); i++; } copia_dados_para_cpu(); /* printf("\ntam_mat_interna = %d\n", tam_mat_interna); printf("tam_mat_real = %d\n", tam_mat_real); printf("max_interacoes = %d\n", max_interacoes); printf("op_contorno = %d\n", op_contorno); printf("tam_regiao = %f\n", tam_regiao); printf("erro_max = %f\n", erro_max); printf("valor_contor = %f\n", valor_contor); printf("\n\n\t\t\tmat_font:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_mat.font[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\tmat_perm:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_mat.perm[i*tam_mat_real + j]); //printf(" %6.4E ", host_mat.perm[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\tmat_epsilon:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_mat.epsilon[i*tam_mat_real + j]); printf("\n"); } printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n"); printf("\n\n\t\t\tbeta U:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_b.U[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\tbeta R:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_b.R[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\tbeta L:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_b.L[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\tbeta D:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_b.D[i*tam_mat_real + j]); printf("\n"); } 
printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n"); printf("\n\n\t\t\t\tq_U:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_q.U[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_R:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_q.R[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_L:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_q.L[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_D:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_q.D[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_U:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_l.U[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_R:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_l.R[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_L:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_l.L[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_D:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_l.D[i*tam_mat_real + j]); printf("\n"); } printf("\npressao:\n"); printf("\n\n\t\t\t\tpressao:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%16.6E ", host_pressao.p[i*tam_mat_real + j]); printf("\n"); }*/ // printf("\npressao old:\n"); // printf("\n\n\t\t\t\tpressao old:\n"); // for(i = 0; i < tam_mat_real; i ++){ // for(j = 0; j < tam_mat_real; j++) // printf("%16.6E ", host_pressao.p_old[i*tam_mat_real + j]); // printf("\n"); // } /*printf("\n\n\t\t\t\tb_U:\t\t\t\t\t\t\t\t\tb_U_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.U[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.U_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tb_R:\t\t\t\t\t\t\t\t\tb_R_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.R[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.R_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tb_D:\t\t\t\t\t\t\t\t\tb_D_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.D[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.D_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tb_D:\t\t\t\t\t\t\t\t\tb_D_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.D[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.D_old[i*tam_mat_real + j]); printf("\n"); } printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n"); printf("\npressao:\n"); printf("\n\n\t\t\t\tpressao:\t\t\t\t\t\t\t\t\tpressao_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_pressao.p[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_pressao.p_old[i*tam_mat_real + j]); 
printf("\n"); } printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n"); printf("\n\n\t\t\t\tl_U:\t\t\t\t\t\t\t\t\tl_U_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.U[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.U_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_R:\t\t\t\t\t\t\t\t\tl_R_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.R[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.R_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_D:\t\t\t\t\t\t\t\t\tl_D_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.D[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.D_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_L:\t\t\t\t\t\t\t\t\tl_L_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.L[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.L_old[i*tam_mat_real + j]); printf("\n"); } printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n"); printf("\n\n\t\t\t\tq_U:\t\t\t\t\t\t\t\t\tq_U_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.U[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.U_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_R:\t\t\t\t\t\t\t\t\tq_R_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.R[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.R_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_D:\t\t\t\t\t\t\t\t\tq_D_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.D[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.D_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_L:\t\t\t\t\t\t\t\t\tq_L_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.L[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.L_old[i*tam_mat_real + j]); printf("\n"); }*/ clear_mem(); // // system("pause"); return 0; } char le_entrada(){ printf("\n\n\t\t - - CARREGANDO ENTRADA - - \n\n"); FILE *arq = NULL; //arq = fopen("../dir_entrada/parametro_entrada.txt", "r"); arq = fopen("parametro_entrada.txt", "r"); if(arq == NULL){ printf("Erro ao abrir aquivo: 'parametro_entrada.txt'\n\t\tCertifique-se que o arquivo exite.\n"); exit(1); } else{ printf("\t\t - - LENDO ARQUIVO DE ENTRADA - -\n"); /*char c[2], dados[255], buffer[255];*/ char buffer[255]; int cont = 1; while(cont < 9){ fscanf(arq, "%s", buffer); //puts(buffer); int i = 0, j = 0; switch(strlen(buffer)){ case 8: //erro_maximo fscanf(arq, "%f", &erro_max); break; case 10: //tam_regiao fscanf(arq, "%f", &tam_regiao); break; case 11: //opcao_contorno fscanf(arq, "%d", &op_contorno); break; case 12: //valor_contor fscanf(arq, "%f", &valor_contor); break; case 14: //max_interacoes fscanf(arq, "%d", 
&max_interacoes); break; case 15: //tam_mat_interna fscanf(arq, "%d", &tam_mat_interna); break; case 16: //matriz_de_fontes //uso (tam_mat_interna + 2) - pois ainda no inicializei 'tam_mat_real' host_mat.font = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2); for(i = 1; i < (tam_mat_interna + 2) - 1; i ++) for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++) fscanf(arq, "%f", &host_mat.font[i*(tam_mat_interna+2) + j]); break; case 18: //matriz_permeabilidade host_mat.perm = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2); host_mat.epsilon = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2); for(i = 1; i < (tam_mat_interna + 2) - 1; i ++) for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++) fscanf(arq, "%f", &host_mat.perm[i*(tam_mat_interna+2) + j]); for(i = 1; i < (tam_mat_interna + 2) - 1; i ++) for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++) host_mat.perm[i*(tam_mat_interna+2) + j] = PamM*exp(S * host_mat.perm[i*(tam_mat_interna+2) + j]); break; default: printf("\n\n\t\tHouve algum erro no aquivo de entrada!\n\n"); return 0; } //int tam = strlen(buffer); cont++; } printf("\t\t - - ARQUIVO DE ENTRADA CARREGADO - -\n"); } printf("\n\n\t\t - - ENTRADA CARREGA - - \n\n"); return 1; } float *aloca_matriz(int L, int C){ float *aux = NULL; aux = (float *) calloc(L * C, sizeof(float)); if(aux == NULL){ printf("\n\n\t\tErro ao alocar memoria\n\n"); exit(1); }else{ return (aux); } return NULL; } /* * *VERIFICAR RETORNO * */ void cal_cond_robin(){ float keff = 0.0, numerador = 0.0, denominador = 0.0; float C = 1.0; // Cte adimensional que se ajusta experimentalmente C = 1.0 //Canto superior esquerdo numerador = ( 2 * host_mat.perm[tam_mat_real + 1] * host_mat.perm[tam_mat_real + 2] ); denominador = ( host_mat.perm[tam_mat_real + 1] + host_mat.perm[tam_mat_real + 2] ); keff = numerador / denominador; host_b.R[tam_mat_real + 1] = C*h/keff; numerador = (2 * host_mat.perm[tam_mat_real + 1] * host_mat.perm[(2*tam_mat_real) + 1]); denominador = ( host_mat.perm[tam_mat_real + 1] + host_mat.perm[(2*tam_mat_real) + 1]); keff = numerador / denominador; host_b.D[tam_mat_real + 1] = C*h/keff; //Canto superior direito numerador = ( 2 * host_mat.perm[tam_mat_real + tam_mat_interna] * host_mat.perm[tam_mat_real + (tam_mat_interna - 1)] ); denominador = ( host_mat.perm[tam_mat_real + tam_mat_interna] + host_mat.perm[tam_mat_real + (tam_mat_interna - 1)] ); keff = numerador / denominador; host_b.L[tam_mat_real + tam_mat_interna] = C*h/keff; numerador = ( 2 * host_mat.perm[tam_mat_real + tam_mat_interna] * host_mat.perm[(2 * tam_mat_real) + tam_mat_interna] ); denominador = ( host_mat.perm[tam_mat_real + tam_mat_interna] + host_mat.perm[(2 * tam_mat_real) + tam_mat_interna] ); keff = numerador / denominador; host_b.D[tam_mat_real + tam_mat_interna] = C*h/keff; //Canto infeior esquerdo numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] * host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + 1] ); denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] + host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + 1] ); keff = numerador / denominador; host_b.U[(tam_mat_real * tam_mat_interna) + 1] = C*h/keff; numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] * host_mat.perm[(tam_mat_real * tam_mat_interna) + 2] ); denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] + host_mat.perm[(tam_mat_real * tam_mat_interna) + 2] ); keff = numerador / denominador; host_b.R[(tam_mat_real * tam_mat_interna) + 1] = C*h/keff; //Canto infeior direito numerador = ( 
2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] * host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + tam_mat_interna] ); denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] + host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + tam_mat_interna] ); keff = numerador / denominador; host_b.U[(tam_mat_real * tam_mat_interna) + tam_mat_interna] = C*h/keff; numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] * host_mat.perm[(tam_mat_real * tam_mat_interna) + (tam_mat_interna - 1)] ); denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] + host_mat.perm[(tam_mat_real * tam_mat_interna) + (tam_mat_interna - 1)] ); keff = numerador / denominador; host_b.L[(tam_mat_real * tam_mat_interna) + tam_mat_interna] = C*h/keff; //Calculo das fronteiras e regio interna para betas int i = 0; for(i = 2; i < tam_mat_interna; i ++){ //Calcula fronteira superior numerador = ( 2 * host_mat.perm[tam_mat_real + i] * host_mat.perm[tam_mat_real + (i-1)] ); denominador = ( host_mat.perm[tam_mat_real + i] + host_mat.perm[tam_mat_real + (i-1)] ); keff = numerador / denominador; host_b.L[tam_mat_real + i] = C*h/keff; numerador = ( 2 * host_mat.perm[tam_mat_real + i] * host_mat.perm[tam_mat_real + (i+1)] ); denominador = ( host_mat.perm[tam_mat_real + i] + host_mat.perm[tam_mat_real + (i+1)] ); keff = numerador / denominador; host_b.R[tam_mat_real + i] = C*h/keff; numerador = ( 2 * host_mat.perm[tam_mat_real + i] * host_mat.perm[(2 * tam_mat_real) + i] ); denominador = ( host_mat.perm[tam_mat_real + i] + host_mat.perm[(2 * tam_mat_real) + i] ); keff = numerador / denominador; host_b.D[tam_mat_real + i] = C*h/keff; //Calcula fronteira esquerda numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + 1] * host_mat.perm[((i - 1) * tam_mat_real) + 1] ); denominador = ( host_mat.perm[(i * tam_mat_real) + 1] + host_mat.perm[((i - 1) * tam_mat_real) + 1] ); keff = numerador / denominador; host_b.U[(i * tam_mat_real) + 1] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + 1] * host_mat.perm[(i * tam_mat_real) + 2] ); denominador = ( host_mat.perm[(i * tam_mat_real) + 1] + host_mat.perm[(i * tam_mat_real) + 2] ); keff = numerador / denominador; host_b.R[(i * tam_mat_real) + 1] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + 1] * host_mat.perm[((i + 1) * tam_mat_real) + 1] ); denominador = ( host_mat.perm[(i * tam_mat_real) + 1] + host_mat.perm[((i + 1) * tam_mat_real) + 1] ); keff = numerador / denominador; host_b.D[(i * tam_mat_real) + 1] = C*h/keff; //Calcula fronteira inferior numerador = ( 2 * host_mat.perm[(tam_mat_interna * tam_mat_real) + i] * host_mat.perm[(tam_mat_interna * tam_mat_real) + (i - 1)] ); denominador = ( host_mat.perm[(tam_mat_interna * tam_mat_real) + i] + host_mat.perm[(tam_mat_interna * tam_mat_real) + (i - 1)] ); keff = numerador / denominador; host_b.L[(tam_mat_interna * tam_mat_real) + i] = C*h/keff; numerador = ( 2 * host_mat.perm[(tam_mat_interna * tam_mat_real) + i] * host_mat.perm[((tam_mat_interna - 1) * tam_mat_real) + i] ); denominador = ( host_mat.perm[(tam_mat_interna * tam_mat_real) + i] + host_mat.perm[((tam_mat_interna - 1) * tam_mat_real) + i] ); keff = numerador / denominador; host_b.U[(tam_mat_interna * tam_mat_real) + i] = C*h/keff; numerador = ( 2 * host_mat.perm[(tam_mat_interna * tam_mat_real) + i] * host_mat.perm[(tam_mat_interna * tam_mat_real) + (i + 1)] ); denominador = ( host_mat.perm[(tam_mat_interna * tam_mat_real) + i] + 
host_mat.perm[(tam_mat_interna * tam_mat_real) + (i + 1)] ); keff = numerador / denominador; host_b.R[(tam_mat_interna * tam_mat_real) + i] = C*h/keff; //Calcula fronteira direita numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + tam_mat_interna] * host_mat.perm[((i-1) * tam_mat_real) + tam_mat_interna] ); denominador = ( host_mat.perm[(i * tam_mat_real) + tam_mat_interna] + host_mat.perm[((i-1) * tam_mat_real) + tam_mat_interna] ); keff = numerador / denominador; host_b.U[(i * tam_mat_real) + tam_mat_interna] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + tam_mat_interna] * host_mat.perm[(i * tam_mat_real) + (tam_mat_interna - 1)] ); denominador = ( host_mat.perm[(i * tam_mat_real) + tam_mat_interna] + host_mat.perm[(i * tam_mat_real) + (tam_mat_interna - 1)] ); keff = numerador / denominador; host_b.L[(i * tam_mat_real) + tam_mat_interna] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + tam_mat_interna] * host_mat.perm[((i+1) * tam_mat_real) + tam_mat_interna] ); denominador = ( host_mat.perm[(i * tam_mat_real) + tam_mat_interna] + host_mat.perm[((i+1) * tam_mat_real) + tam_mat_interna] ); keff = numerador / denominador; host_b.D[(i * tam_mat_real) + tam_mat_interna] = C*h/keff; //Calcula dados internos int j = 0; for(j = 2; j < tam_mat_interna; j ++){ numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[(i * tam_mat_real) + (j - 1)] ); denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[(i * tam_mat_real) + (j - 1)] ); keff = numerador / denominador; host_b.L[(i * tam_mat_real) + j] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[(i * tam_mat_real) + (j + 1)] ); denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[(i * tam_mat_real) + (j + 1)] ); keff = numerador / denominador; host_b.R[(i * tam_mat_real) + j] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[((i - 1) * tam_mat_real) + j] ); denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[((i - 1) * tam_mat_real) + j] ); keff = numerador / denominador; host_b.U[(i * tam_mat_real) + j] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[((i + 1) * tam_mat_real) + j] ); denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[((i + 1) * tam_mat_real) + j] ); keff = numerador / denominador; host_b.D[(i * tam_mat_real) + j] = C*h/keff; } } } /* * *VERIFICAR RETORNO * */ char parametro_independentes(){ int i = 0, j = 0; float constante = 2/h; for(i = 0; i < tam_mat_real; i ++) for(j = 0; j < tam_mat_real; j++){ host_mat.epsilon[i*tam_mat_real + j] = constante * host_mat.perm[i*tam_mat_real + j]; host_mat.font[i*tam_mat_real + j] *= h; } return 0; } char copia_dados_para_gpu(){ HANDLE_ERROR( hipMemcpy( dev_q.R, host_q.R, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_q.L, host_q.L, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_q.U, host_q.U, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_q.D, host_q.D, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_q.R_old, host_q.R_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_q.L_old, host_q.L_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_q.U_old, 
host_q.U_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_q.D_old, host_q.D_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_l.R, host_l.R, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_l.L, host_l.L, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_l.U, host_l.U, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_l.D, host_l.D, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_l.R_old, host_l.R_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_l.L_old, host_l.L_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_l.U_old, host_l.U_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_l.D_old, host_l.D_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_b.R, host_b.R, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_b.L, host_b.L, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_b.U, host_b.U, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_b.D, host_b.D, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_b.R_old, host_b.R_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_b.L_old, host_b.L_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_b.U_old, host_b.U_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_b.D_old, host_b.D_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_pressao.p, host_pressao.p, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_pressao.p_old, host_pressao.p_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_mat.perm, host_mat.perm, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_mat.epsilon, host_mat.epsilon, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); HANDLE_ERROR( hipMemcpy( dev_mat.font, host_mat.font, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyHostToDevice ) ); return 0; } void copia_dados_para_cpu(){ HANDLE_ERROR( hipMemcpy( host_q.R, dev_q.R, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_q.L, dev_q.L, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_q.U, dev_q.U, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_q.D, dev_q.D, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_q.R_old, dev_q.R_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_q.L_old, dev_q.L_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( 
hipMemcpy( host_q.U_old, dev_q.U_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_q.D_old, dev_q.D_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_l.R, dev_l.R, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_l.L, dev_l.L, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_l.U, dev_l.U, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_l.D, dev_l.D, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_l.R_old, dev_l.R_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_l.L_old, dev_l.L_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_l.U_old, dev_l.U_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_l.D_old, dev_l.D_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_b.R, dev_b.R, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_b.L, dev_b.L, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_b.U, dev_b.U, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_b.D, dev_b.D, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_b.R_old, dev_b.R_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_b.L_old, dev_b.L_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_b.U_old, dev_b.U_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_b.D_old, dev_b.D_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_pressao.p, dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_pressao.p_old, dev_pressao.p_old, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_mat.font, dev_mat.font, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_mat.perm, dev_mat.perm, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); HANDLE_ERROR( hipMemcpy( host_mat.epsilon, dev_mat.epsilon, tam_mat_real * tam_mat_real * sizeof(float), hipMemcpyDeviceToHost ) ); } char inicializa_parametros(){ printf("\n\n\t\t- - INICIALIZANDO PARAMETROS - - \n\n\n"); /* * * * CONTRUIR FUNCAO PARA VERIFICAR ERRO DE ALOCAO * VERIFICAR RETORNO */ tam_mat_real = tam_mat_interna + 2; h = tam_regiao / tam_mat_interna; HANDLE_ERROR( hipMalloc( (void**)&dev_q, sizeof(ESTRUTURA_Q) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_l, sizeof(ESTRUTURA_L) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_b, sizeof(ESTRUTURA_B) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_pressao, sizeof(ESTRUTURA_PRESSAO) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_mat, sizeof(ESTRUTURA_MAT) ) ); host_q.R = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.R != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_q.R, tam_mat_real * tam_mat_real * sizeof(float) ) ); 
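/* Allocation pattern repeated for every field below: aloca_matriz() returns a
   zero-initialized host matrix and, when that succeeds, a device buffer of
   tam_mat_real * tam_mat_real floats is reserved for the matching dev_* member. */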
host_q.L = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.L != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_q.L, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.U = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.U != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_q.U, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.D = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.D != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_q.D, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.R_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.R_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_q.R_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.L_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.L_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_q.L_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.U_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.U_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_q.U_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.D_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.D_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_q.D_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.R = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.R != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_l.R, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.L = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.L != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_l.L, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.U = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.U != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_l.U, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.D = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.D != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_l.D, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.R_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.R_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_l.R_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.L_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.L_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_l.L_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.U_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.U_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_l.U_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.D_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.D_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_l.D_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.R = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.R != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_b.R, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.L = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.L != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_b.L, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.U = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.U != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_b.U, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.D = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.D != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_b.D, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.R_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.R_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_b.R_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.L_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.L_old != NULL) HANDLE_ERROR( hipMalloc( 
(void**)&dev_b.L_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.U_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.U_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_b.U_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.D_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.D_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_b.D_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_pressao.p = aloca_matriz(tam_mat_real, tam_mat_real); if(host_pressao.p != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_pressao.p_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_pressao.p_old != NULL) HANDLE_ERROR( hipMalloc( (void**)&dev_pressao.p_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_mat.perm, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_mat.font, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_mat.epsilon, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_aux_1, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_aux_2, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( hipMemset( dev_aux_1, 0.0, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( hipMemset( dev_aux_2, 0.0, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( hipMalloc( (void**)&erro_max, sizeof(float) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_erro, sizeof(float) ) ); HANDLE_ERROR( hipMalloc( (void**)&dev_media, tam_mat_real * tam_mat_real * sizeof(float)) ); host_aux_1 = aloca_matriz(tam_mat_real, tam_mat_real); int i = 0; switch(op_contorno){ case 1: //Inicializa contorno superior for(i = 0; i < tam_mat_real; i++){ host_q.D[i] = valor_contor; host_q.D_old[i] = valor_contor; } break; case 2://Inicializa contorno esquerdo for(i = 0; i < tam_mat_real; i++){ host_q.R[i*tam_mat_real] = valor_contor; host_q.R_old[i*tam_mat_real] = valor_contor; } break; case 3://Inicializa contorno direito for(i = 0; i < tam_mat_real; i++){ host_q.L[i*tam_mat_real + (tam_mat_real - 1)] = valor_contor; host_q.L_old[i*tam_mat_real + (tam_mat_real - 1)] = valor_contor; } break; case 4://Inicializa contorno inferior for(i = 0; i < tam_mat_real; i++){ host_q.L[(tam_mat_real-1)*tam_mat_real + i] = valor_contor; host_q.L_old[(tam_mat_real-1)*tam_mat_real + i] = valor_contor; } break; default: printf("\n\n\t\t - - OCORREU ALGUM ERRO NA OPCAO DE CONTORNO - - \n\n"); break; } printf("\n\n\t\t- - FIM DA INICIALIZACAO PARAMETROS - - \n\n\n"); return 1; } void clear_mem(){ HANDLE_ERROR( hipFree (dev_q.U)); HANDLE_ERROR( hipFree (dev_q.R)); HANDLE_ERROR( hipFree (dev_q.D)); HANDLE_ERROR( hipFree (dev_q.L)); free(host_q.U); free(host_q.R); free(host_q.D); free(host_q.L); HANDLE_ERROR( hipFree (dev_l.U)); HANDLE_ERROR( hipFree (dev_l.R)); HANDLE_ERROR( hipFree (dev_l.D)); HANDLE_ERROR( hipFree (dev_l.L)); free(host_l.U); free(host_l.R); free(host_l.D); free(host_l.L); HANDLE_ERROR( hipFree (dev_b.U)); HANDLE_ERROR( hipFree (dev_b.R)); HANDLE_ERROR( hipFree (dev_b.D)); HANDLE_ERROR( hipFree (dev_b.L)); free(host_b.U); free(host_b.R); free(host_b.D); free(host_b.L); HANDLE_ERROR( hipFree (dev_pressao.p)); HANDLE_ERROR( hipFree (dev_pressao.p_old)); free(host_pressao.p); free(host_pressao.p_old); HANDLE_ERROR( hipFree (dev_mat.perm)); HANDLE_ERROR( hipFree (dev_mat.font)); HANDLE_ERROR( hipFree (dev_mat.epsilon)); 
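/* Host copies of the material matrices are released below.  Note that the *_old
   buffers of q, l and b, as well as dev_aux_1, dev_aux_2, dev_media and
   host_aux_1 allocated in inicializa_parametros(), are not freed here. */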
free(host_mat.perm); free(host_mat.font); free(host_mat.epsilon); }
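/*
 * Illustrative sketch (not part of the original program): the commented-out
 * criterion inside preapara_criterio_convergencia() defines the stopping test
 * erro = sqrt(sum1/sum2), where sum1 accumulates (p - p_old)^2 and sum2
 * accumulates p^2.  main() currently runs a fixed 10 iterations and never
 * evaluates it.  Assuming dev_aux_1 and dev_aux_2 have been reduced with
 * reduce1 into num_blocks partial sums and copied back to the host, the test
 * could be finished like this (parcial_aux_1/parcial_aux_2 are hypothetical
 * host buffers holding those partial sums):
 *
 *   float sum1 = 0.0f, sum2 = 0.0f, erro;
 *   for (int k = 0; k < num_blocks; k++) {
 *       sum1 += parcial_aux_1[k];   // partial sums of (p - p_old)^2
 *       sum2 += parcial_aux_2[k];   // partial sums of p^2
 *   }
 *   erro = (sum2 > 0.0f) ? sqrtf(sum1 / sum2) : 0.0f;
 *   if (erro < erro_max) break;     // converged
 */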
07de61e4ed1952cf4156e2f721c67af939e7406d.cu
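/* Single-phase flow solver (escoamento monofasico): each cell carries a
   pressure p, four face fluxes q.{U,R,D,L} and Lagrange multipliers l.{U,R,D,L};
   the b.{U,R,D,L} coefficients come from Robin-type interface conditions built
   from the permeability field in cal_cond_robin().  The iteration is meant to
   stop when the relative pressure error sqrt(sum1/sum2) falls below erro_max
   (summary inferred from the routines below). */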
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include <cuda.h> #include <curand_kernel.h> #include <cuda_runtime.h> #define N 100 #define DIM 2 #define PamM 2e-11 #define S 0.5 char le_entrada(); char inicializa_parametros(); float *aloca_matriz(int, int); void cal_cond_robin(); char parametro_independentes(); char copia_dados_para_gpu(); void copia_dados_para_cpu(); void clear_mem(); //char calcula_pressao_velocidade(int, int, int, int, int); //char atualiza_mult_lagrange(int tid); static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) #define HANDLE_NULL( a ) {if (a == NULL) { \ printf( "Host memory failed in %s at line %d\n", \ __FILE__, __LINE__ ); \ exit( EXIT_FAILURE );}} //- - - - - - - - - - - - - - GLOBAIS - - - - - - - - - - - - - - // /* - - - - - - - Estruturas - - - - - - - */ typedef struct{ float *R, *L, *U, *D; float *R_old, *L_old, *U_old, *D_old; }ESTRUTURA_Q; typedef struct{ float *R, *L, *U, *D; float *R_old, *L_old, *U_old, *D_old; }ESTRUTURA_L; typedef struct{ float *R, *L, *U, *D; float *R_old, *L_old, *U_old, *D_old; }ESTRUTURA_B; typedef struct{ float *p, *p_old; }ESTRUTURA_PRESSAO; typedef struct{ float *perm, *font, *epsilon; }ESTRUTURA_MAT; /* - - - - - - - Fim das Estruturas - - - - - - - */ /* - - - - - - - Variaveis das Estruturas - - - - - - - */ ESTRUTURA_Q host_q, dev_q; ESTRUTURA_L host_l, dev_l; ESTRUTURA_B host_b, dev_b; ESTRUTURA_PRESSAO host_pressao, dev_pressao; ESTRUTURA_MAT host_mat, dev_mat; /* - - - - - - - Entradas Externas - - - - - - - */ int tam_mat_interna = 3, tam_mat_real = 3 + 2, max_interacoes = 1000, op_contorno = 1; float tam_regiao = 20000.00, erro_max = 1e-5, valor_contor = 2.00; float h = 20000.00 / 3; // ALTURA H = TAM_REGIAO / TAM_MAT_INTERNA //float *mat_perm = NULL, *mat_font = NULL, *mat_epsilon = NULL; //float *dev_mat_perm = NULL, *mat_font = NULL, *mat_epsilon = NULL; /* - - - - - - - Fim das Entradas Externas - - - - - - - */ /* - - - - - - - Fim das Variaveis das Estruturas - - - - - - - */ /* - - - - - - - Ponteiros para GPU - - - - - - - */ float *host_aux_1 = NULL, *dev_aux_1 = NULL, *dev_aux_2 = NULL, dev_erro = NULL, *dev_media = NULL; // float *dev_aux_1 = NULL, dev_erro = 0.0, dev_media = 0.0, dev_sum1 = 0.0, dev_sum2 = 0.0; // // float *dev_q.R = NULL, *dev_q.L = NULL, *dev_q.U = NULL, *dev_q.D = NULL; // float *dev_q.R_old = NULL, *dev_q.L_old = NULL, *dev_q.U_old = NULL, *dev_q.D_old = NULL; // // float *dev_l.R = NULL, *dev_l.L = NULL, *dev_l.U = NULL, *dev_l.D = NULL; // float *dev_l.R_old = NULL, *dev_l.L_old = NULL, *dev_l.U_old = NULL, *dev_l.D_old = NULL; // // float *dev_b.R = NULL, *dev_b.L = NULL, *dev_b.U = NULL, *dev_b.D = NULL; // float *dev_b.R_old = NULL, *dev_b.L_old = NULL, *dev_b.U_old = NULL, *dev_b.D_old = NULL; // // float *dev_pressao.p = NULL, *dev_pressao.p_old = NULL; // //- - - - - - - - - - - - - - FIM - GLOBAIS - - - - - - - - - - - - - - // __device__ char atualiza_mult_lagrange( int tid, ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b ){ int index_mem_central = 0, index_mem_down = 0, index_mem_uper = 0; int index_mem_left = 0, index_mem_right = 0; int offset = (blockDim.x * gridDim.x); // o kernel contem somente a quantidade de elementos internos // portanto a fronteira deve ser contata "+ 2" de cada lado index_mem_central = tid; 
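/* Neighbour indices in the flattened grid: the cell above/below is one full
   thread-grid width (offset) away, the left/right neighbours differ by 1. */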
index_mem_uper = index_mem_central - offset; // (offset -1) = comprimento do kernel index_mem_down = index_mem_central + offset; index_mem_left = index_mem_central - 1; index_mem_right = index_mem_central + 1; dev_l.U[index_mem_central] = dev_b.U[index_mem_central] * (dev_q.U[index_mem_central] + dev_q.D_old[index_mem_uper]) + dev_l.D_old[index_mem_uper]; dev_l.D[index_mem_central] = dev_b.D[index_mem_central] * (dev_q.D[index_mem_central] + dev_q.U_old[index_mem_down]) + dev_l.U_old[index_mem_down]; dev_l.R[index_mem_central] = dev_b.R[index_mem_central] * (dev_q.R[index_mem_central] + dev_q.L_old[index_mem_right]) + dev_l.L_old[index_mem_right]; dev_l.L[index_mem_central] = dev_b.L[index_mem_central] * (dev_q.L[index_mem_central] + dev_q.R_old[index_mem_left]) + dev_l.R_old[index_mem_left]; return 0; } __device__ char calcula_pressao_velocidade( int tid, int uper, int right, int down, int left, ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b, ESTRUTURA_PRESSAO dev_pressao, ESTRUTURA_MAT dev_mat ){ float auxU = 0.0, auxD = 0.0, auxR = 0.0, auxL = 0.0, DU = 0.0, DD = 0.0, DR = 0.0, DL = 0.0; int index_mem_central = 0, index_mem_down = 0, index_mem_uper = 0; int index_mem_left = 0, index_mem_right = 0; int offset = (blockDim.x * gridDim.x); // o kernel contem somente a quantidade de elementos internos // portanto a fronteira deve ser contata "+ 2" de cada lado index_mem_central = tid; index_mem_uper = index_mem_central - offset; index_mem_down = index_mem_central + offset; index_mem_left = index_mem_central - 1; index_mem_right = index_mem_central + 1; if(uper == 1){ auxU = dev_mat.epsilon[index_mem_central] / (1 + dev_b.U[index_mem_central] * dev_mat.epsilon[index_mem_central]); DU = auxU * (dev_b.U[index_mem_central] * dev_q.D_old[index_mem_uper] + dev_l.D_old[index_mem_uper]); } if(right == 1){ auxR = dev_mat.epsilon[index_mem_central] / (1 + dev_b.R[index_mem_central] * dev_mat.epsilon[index_mem_central]); DR = auxR * (dev_b.R[index_mem_central] * dev_q.L_old[index_mem_right] + dev_l.L_old[index_mem_right]); } if(down == 1){ auxD = dev_mat.epsilon[index_mem_central] / (1 + dev_b.D[index_mem_central] * dev_mat.epsilon[index_mem_central]); DD = auxD * (dev_b.D[index_mem_central] * dev_q.U_old[index_mem_down] + dev_l.U_old[index_mem_down]); } if(left == 1){ auxL = dev_mat.epsilon[index_mem_central] / (1 + dev_b.L[index_mem_central] * dev_mat.epsilon[index_mem_central]); DL = auxL * (dev_b.L[index_mem_central] * dev_q.R_old[index_mem_left] + dev_l.R_old[index_mem_left]); } dev_pressao.p[index_mem_central] = (dev_mat.font[index_mem_central] + DU + DR + DD + DL) / (auxU + auxR + auxD + auxL); dev_q.L[index_mem_central] = auxL * dev_pressao.p[index_mem_central] - DL; dev_q.R[index_mem_central] = auxR * dev_pressao.p[index_mem_central] - DR; dev_q.U[index_mem_central] = auxU * dev_pressao.p[index_mem_central] - DU; dev_q.D[index_mem_central] = auxD * dev_pressao.p[index_mem_central] - DD; return 0; } __global__ void reduce1(float *g_idata, float *g_odata, int n) { __shared__ float sdata[512]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockDim.x + threadIdx.x; sdata[tid] = (i < n) ? 
g_idata[i] : 0; __syncthreads(); // do reduction in shared mem for(unsigned int s=1; s < blockDim.x; s *= 2) { int index = 2 * s * tid; if (index < blockDim.x) { sdata[index] += sdata[index + s]; } __syncthreads(); } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } __global__ void reducao(float *in){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int tid = x + y * blockDim.x * gridDim.x; int dimensao_x = blockDim.x * gridDim.x; int dimensao_y = blockDim.y * gridDim.y; int i = (dimensao_x * dimensao_y )/ 2; //efetuando a redução while(i != 0){ if(tid < i) in[tid] += in[tid + i]; if(i % 2 == 1){ if(i>1) in[0] += in[i-1]; } __syncthreads(); i /= 2; } } __global__ void reducao2(float *in_1, float *in_2){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int tid = x + y * blockDim.x * gridDim.x; int dimensao_x = blockDim.x * gridDim.x; int dimensao_y = blockDim.y * gridDim.y; int i = (dimensao_x * dimensao_y )/ 2; while(i != 0){ if(tid < i) in_1[tid] += in_1[tid + i]; in_2[tid] += in_2[tid + i]; if(i % 2 == 1){ if(i>1) in_1[0] += in_1[i-1]; in_2[0] += in_2[i-1]; } __syncthreads(); i /= 2; } } __global__ void escoamento_monofasico( ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b, ESTRUTURA_PRESSAO dev_pressao, ESTRUTURA_MAT dev_mat ){ /*int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; a[offset] = offset;*/ /*vificar as condições de contorno*/ int flag_thread_centrais = 1; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; /*int offset = (blockDim.x * gridDim.x) + 1; // deslocamento para o tamanho da região (tam_regiao = n + 2) */ int tid = x + y * blockDim.x * gridDim.x; //verificar esse deslocamento para n causar problema (somente na hora de armazenar utilizar o deslocamento) //int tid = (x + y * blockDim.x * gridDim.x) + offset; // tid fornece o indice do vetor int dimensao_x = blockDim.x * gridDim.x; int dimensao_y = blockDim.y * gridDim.y; int eq_tid_cant_sup_esq = dimensao_x + 1; int eq_tid_cant_sup_dir = dimensao_x + (dimensao_x - 2); // posição extremo sup direito int eq_tid_cant_inf_dir = (dimensao_x * dimensao_y) - (dimensao_x + 2); // posição extremo inf direito int eq_tid_cant_inf_esq = ((dimensao_x) * (dimensao_y - 2)) + 1; // posição extremo inf esquerdo // int offset = (blockDim.x * gridDim.x) + 1 + 2; // o kernel contem somente a quantidade de elementos internos // portanto a fronteira deve ser contata "+ 2" de cada lado int index_mem_central = tid; if(tid == eq_tid_cant_sup_esq){//canto superior esquerdo /*VERIFICAR AS CONDIÇÕES DE CONTORNO*/ /* * calcula_pressao_velocidade(); * * Param: ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b, ESTRUTURA_PRESSAO dev_pressao, ESTRUTURA_MAT dev_mat * */ calcula_pressao_velocidade( tid, 0, 1, 1, 0, dev_q, dev_l, dev_b, dev_pressao, dev_mat); /* * * atualiza_mult_lagrange(); * * param: int tid, ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b * */ atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if(tid == eq_tid_cant_sup_dir){//canto superior direito /*VERIFICAR AS CONDIÇÕES DE CONTORNO*/ calcula_pressao_velocidade( tid, 0, 0, 1, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if(tid == eq_tid_cant_inf_esq){//canto inferior esquerdo /*VERIFICAR AS 
CONDIÇÕES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 1, 0, 0, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if(tid == eq_tid_cant_inf_dir){//canto inferior direito /*VERIFICAR AS CONDIÇÕES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 0, 0, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if((tid > eq_tid_cant_sup_esq) && (tid < eq_tid_cant_sup_dir)){//fronteira superior /*VERIFICAR AS CONDIÇÕES DE CONTORNO*/ calcula_pressao_velocidade( tid, 0, 1, 1, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if((tid > eq_tid_cant_sup_dir) && (tid < eq_tid_cant_inf_dir) && (tid % dimensao_x == dimensao_x - 2)){ //fronteira direita /*VERIFICAR AS CONDIÇÕES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 0, 1, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if((tid > eq_tid_cant_inf_esq) && (tid < eq_tid_cant_inf_dir)){ //fronteira inferior /*VERIFICAR AS CONDIÇÕES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 1, 0, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if((tid > eq_tid_cant_sup_esq) && (tid < eq_tid_cant_inf_dir) && (tid < eq_tid_cant_inf_esq) && (tid % dimensao_x == 1)){//fronteira esquerda /*VERIFICAR AS CONDIÇÕES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 1, 1, 0, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); flag_thread_centrais = 0; } if(flag_thread_centrais && (tid % dimensao_x >= 2) && (tid % dimensao_x <= (dimensao_x - 3)) && (tid > eq_tid_cant_sup_dir) && (tid < eq_tid_cant_inf_esq) ){ /*VERIFICAR AS CONDIÇÕES DE CONTORNO*/ calcula_pressao_velocidade( tid, 1, 1, 1, 1, dev_q, dev_l, dev_b, dev_pressao, dev_mat); atualiza_mult_lagrange(tid, dev_q, dev_l, dev_b); } //dev_media[tid] = dev_pressao.p[tid]; /* dev_media[0] = reducao(dev_media, 100); dev_media[0] = dev_media[0] / (dimensao_x * dimensao_y); dev_pressao.p[index_mem_central] -= dev_media[0]; dev_l.D[index_mem_central] -= dev_media[0]; dev_l.U[index_mem_central] -= dev_media[0]; dev_l.L[index_mem_central] -= dev_media[0]; dev_l.R[index_mem_central] -= dev_media[0];*/ //avaliando criterio de convergencia /*dev_aux_1[index_mem_central] = dev_pressao.p[index_mem_central] - dev_pressao.p_old[index_mem_central]; __syncthreads(); dev_aux_1[index_mem_central] = dev_aux_1[index_mem_central] * dev_aux_1[index_mem_central]; __syncthreads();*/ //redução da primeira soma sum1 /*dev_sum1 = reducao(dev_aux_1, 100);*/ //redução da segunda soma sum2 /*dev_aux_1[index_mem_central] = dev_pressao.p[index_mem_central] * dev_pressao.p[index_mem_central]; __syncthreads(); dev_sum2 = reducao(dev_aux_1, 100); dev_erro = sqrt(dev_sum1 / dev_sum2);*/ //DUVIDA PARA COMO É O SINAL DO ERRO /*if (dev_erro > erro_max){ return; } dev_pressao.p_old[index_mem_central] = dev_pressao.p[index_mem_central]; dev_q.U_old[index_mem_central] = dev_q.U[index_mem_central]; dev_q.R_old[index_mem_central] = dev_q.R[index_mem_central]; dev_q.L_old[index_mem_central] = dev_q.L[index_mem_central]; dev_q.D_old[index_mem_central] = dev_q.D[index_mem_central]; dev_l.D_old[index_mem_central] = dev_l.D[index_mem_central]; dev_l.U_old[index_mem_central] = dev_l.U[index_mem_central]; dev_l.L_old[index_mem_central] = 
dev_l.L[index_mem_central]; dev_l.R_old[index_mem_central] = dev_l.R[index_mem_central]; i++; }*/ } __global__ void preapara_criterio_convergencia(float *dev_aux_1, float *dev_aux_2, float *dev_media, ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_PRESSAO dev_pressao, const float media){ // sum1 = 0.; // sum2 = 0.; // for (k=1; k<=n; k++) // for (j=1; j<=n; j++) // { // aux = p[j][k] - p_old[j][k]; // sum1 += aux*aux; // sum2 += p[j][k]*p[j][k]; // } // erro = sqrt(sum1/sum2); //float dev_sum1 = 0.0, dev_sum2 = 0.0; int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int tid = x + y * blockDim.x * gridDim.x; int dimensao_x = blockDim.x * gridDim.x; int dimensao_y = blockDim.y * gridDim.y; // dimensao_x -= 2; // dimensao_y -= 2; //float media = (dev_media[0] + dev_media[1]) / ((dimensao_x-2) * (dimensao_y-2)); int canto_sup_dir = dimensao_x - 1; int canto_inf_dir = (dimensao_x * dimensao_y ) - 1; int canto_inf_esq = (dimensao_x * dimensao_y ) - (dimensao_x - 1); dev_pressao.p[tid] -= media; dev_l.D[tid] -= media; dev_l.U[tid] -= media; dev_l.L[tid] -= media; dev_l.R[tid] -= media; //despresando valores do contorno if((tid >= 0) && (tid <= canto_sup_dir)){ dev_pressao.p[tid] = 0.0; dev_l.D[tid] = 0.0; dev_l.U[tid] = 0.0; dev_l.L[tid] = 0.0; dev_l.R[tid] = 0.0; dev_aux_1[tid] = 0.0; dev_aux_2[tid] = 0.0; } if((tid > canto_sup_dir) && (tid <= canto_inf_esq) && (tid % dimensao_x == 0)){ dev_pressao.p[tid] = 0.0; dev_l.D[tid] = 0.0; dev_l.U[tid] = 0.0; dev_l.L[tid] = 0.0; dev_l.R[tid] = 0.0; dev_aux_1[tid] = 0.0; dev_aux_2[tid] = 0.0; } if((tid > canto_sup_dir) && (tid < canto_inf_esq) && (tid % dimensao_x == (dimensao_x -1))){ dev_pressao.p[tid] = 0.0; dev_l.D[tid] = 0.0; dev_l.U[tid] = 0.0; dev_l.L[tid] = 0.0; dev_l.R[tid] = 0.0; dev_aux_1[tid] = 0.0; dev_aux_2[tid] = 0.0; } if((tid >= canto_inf_esq) && (tid <= canto_inf_dir)){ dev_pressao.p[tid] = 0.0; dev_l.D[tid] = 0.0; dev_l.U[tid] = 0.0; dev_l.L[tid] = 0.0; dev_l.R[tid] = 0.0; dev_aux_1[tid] = 0.0; dev_aux_2[tid] = 0.0; } dev_aux_1[tid] = dev_pressao.p[tid] - dev_pressao.p_old[tid]; dev_aux_1[tid] = dev_aux_1[tid] * dev_aux_1[tid]; dev_aux_2[tid] = dev_pressao.p[tid] * dev_pressao.p[tid]; /*Media zero nas pressoes e multiplicadores de lagrange*/ // dev_pressao.p_old[tid] = dev_pressao.p[tid]; // dev_q.U_old[tid] = dev_q.U[tid]; // dev_q.R_old[tid] = dev_q.R[tid]; // dev_q.L_old[tid] = dev_q.L[tid]; // dev_q.D_old[tid] = dev_q.D[tid]; // // dev_l.D_old[tid] = dev_l.D[tid]; // dev_l.U_old[tid] = dev_l.U[tid]; // dev_l.L_old[tid] = dev_l.L[tid]; // dev_l.R_old[tid] = dev_l.R[tid]; } __global__ void teste( ESTRUTURA_PRESSAO dev_pressao, ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l ){ int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int tid = x + y * blockDim.x * gridDim.x; dev_pressao.p_old[tid] = dev_pressao.p[tid]; dev_q.U_old[tid] = dev_q.U[tid]; dev_q.R_old[tid] = dev_q.R[tid]; dev_q.L_old[tid] = dev_q.L[tid]; dev_q.D_old[tid] = dev_q.D[tid]; dev_l.D_old[tid] = dev_l.D[tid]; dev_l.U_old[tid] = dev_l.U[tid]; dev_l.L_old[tid] = dev_l.L[tid]; dev_l.R_old[tid] = dev_l.R[tid]; } int main(void){ le_entrada(); inicializa_parametros(); cal_cond_robin(); parametro_independentes(); copia_dados_para_gpu(); // dim3 block(comprimento/16 , altura/16); // dim3 thread(16, 16); dim3 block(2, 2); dim3 thread(5, 5); /* * escoamento_monofasico(); * * Param: ESTRUTURA_Q dev_q, ESTRUTURA_L dev_l, ESTRUTURA_B dev_b, ESTRUTURA_PRESSAO dev_pressao, ESTRUTURA_MAT dev_mat, float 
*dev_aux_1, const float erro_max * */ int i = 0, j = 0; while (i < 10){ escoamento_monofasico<<<block, thread>>>( dev_q, dev_l, dev_b, dev_pressao, dev_mat ); cudaDeviceSynchronize(); HANDLE_ERROR( cudaMemcpy( host_aux_1, dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); int k; int temp = tam_mat_real * tam_mat_real; float soma = 0.0, M = 0.0; for(k = 0; k < temp; k++){ soma = soma + host_aux_1[k]; } M = soma / (tam_mat_interna * tam_mat_interna); printf("\nvalor da media em serial = %f \t i = %d\n\n", M , i); int num_blocks, qtElementos; qtElementos = tam_mat_real*tam_mat_real; num_blocks = ((qtElementos) > 512) ? (qtElementos/512) + 1 : 1; reduce1<<<num_blocks, 512>>>( dev_pressao.p, dev_media, qtElementos ); HANDLE_ERROR( cudaMemcpy( host_aux_1, dev_media, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); printf("\n\n\t\t\t\tdev media:\n"); for(k = 0; k < tam_mat_real; k ++){ for(j = 0; j < tam_mat_real; j++) printf("%16.6E ", host_aux_1[k*tam_mat_real + j]); printf("\n"); } soma = 0.0; for(k = 0; k < num_blocks; k++) soma = soma + host_aux_1[k]; M = soma / (tam_mat_interna * tam_mat_interna); printf("valor da media em cuda = %f \t i = %d", M , i); HANDLE_ERROR( cudaMemcpy( host_aux_1, dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); printf("\n\n\t\t\t\tpressao:\n"); for(k = 0; k < tam_mat_real; k ++){ for(j = 0; j < tam_mat_real; j++) printf("%16.6E ", host_aux_1[k*tam_mat_real + j]); printf("\n"); } preapara_criterio_convergencia<<<block, thread>>>( dev_aux_1, dev_aux_2, dev_media, dev_q, dev_l, dev_pressao, M); teste<<<block, thread>>>( dev_pressao, dev_q, dev_l ); i++; } copia_dados_para_cpu(); /* printf("\ntam_mat_interna = %d\n", tam_mat_interna); printf("tam_mat_real = %d\n", tam_mat_real); printf("max_interacoes = %d\n", max_interacoes); printf("op_contorno = %d\n", op_contorno); printf("tam_regiao = %f\n", tam_regiao); printf("erro_max = %f\n", erro_max); printf("valor_contor = %f\n", valor_contor); printf("\n\n\t\t\tmat_font:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_mat.font[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\tmat_perm:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_mat.perm[i*tam_mat_real + j]); //printf(" %6.4E ", host_mat.perm[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\tmat_epsilon:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_mat.epsilon[i*tam_mat_real + j]); printf("\n"); } printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n"); printf("\n\n\t\t\tbeta U:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_b.U[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\tbeta R:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_b.R[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\tbeta L:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_b.L[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\tbeta D:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_b.D[i*tam_mat_real + j]); printf("\n"); } 
printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n"); printf("\n\n\t\t\t\tq_U:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_q.U[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_R:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_q.R[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_L:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_q.L[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_D:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_q.D[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_U:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_l.U[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_R:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_l.R[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_L:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_l.L[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_D:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf(" %6.4E ", host_l.D[i*tam_mat_real + j]); printf("\n"); } printf("\npressao:\n"); printf("\n\n\t\t\t\tpressao:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%16.6E ", host_pressao.p[i*tam_mat_real + j]); printf("\n"); }*/ // printf("\npressao old:\n"); // printf("\n\n\t\t\t\tpressao old:\n"); // for(i = 0; i < tam_mat_real; i ++){ // for(j = 0; j < tam_mat_real; j++) // printf("%16.6E ", host_pressao.p_old[i*tam_mat_real + j]); // printf("\n"); // } /*printf("\n\n\t\t\t\tb_U:\t\t\t\t\t\t\t\t\tb_U_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.U[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.U_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tb_R:\t\t\t\t\t\t\t\t\tb_R_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.R[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.R_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tb_D:\t\t\t\t\t\t\t\t\tb_D_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.D[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.D_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tb_D:\t\t\t\t\t\t\t\t\tb_D_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.D[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_b.D_old[i*tam_mat_real + j]); printf("\n"); } printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n"); printf("\npressao:\n"); printf("\n\n\t\t\t\tpressao:\t\t\t\t\t\t\t\t\tpressao_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_pressao.p[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_pressao.p_old[i*tam_mat_real + j]); 
printf("\n"); } printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n"); printf("\n\n\t\t\t\tl_U:\t\t\t\t\t\t\t\t\tl_U_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.U[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.U_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_R:\t\t\t\t\t\t\t\t\tl_R_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.R[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.R_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_D:\t\t\t\t\t\t\t\t\tl_D_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.D[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.D_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tl_L:\t\t\t\t\t\t\t\t\tl_L_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.L[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_l.L_old[i*tam_mat_real + j]); printf("\n"); } printf("\n------------------------------------------------------------------------------------------------------------------------------------------\n"); printf("\n\n\t\t\t\tq_U:\t\t\t\t\t\t\t\t\tq_U_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.U[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.U_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_R:\t\t\t\t\t\t\t\t\tq_R_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.R[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.R_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_D:\t\t\t\t\t\t\t\t\tq_D_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.D[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.D_old[i*tam_mat_real + j]); printf("\n"); } printf("\n\n\t\t\t\tq_L:\t\t\t\t\t\t\t\t\tq_L_old:\n"); for(i = 0; i < tam_mat_real; i ++){ for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.L[i*tam_mat_real + j]); printf("| "); for(j = 0; j < tam_mat_real; j++) printf("%12.4E ", host_q.L_old[i*tam_mat_real + j]); printf("\n"); }*/ clear_mem(); // // system("pause"); return 0; } char le_entrada(){ printf("\n\n\t\t - - CARREGANDO ENTRADA - - \n\n"); FILE *arq = NULL; //arq = fopen("../dir_entrada/parametro_entrada.txt", "r"); arq = fopen("parametro_entrada.txt", "r"); if(arq == NULL){ printf("Erro ao abrir aquivo: 'parametro_entrada.txt'\n\t\tCertifique-se que o arquivo exite.\n"); exit(1); } else{ printf("\t\t - - LENDO ARQUIVO DE ENTRADA - -\n"); /*char c[2], dados[255], buffer[255];*/ char buffer[255]; int cont = 1; while(cont < 9){ fscanf(arq, "%s", buffer); //puts(buffer); int i = 0, j = 0; switch(strlen(buffer)){ case 8: //erro_maximo fscanf(arq, "%f", &erro_max); break; case 10: //tam_regiao fscanf(arq, "%f", &tam_regiao); break; case 11: //opcao_contorno fscanf(arq, "%d", &op_contorno); break; case 12: //valor_contor fscanf(arq, "%f", &valor_contor); break; case 14: //max_interacoes fscanf(arq, "%d", 
&max_interacoes); break; case 15: //tam_mat_interna fscanf(arq, "%d", &tam_mat_interna); break; case 16: //matriz_de_fontes //uso (tam_mat_interna + 2) - pois ainda não inicializei 'tam_mat_real' host_mat.font = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2); for(i = 1; i < (tam_mat_interna + 2) - 1; i ++) for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++) fscanf(arq, "%f", &host_mat.font[i*(tam_mat_interna+2) + j]); break; case 18: //matriz_permeabilidade host_mat.perm = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2); host_mat.epsilon = aloca_matriz(tam_mat_interna + 2, tam_mat_interna + 2); for(i = 1; i < (tam_mat_interna + 2) - 1; i ++) for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++) fscanf(arq, "%f", &host_mat.perm[i*(tam_mat_interna+2) + j]); for(i = 1; i < (tam_mat_interna + 2) - 1; i ++) for(j = 1; j < (tam_mat_interna + 2) - 1 ; j++) host_mat.perm[i*(tam_mat_interna+2) + j] = PamM*exp(S * host_mat.perm[i*(tam_mat_interna+2) + j]); break; default: printf("\n\n\t\tHouve algum erro no aquivo de entrada!\n\n"); return 0; } //int tam = strlen(buffer); cont++; } printf("\t\t - - ARQUIVO DE ENTRADA CARREGADO - -\n"); } printf("\n\n\t\t - - ENTRADA CARREGA - - \n\n"); return 1; } float *aloca_matriz(int L, int C){ float *aux = NULL; aux = (float *) calloc(L * C, sizeof(float)); if(aux == NULL){ printf("\n\n\t\tErro ao alocar memoria\n\n"); exit(1); }else{ return (aux); } return NULL; } /* * *VERIFICAR RETORNO * */ void cal_cond_robin(){ float keff = 0.0, numerador = 0.0, denominador = 0.0; float C = 1.0; // Cte adimensional que se ajusta experimentalmente C = 1.0 //Canto superior esquerdo numerador = ( 2 * host_mat.perm[tam_mat_real + 1] * host_mat.perm[tam_mat_real + 2] ); denominador = ( host_mat.perm[tam_mat_real + 1] + host_mat.perm[tam_mat_real + 2] ); keff = numerador / denominador; host_b.R[tam_mat_real + 1] = C*h/keff; numerador = (2 * host_mat.perm[tam_mat_real + 1] * host_mat.perm[(2*tam_mat_real) + 1]); denominador = ( host_mat.perm[tam_mat_real + 1] + host_mat.perm[(2*tam_mat_real) + 1]); keff = numerador / denominador; host_b.D[tam_mat_real + 1] = C*h/keff; //Canto superior direito numerador = ( 2 * host_mat.perm[tam_mat_real + tam_mat_interna] * host_mat.perm[tam_mat_real + (tam_mat_interna - 1)] ); denominador = ( host_mat.perm[tam_mat_real + tam_mat_interna] + host_mat.perm[tam_mat_real + (tam_mat_interna - 1)] ); keff = numerador / denominador; host_b.L[tam_mat_real + tam_mat_interna] = C*h/keff; numerador = ( 2 * host_mat.perm[tam_mat_real + tam_mat_interna] * host_mat.perm[(2 * tam_mat_real) + tam_mat_interna] ); denominador = ( host_mat.perm[tam_mat_real + tam_mat_interna] + host_mat.perm[(2 * tam_mat_real) + tam_mat_interna] ); keff = numerador / denominador; host_b.D[tam_mat_real + tam_mat_interna] = C*h/keff; //Canto infeior esquerdo numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] * host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + 1] ); denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] + host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + 1] ); keff = numerador / denominador; host_b.U[(tam_mat_real * tam_mat_interna) + 1] = C*h/keff; numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] * host_mat.perm[(tam_mat_real * tam_mat_interna) + 2] ); denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + 1] + host_mat.perm[(tam_mat_real * tam_mat_interna) + 2] ); keff = numerador / denominador; host_b.R[(tam_mat_real * tam_mat_interna) + 1] = C*h/keff; //Canto infeior direito numerador = 
( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] * host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + tam_mat_interna] ); denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] + host_mat.perm[(tam_mat_real * (tam_mat_interna - 1)) + tam_mat_interna] ); keff = numerador / denominador; host_b.U[(tam_mat_real * tam_mat_interna) + tam_mat_interna] = C*h/keff; numerador = ( 2 * host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] * host_mat.perm[(tam_mat_real * tam_mat_interna) + (tam_mat_interna - 1)] ); denominador = ( host_mat.perm[(tam_mat_real * tam_mat_interna) + tam_mat_interna] + host_mat.perm[(tam_mat_real * tam_mat_interna) + (tam_mat_interna - 1)] ); keff = numerador / denominador; host_b.L[(tam_mat_real * tam_mat_interna) + tam_mat_interna] = C*h/keff; //Calculo das fronteiras e região interna para betas int i = 0; for(i = 2; i < tam_mat_interna; i ++){ //Calcula fronteira superior numerador = ( 2 * host_mat.perm[tam_mat_real + i] * host_mat.perm[tam_mat_real + (i-1)] ); denominador = ( host_mat.perm[tam_mat_real + i] + host_mat.perm[tam_mat_real + (i-1)] ); keff = numerador / denominador; host_b.L[tam_mat_real + i] = C*h/keff; numerador = ( 2 * host_mat.perm[tam_mat_real + i] * host_mat.perm[tam_mat_real + (i+1)] ); denominador = ( host_mat.perm[tam_mat_real + i] + host_mat.perm[tam_mat_real + (i+1)] ); keff = numerador / denominador; host_b.R[tam_mat_real + i] = C*h/keff; numerador = ( 2 * host_mat.perm[tam_mat_real + i] * host_mat.perm[(2 * tam_mat_real) + i] ); denominador = ( host_mat.perm[tam_mat_real + i] + host_mat.perm[(2 * tam_mat_real) + i] ); keff = numerador / denominador; host_b.D[tam_mat_real + i] = C*h/keff; //Calcula fronteira esquerda numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + 1] * host_mat.perm[((i - 1) * tam_mat_real) + 1] ); denominador = ( host_mat.perm[(i * tam_mat_real) + 1] + host_mat.perm[((i - 1) * tam_mat_real) + 1] ); keff = numerador / denominador; host_b.U[(i * tam_mat_real) + 1] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + 1] * host_mat.perm[(i * tam_mat_real) + 2] ); denominador = ( host_mat.perm[(i * tam_mat_real) + 1] + host_mat.perm[(i * tam_mat_real) + 2] ); keff = numerador / denominador; host_b.R[(i * tam_mat_real) + 1] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + 1] * host_mat.perm[((i + 1) * tam_mat_real) + 1] ); denominador = ( host_mat.perm[(i * tam_mat_real) + 1] + host_mat.perm[((i + 1) * tam_mat_real) + 1] ); keff = numerador / denominador; host_b.D[(i * tam_mat_real) + 1] = C*h/keff; //Calcula fronteira inferior numerador = ( 2 * host_mat.perm[(tam_mat_interna * tam_mat_real) + i] * host_mat.perm[(tam_mat_interna * tam_mat_real) + (i - 1)] ); denominador = ( host_mat.perm[(tam_mat_interna * tam_mat_real) + i] + host_mat.perm[(tam_mat_interna * tam_mat_real) + (i - 1)] ); keff = numerador / denominador; host_b.L[(tam_mat_interna * tam_mat_real) + i] = C*h/keff; numerador = ( 2 * host_mat.perm[(tam_mat_interna * tam_mat_real) + i] * host_mat.perm[((tam_mat_interna - 1) * tam_mat_real) + i] ); denominador = ( host_mat.perm[(tam_mat_interna * tam_mat_real) + i] + host_mat.perm[((tam_mat_interna - 1) * tam_mat_real) + i] ); keff = numerador / denominador; host_b.U[(tam_mat_interna * tam_mat_real) + i] = C*h/keff; numerador = ( 2 * host_mat.perm[(tam_mat_interna * tam_mat_real) + i] * host_mat.perm[(tam_mat_interna * tam_mat_real) + (i + 1)] ); denominador = ( host_mat.perm[(tam_mat_interna * tam_mat_real) + i] + 
host_mat.perm[(tam_mat_interna * tam_mat_real) + (i + 1)] ); keff = numerador / denominador; host_b.R[(tam_mat_interna * tam_mat_real) + i] = C*h/keff; //Calcula fronteira direita numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + tam_mat_interna] * host_mat.perm[((i-1) * tam_mat_real) + tam_mat_interna] ); denominador = ( host_mat.perm[(i * tam_mat_real) + tam_mat_interna] + host_mat.perm[((i-1) * tam_mat_real) + tam_mat_interna] ); keff = numerador / denominador; host_b.U[(i * tam_mat_real) + tam_mat_interna] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + tam_mat_interna] * host_mat.perm[(i * tam_mat_real) + (tam_mat_interna - 1)] ); denominador = ( host_mat.perm[(i * tam_mat_real) + tam_mat_interna] + host_mat.perm[(i * tam_mat_real) + (tam_mat_interna - 1)] ); keff = numerador / denominador; host_b.L[(i * tam_mat_real) + tam_mat_interna] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + tam_mat_interna] * host_mat.perm[((i+1) * tam_mat_real) + tam_mat_interna] ); denominador = ( host_mat.perm[(i * tam_mat_real) + tam_mat_interna] + host_mat.perm[((i+1) * tam_mat_real) + tam_mat_interna] ); keff = numerador / denominador; host_b.D[(i * tam_mat_real) + tam_mat_interna] = C*h/keff; //Calcula dados internos int j = 0; for(j = 2; j < tam_mat_interna; j ++){ numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[(i * tam_mat_real) + (j - 1)] ); denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[(i * tam_mat_real) + (j - 1)] ); keff = numerador / denominador; host_b.L[(i * tam_mat_real) + j] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[(i * tam_mat_real) + (j + 1)] ); denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[(i * tam_mat_real) + (j + 1)] ); keff = numerador / denominador; host_b.R[(i * tam_mat_real) + j] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[((i - 1) * tam_mat_real) + j] ); denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[((i - 1) * tam_mat_real) + j] ); keff = numerador / denominador; host_b.U[(i * tam_mat_real) + j] = C*h/keff; numerador = ( 2 * host_mat.perm[(i * tam_mat_real) + j] * host_mat.perm[((i + 1) * tam_mat_real) + j] ); denominador = ( host_mat.perm[(i * tam_mat_real) + j] + host_mat.perm[((i + 1) * tam_mat_real) + j] ); keff = numerador / denominador; host_b.D[(i * tam_mat_real) + j] = C*h/keff; } } } /* * *VERIFICAR RETORNO * */ char parametro_independentes(){ int i = 0, j = 0; float constante = 2/h; for(i = 0; i < tam_mat_real; i ++) for(j = 0; j < tam_mat_real; j++){ host_mat.epsilon[i*tam_mat_real + j] = constante * host_mat.perm[i*tam_mat_real + j]; host_mat.font[i*tam_mat_real + j] *= h; } return 0; } char copia_dados_para_gpu(){ HANDLE_ERROR( cudaMemcpy( dev_q.R, host_q.R, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_q.L, host_q.L, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_q.U, host_q.U, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_q.D, host_q.D, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_q.R_old, host_q.R_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_q.L_old, host_q.L_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( 
dev_q.U_old, host_q.U_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_q.D_old, host_q.D_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_l.R, host_l.R, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_l.L, host_l.L, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_l.U, host_l.U, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_l.D, host_l.D, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_l.R_old, host_l.R_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_l.L_old, host_l.L_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_l.U_old, host_l.U_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_l.D_old, host_l.D_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_b.R, host_b.R, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_b.L, host_b.L, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_b.U, host_b.U, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_b.D, host_b.D, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_b.R_old, host_b.R_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_b.L_old, host_b.L_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_b.U_old, host_b.U_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_b.D_old, host_b.D_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_pressao.p, host_pressao.p, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_pressao.p_old, host_pressao.p_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_mat.perm, host_mat.perm, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_mat.epsilon, host_mat.epsilon, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); HANDLE_ERROR( cudaMemcpy( dev_mat.font, host_mat.font, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyHostToDevice ) ); return 0; } void copia_dados_para_cpu(){ HANDLE_ERROR( cudaMemcpy( host_q.R, dev_q.R, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_q.L, dev_q.L, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_q.U, dev_q.U, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_q.D, dev_q.D, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_q.R_old, dev_q.R_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_q.L_old, dev_q.L_old, tam_mat_real * 
tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_q.U_old, dev_q.U_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_q.D_old, dev_q.D_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_l.R, dev_l.R, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_l.L, dev_l.L, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_l.U, dev_l.U, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_l.D, dev_l.D, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_l.R_old, dev_l.R_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_l.L_old, dev_l.L_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_l.U_old, dev_l.U_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_l.D_old, dev_l.D_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_b.R, dev_b.R, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_b.L, dev_b.L, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_b.U, dev_b.U, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_b.D, dev_b.D, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_b.R_old, dev_b.R_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_b.L_old, dev_b.L_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_b.U_old, dev_b.U_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_b.D_old, dev_b.D_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_pressao.p, dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_pressao.p_old, dev_pressao.p_old, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_mat.font, dev_mat.font, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_mat.perm, dev_mat.perm, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); HANDLE_ERROR( cudaMemcpy( host_mat.epsilon, dev_mat.epsilon, tam_mat_real * tam_mat_real * sizeof(float), cudaMemcpyDeviceToHost ) ); } char inicializa_parametros(){ printf("\n\n\t\t- - INICIALIZANDO PARAMETROS - - \n\n\n"); /* * * * CONTRUIR FUNCAO PARA VERIFICAR ERRO DE ALOCAÇÃO * VERIFICAR RETORNO */ tam_mat_real = tam_mat_interna + 2; h = tam_regiao / tam_mat_interna; HANDLE_ERROR( cudaMalloc( (void**)&dev_q, sizeof(ESTRUTURA_Q) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_l, sizeof(ESTRUTURA_L) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_b, sizeof(ESTRUTURA_B) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_pressao, sizeof(ESTRUTURA_PRESSAO) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_mat, sizeof(ESTRUTURA_MAT) ) ); host_q.R = aloca_matriz(tam_mat_real, 
tam_mat_real); if(host_q.R != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_q.R, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.L = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.L != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_q.L, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.U = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.U != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_q.U, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.D = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.D != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_q.D, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.R_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.R_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_q.R_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.L_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.L_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_q.L_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.U_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.U_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_q.U_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_q.D_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_q.D_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_q.D_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.R = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.R != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_l.R, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.L = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.L != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_l.L, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.U = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.U != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_l.U, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.D = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.D != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_l.D, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.R_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.R_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_l.R_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.L_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.L_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_l.L_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.U_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.U_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_l.U_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_l.D_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_l.D_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_l.D_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.R = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.R != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_b.R, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.L = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.L != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_b.L, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.U = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.U != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_b.U, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.D = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.D != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_b.D, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.R_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.R_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_b.R_old, tam_mat_real * 
tam_mat_real * sizeof(float) ) ); host_b.L_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.L_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_b.L_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.U_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.U_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_b.U_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_b.D_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_b.D_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_b.D_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_pressao.p = aloca_matriz(tam_mat_real, tam_mat_real); if(host_pressao.p != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_pressao.p, tam_mat_real * tam_mat_real * sizeof(float) ) ); host_pressao.p_old = aloca_matriz(tam_mat_real, tam_mat_real); if(host_pressao.p_old != NULL) HANDLE_ERROR( cudaMalloc( (void**)&dev_pressao.p_old, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_mat.perm, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_mat.font, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_mat.epsilon, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_aux_1, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_aux_2, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( cudaMemset( dev_aux_1, 0.0, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( cudaMemset( dev_aux_2, 0.0, tam_mat_real * tam_mat_real * sizeof(float) ) ); HANDLE_ERROR( cudaMalloc( (void**)&erro_max, sizeof(float) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_erro, sizeof(float) ) ); HANDLE_ERROR( cudaMalloc( (void**)&dev_media, tam_mat_real * tam_mat_real * sizeof(float)) ); host_aux_1 = aloca_matriz(tam_mat_real, tam_mat_real); int i = 0; switch(op_contorno){ case 1: //Inicializa contorno superior for(i = 0; i < tam_mat_real; i++){ host_q.D[i] = valor_contor; host_q.D_old[i] = valor_contor; } break; case 2://Inicializa contorno esquerdo for(i = 0; i < tam_mat_real; i++){ host_q.R[i*tam_mat_real] = valor_contor; host_q.R_old[i*tam_mat_real] = valor_contor; } break; case 3://Inicializa contorno direito for(i = 0; i < tam_mat_real; i++){ host_q.L[i*tam_mat_real + (tam_mat_real - 1)] = valor_contor; host_q.L_old[i*tam_mat_real + (tam_mat_real - 1)] = valor_contor; } break; case 4://Inicializa contorno inferior for(i = 0; i < tam_mat_real; i++){ host_q.L[(tam_mat_real-1)*tam_mat_real + i] = valor_contor; host_q.L_old[(tam_mat_real-1)*tam_mat_real + i] = valor_contor; } break; default: printf("\n\n\t\t - - OCORREU ALGUM ERRO NA OPCAO DE CONTORNO - - \n\n"); break; } printf("\n\n\t\t- - FIM DA INICIALIZACAO PARAMETROS - - \n\n\n"); return 1; } void clear_mem(){ HANDLE_ERROR( cudaFree (dev_q.U)); HANDLE_ERROR( cudaFree (dev_q.R)); HANDLE_ERROR( cudaFree (dev_q.D)); HANDLE_ERROR( cudaFree (dev_q.L)); free(host_q.U); free(host_q.R); free(host_q.D); free(host_q.L); HANDLE_ERROR( cudaFree (dev_l.U)); HANDLE_ERROR( cudaFree (dev_l.R)); HANDLE_ERROR( cudaFree (dev_l.D)); HANDLE_ERROR( cudaFree (dev_l.L)); free(host_l.U); free(host_l.R); free(host_l.D); free(host_l.L); HANDLE_ERROR( cudaFree (dev_b.U)); HANDLE_ERROR( cudaFree (dev_b.R)); HANDLE_ERROR( cudaFree (dev_b.D)); HANDLE_ERROR( cudaFree (dev_b.L)); free(host_b.U); free(host_b.R); free(host_b.D); free(host_b.L); HANDLE_ERROR( cudaFree (dev_pressao.p)); HANDLE_ERROR( cudaFree (dev_pressao.p_old)); 
free(host_pressao.p); free(host_pressao.p_old); HANDLE_ERROR( cudaFree (dev_mat.perm)); HANDLE_ERROR( cudaFree (dev_mat.font)); HANDLE_ERROR( cudaFree (dev_mat.epsilon)); free(host_mat.perm); free(host_mat.font); free(host_mat.epsilon); }
22f80b9c04f57188a6129d57720fc77c5065250d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void addKernel(int * dev_a, int * dev_b, int * dev_c)
{
	int i = threadIdx.x;
	dev_c[i] = dev_a[i] + dev_b[i];
}
22f80b9c04f57188a6129d57720fc77c5065250d.cu
#include "includes.h" __global__ void addKernel(int * dev_a, int * dev_b, int * dev_c) { int i = threadIdx.x; dev_c[i] = dev_a[i] + dev_b[i]; }
80063de3dbf12b6d067ffb39f4bbc4a1db75be84.hip
// !!! This is a file automatically generated by hipify!!!
//
// auto-generated by op2.m on 24-Oct-2011 15:49:30
//

// header

#include "op_lib_cpp.h"
#include "op_cuda_rt_support.h"
#include "op_cuda_reduction.h"

// global constants

#ifndef MAX_CONST_SIZE
#define MAX_CONST_SIZE 128
#endif

__constant__ double alpha;

void op_decl_const_char(int dim, char const *type, int size, char *dat, char const *name){
	cutilSafeCall(hipMemcpyToSymbol(name, dat, dim*size));
}

// user kernel files

#include "res_kernel.hip"
#include "update_kernel.hip"
80063de3dbf12b6d067ffb39f4bbc4a1db75be84.cu
//
// auto-generated by op2.m on 24-Oct-2011 15:49:30
//

// header

#include "op_lib_cpp.h"
#include "op_cuda_rt_support.h"
#include "op_cuda_reduction.h"

// global constants

#ifndef MAX_CONST_SIZE
#define MAX_CONST_SIZE 128
#endif

__constant__ double alpha;

void op_decl_const_char(int dim, char const *type, int size, char *dat, char const *name){
	cutilSafeCall(cudaMemcpyToSymbol(name, dat, dim*size));
}

// user kernel files

#include "res_kernel.cu"
#include "update_kernel.cu"
5242d671341c2b6c06031c93cc3a8e9c1a2b5116.hip
// !!! This is a file automatically generated by hipify!!!
/*
 ============================================================================
 Name        : sorting_segments.cu
 Author      : Rafael Schmid
 Version     :
 Copyright   : Your copyright notice
 Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU
 ============================================================================

 COMPILE USING THE FOLLOWING COMMAND:

 nvcc segmented_sort.cu -o segmented_sort -std=c++11 --expt-extended-lambda -I"/home/schmid/Dropbox/Unicamp/workspace/sorting_segments/moderngpu-master/src"
 */

#include <hipcub/hipcub.hpp>
#include <cub/device/device_segmented_radix_sort.cuh>

#include <stdio.h>
#include <stdlib.h>
#include <iostream>

#include <hip/hip_runtime.h>

#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif

#ifndef BLOCK_SIZE
#define BLOCK_SIZE 32
#endif

void cudaTest(hipError_t error) {
	if (error != hipSuccess) {
		printf("cuda returned error %s (code %d), line(%d)\n",
				hipGetErrorString(error), error, __LINE__);
		exit(EXIT_FAILURE);
	}
}

template<typename T>
void print(T* vec, uint t, uint m) {
	std::cout << "\n";
	for (uint i = 0; i < t; i++) {
		for (uint j = 0; j < m; j++) {
			std::cout << vec[i * m + j] << " ";
		}
		std::cout << "\n";
	}
}

template<typename T>
void print(T* vec, uint t) {
	std::cout << "\n";
	for (uint i = 0; i < t; i++) {
		std::cout << vec[i] << " ";
	}
	std::cout << "\n";
}

__global__ void transpose(const float *machines, float *machines_out,
		const uint *task_index, uint* task_index_out, int t, int m) {
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;

	//for (int e = 0; e < t; e += BLOCK_SIZE)
	machines_out[col * t + row] = machines[row * m + col];
	task_index_out[col * t + row] = task_index[row * m + col];
}

int main(int argc, char** argv) {
	int t, m;

	/*if (argc < 3) {
	 printf("Parameters missing: <number of tasks> <number of machines>\n\n");
	 return 0;
	 }

	 t = atoi(argv[1]);
	 m = atoi(argv[2]);
	 */

	int a = scanf("%d", &t);
	a = scanf("%d", &m);

	uint mem_size_machines = sizeof(float) * (m * t);
	uint mem_size_task_index = sizeof(uint) * (m * t);

	float *machines = (float *) malloc(mem_size_machines);
	uint *task_index = (uint *) malloc(mem_size_task_index);

	float aux;
	for (int i = 0; i < t; i++) {
		for (int j = 0; j < m; j++) {
			int a = scanf("%f", &aux);
			task_index[i * m + j] = j;
			machines[i * m + j] = aux;
		}
	}

	print(machines, t, m);
	print(task_index, t, m);

	hipEvent_t start, stop;
	hipEventCreate(&start);
	hipEventCreate(&stop);

	uint *d_task_index, *d_task_index_out;
	float *d_machines, *d_machines_out;

	cudaTest(hipMalloc((void **) &d_machines, mem_size_machines));
	cudaTest(hipMalloc((void **) &d_machines_out, mem_size_machines));
	cudaTest(hipMalloc((void **) &d_task_index, mem_size_task_index));
	cudaTest(hipMalloc((void **) &d_task_index_out, mem_size_task_index));

	// copy host memory to device
	cudaTest(hipMemcpy(d_machines, machines, mem_size_machines, hipMemcpyHostToDevice));
	cudaTest(hipMemcpy(d_task_index, task_index, mem_size_task_index, hipMemcpyHostToDevice));

	hipEventRecord(start);
	dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
	dim3 grid(m / BLOCK_SIZE, t / BLOCK_SIZE, 1);
	// launch the transpose kernel defined above; the launch configuration
	// takes the grid dimensions first and the block dimensions second
	hipLaunchKernelGGL(transpose, dim3(grid), dim3(block), 0, 0, d_machines,
			d_machines_out, d_task_index, d_task_index_out, t, m);
	hipEventRecord(stop);

	hipError_t errSync = hipGetLastError();
	hipError_t errAsync = hipDeviceSynchronize();
	if (errSync != hipSuccess)
		printf("Sync kernel error: %s\n", hipGetErrorString(errSync));
	if (errAsync != hipSuccess)
		printf("Async kernel error: %s\n", hipGetErrorString(errAsync));

	if (ELAPSED_TIME == 1) {
		hipEventSynchronize(stop);
		float milliseconds = 0;
		hipEventElapsedTime(&milliseconds, start, stop);
		std::cout << milliseconds << "\n";
	}

	hipDeviceSynchronize();

	cudaTest(hipMemcpy(machines, d_machines_out, mem_size_machines, hipMemcpyDeviceToHost));
	cudaTest(hipMemcpy(task_index, d_task_index_out, mem_size_task_index, hipMemcpyDeviceToHost));

	hipFree(d_machines);
	hipFree(d_machines_out);
	hipFree(d_task_index);
	hipFree(d_task_index_out);

	if (ELAPSED_TIME != 1) {
		print(machines, m, t);
		print(task_index, m, t);
	}

	free(machines);
	free(task_index);

	return 0;
}
5242d671341c2b6c06031c93cc3a8e9c1a2b5116.cu
/*
 ============================================================================
 Name        : sorting_segments.cu
 Author      : Rafael Schmid
 Version     :
 Copyright   : Your copyright notice
 Description : Compute sum of reciprocals using STL on CPU and Thrust on GPU
 ============================================================================

 COMPILE USING THE FOLLOWING COMMAND:

 nvcc segmented_sort.cu -o segmented_sort -std=c++11 --expt-extended-lambda -I"/home/schmid/Dropbox/Unicamp/workspace/sorting_segments/moderngpu-master/src"
 */

#include <cub/util_allocator.cuh>
#include <cub/device/device_segmented_radix_sort.cuh>

#include <stdio.h>
#include <stdlib.h>
#include <iostream>

#include <cuda.h>

#ifndef ELAPSED_TIME
#define ELAPSED_TIME 0
#endif

#ifndef BLOCK_SIZE
#define BLOCK_SIZE 32
#endif

void cudaTest(cudaError_t error) {
	if (error != cudaSuccess) {
		printf("cuda returned error %s (code %d), line(%d)\n",
				cudaGetErrorString(error), error, __LINE__);
		exit(EXIT_FAILURE);
	}
}

template<typename T>
void print(T* vec, uint t, uint m) {
	std::cout << "\n";
	for (uint i = 0; i < t; i++) {
		for (uint j = 0; j < m; j++) {
			std::cout << vec[i * m + j] << " ";
		}
		std::cout << "\n";
	}
}

template<typename T>
void print(T* vec, uint t) {
	std::cout << "\n";
	for (uint i = 0; i < t; i++) {
		std::cout << vec[i] << " ";
	}
	std::cout << "\n";
}

__global__ void transpose(const float *machines, float *machines_out,
		const uint *task_index, uint* task_index_out, int t, int m) {
	int row = blockIdx.y * blockDim.y + threadIdx.y;
	int col = blockIdx.x * blockDim.x + threadIdx.x;

	//for (int e = 0; e < t; e += BLOCK_SIZE)
	machines_out[col * t + row] = machines[row * m + col];
	task_index_out[col * t + row] = task_index[row * m + col];
}

int main(int argc, char** argv) {
	int t, m;

	/*if (argc < 3) {
	 printf("Parameters missing: <number of tasks> <number of machines>\n\n");
	 return 0;
	 }

	 t = atoi(argv[1]);
	 m = atoi(argv[2]);
	 */

	int a = scanf("%d", &t);
	a = scanf("%d", &m);

	uint mem_size_machines = sizeof(float) * (m * t);
	uint mem_size_task_index = sizeof(uint) * (m * t);

	float *machines = (float *) malloc(mem_size_machines);
	uint *task_index = (uint *) malloc(mem_size_task_index);

	float aux;
	for (int i = 0; i < t; i++) {
		for (int j = 0; j < m; j++) {
			int a = scanf("%f", &aux);
			task_index[i * m + j] = j;
			machines[i * m + j] = aux;
		}
	}

	print(machines, t, m);
	print(task_index, t, m);

	cudaEvent_t start, stop;
	cudaEventCreate(&start);
	cudaEventCreate(&stop);

	uint *d_task_index, *d_task_index_out;
	float *d_machines, *d_machines_out;

	cudaTest(cudaMalloc((void **) &d_machines, mem_size_machines));
	cudaTest(cudaMalloc((void **) &d_machines_out, mem_size_machines));
	cudaTest(cudaMalloc((void **) &d_task_index, mem_size_task_index));
	cudaTest(cudaMalloc((void **) &d_task_index_out, mem_size_task_index));

	// copy host memory to device
	cudaTest(cudaMemcpy(d_machines, machines, mem_size_machines, cudaMemcpyHostToDevice));
	cudaTest(cudaMemcpy(d_task_index, task_index, mem_size_task_index, cudaMemcpyHostToDevice));

	cudaEventRecord(start);
	dim3 block(BLOCK_SIZE, BLOCK_SIZE, 1);
	dim3 grid(m / BLOCK_SIZE, t / BLOCK_SIZE, 1);
	// launch the transpose kernel defined above; the launch configuration
	// takes the grid dimensions first and the block dimensions second
	transpose<<<grid, block>>>(d_machines, d_machines_out, d_task_index,
			d_task_index_out, t, m);
	cudaEventRecord(stop);

	cudaError_t errSync = cudaGetLastError();
	cudaError_t errAsync = cudaDeviceSynchronize();
	if (errSync != cudaSuccess)
		printf("Sync kernel error: %s\n", cudaGetErrorString(errSync));
	if (errAsync != cudaSuccess)
		printf("Async kernel error: %s\n", cudaGetErrorString(errAsync));

	if (ELAPSED_TIME == 1) {
		cudaEventSynchronize(stop);
		float milliseconds = 0;
		cudaEventElapsedTime(&milliseconds, start, stop);
		std::cout << milliseconds << "\n";
	}

	cudaDeviceSynchronize();

	cudaTest(cudaMemcpy(machines, d_machines_out, mem_size_machines, cudaMemcpyDeviceToHost));
	cudaTest(cudaMemcpy(task_index, d_task_index_out, mem_size_task_index, cudaMemcpyDeviceToHost));

	cudaFree(d_machines);
	cudaFree(d_machines_out);
	cudaFree(d_task_index);
	cudaFree(d_task_index_out);

	if (ELAPSED_TIME != 1) {
		print(machines, m, t);
		print(task_index, m, t);
	}

	free(machines);
	free(task_index);

	return 0;
}
870f6b9ff1a72870f87592ce8954d41e67d9207a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define GPU __HIPCC__ #include <string> #ifdef GPU # include <hip/hip_runtime.h> #else # include <omp.h> # include <math.h> # include <ctime> #endif // // parameters // #ifdef GPU # define nThreads 64 # define nBlocks 112 # define WARP 32 #else # define nThreads 1 # define nBlocks 1 # define WARP 1 #endif #define nLoop 16 #define nWarps (nThreads / WARP) #define TT_SIZE 4194304 #define UCTK 0.44f #define FPU 1.10f // // printf // #ifdef GPU # include "cuPrintf.cu" # define print(format, ...) cuPrintf(format, __VA_ARGS__) #else # define print(format, ...) printf(format, __VA_ARGS__) #endif // // locks // #ifdef GPU # define LOCK int # define l_create(x) ((x) = 0) # define l_trylock(x) (atomicExch(&(x),1)) # define l_lock(x) while(l_trylock(x) != 0); # define l_unlock(x) (atomicExch(&(x),0)) # define l_add(x,v) (atomicAdd(&x,v)) # define l_sub(x,v) (atomicSub(&x,v)) # define l_barrier() __syncthreads() #else # define LOCK omp_lock_t # define l_create(x) omp_init_lock(&x) # define l_trylock(x) omp_test_lock(&x) # define l_lock(x) omp_set_lock(&x) # define l_unlock(x) omp_unset_lock(&x) template <class T> inline void l_add(T x,T v) { #pragma omp atomic x+=v; } template <class T> inline void l_sub(T x,T v) { #pragma omp atomic x-=v; } inline void l_barrier() { #pragma omp barrier } #endif // // undef cuda specific code // #ifndef GPU # undef __host__ # undef __device__ # undef __global__ # undef __shared__ # undef __constant__ # define __host__ # define __device__ # define __global__ # define __shared__ # define __constant__ #if defined (__GNUC__) # define __align__(x) __attribute__ ((aligned(x))) #else # define __align__(x) __declspec(align(x)) #endif #endif // // types // #ifdef _MSC_VER typedef unsigned __int64 U64; typedef unsigned int U32; # define U64(x) (x##ui64) # define FMTU64 "0x%016I64x" #else # include <inttypes.h> typedef uint64_t U64; typedef uint32_t U32; # define U64(x) (x##ull) # define FMTU64 "0x%016llx" #endif // // define board game // typedef U64 MOVE; struct BOARD { U64 wpawns; U64 all; U32 randn; char player; char emptyc; U32 playout(const BOARD&); void make_random_move(); bool is_white_win(); __device__ __host__ void clear() { wpawns = 0; all = U64(0xffffffffffffffff); emptyc = 64; player = 0; } __host__ __device__ void copy(const BOARD& b) { wpawns = b.wpawns; all = b.all; player = b.player; emptyc = b.emptyc; } __device__ __host__ void do_move(const MOVE& move) { all ^= move; if(player == 0) wpawns ^= move; player ^= 1; emptyc--; } __device__ __host__ void seed(int sd) { randn = sd; } __device__ __host__ U32 rand() { randn *= 214013; randn += 2531011; return ((randn >> 16) & 0x7fff); } __device__ __host__ U64 rand64() { return((U64)rand()) ^ ((U64)rand() << 15) ^ ((U64)rand() << 30) ^ ((U64)rand() << 45) ^ ((U64)rand() << 60); } }; __device__ __host__ void BOARD::make_random_move() { U32 rbit = rand() % emptyc; U64 mbit = all; for(U32 i = 0;i < rbit;i++) mbit &= mbit - 1; mbit = mbit & -mbit; if(player == 0) wpawns ^= mbit; all ^= mbit; player ^= 1; emptyc--; } __device__ __host__ bool BOARD::is_white_win(){ U64 m = (wpawns & U64(0x00000000000000ff)),oldm; do { oldm = m; m |=((((m << 8) | (m >> 8)) | (((m << 9) | (m << 1)) & U64(0xfefefefefefefefe)) | (((m >> 9) | (m >> 1)) & U64(0x7f7f7f7f7f7f7f7f))) & wpawns ); if(m & U64(0xff00000000000000)) { return true; } } while(m != oldm); return false; } __device__ __host__ U32 BOARD::playout(const BOARD& b) { U32 wins = 0; for(U32 i = 0;i 
< nLoop;i++) { this->copy(b); while(emptyc > 0) make_random_move(); if(is_white_win()) wins++; } return wins; } __constant__ unsigned int index64[64]; __device__ __host__ unsigned int firstone(U64 bb) { unsigned int folded; bb ^= bb - 1; folded = (int) bb ^ (bb >> 32); return index64[folded * 0x78291ACF >> 26]; } // //sq to string and vice versa // #define file(x) ((x) & 7) #define rank(x) ((x) >> 3) #define SQ(x,y) (((x) << 3) + (y)) __device__ __host__ char* sq_str(const int& sq,char* s) { int f = file(sq); int r = rank(sq); *s++ = 'a' + (f); *s++ = '1' + (r); *s = 0; return s; } __host__ const char* str_sq(int& sq,const char* is) { const char* s = is; int f = tolower(*s++) - 'a'; int r = atoi(s++) - 1; sq = SQ(r,f); return s; } // // Node // struct Node { MOVE move; U32 uct_wins; U32 uct_visits; Node* parent; Node* child; Node* next; LOCK lock; int workers; __device__ __host__ void clear() { uct_wins = 0; uct_visits = 0; parent = 0; child = 0; next = 0; move = MOVE(); l_create(lock); workers = 0; } }; // // Table // namespace TABLE { __device__ Node* mem_; __device__ int tsize; __device__ BOARD root_board; __device__ Node* root_node; __device__ Node* head; __device__ int size; __device__ LOCK lock; Node* hmem_; __device__ Node* get_node() { if(size > 0) { l_lock(lock); if(size > 0) { size--; head++; head->clear(); } else head = 0; l_unlock(lock); return head; } else { return 0; } } __global__ void reset() { head = mem_; size = tsize; root_node = get_node(); } __global__ void print_tree(int depthLimit) { int depth = 0,max_depth = 0,average_depth = 0; int leaf_nodes = 0,total_nodes = 0; char str[4]; int sq; Node* current = root_node; while(current) { while(current) { while(current) { if(current->uct_visits && depth <= depthLimit) { for(int i = 0;i < depth;i++) print("\t"); sq = firstone(current->move); sq_str(sq,str); print("%d.%s %12d %12d %12.6f\n", depth,(const char*)str, current->uct_wins,current->uct_visits, float(current->uct_wins) / current->uct_visits ); } total_nodes++; if(current->child) { depth++; current = current->child; } else { if(depth > max_depth) max_depth = depth; average_depth += depth; leaf_nodes++; break; } } NEXT: if(current->next) { current = current->next; } else break; } if(current->parent) { depth--; current = current->parent; goto NEXT; } else { break; } } print("Total nodes : %d\n",total_nodes); print("Leaf nodes : %d\n",leaf_nodes); print("Maximum depth : %d\n",max_depth); print("Average depth : %.2f\n",average_depth / float(leaf_nodes)); } __device__ void create_children(BOARD* b,Node* n) { l_lock(n->lock); if(n->child) { l_unlock(n->lock); return; } Node* last = n; U64 m = b->all; U64 lsb; while(m) { lsb = m & -m; Node* node = get_node(); if(!node) break; node->move = lsb; node->parent = n; if(last == n) last->child = node; else last->next = node; last = node; m ^= lsb; } l_unlock(n->lock); } __device__ Node* UCT_select(Node* n) { Node* bnode = 0; Node* current = n->child; float bvalue = -1.f,value; float logn = logf(float(n->uct_visits + 1)); while(current) { if(current->uct_visits > 0) { value = UCTK * sqrtf(logn / (current->uct_visits + 1)) + (current->uct_wins + 1) / float(current->uct_visits + 1); } else { value = FPU; } value -= (current->workers / 128.f); if(value > bvalue) { bvalue = value; bnode = current; } current = current->next; } return bnode; } __host__ void allocate(int N) { static const unsigned int mindex64[64] = { 63, 30, 3, 32, 59, 14, 11, 33, 60, 24, 50, 9, 55, 19, 21, 34, 61, 29, 2, 53, 51, 23, 41, 18, 56, 28, 1, 43, 46, 27, 0, 35, 62, 
31, 58, 4, 5, 49, 54, 6, 15, 52, 12, 40, 7, 42, 45, 16, 25, 57, 48, 13, 10, 39, 8, 44, 20, 47, 38, 22, 17, 37, 36, 26 }; #ifdef GPU hipMalloc((void**) &hmem_,N * sizeof(Node)); hipMemcpyToSymbol(tsize,&N,sizeof(int)); hipMemcpyToSymbol(mem_,&hmem_,sizeof(Node*)); hipMemcpyToSymbol(index64,mindex64,sizeof(mindex64)); #else hmem_ = (Node*) malloc(N * sizeof(Node)); tsize = N; mem_ = hmem_; memcpy(index64,mindex64,sizeof(mindex64)); l_create(lock); #endif } __host__ void release() { #ifdef GPU hipFree(hmem_); #else free(hmem_); #endif } } // // playout // __global__ void playout(U32 N) { // // create blocks // #ifdef GPU { const int blockId = blockIdx.x; #else #pragma omp parallel num_threads(nBlocks) { const int blockId = omp_get_thread_num(); #endif // //shared data with in a block // __shared__ U32 cache[nThreads]; __shared__ BOARD sbw[nWarps]; __shared__ Node* nw[nWarps]; __shared__ bool finished; // // create threads and allocate a BOARD on register // #ifdef GPU { const int threadId = threadIdx.x; #else #pragma omp parallel num_threads(nThreads) { const int threadId = omp_get_thread_num(); print("Block %d : Thread %d of %d\n", blockId,threadId,nThreads); #endif BOARD b; BOARD& sb = sbw[threadId / WARP]; Node*& n = nw[threadId / WARP]; b.seed(blockId * nBlocks + threadId); finished = false; const int threadIdWarp = threadId & (WARP - 1); // //loop forever // while(true) { //get node if(threadIdWarp == 0) { n = TABLE::root_node; sb.copy(TABLE::root_board); while(n->child) { n = TABLE::UCT_select(n); sb.do_move(n->move); } if(n->uct_visits) { TABLE::create_children(&sb,n); Node* next = TABLE::UCT_select(n); if(next) { sb.do_move(next->move); n = next; } } l_add(n->workers,1); } //playout the position b.copy(sb); cache[threadId] = b.playout(sb); //update result if(threadIdWarp == 0) { l_sub(n->workers,1); U32 score = 0; for(int i = 0;i < WARP;i++) score += cache[threadId + i]; if(sb.player == 0) score = nLoop * WARP - score; Node* current = n; while(current) { l_lock(current->lock); current->uct_wins += score; current->uct_visits += nLoop * WARP; l_unlock(current->lock); score = nLoop * WARP - score; current = current->parent; } if(TABLE::root_node->uct_visits >= N) finished = true; } if(finished) break; } // // end of work // } } } // // GPU specific code // #ifdef GPU __host__ void simulate(BOARD* b,U32 N) { hipMemcpyToSymbol(TABLE::root_board,b, sizeof(BOARD),0,hipMemcpyHostToDevice); hipLaunchKernelGGL(( TABLE::reset) , dim3(1),dim3(1), 0, 0, ); hipLaunchKernelGGL(( playout) , dim3(nBlocks),dim3(nThreads), 0, 0, N); hipDeviceSynchronize(); hipLaunchKernelGGL(( TABLE::print_tree) , dim3(1),dim3(1), 0, 0, 1); cudaPrintfDisplay(); printf("Errors: %s\n", hipGetErrorString(hipPeekAtLastError())); } __host__ void init_device() { int count; hipDeviceProp_t prop; hipGetDeviceCount( &count ); for (int i=0; i< count; i++) { hipGetDeviceProperties( &prop, i ); printf( " --- General Information for device %d ---\n", i ); printf( "Name: %s\n", prop.name ); printf( "Compute capability: %d.%d\n", prop.major, prop.minor ); printf( "Clock rate: %d\n", prop.clockRate ); printf( "Device copy overlap: " ); if (prop.deviceOverlap) printf( "Enabled\n" ); else printf( "Disabled\n" ); printf( "Kernel execition timeout : " ); if (prop.kernelExecTimeoutEnabled) printf( "Enabled\n" ); else printf( "Disabled\n" ); printf( " --- Memory Information for device %d ---\n", i ); printf( "Total global mem: %ld\n", prop.totalGlobalMem ); printf( "Total constant Mem: %ld\n", prop.totalConstMem ); printf( "Max mem pitch: 
%ld\n", prop.memPitch ); printf( "Texture Alignment: %ld\n", prop.textureAlignment ); printf( " --- MP Information for device %d ---\n", i ); printf( "Multiprocessor count: %d\n", prop.multiProcessorCount ); printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock ); printf( "Registers per mp: %d\n", prop.regsPerBlock ); printf( "Threads in warp: %d\n", prop.warpSize ); printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock ); printf( "Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] ); printf( "Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] ); printf( "\n" ); } printf("nBlocks=%d X nThreads=%d\n",nBlocks,nThreads); cudaPrintfInit(); TABLE::allocate(TT_SIZE); } __host__ void finalize_device() { cudaPrintfEnd(); TABLE::release(); } #else // // Cpu specific code // __host__ void simulate(BOARD* bo,U32 N) { TABLE::root_board = *bo; TABLE::reset(); playout(N); TABLE::print_tree(1); } __host__ void init_device() { omp_set_nested(1); omp_set_dynamic(0); TABLE::allocate(TT_SIZE); } __host__ void finalize_device() { TABLE::release(); } #endif // // Test // __host__ void print_bitboard(U64 b) { std::string s = ""; for(int i=7;i>=0;i--) { for(int z = 0; z < 7-i;z++) s += " "; for(int j=0;j<8;j++) { U64 m = (((U64)1) << (i * 8 + j)); if(b & m) s += "1 "; else s += "0 "; } s += "\n"; } printf("%s",s.c_str()); printf("\n"FMTU64"\n\n",b); } static const char *const commands_recognized[] = { "d", "go", "quit", "help", NULL }; int main() { init_device(); char str[64]; BOARD b; b.clear(); printf("\nType <help> for a list of commands.\n\n"); while(true) { printf("$: "); scanf("%s",&str); if(!strcmp(str,"d")) { print_bitboard(b.wpawns); print_bitboard(b.all); } else if(!strcmp(str,"help")) { size_t index = 0; while (commands_recognized[index]) { puts(commands_recognized[index]); index++; } } else if(!strcmp(str,"go")) { clock_t start,end; start = clock(); simulate(&b,128 * 28 * 128 * 100); end = clock(); printf("time %d\n",end - start); } else if(!strcmp(str,"quit")) { break; } else { int move; str_sq(move,str); b.do_move((U64(1) << move)); } } finalize_device(); } // // end //
870f6b9ff1a72870f87592ce8954d41e67d9207a.cu
#define GPU __CUDACC__ #include <string> #ifdef GPU # include <cuda.h> #else # include <omp.h> # include <math.h> # include <ctime> #endif // // parameters // #ifdef GPU # define nThreads 64 # define nBlocks 112 # define WARP 32 #else # define nThreads 1 # define nBlocks 1 # define WARP 1 #endif #define nLoop 16 #define nWarps (nThreads / WARP) #define TT_SIZE 4194304 #define UCTK 0.44f #define FPU 1.10f // // printf // #ifdef GPU # include "cuPrintf.cu" # define print(format, ...) cuPrintf(format, __VA_ARGS__) #else # define print(format, ...) printf(format, __VA_ARGS__) #endif // // locks // #ifdef GPU # define LOCK int # define l_create(x) ((x) = 0) # define l_trylock(x) (atomicExch(&(x),1)) # define l_lock(x) while(l_trylock(x) != 0); # define l_unlock(x) (atomicExch(&(x),0)) # define l_add(x,v) (atomicAdd(&x,v)) # define l_sub(x,v) (atomicSub(&x,v)) # define l_barrier() __syncthreads() #else # define LOCK omp_lock_t # define l_create(x) omp_init_lock(&x) # define l_trylock(x) omp_test_lock(&x) # define l_lock(x) omp_set_lock(&x) # define l_unlock(x) omp_unset_lock(&x) template <class T> inline void l_add(T x,T v) { #pragma omp atomic x+=v; } template <class T> inline void l_sub(T x,T v) { #pragma omp atomic x-=v; } inline void l_barrier() { #pragma omp barrier } #endif // // undef cuda specific code // #ifndef GPU # undef __host__ # undef __device__ # undef __global__ # undef __shared__ # undef __constant__ # define __host__ # define __device__ # define __global__ # define __shared__ # define __constant__ #if defined (__GNUC__) # define __align__(x) __attribute__ ((aligned(x))) #else # define __align__(x) __declspec(align(x)) #endif #endif // // types // #ifdef _MSC_VER typedef unsigned __int64 U64; typedef unsigned int U32; # define U64(x) (x##ui64) # define FMTU64 "0x%016I64x" #else # include <inttypes.h> typedef uint64_t U64; typedef uint32_t U32; # define U64(x) (x##ull) # define FMTU64 "0x%016llx" #endif // // define board game // typedef U64 MOVE; struct BOARD { U64 wpawns; U64 all; U32 randn; char player; char emptyc; U32 playout(const BOARD&); void make_random_move(); bool is_white_win(); __device__ __host__ void clear() { wpawns = 0; all = U64(0xffffffffffffffff); emptyc = 64; player = 0; } __host__ __device__ void copy(const BOARD& b) { wpawns = b.wpawns; all = b.all; player = b.player; emptyc = b.emptyc; } __device__ __host__ void do_move(const MOVE& move) { all ^= move; if(player == 0) wpawns ^= move; player ^= 1; emptyc--; } __device__ __host__ void seed(int sd) { randn = sd; } __device__ __host__ U32 rand() { randn *= 214013; randn += 2531011; return ((randn >> 16) & 0x7fff); } __device__ __host__ U64 rand64() { return((U64)rand()) ^ ((U64)rand() << 15) ^ ((U64)rand() << 30) ^ ((U64)rand() << 45) ^ ((U64)rand() << 60); } }; __device__ __host__ void BOARD::make_random_move() { U32 rbit = rand() % emptyc; U64 mbit = all; for(U32 i = 0;i < rbit;i++) mbit &= mbit - 1; mbit = mbit & -mbit; if(player == 0) wpawns ^= mbit; all ^= mbit; player ^= 1; emptyc--; } __device__ __host__ bool BOARD::is_white_win(){ U64 m = (wpawns & U64(0x00000000000000ff)),oldm; do { oldm = m; m |=((((m << 8) | (m >> 8)) | (((m << 9) | (m << 1)) & U64(0xfefefefefefefefe)) | (((m >> 9) | (m >> 1)) & U64(0x7f7f7f7f7f7f7f7f))) & wpawns ); if(m & U64(0xff00000000000000)) { return true; } } while(m != oldm); return false; } __device__ __host__ U32 BOARD::playout(const BOARD& b) { U32 wins = 0; for(U32 i = 0;i < nLoop;i++) { this->copy(b); while(emptyc > 0) make_random_move(); if(is_white_win()) wins++; } 
return wins; } __constant__ unsigned int index64[64]; __device__ __host__ unsigned int firstone(U64 bb) { unsigned int folded; bb ^= bb - 1; folded = (int) bb ^ (bb >> 32); return index64[folded * 0x78291ACF >> 26]; } // //sq to string and vice versa // #define file(x) ((x) & 7) #define rank(x) ((x) >> 3) #define SQ(x,y) (((x) << 3) + (y)) __device__ __host__ char* sq_str(const int& sq,char* s) { int f = file(sq); int r = rank(sq); *s++ = 'a' + (f); *s++ = '1' + (r); *s = 0; return s; } __host__ const char* str_sq(int& sq,const char* is) { const char* s = is; int f = tolower(*s++) - 'a'; int r = atoi(s++) - 1; sq = SQ(r,f); return s; } // // Node // struct Node { MOVE move; U32 uct_wins; U32 uct_visits; Node* parent; Node* child; Node* next; LOCK lock; int workers; __device__ __host__ void clear() { uct_wins = 0; uct_visits = 0; parent = 0; child = 0; next = 0; move = MOVE(); l_create(lock); workers = 0; } }; // // Table // namespace TABLE { __device__ Node* mem_; __device__ int tsize; __device__ BOARD root_board; __device__ Node* root_node; __device__ Node* head; __device__ int size; __device__ LOCK lock; Node* hmem_; __device__ Node* get_node() { if(size > 0) { l_lock(lock); if(size > 0) { size--; head++; head->clear(); } else head = 0; l_unlock(lock); return head; } else { return 0; } } __global__ void reset() { head = mem_; size = tsize; root_node = get_node(); } __global__ void print_tree(int depthLimit) { int depth = 0,max_depth = 0,average_depth = 0; int leaf_nodes = 0,total_nodes = 0; char str[4]; int sq; Node* current = root_node; while(current) { while(current) { while(current) { if(current->uct_visits && depth <= depthLimit) { for(int i = 0;i < depth;i++) print("\t"); sq = firstone(current->move); sq_str(sq,str); print("%d.%s %12d %12d %12.6f\n", depth,(const char*)str, current->uct_wins,current->uct_visits, float(current->uct_wins) / current->uct_visits ); } total_nodes++; if(current->child) { depth++; current = current->child; } else { if(depth > max_depth) max_depth = depth; average_depth += depth; leaf_nodes++; break; } } NEXT: if(current->next) { current = current->next; } else break; } if(current->parent) { depth--; current = current->parent; goto NEXT; } else { break; } } print("Total nodes : %d\n",total_nodes); print("Leaf nodes : %d\n",leaf_nodes); print("Maximum depth : %d\n",max_depth); print("Average depth : %.2f\n",average_depth / float(leaf_nodes)); } __device__ void create_children(BOARD* b,Node* n) { l_lock(n->lock); if(n->child) { l_unlock(n->lock); return; } Node* last = n; U64 m = b->all; U64 lsb; while(m) { lsb = m & -m; Node* node = get_node(); if(!node) break; node->move = lsb; node->parent = n; if(last == n) last->child = node; else last->next = node; last = node; m ^= lsb; } l_unlock(n->lock); } __device__ Node* UCT_select(Node* n) { Node* bnode = 0; Node* current = n->child; float bvalue = -1.f,value; float logn = logf(float(n->uct_visits + 1)); while(current) { if(current->uct_visits > 0) { value = UCTK * sqrtf(logn / (current->uct_visits + 1)) + (current->uct_wins + 1) / float(current->uct_visits + 1); } else { value = FPU; } value -= (current->workers / 128.f); if(value > bvalue) { bvalue = value; bnode = current; } current = current->next; } return bnode; } __host__ void allocate(int N) { static const unsigned int mindex64[64] = { 63, 30, 3, 32, 59, 14, 11, 33, 60, 24, 50, 9, 55, 19, 21, 34, 61, 29, 2, 53, 51, 23, 41, 18, 56, 28, 1, 43, 46, 27, 0, 35, 62, 31, 58, 4, 5, 49, 54, 6, 15, 52, 12, 40, 7, 42, 45, 16, 25, 57, 48, 13, 10, 39, 8, 44, 20, 47, 
38, 22, 17, 37, 36, 26 }; #ifdef GPU cudaMalloc((void**) &hmem_,N * sizeof(Node)); cudaMemcpyToSymbol(tsize,&N,sizeof(int)); cudaMemcpyToSymbol(mem_,&hmem_,sizeof(Node*)); cudaMemcpyToSymbol(index64,mindex64,sizeof(mindex64)); #else hmem_ = (Node*) malloc(N * sizeof(Node)); tsize = N; mem_ = hmem_; memcpy(index64,mindex64,sizeof(mindex64)); l_create(lock); #endif } __host__ void release() { #ifdef GPU cudaFree(hmem_); #else free(hmem_); #endif } } // // playout // __global__ void playout(U32 N) { // // create blocks // #ifdef GPU { const int blockId = blockIdx.x; #else #pragma omp parallel num_threads(nBlocks) { const int blockId = omp_get_thread_num(); #endif // //shared data with in a block // __shared__ U32 cache[nThreads]; __shared__ BOARD sbw[nWarps]; __shared__ Node* nw[nWarps]; __shared__ bool finished; // // create threads and allocate a BOARD on register // #ifdef GPU { const int threadId = threadIdx.x; #else #pragma omp parallel num_threads(nThreads) { const int threadId = omp_get_thread_num(); print("Block %d : Thread %d of %d\n", blockId,threadId,nThreads); #endif BOARD b; BOARD& sb = sbw[threadId / WARP]; Node*& n = nw[threadId / WARP]; b.seed(blockId * nBlocks + threadId); finished = false; const int threadIdWarp = threadId & (WARP - 1); // //loop forever // while(true) { //get node if(threadIdWarp == 0) { n = TABLE::root_node; sb.copy(TABLE::root_board); while(n->child) { n = TABLE::UCT_select(n); sb.do_move(n->move); } if(n->uct_visits) { TABLE::create_children(&sb,n); Node* next = TABLE::UCT_select(n); if(next) { sb.do_move(next->move); n = next; } } l_add(n->workers,1); } //playout the position b.copy(sb); cache[threadId] = b.playout(sb); //update result if(threadIdWarp == 0) { l_sub(n->workers,1); U32 score = 0; for(int i = 0;i < WARP;i++) score += cache[threadId + i]; if(sb.player == 0) score = nLoop * WARP - score; Node* current = n; while(current) { l_lock(current->lock); current->uct_wins += score; current->uct_visits += nLoop * WARP; l_unlock(current->lock); score = nLoop * WARP - score; current = current->parent; } if(TABLE::root_node->uct_visits >= N) finished = true; } if(finished) break; } // // end of work // } } } // // GPU specific code // #ifdef GPU __host__ void simulate(BOARD* b,U32 N) { cudaMemcpyToSymbol(TABLE::root_board,b, sizeof(BOARD),0,cudaMemcpyHostToDevice); TABLE::reset <<<1,1>>> (); playout <<<nBlocks,nThreads>>> (N); cudaThreadSynchronize(); TABLE::print_tree <<<1,1>>> (1); cudaPrintfDisplay(); printf("Errors: %s\n", cudaGetErrorString(cudaPeekAtLastError())); } __host__ void init_device() { int count; cudaDeviceProp prop; cudaGetDeviceCount( &count ); for (int i=0; i< count; i++) { cudaGetDeviceProperties( &prop, i ); printf( " --- General Information for device %d ---\n", i ); printf( "Name: %s\n", prop.name ); printf( "Compute capability: %d.%d\n", prop.major, prop.minor ); printf( "Clock rate: %d\n", prop.clockRate ); printf( "Device copy overlap: " ); if (prop.deviceOverlap) printf( "Enabled\n" ); else printf( "Disabled\n" ); printf( "Kernel execition timeout : " ); if (prop.kernelExecTimeoutEnabled) printf( "Enabled\n" ); else printf( "Disabled\n" ); printf( " --- Memory Information for device %d ---\n", i ); printf( "Total global mem: %ld\n", prop.totalGlobalMem ); printf( "Total constant Mem: %ld\n", prop.totalConstMem ); printf( "Max mem pitch: %ld\n", prop.memPitch ); printf( "Texture Alignment: %ld\n", prop.textureAlignment ); printf( " --- MP Information for device %d ---\n", i ); printf( "Multiprocessor count: %d\n", 
prop.multiProcessorCount ); printf( "Shared mem per mp: %ld\n", prop.sharedMemPerBlock ); printf( "Registers per mp: %d\n", prop.regsPerBlock ); printf( "Threads in warp: %d\n", prop.warpSize ); printf( "Max threads per block: %d\n", prop.maxThreadsPerBlock ); printf( "Max thread dimensions: (%d, %d, %d)\n", prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2] ); printf( "Max grid dimensions: (%d, %d, %d)\n", prop.maxGridSize[0], prop.maxGridSize[1], prop.maxGridSize[2] ); printf( "\n" ); } printf("nBlocks=%d X nThreads=%d\n",nBlocks,nThreads); cudaPrintfInit(); TABLE::allocate(TT_SIZE); } __host__ void finalize_device() { cudaPrintfEnd(); TABLE::release(); } #else // // Cpu specific code // __host__ void simulate(BOARD* bo,U32 N) { TABLE::root_board = *bo; TABLE::reset(); playout(N); TABLE::print_tree(1); } __host__ void init_device() { omp_set_nested(1); omp_set_dynamic(0); TABLE::allocate(TT_SIZE); } __host__ void finalize_device() { TABLE::release(); } #endif // // Test // __host__ void print_bitboard(U64 b) { std::string s = ""; for(int i=7;i>=0;i--) { for(int z = 0; z < 7-i;z++) s += " "; for(int j=0;j<8;j++) { U64 m = (((U64)1) << (i * 8 + j)); if(b & m) s += "1 "; else s += "0 "; } s += "\n"; } printf("%s",s.c_str()); printf("\n" FMTU64 "\n\n",b); } static const char *const commands_recognized[] = { "d", "go", "quit", "help", NULL }; int main() { init_device(); char str[64]; BOARD b; b.clear(); printf("\nType <help> for a list of commands.\n\n"); while(true) { printf("$: "); scanf("%63s",str); if(!strcmp(str,"d")) { print_bitboard(b.wpawns); print_bitboard(b.all); } else if(!strcmp(str,"help")) { size_t index = 0; while (commands_recognized[index]) { puts(commands_recognized[index]); index++; } } else if(!strcmp(str,"go")) { clock_t start,end; start = clock(); simulate(&b,128 * 28 * 128 * 100); end = clock(); printf("time %d\n",(int)(end - start)); } else if(!strcmp(str,"quit")) { break; } else { int move; str_sq(move,str); b.do_move((U64(1) << move)); } } finalize_device(); } // // end //
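// A standalone restatement of the child-scoring rule used by TABLE::UCT_select above,
// spelled out with named inputs for readability. It is only an illustrative sketch: it
// assumes the same UCTK (exploration constant) and FPU (first-play urgency) constants
// the engine already defines, and it is not called anywhere by the engine itself.
__device__ __host__ inline float uct_score(unsigned int parent_visits,
                                           unsigned int child_visits,
                                           unsigned int child_wins,
                                           int child_workers) {
    float value;
    if (child_visits > 0) {
        const float logn = logf(float(parent_visits + 1));
        value = UCTK * sqrtf(logn / (child_visits + 1))        // exploration term
              + (child_wins + 1) / float(child_visits + 1);    // exploitation term
    } else {
        value = FPU;                                           // unvisited move
    }
    return value - (child_workers / 128.f);                    // virtual loss for nodes other warps are working on
}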
2906d9c93077f65c53bb8417dba28afb530fffae.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "cuda_utils.h" #include "batch_knn.h" // input: query(b, n, c), reference(b, m, c) // output: idxs(b, n, npoints), dists(b, n, npoints) __global__ void batch_knn_kernel(int b, int n, int c, int m, int npoints, const float *__restrict__ query, const float *__restrict__ reference, int *__restrict__ idxs, float *__restrict__ dists, float *__restrict__ temp) { const int batch_index = blockIdx.x; query += batch_index * n * c; reference += batch_index * m * c; idxs += batch_index * n * npoints; dists += batch_index * n * npoints; temp += batch_index * n * m; for (int i = threadIdx.x; i < n; i += blockDim.x) { const float *q = query + i * c; float *d = temp + i * m; for (int j = 0; j < m; ++j) { const float *r = reference + j * c; d[j] = 0.; for (int k = 0; k < c; ++k) { d[j] += (q[k] - r[k]) * (q[k] - r[k]); } } for (int k = 0; k < npoints; ++k) { int besti = -1; float best_dist = 1e5; for (int j = 0; j < m; ++j) { if(d[j] < best_dist) { besti = j; best_dist = d[j]; } } d[besti] = 1e6; idxs[i * npoints + k] = besti; dists[i * npoints + k] = best_dist; } } } void batch_knn_kernel_wrapper(int b, int n, int c, int m, int npoints, const float *query, const float *reference, int *idxs, float *dists, float *temp) { hipError_t err; hipLaunchKernelGGL(( batch_knn_kernel), dim3(b), dim3(opt_n_threads(n)), 0, 0, b, n, c, m, npoints, query, reference, idxs, dists, temp); err = hipGetLastError(); if (hipSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", hipGetErrorString(err)); exit(-1); } }
2906d9c93077f65c53bb8417dba28afb530fffae.cu
#include "cuda_utils.h" #include "batch_knn.h" // input: query(b, n, c), reference(b, m, c) // output: idxs(b, n, npoints), dists(b, n, npoints) __global__ void batch_knn_kernel(int b, int n, int c, int m, int npoints, const float *__restrict__ query, const float *__restrict__ reference, int *__restrict__ idxs, float *__restrict__ dists, float *__restrict__ temp) { const int batch_index = blockIdx.x; query += batch_index * n * c; reference += batch_index * m * c; idxs += batch_index * n * npoints; dists += batch_index * n * npoints; temp += batch_index * n * m; for (int i = threadIdx.x; i < n; i += blockDim.x) { const float *q = query + i * c; float *d = temp + i * m; for (int j = 0; j < m; ++j) { const float *r = reference + j * c; d[j] = 0.; for (int k = 0; k < c; ++k) { d[j] += (q[k] - r[k]) * (q[k] - r[k]); } } for (int k = 0; k < npoints; ++k) { int besti = -1; float best_dist = 1e5; for (int j = 0; j < m; ++j) { if(d[j] < best_dist) { besti = j; best_dist = d[j]; } } d[besti] = 1e6; idxs[i * npoints + k] = besti; dists[i * npoints + k] = best_dist; } } } void batch_knn_kernel_wrapper(int b, int n, int c, int m, int npoints, const float *query, const float *reference, int *idxs, float *dists, float *temp) { cudaError_t err; batch_knn_kernel<<<b, opt_n_threads(n)>>>(b, n, c, m, npoints, query, reference, idxs, dists, temp); err = cudaGetLastError(); if (cudaSuccess != err) { fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); exit(-1); } }
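// A minimal host-side sketch of driving batch_knn_kernel_wrapper above. Buffer sizes
// follow the layout documented in the header comment: query(b, n, c), reference(b, m, c),
// idxs/dists(b, n, npoints), plus a temp buffer of b*n*m squared distances. The function
// name run_batch_knn_example is illustrative only; the wrapper declaration is assumed to
// come from batch_knn.h.
#include <cuda_runtime.h>
#include "batch_knn.h"

void run_batch_knn_example(int b, int n, int c, int m, int npoints,
                           const float *h_query, const float *h_reference,
                           int *h_idxs, float *h_dists) {
    float *d_query, *d_reference, *d_dists, *d_temp;
    int *d_idxs;
    cudaMalloc(&d_query,     sizeof(float) * b * n * c);
    cudaMalloc(&d_reference, sizeof(float) * b * m * c);
    cudaMalloc(&d_idxs,      sizeof(int)   * b * n * npoints);
    cudaMalloc(&d_dists,     sizeof(float) * b * n * npoints);
    cudaMalloc(&d_temp,      sizeof(float) * b * n * m);   // scratch: all pairwise squared distances
    cudaMemcpy(d_query, h_query, sizeof(float) * b * n * c, cudaMemcpyHostToDevice);
    cudaMemcpy(d_reference, h_reference, sizeof(float) * b * m * c, cudaMemcpyHostToDevice);
    // one block per batch element; thread count is chosen by opt_n_threads(n) inside the wrapper
    batch_knn_kernel_wrapper(b, n, c, m, npoints, d_query, d_reference, d_idxs, d_dists, d_temp);
    cudaMemcpy(h_idxs, d_idxs, sizeof(int) * b * n * npoints, cudaMemcpyDeviceToHost);
    cudaMemcpy(h_dists, d_dists, sizeof(float) * b * n * npoints, cudaMemcpyDeviceToHost);
    cudaFree(d_query); cudaFree(d_reference); cudaFree(d_idxs); cudaFree(d_dists); cudaFree(d_temp);
}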
c303e1c7342345ce37aa53be8f2f94024cdebf4f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <thrust/copy.h> #include <thrust/device_vector.h> #include <thrust/reduce.h> #include <thrust/scan.h> #include <thrust/sequence.h> #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/gpu/graph_reindex_funcs.h" #include "paddle/phi/kernels/graph_reindex_kernel.h" namespace phi { constexpr int WARP_SIZE = 32; template <typename T, typename Context> void FillHashTable(const Context& dev_ctx, const T* input, int num_input, int64_t len_hashtable, thrust::device_vector<T>* unique_items, T* keys, int* values, int* key_index) { #ifdef PADDLE_WITH_HIP int block = 256; #else int block = 1024; #endif int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0]; int grid_tmp = (num_input + block - 1) / block; int grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx; // Insert data into keys and values. hipLaunchKernelGGL(( BuildHashTable<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(), input, num_input, len_hashtable, keys, key_index); // Get item index count. thrust::device_vector<int> item_count(num_input + 1, 0); hipLaunchKernelGGL(( GetItemIndexCount<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(), input, thrust::raw_pointer_cast(item_count.data()), num_input, len_hashtable, keys, key_index); thrust::exclusive_scan( item_count.begin(), item_count.end(), item_count.begin()); size_t total_unique_items = item_count[num_input]; unique_items->resize(total_unique_items); // Get unique items hipLaunchKernelGGL(( FillUniqueItems<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(), input, num_input, len_hashtable, thrust::raw_pointer_cast(unique_items->data()), thrust::raw_pointer_cast(item_count.data()), keys, values, key_index); } template <typename T, typename Context> void FillBufferHashTable(const Context& dev_ctx, const T* input, int num_input, thrust::device_vector<T>* unique_items, int* values, int* key_index) { #ifdef PADDLE_WITH_HIP int block = 256; #else int block = 1024; #endif int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0]; int grid_tmp = (num_input + block - 1) / block; int grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx; // Insert data. hipLaunchKernelGGL(( BuildHashTable<T>) , dim3(grid), dim3(block), 0, dev_ctx.stream(), input, num_input, key_index); // Get item index count. 
thrust::device_vector<int> item_count(num_input + 1, 0); hipLaunchKernelGGL(( GetItemIndexCount<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(), input, thrust::raw_pointer_cast(item_count.data()), num_input, key_index); thrust::exclusive_scan( item_count.begin(), item_count.end(), item_count.begin()); size_t total_unique_items = item_count[num_input]; unique_items->resize(total_unique_items); // Get unique items hipLaunchKernelGGL(( FillUniqueItems<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(), input, num_input, thrust::raw_pointer_cast(unique_items->data()), thrust::raw_pointer_cast(item_count.data()), values, key_index); } template <typename T, typename Context> void ResetBufferHashTable(const Context& dev_ctx, const T* input, int num_input, thrust::device_vector<T>* unique_items, int* values, int* key_index) { #ifdef PADDLE_WITH_HIP int block = 256; #else int block = 1024; #endif int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0]; int grid_tmp = (unique_items->size() + block - 1) / block; int grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx; hipLaunchKernelGGL(( ResetHashTable<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(), thrust::raw_pointer_cast(unique_items->data()), unique_items->size(), key_index, values); } template <typename T, typename Context> void Reindex(const Context& dev_ctx, const T* inputs, thrust::device_ptr<T> src_outputs, thrust::device_vector<T>* out_nodes, int num_inputs, int num_edges) { out_nodes->resize(num_inputs + num_edges); thrust::copy(inputs, inputs + num_inputs, out_nodes->begin()); thrust::copy( src_outputs, src_outputs + num_edges, out_nodes->begin() + num_inputs); thrust::device_vector<T> unique_nodes; unique_nodes.clear(); // Fill hash table int64_t num = out_nodes->size(); int64_t log_num = 1 << static_cast<size_t>(1 + std::log2(num >> 1)); int64_t table_size = log_num << 1; T* keys; int *values, *key_index; #ifdef PADDLE_WITH_HIP hipMalloc(&keys, table_size * sizeof(T)); hipMalloc(&values, table_size * sizeof(int)); hipMalloc(&key_index, table_size * sizeof(int)); hipMemset(keys, -1, table_size * sizeof(T)); hipMemset(values, -1, table_size * sizeof(int)); hipMemset(key_index, -1, table_size * sizeof(int)); #else hipMalloc(&keys, table_size * sizeof(T)); hipMalloc(&values, table_size * sizeof(int)); hipMalloc(&key_index, table_size * sizeof(int)); hipMemset(keys, -1, table_size * sizeof(T)); hipMemset(values, -1, table_size * sizeof(int)); hipMemset(key_index, -1, table_size * sizeof(int)); #endif FillHashTable<T, Context>(dev_ctx, thrust::raw_pointer_cast(out_nodes->data()), out_nodes->size(), table_size, &unique_nodes, keys, values, key_index); out_nodes->resize(unique_nodes.size()); thrust::copy(unique_nodes.begin(), unique_nodes.end(), out_nodes->begin()); // Fill outputs with reindex result. #ifdef PADDLE_WITH_HIP int block = 256; #else int block = 1024; #endif int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0]; int grid_tmp = (num_edges + block - 1) / block; int grid = grid_tmp < max_grid_dimx ? 
grid_tmp : max_grid_dimx; hipLaunchKernelGGL(( ReindexSrcOutput<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(), thrust::raw_pointer_cast(src_outputs), num_edges, table_size, keys, values); #ifdef PADDLE_WITH_HIP hipFree(keys); hipFree(values); hipFree(key_index); #else hipFree(keys); hipFree(values); hipFree(key_index); #endif } template <typename T, typename Context> void BufferReindex(const Context& dev_ctx, const T* inputs, thrust::device_ptr<T> src_outputs, thrust::device_vector<T>* out_nodes, int num_inputs, int* hashtable_value, int* hashtable_index, int num_edges) { out_nodes->resize(num_inputs + num_edges); thrust::copy(inputs, inputs + num_inputs, out_nodes->begin()); thrust::copy( src_outputs, src_outputs + num_edges, out_nodes->begin() + num_inputs); thrust::device_vector<T> unique_nodes; unique_nodes.clear(); // Fill hash table FillBufferHashTable<T, Context>(dev_ctx, thrust::raw_pointer_cast(out_nodes->data()), out_nodes->size(), &unique_nodes, hashtable_value, hashtable_index); out_nodes->resize(unique_nodes.size()); thrust::copy(unique_nodes.begin(), unique_nodes.end(), out_nodes->begin()); // Fill outputs with reindex result. #ifdef PADDLE_WITH_HIP int block = 256; #else int block = 1024; #endif int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0]; int grid_tmp = (num_edges + block - 1) / block; int grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx; hipLaunchKernelGGL(( ReindexSrcOutput<T>), dim3(grid), dim3(block), 0, dev_ctx.stream(), thrust::raw_pointer_cast(src_outputs), num_edges, hashtable_value); ResetBufferHashTable<T, Context>(dev_ctx, thrust::raw_pointer_cast(out_nodes->data()), out_nodes->size(), &unique_nodes, hashtable_value, hashtable_index); } template <typename T, int BLOCK_WARPS, int TILE_SIZE> __global__ void GetDstEdgeCUDAKernel(const int64_t num_rows, const int* in_rows, const int* dst_counts, const int* dst_ptr, T* dst_outputs) { assert(blockDim.x == WARP_SIZE); assert(blockDim.y == BLOCK_WARPS); int64_t out_row = blockIdx.x * TILE_SIZE + threadIdx.y; const int64_t last_row = min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows); while (out_row < last_row) { const int row = in_rows[out_row]; const int dst_sample_size = dst_counts[out_row]; const int out_row_start = dst_ptr[out_row]; for (int idx = threadIdx.x; idx < dst_sample_size; idx += WARP_SIZE) { dst_outputs[out_row_start + idx] = row; } out_row += BLOCK_WARPS; } } template <typename T, typename Context> void GraphReindexKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& neighbors, const DenseTensor& count, const paddle::optional<DenseTensor>& hashtable_value, const paddle::optional<DenseTensor>& hashtable_index, bool flag_buffer_hashtable, DenseTensor* reindex_src, DenseTensor* reindex_dst, DenseTensor* out_nodes) { const T* x_data = x.data<T>(); const T* neighbors_data = neighbors.data<T>(); const int* count_data = count.data<int>(); const int bs = x.dims()[0]; const int num_edges = neighbors.dims()[0]; reindex_src->Resize({num_edges}); T* reindex_src_data = dev_ctx.template Alloc<T>(reindex_src); thrust::device_ptr<T> src_outputs(reindex_src_data); thrust::device_vector<T> unique_nodes; thrust::copy(neighbors_data, neighbors_data + num_edges, src_outputs); if (flag_buffer_hashtable) { // Here we directly use buffer tensor to act as a hash table. 
DenseTensor hashtable_value_out(hashtable_value->type()); const auto* ph_value = hashtable_value.get_ptr(); hashtable_value_out.ShareDataWith(*ph_value); DenseTensor hashtable_index_out(hashtable_index->type()); const auto* ph_index = hashtable_index.get_ptr(); hashtable_index_out.ShareDataWith(*ph_index); int* hashtable_value_data = hashtable_value_out.mutable_data<int>(dev_ctx.GetPlace()); int* hashtable_index_data = hashtable_index_out.mutable_data<int>(dev_ctx.GetPlace()); BufferReindex<T, Context>(dev_ctx, x_data, src_outputs, &unique_nodes, bs, hashtable_value_data, hashtable_index_data, num_edges); } else { Reindex<T, Context>( dev_ctx, x_data, src_outputs, &unique_nodes, bs, num_edges); } // Get reindex dst edge. // Add support for multi-type edges reindex. int num_ac_count = count.dims()[0]; int num_edge_types = num_ac_count / bs; thrust::device_vector<int> unique_dst_reindex(bs); thrust::sequence(unique_dst_reindex.begin(), unique_dst_reindex.end()); constexpr int BLOCK_WARPS = 128 / WARP_SIZE; constexpr int TILE_SIZE = BLOCK_WARPS * 16; const dim3 block(WARP_SIZE, BLOCK_WARPS); const dim3 grid((bs + TILE_SIZE - 1) / TILE_SIZE); reindex_dst->Resize({num_edges}); T* reindex_dst_data = dev_ctx.template Alloc<T>(reindex_dst); int begin = 0; for (int i = 0; i < num_edge_types; i++) { thrust::device_vector<int> dst_ptr(bs); thrust::exclusive_scan( count_data + i * bs, count_data + (i + 1) * bs, dst_ptr.begin()); hipLaunchKernelGGL(( GetDstEdgeCUDAKernel<T, BLOCK_WARPS, TILE_SIZE>) , dim3(grid), dim3(block), 0, dev_ctx.stream(), bs, thrust::raw_pointer_cast(unique_dst_reindex.data()), count_data + i * bs, thrust::raw_pointer_cast(dst_ptr.data()), reindex_dst_data + begin); int count_i = thrust::reduce(thrust::device_pointer_cast(count_data) + i * bs, thrust::device_pointer_cast(count_data) + (i + 1) * bs); begin += count_i; } out_nodes->Resize({static_cast<int>(unique_nodes.size())}); T* out_nodes_data = dev_ctx.template Alloc<T>(out_nodes); thrust::copy(unique_nodes.begin(), unique_nodes.end(), out_nodes_data); } } // namespace phi PD_REGISTER_KERNEL( graph_reindex, GPU, ALL_LAYOUT, phi::GraphReindexKernel, int, int64_t) {}
c303e1c7342345ce37aa53be8f2f94024cdebf4f.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include <thrust/copy.h> #include <thrust/device_vector.h> #include <thrust/reduce.h> #include <thrust/scan.h> #include <thrust/sequence.h> #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/kernels/gpu/graph_reindex_funcs.h" #include "paddle/phi/kernels/graph_reindex_kernel.h" namespace phi { constexpr int WARP_SIZE = 32; template <typename T, typename Context> void FillHashTable(const Context& dev_ctx, const T* input, int num_input, int64_t len_hashtable, thrust::device_vector<T>* unique_items, T* keys, int* values, int* key_index) { #ifdef PADDLE_WITH_HIP int block = 256; #else int block = 1024; #endif int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0]; int grid_tmp = (num_input + block - 1) / block; int grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx; // Insert data into keys and values. BuildHashTable<T><<<grid, block, 0, dev_ctx.stream()>>>( input, num_input, len_hashtable, keys, key_index); // Get item index count. thrust::device_vector<int> item_count(num_input + 1, 0); GetItemIndexCount<T><<<grid, block, 0, dev_ctx.stream()>>>( input, thrust::raw_pointer_cast(item_count.data()), num_input, len_hashtable, keys, key_index); thrust::exclusive_scan( item_count.begin(), item_count.end(), item_count.begin()); size_t total_unique_items = item_count[num_input]; unique_items->resize(total_unique_items); // Get unique items FillUniqueItems<T><<<grid, block, 0, dev_ctx.stream()>>>( input, num_input, len_hashtable, thrust::raw_pointer_cast(unique_items->data()), thrust::raw_pointer_cast(item_count.data()), keys, values, key_index); } template <typename T, typename Context> void FillBufferHashTable(const Context& dev_ctx, const T* input, int num_input, thrust::device_vector<T>* unique_items, int* values, int* key_index) { #ifdef PADDLE_WITH_HIP int block = 256; #else int block = 1024; #endif int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0]; int grid_tmp = (num_input + block - 1) / block; int grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx; // Insert data. BuildHashTable<T> <<<grid, block, 0, dev_ctx.stream()>>>(input, num_input, key_index); // Get item index count. 
thrust::device_vector<int> item_count(num_input + 1, 0); GetItemIndexCount<T><<<grid, block, 0, dev_ctx.stream()>>>( input, thrust::raw_pointer_cast(item_count.data()), num_input, key_index); thrust::exclusive_scan( item_count.begin(), item_count.end(), item_count.begin()); size_t total_unique_items = item_count[num_input]; unique_items->resize(total_unique_items); // Get unique items FillUniqueItems<T><<<grid, block, 0, dev_ctx.stream()>>>( input, num_input, thrust::raw_pointer_cast(unique_items->data()), thrust::raw_pointer_cast(item_count.data()), values, key_index); } template <typename T, typename Context> void ResetBufferHashTable(const Context& dev_ctx, const T* input, int num_input, thrust::device_vector<T>* unique_items, int* values, int* key_index) { #ifdef PADDLE_WITH_HIP int block = 256; #else int block = 1024; #endif int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0]; int grid_tmp = (unique_items->size() + block - 1) / block; int grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx; ResetHashTable<T><<<grid, block, 0, dev_ctx.stream()>>>( thrust::raw_pointer_cast(unique_items->data()), unique_items->size(), key_index, values); } template <typename T, typename Context> void Reindex(const Context& dev_ctx, const T* inputs, thrust::device_ptr<T> src_outputs, thrust::device_vector<T>* out_nodes, int num_inputs, int num_edges) { out_nodes->resize(num_inputs + num_edges); thrust::copy(inputs, inputs + num_inputs, out_nodes->begin()); thrust::copy( src_outputs, src_outputs + num_edges, out_nodes->begin() + num_inputs); thrust::device_vector<T> unique_nodes; unique_nodes.clear(); // Fill hash table int64_t num = out_nodes->size(); int64_t log_num = 1 << static_cast<size_t>(1 + std::log2(num >> 1)); int64_t table_size = log_num << 1; T* keys; int *values, *key_index; #ifdef PADDLE_WITH_HIP hipMalloc(&keys, table_size * sizeof(T)); hipMalloc(&values, table_size * sizeof(int)); hipMalloc(&key_index, table_size * sizeof(int)); hipMemset(keys, -1, table_size * sizeof(T)); hipMemset(values, -1, table_size * sizeof(int)); hipMemset(key_index, -1, table_size * sizeof(int)); #else cudaMalloc(&keys, table_size * sizeof(T)); cudaMalloc(&values, table_size * sizeof(int)); cudaMalloc(&key_index, table_size * sizeof(int)); cudaMemset(keys, -1, table_size * sizeof(T)); cudaMemset(values, -1, table_size * sizeof(int)); cudaMemset(key_index, -1, table_size * sizeof(int)); #endif FillHashTable<T, Context>(dev_ctx, thrust::raw_pointer_cast(out_nodes->data()), out_nodes->size(), table_size, &unique_nodes, keys, values, key_index); out_nodes->resize(unique_nodes.size()); thrust::copy(unique_nodes.begin(), unique_nodes.end(), out_nodes->begin()); // Fill outputs with reindex result. #ifdef PADDLE_WITH_HIP int block = 256; #else int block = 1024; #endif int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0]; int grid_tmp = (num_edges + block - 1) / block; int grid = grid_tmp < max_grid_dimx ? 
grid_tmp : max_grid_dimx; ReindexSrcOutput<T><<<grid, block, 0, dev_ctx.stream()>>>( thrust::raw_pointer_cast(src_outputs), num_edges, table_size, keys, values); #ifdef PADDLE_WITH_HIP hipFree(keys); hipFree(values); hipFree(key_index); #else cudaFree(keys); cudaFree(values); cudaFree(key_index); #endif } template <typename T, typename Context> void BufferReindex(const Context& dev_ctx, const T* inputs, thrust::device_ptr<T> src_outputs, thrust::device_vector<T>* out_nodes, int num_inputs, int* hashtable_value, int* hashtable_index, int num_edges) { out_nodes->resize(num_inputs + num_edges); thrust::copy(inputs, inputs + num_inputs, out_nodes->begin()); thrust::copy( src_outputs, src_outputs + num_edges, out_nodes->begin() + num_inputs); thrust::device_vector<T> unique_nodes; unique_nodes.clear(); // Fill hash table FillBufferHashTable<T, Context>(dev_ctx, thrust::raw_pointer_cast(out_nodes->data()), out_nodes->size(), &unique_nodes, hashtable_value, hashtable_index); out_nodes->resize(unique_nodes.size()); thrust::copy(unique_nodes.begin(), unique_nodes.end(), out_nodes->begin()); // Fill outputs with reindex result. #ifdef PADDLE_WITH_HIP int block = 256; #else int block = 1024; #endif int max_grid_dimx = dev_ctx.GetCUDAMaxGridDimSize()[0]; int grid_tmp = (num_edges + block - 1) / block; int grid = grid_tmp < max_grid_dimx ? grid_tmp : max_grid_dimx; ReindexSrcOutput<T><<<grid, block, 0, dev_ctx.stream()>>>( thrust::raw_pointer_cast(src_outputs), num_edges, hashtable_value); ResetBufferHashTable<T, Context>(dev_ctx, thrust::raw_pointer_cast(out_nodes->data()), out_nodes->size(), &unique_nodes, hashtable_value, hashtable_index); } template <typename T, int BLOCK_WARPS, int TILE_SIZE> __global__ void GetDstEdgeCUDAKernel(const int64_t num_rows, const int* in_rows, const int* dst_counts, const int* dst_ptr, T* dst_outputs) { assert(blockDim.x == WARP_SIZE); assert(blockDim.y == BLOCK_WARPS); int64_t out_row = blockIdx.x * TILE_SIZE + threadIdx.y; const int64_t last_row = min(static_cast<int64_t>(blockIdx.x + 1) * TILE_SIZE, num_rows); while (out_row < last_row) { const int row = in_rows[out_row]; const int dst_sample_size = dst_counts[out_row]; const int out_row_start = dst_ptr[out_row]; for (int idx = threadIdx.x; idx < dst_sample_size; idx += WARP_SIZE) { dst_outputs[out_row_start + idx] = row; } out_row += BLOCK_WARPS; } } template <typename T, typename Context> void GraphReindexKernel(const Context& dev_ctx, const DenseTensor& x, const DenseTensor& neighbors, const DenseTensor& count, const paddle::optional<DenseTensor>& hashtable_value, const paddle::optional<DenseTensor>& hashtable_index, bool flag_buffer_hashtable, DenseTensor* reindex_src, DenseTensor* reindex_dst, DenseTensor* out_nodes) { const T* x_data = x.data<T>(); const T* neighbors_data = neighbors.data<T>(); const int* count_data = count.data<int>(); const int bs = x.dims()[0]; const int num_edges = neighbors.dims()[0]; reindex_src->Resize({num_edges}); T* reindex_src_data = dev_ctx.template Alloc<T>(reindex_src); thrust::device_ptr<T> src_outputs(reindex_src_data); thrust::device_vector<T> unique_nodes; thrust::copy(neighbors_data, neighbors_data + num_edges, src_outputs); if (flag_buffer_hashtable) { // Here we directly use buffer tensor to act as a hash table. 
DenseTensor hashtable_value_out(hashtable_value->type()); const auto* ph_value = hashtable_value.get_ptr(); hashtable_value_out.ShareDataWith(*ph_value); DenseTensor hashtable_index_out(hashtable_index->type()); const auto* ph_index = hashtable_index.get_ptr(); hashtable_index_out.ShareDataWith(*ph_index); int* hashtable_value_data = hashtable_value_out.mutable_data<int>(dev_ctx.GetPlace()); int* hashtable_index_data = hashtable_index_out.mutable_data<int>(dev_ctx.GetPlace()); BufferReindex<T, Context>(dev_ctx, x_data, src_outputs, &unique_nodes, bs, hashtable_value_data, hashtable_index_data, num_edges); } else { Reindex<T, Context>( dev_ctx, x_data, src_outputs, &unique_nodes, bs, num_edges); } // Get reindex dst edge. // Add support for multi-type edges reindex. int num_ac_count = count.dims()[0]; int num_edge_types = num_ac_count / bs; thrust::device_vector<int> unique_dst_reindex(bs); thrust::sequence(unique_dst_reindex.begin(), unique_dst_reindex.end()); constexpr int BLOCK_WARPS = 128 / WARP_SIZE; constexpr int TILE_SIZE = BLOCK_WARPS * 16; const dim3 block(WARP_SIZE, BLOCK_WARPS); const dim3 grid((bs + TILE_SIZE - 1) / TILE_SIZE); reindex_dst->Resize({num_edges}); T* reindex_dst_data = dev_ctx.template Alloc<T>(reindex_dst); int begin = 0; for (int i = 0; i < num_edge_types; i++) { thrust::device_vector<int> dst_ptr(bs); thrust::exclusive_scan( count_data + i * bs, count_data + (i + 1) * bs, dst_ptr.begin()); GetDstEdgeCUDAKernel<T, BLOCK_WARPS, TILE_SIZE> <<<grid, block, 0, dev_ctx.stream()>>>( bs, thrust::raw_pointer_cast(unique_dst_reindex.data()), count_data + i * bs, thrust::raw_pointer_cast(dst_ptr.data()), reindex_dst_data + begin); int count_i = thrust::reduce(thrust::device_pointer_cast(count_data) + i * bs, thrust::device_pointer_cast(count_data) + (i + 1) * bs); begin += count_i; } out_nodes->Resize({static_cast<int>(unique_nodes.size())}); T* out_nodes_data = dev_ctx.template Alloc<T>(out_nodes); thrust::copy(unique_nodes.begin(), unique_nodes.end(), out_nodes_data); } } // namespace phi PD_REGISTER_KERNEL( graph_reindex, GPU, ALL_LAYOUT, phi::GraphReindexKernel, int, int64_t) {}
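// A rough CPU restatement of what the reindex path above computes, using
// std::unordered_map in place of the GPU hash table, to make the kernel logic easier to
// follow. out_nodes collects unique ids in first-occurrence order (the x centers first,
// then previously unseen neighbor ids), and each neighbor id is rewritten to its position
// in out_nodes; reindex_dst (not shown) simply repeats the center index count[i] times.
// Names here are illustrative only and are not part of the Paddle API.
#include <cstdint>
#include <unordered_map>
#include <vector>

struct CpuReindexResult {
  std::vector<int64_t> out_nodes;
  std::vector<int64_t> reindex_src;
};

inline CpuReindexResult cpu_reindex(const std::vector<int64_t>& x,
                                    const std::vector<int64_t>& neighbors) {
  CpuReindexResult r;
  std::unordered_map<int64_t, int64_t> to_local;
  auto get_local = [&](int64_t id) -> int64_t {
    auto it = to_local.find(id);
    if (it != to_local.end()) return it->second;
    int64_t local = static_cast<int64_t>(r.out_nodes.size());
    to_local.emplace(id, local);
    r.out_nodes.push_back(id);
    return local;
  };
  for (int64_t id : x) get_local(id);                                  // centers map to 0..bs-1
  for (int64_t id : neighbors) r.reindex_src.push_back(get_local(id)); // neighbors map to compact ids
  return r;
}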
ed366d669c6fc676b1b0931ec49ec365e16d6d4f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @precisions normal z -> c d s @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 #define COMPLEX // dot product for multiple vectors __global__ void magma_zmdotc1_kernel_1( int Gs, int n, magmaDoubleComplex * v0, magmaDoubleComplex * w0, magmaDoubleComplex * vtmp) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; // 1 vectors v(i)/w(i) temp[ Idx ] = ( i < n ) ? v0[ i ] * w0[ i ] : MAGMA_Z_ZERO; __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // block reduction for 1 vectors __global__ void magma_zmdotc1_kernel_2( int Gs, int n, magmaDoubleComplex * vtmp, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx] = MAGMA_Z_ZERO; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + (blockSize) < Gs ) ? vtmp[ i + (blockSize) ] : MAGMA_Z_ZERO; i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } /** Purpose ------- Computes the scalar product of a set of 1 vectors such that skp[0] = [ <v_0,w_0> ] Returns the vector skp. In case there are less dot products required, an easy workaround is given by doubling input. Arguments --------- @param[in] n int length of v_i and w_i @param[in] v0 magmaDoubleComplex_ptr input vector @param[in] w0 magmaDoubleComplex_ptr input vector @param[in] d1 magmaDoubleComplex_ptr workspace @param[in] d2 magmaDoubleComplex_ptr workspace @param[out] skp magmaDoubleComplex_ptr vector[4] of scalar products [<v_i, w_i>] This vector is located on the host @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_zmdotc1( magma_int_t n, magmaDoubleComplex_ptr v0, magmaDoubleComplex_ptr w0, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = (local_block_size) * sizeof( magmaDoubleComplex ); // 1 skp magmaDoubleComplex_ptr aux1 = d1, aux2 = d2; int b = 1; hipLaunchKernelGGL(( magma_zmdotc1_kernel_1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , Gs.x, n, v0, w0, d1 ); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_zmdotc1_kernel_2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() , Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } // copy vectors to host magma_zgetvector( 1 , aux1, 1, skp, 1, queue ); return MAGMA_SUCCESS; } // 2 dot products // // initialize arrays with zero __global__ void magma_zmdotc2_gpumemzero( magmaDoubleComplex * d, int n ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { for (int j = 0; j < 2; j++) d[ i+j*n ] = MAGMA_Z_MAKE( 0.0, 0.0 ); } } // dot product for multiple vectors __global__ void magma_zmdotc2_kernel_1( int Gs, int n, magmaDoubleComplex * v0, magmaDoubleComplex * w0, magmaDoubleComplex * v1, magmaDoubleComplex * w1, magmaDoubleComplex * vtmp) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // 2 vectors v(i)/w(i) temp[ Idx ] = ( i < n ) ? v0[ i ] * w0[ i ] : MAGMA_Z_ZERO; temp[ Idx + blockDim.x ] = ( i < n ) ? 
v1[ i ] * w1[ i ] : MAGMA_Z_ZERO; __syncthreads(); if ( Idx < 128 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // block reduction for 2 vectors __global__ void magma_zmdotc2_kernel_2( int Gs, int n, magmaDoubleComplex * vtmp, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<2; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_Z_ZERO; while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ] : MAGMA_Z_ZERO; i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; } } } /** Purpose ------- Computes the scalar product of a set of 2 vectors such that skp[0,1,2,3] = [ <v_0,w_0>, <v_1,w_1> ] Returns the vector skp. 
In case there are less dot products required, an easy workaround is given by doubling input. Arguments --------- @param[in] n int length of v_i and w_i @param[in] v0 magmaDoubleComplex_ptr input vector @param[in] w0 magmaDoubleComplex_ptr input vector @param[in] v1 magmaDoubleComplex_ptr input vector @param[in] w1 magmaDoubleComplex_ptr input vector @param[in] d1 magmaDoubleComplex_ptr workspace @param[in] d2 magmaDoubleComplex_ptr workspace @param[out] skp magmaDoubleComplex_ptr vector[3] of scalar products [<v_i, w_i>] This vector is located on the host @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_zmdotc2( magma_int_t n, magmaDoubleComplex_ptr v0, magmaDoubleComplex_ptr w0, magmaDoubleComplex_ptr v1, magmaDoubleComplex_ptr w1, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 2 * (local_block_size) * sizeof( magmaDoubleComplex ); // 4 skp magmaDoubleComplex_ptr aux1 = d1, aux2 = d2; int b = 1; hipLaunchKernelGGL(( magma_zmdotc2_kernel_1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , Gs.x, n, v0, w0, v1, w1, d1 ); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_zmdotc2_kernel_2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() , Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } // copy vectors to host magma_zgetvector( 2 , aux1, n, skp, 1, queue ); return MAGMA_SUCCESS; } // 3 dot products // // initialize arrays with zero __global__ void magma_zmdotc3_gpumemzero( magmaDoubleComplex * d, int n ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { for (int j = 0; j < 3; j++) d[ i+j*n ] = MAGMA_Z_MAKE( 0.0, 0.0 ); } } // dot product for multiple vectors __global__ void magma_zmdotc3_kernel_1( int Gs, int n, magmaDoubleComplex * v0, magmaDoubleComplex * w0, magmaDoubleComplex * v1, magmaDoubleComplex * w1, magmaDoubleComplex * v2, magmaDoubleComplex * w2, magmaDoubleComplex * vtmp) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // 3 vectors v(i)/w(i) temp[ Idx ] = ( i < n ) ? v0[ i ] * w0[ i ] : MAGMA_Z_ZERO; temp[ Idx + blockDim.x ] = ( i < n ) ? v1[ i ] * w1[ i ] : MAGMA_Z_ZERO; temp[ Idx + 2*blockDim.x ] = ( i < n ) ? 
v2[ i ] * w2[ i ] : MAGMA_Z_ZERO; __syncthreads(); if ( Idx < 128 ){ for( j=0; j<3; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<3; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<3; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<3; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // block reduction for 3 vectors __global__ void magma_zmdotc3_kernel_2( int Gs, int n, magmaDoubleComplex * vtmp, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<3; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_Z_ZERO; while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ] : MAGMA_Z_ZERO; i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<3; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<3; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<3; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; } } } /** Purpose ------- Computes the scalar product of a set of 4 vectors such that skp[0,1,2,3] = [ <v_0,w_0>, <v_1,w_1>, <v_2,w_2>, <v3,w_3> ] Returns the vector skp. 
In case there are less dot products required, an easy workaround is given by doubling input. Arguments --------- @param[in] n int length of v_i and w_i @param[in] v0 magmaDoubleComplex_ptr input vector @param[in] w0 magmaDoubleComplex_ptr input vector @param[in] v1 magmaDoubleComplex_ptr input vector @param[in] w1 magmaDoubleComplex_ptr input vector @param[in] v2 magmaDoubleComplex_ptr input vector @param[in] w2 magmaDoubleComplex_ptr input vector @param[in] d1 magmaDoubleComplex_ptr workspace @param[in] d2 magmaDoubleComplex_ptr workspace @param[out] skp magmaDoubleComplex_ptr vector[3] of scalar products [<v_i, w_i>] This vector is located on the host @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_zmdotc3( magma_int_t n, magmaDoubleComplex_ptr v0, magmaDoubleComplex_ptr w0, magmaDoubleComplex_ptr v1, magmaDoubleComplex_ptr w1, magmaDoubleComplex_ptr v2, magmaDoubleComplex_ptr w2, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 3 * (local_block_size) * sizeof( magmaDoubleComplex ); // 4 skp magmaDoubleComplex_ptr aux1 = d1, aux2 = d2; int b = 1; // magma_zmdotc3_gpumemzero<<< Gs, Bs, 0, queue->cuda_stream() >>>( d1, n ); hipLaunchKernelGGL(( magma_zmdotc3_kernel_1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , Gs.x, n, v0, w0, v1, w1, v2, w2, d1 ); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_zmdotc3_kernel_2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() , Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } // copy vectors to host magma_zgetvector( 3 , aux1, n, skp, 1, queue ); return MAGMA_SUCCESS; } // 4 dot products // // initialize arrays with zero __global__ void magma_zmdotc4_gpumemzero( magmaDoubleComplex * d, int n ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { for (int j = 0; j < 4; j++) d[ i+j*n ] = MAGMA_Z_MAKE( 0.0, 0.0 ); } } // dot product for multiple vectors __global__ void magma_zmdotc4_kernel_1( int Gs, int n, magmaDoubleComplex * v0, magmaDoubleComplex * w0, magmaDoubleComplex * v1, magmaDoubleComplex * w1, magmaDoubleComplex * v2, magmaDoubleComplex * w2, magmaDoubleComplex * v3, magmaDoubleComplex * w3, magmaDoubleComplex * vtmp) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // 4 vectors v(i)/w(i) temp[ Idx ] = ( i < n ) ? v0[ i ] * w0[ i ] : MAGMA_Z_ZERO; temp[ Idx + blockDim.x ] = ( i < n ) ? v1[ i ] * w1[ i ] : MAGMA_Z_ZERO; temp[ Idx + 2*blockDim.x ] = ( i < n ) ? v2[ i ] * w2[ i ] : MAGMA_Z_ZERO; temp[ Idx + 3*blockDim.x ] = ( i < n ) ? 
v3[ i ] * w3[ i ] : MAGMA_Z_ZERO; __syncthreads(); if ( Idx < 128 ){ for( j=0; j<4; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<4; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<4; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<4; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // block reduction for 4 vectors __global__ void magma_zmdotc4_kernel_2( int Gs, int n, magmaDoubleComplex * vtmp, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<4; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_Z_ZERO; while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ] : MAGMA_Z_ZERO; i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<4; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<4; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<4; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; } } } /** Purpose ------- Computes the scalar product of a set of 4 vectors such that skp[0,1,2,3] = [ <v_0,w_0>, <v_1,w_1>, <v_2,w_2>, <v3,w_3> ] Returns the vector skp. 
In case there are less dot products required, an easy workaround is given by doubling input. Arguments --------- @param[in] n int length of v_i and w_i @param[in] v0 magmaDoubleComplex_ptr input vector @param[in] w0 magmaDoubleComplex_ptr input vector @param[in] v1 magmaDoubleComplex_ptr input vector @param[in] w1 magmaDoubleComplex_ptr input vector @param[in] v2 magmaDoubleComplex_ptr input vector @param[in] w2 magmaDoubleComplex_ptr input vector @param[in] v3 magmaDoubleComplex_ptr input vector @param[in] w3 magmaDoubleComplex_ptr input vector @param[in] d1 magmaDoubleComplex_ptr workspace @param[in] d2 magmaDoubleComplex_ptr workspace @param[out] skp magmaDoubleComplex_ptr vector[4] of scalar products [<v_i, w_i>] This vector is located on the host @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zmdotc4( magma_int_t n, magmaDoubleComplex_ptr v0, magmaDoubleComplex_ptr w0, magmaDoubleComplex_ptr v1, magmaDoubleComplex_ptr w1, magmaDoubleComplex_ptr v2, magmaDoubleComplex_ptr w2, magmaDoubleComplex_ptr v3, magmaDoubleComplex_ptr w3, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 4 * (local_block_size) * sizeof( magmaDoubleComplex ); // 4 skp magmaDoubleComplex_ptr aux1 = d1, aux2 = d2; int b = 1; hipLaunchKernelGGL(( magma_zmdotc4_kernel_1), dim3(Gs), dim3(Bs), Ms, queue->cuda_stream() , Gs.x, n, v0, w0, v1, w1, v2, w2, v3, w3, d1 ); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; hipLaunchKernelGGL(( magma_zmdotc4_kernel_2), dim3(Gs_next.x/2), dim3(Bs.x/2), Ms/2, queue->cuda_stream() , Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } // copy vectors to host magma_zgetvector( 4 , aux1, n, skp, 1, queue ); return MAGMA_SUCCESS; }
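/* A minimal calling sketch for magma_zmdotc4 above. Everything outside the call itself
   is an assumption rather than something taken from this file: the d1/d2 workspaces are
   sized 4*n entries to match the vtmp[ blockIdx.x + j*n ] indexing of the reduction
   kernels, magma_zmalloc/magma_free are used for the device workspaces, and skp4 is a
   plain host array of 4 results as described in the Purpose block. */
extern "C" void
zmdotc4_example(
    magma_int_t n,
    magmaDoubleComplex_ptr v0, magmaDoubleComplex_ptr w0,
    magmaDoubleComplex_ptr v1, magmaDoubleComplex_ptr w1,
    magmaDoubleComplex_ptr v2, magmaDoubleComplex_ptr w2,
    magmaDoubleComplex_ptr v3, magmaDoubleComplex_ptr w3,
    magmaDoubleComplex *skp4,
    magma_queue_t queue )
{
    magmaDoubleComplex_ptr d1 = NULL, d2 = NULL;
    magma_zmalloc( &d1, 4*n );    // reduction workspace: one column of length n per dot product
    magma_zmalloc( &d2, 4*n );    // ping-pong buffer for the block-level reduction
    magma_zmdotc4( n, v0, w0, v1, w1, v2, w2, v3, w3, d1, d2, skp4, queue );
    // skp4[0..3] now hold <v_0,w_0>, <v_1,w_1>, <v_2,w_2>, <v_3,w_3> on the host
    magma_free( d1 );
    magma_free( d2 );
}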
ed366d669c6fc676b1b0931ec49ec365e16d6d4f.cu
/* -- MAGMA (version 2.5.4) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date October 2020 @precisions normal z -> c d s @author Hartwig Anzt */ #include "magmasparse_internal.h" #define BLOCK_SIZE 256 #define COMPLEX // dot product for multiple vectors __global__ void magma_zmdotc1_kernel_1( int Gs, int n, magmaDoubleComplex * v0, magmaDoubleComplex * w0, magmaDoubleComplex * vtmp) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; // 1 vectors v(i)/w(i) temp[ Idx ] = ( i < n ) ? v0[ i ] * w0[ i ] : MAGMA_Z_ZERO; __syncthreads(); if ( Idx < 128 ){ temp[ Idx ] += temp[ Idx + 128 ]; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp[ blockIdx.x ] = temp[ 0 ]; } } // block reduction for 1 vectors __global__ void magma_zmdotc1_kernel_2( int Gs, int n, magmaDoubleComplex * vtmp, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx] = MAGMA_Z_ZERO; while (i < Gs ) { temp[ Idx ] += vtmp[ i ]; temp[ Idx ] += ( i + (blockSize) < Gs ) ? vtmp[ i + (blockSize) ] : MAGMA_Z_ZERO; i += gridSize; } __syncthreads(); if ( Idx < 64 ){ temp[ Idx ] += temp[ Idx + 64 ]; } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ temp[ Idx ] += temp[ Idx + 32 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 16 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 8 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 4 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 2 ]; __syncthreads(); temp[ Idx ] += temp[ Idx + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; temp2[ Idx ] += temp2[ Idx + 32 ]; temp2[ Idx ] += temp2[ Idx + 16 ]; temp2[ Idx ] += temp2[ Idx + 8 ]; temp2[ Idx ] += temp2[ Idx + 4 ]; temp2[ Idx ] += temp2[ Idx + 2 ]; temp2[ Idx ] += temp2[ Idx + 1 ]; } #endif if ( Idx == 0 ){ vtmp2[ blockIdx.x ] = temp[ 0 ]; } } /** Purpose ------- Computes the scalar product of a set of 1 vectors such that skp[0] = [ <v_0,w_0> ] Returns the vector skp. In case there are less dot products required, an easy workaround is given by doubling input. Arguments --------- @param[in] n int length of v_i and w_i @param[in] v0 magmaDoubleComplex_ptr input vector @param[in] w0 magmaDoubleComplex_ptr input vector @param[in] d1 magmaDoubleComplex_ptr workspace @param[in] d2 magmaDoubleComplex_ptr workspace @param[out] skp magmaDoubleComplex_ptr vector[4] of scalar products [<v_i, w_i>] This vector is located on the host @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_zmdotc1( magma_int_t n, magmaDoubleComplex_ptr v0, magmaDoubleComplex_ptr w0, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = (local_block_size) * sizeof( magmaDoubleComplex ); // 1 skp magmaDoubleComplex_ptr aux1 = d1, aux2 = d2; int b = 1; magma_zmdotc1_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream() >>> ( Gs.x, n, v0, w0, d1 ); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_zmdotc1_kernel_2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } // copy vectors to host magma_zgetvector( 1 , aux1, 1, skp, 1, queue ); return MAGMA_SUCCESS; } // 2 dot products // // initialize arrays with zero __global__ void magma_zmdotc2_gpumemzero( magmaDoubleComplex * d, int n ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { for (int j = 0; j < 2; j++) d[ i+j*n ] = MAGMA_Z_MAKE( 0.0, 0.0 ); } } // dot product for multiple vectors __global__ void magma_zmdotc2_kernel_1( int Gs, int n, magmaDoubleComplex * v0, magmaDoubleComplex * w0, magmaDoubleComplex * v1, magmaDoubleComplex * w1, magmaDoubleComplex * vtmp) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // 2 vectors v(i)/w(i) temp[ Idx ] = ( i < n ) ? v0[ i ] * w0[ i ] : MAGMA_Z_ZERO; temp[ Idx + blockDim.x ] = ( i < n ) ? v1[ i ] * w1[ i ] : MAGMA_Z_ZERO; __syncthreads(); if ( Idx < 128 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // block reduction for 2 vectors __global__ void magma_zmdotc2_kernel_2( int Gs, int n, magmaDoubleComplex * vtmp, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<2; j++){ int i = blockIdx.x * ( 
blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_Z_ZERO; while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ] : MAGMA_Z_ZERO; i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<2; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<2; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<2; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<2; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; } } } /** Purpose ------- Computes the scalar product of a set of 2 vectors such that skp[0,1,2,3] = [ <v_0,w_0>, <v_1,w_1> ] Returns the vector skp. In case there are less dot products required, an easy workaround is given by doubling input. Arguments --------- @param[in] n int length of v_i and w_i @param[in] v0 magmaDoubleComplex_ptr input vector @param[in] w0 magmaDoubleComplex_ptr input vector @param[in] v1 magmaDoubleComplex_ptr input vector @param[in] w1 magmaDoubleComplex_ptr input vector @param[in] d1 magmaDoubleComplex_ptr workspace @param[in] d2 magmaDoubleComplex_ptr workspace @param[out] skp magmaDoubleComplex_ptr vector[3] of scalar products [<v_i, w_i>] This vector is located on the host @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_zmdotc2( magma_int_t n, magmaDoubleComplex_ptr v0, magmaDoubleComplex_ptr w0, magmaDoubleComplex_ptr v1, magmaDoubleComplex_ptr w1, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 2 * (local_block_size) * sizeof( magmaDoubleComplex ); // 4 skp magmaDoubleComplex_ptr aux1 = d1, aux2 = d2; int b = 1; magma_zmdotc2_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream() >>> ( Gs.x, n, v0, w0, v1, w1, d1 ); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_zmdotc2_kernel_2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } // copy vectors to host magma_zgetvector( 2 , aux1, n, skp, 1, queue ); return MAGMA_SUCCESS; } // 3 dot products // // initialize arrays with zero __global__ void magma_zmdotc3_gpumemzero( magmaDoubleComplex * d, int n ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { for (int j = 0; j < 3; j++) d[ i+j*n ] = MAGMA_Z_MAKE( 0.0, 0.0 ); } } // dot product for multiple vectors __global__ void magma_zmdotc3_kernel_1( int Gs, int n, magmaDoubleComplex * v0, magmaDoubleComplex * w0, magmaDoubleComplex * v1, magmaDoubleComplex * w1, magmaDoubleComplex * v2, magmaDoubleComplex * w2, magmaDoubleComplex * vtmp) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // 3 vectors v(i)/w(i) temp[ Idx ] = ( i < n ) ? v0[ i ] * w0[ i ] : MAGMA_Z_ZERO; temp[ Idx + blockDim.x ] = ( i < n ) ? v1[ i ] * w1[ i ] : MAGMA_Z_ZERO; temp[ Idx + 2*blockDim.x ] = ( i < n ) ? 
v2[ i ] * w2[ i ] : MAGMA_Z_ZERO; __syncthreads(); if ( Idx < 128 ){ for( j=0; j<3; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<3; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<3; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<3; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // block reduction for 3 vectors __global__ void magma_zmdotc3_kernel_2( int Gs, int n, magmaDoubleComplex * vtmp, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<3; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_Z_ZERO; while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ] : MAGMA_Z_ZERO; i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<3; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<3; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<3; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<3; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; } } } /** Purpose ------- Computes the scalar product of a set of 4 vectors such that skp[0,1,2,3] = [ <v_0,w_0>, <v_1,w_1>, <v_2,w_2>, <v3,w_3> ] Returns the vector skp. 
In case there are less dot products required, an easy workaround is given by doubling input. Arguments --------- @param[in] n int length of v_i and w_i @param[in] v0 magmaDoubleComplex_ptr input vector @param[in] w0 magmaDoubleComplex_ptr input vector @param[in] v1 magmaDoubleComplex_ptr input vector @param[in] w1 magmaDoubleComplex_ptr input vector @param[in] v2 magmaDoubleComplex_ptr input vector @param[in] w2 magmaDoubleComplex_ptr input vector @param[in] d1 magmaDoubleComplex_ptr workspace @param[in] d2 magmaDoubleComplex_ptr workspace @param[out] skp magmaDoubleComplex_ptr vector[3] of scalar products [<v_i, w_i>] This vector is located on the host @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_cblas ********************************************************************/ extern "C" magma_int_t magma_zmdotc3( magma_int_t n, magmaDoubleComplex_ptr v0, magmaDoubleComplex_ptr w0, magmaDoubleComplex_ptr v1, magmaDoubleComplex_ptr w1, magmaDoubleComplex_ptr v2, magmaDoubleComplex_ptr w2, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 3 * (local_block_size) * sizeof( magmaDoubleComplex ); // 4 skp magmaDoubleComplex_ptr aux1 = d1, aux2 = d2; int b = 1; // magma_zmdotc3_gpumemzero<<< Gs, Bs, 0, queue->cuda_stream() >>>( d1, n ); magma_zmdotc3_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream() >>> ( Gs.x, n, v0, w0, v1, w1, v2, w2, d1 ); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_zmdotc3_kernel_2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } // copy vectors to host magma_zgetvector( 3 , aux1, n, skp, 1, queue ); return MAGMA_SUCCESS; } // 4 dot products // // initialize arrays with zero __global__ void magma_zmdotc4_gpumemzero( magmaDoubleComplex * d, int n ) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { for (int j = 0; j < 4; j++) d[ i+j*n ] = MAGMA_Z_MAKE( 0.0, 0.0 ); } } // dot product for multiple vectors __global__ void magma_zmdotc4_kernel_1( int Gs, int n, magmaDoubleComplex * v0, magmaDoubleComplex * w0, magmaDoubleComplex * v1, magmaDoubleComplex * w1, magmaDoubleComplex * v2, magmaDoubleComplex * w2, magmaDoubleComplex * v3, magmaDoubleComplex * w3, magmaDoubleComplex * vtmp) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int i = blockIdx.x * blockDim.x + Idx; int j; // 4 vectors v(i)/w(i) temp[ Idx ] = ( i < n ) ? v0[ i ] * w0[ i ] : MAGMA_Z_ZERO; temp[ Idx + blockDim.x ] = ( i < n ) ? v1[ i ] * w1[ i ] : MAGMA_Z_ZERO; temp[ Idx + 2*blockDim.x ] = ( i < n ) ? v2[ i ] * w2[ i ] : MAGMA_Z_ZERO; temp[ Idx + 3*blockDim.x ] = ( i < n ) ? 
v3[ i ] * w3[ i ] : MAGMA_Z_ZERO; __syncthreads(); if ( Idx < 128 ){ for( j=0; j<4; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 128 ]; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<4; j++){ temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 32 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 16 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 8 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 4 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 2 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*blockDim.x ] += temp[ Idx+j*blockDim.x + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<4; j++){ temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 32 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 16 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 8 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 4 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 2 ]; temp2[ Idx+j*blockDim.x ] += temp2[ Idx+j*blockDim.x + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<4; j++){ vtmp[ blockIdx.x+j*n ] = temp[ j*blockDim.x ]; } } } // block reduction for 4 vectors __global__ void magma_zmdotc4_kernel_2( int Gs, int n, magmaDoubleComplex * vtmp, magmaDoubleComplex * vtmp2 ) { extern __shared__ magmaDoubleComplex temp[]; int Idx = threadIdx.x; int blockSize = 128; int gridSize = blockSize * 2 * gridDim.x; int j; for( j=0; j<4; j++){ int i = blockIdx.x * ( blockSize * 2 ) + Idx; temp[Idx+j*(blockSize)] = MAGMA_Z_ZERO; while (i < Gs ) { temp[ Idx+j*(blockSize) ] += vtmp[ i+j*n ]; temp[ Idx+j*(blockSize) ] += ( i + (blockSize) < Gs ) ? vtmp[ i+j*n + (blockSize) ] : MAGMA_Z_ZERO; i += gridSize; } } __syncthreads(); if ( Idx < 64 ){ for( j=0; j<4; j++){ temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 64 ]; } } __syncthreads(); #ifdef COMPLEX if( Idx < 32 ){ for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 32 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 16 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 8 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 4 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 2 ]; __syncthreads(); for( j=0; j<4; j++) temp[ Idx+j*(blockSize) ] += temp[ Idx+j*(blockSize) + 1 ]; __syncthreads(); } #endif #ifdef REAL if( Idx < 32 ){ volatile double *temp2 = temp; for( j=0; j<4; j++){ temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 32 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 16 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 8 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 4 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 2 ]; temp2[ Idx+j*(blockSize) ] += temp2[ Idx+j*(blockSize) + 1 ]; } } #endif if ( Idx == 0 ){ for( j=0; j<4; j++){ vtmp2[ blockIdx.x+j*n ] = temp[ j*(blockSize) ]; } } } /** Purpose ------- Computes the scalar product of a set of 4 vectors such that skp[0,1,2,3] = [ <v_0,w_0>, <v_1,w_1>, <v_2,w_2>, <v3,w_3> ] Returns the vector skp. 
In case there are less dot products required, an easy workaround is given by doubling input. Arguments --------- @param[in] n int length of v_i and w_i @param[in] v0 magmaDoubleComplex_ptr input vector @param[in] w0 magmaDoubleComplex_ptr input vector @param[in] v1 magmaDoubleComplex_ptr input vector @param[in] w1 magmaDoubleComplex_ptr input vector @param[in] v2 magmaDoubleComplex_ptr input vector @param[in] w2 magmaDoubleComplex_ptr input vector @param[in] v3 magmaDoubleComplex_ptr input vector @param[in] w3 magmaDoubleComplex_ptr input vector @param[in] d1 magmaDoubleComplex_ptr workspace @param[in] d2 magmaDoubleComplex_ptr workspace @param[out] skp magmaDoubleComplex_ptr vector[4] of scalar products [<v_i, w_i>] This vector is located on the host @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zmdotc4( magma_int_t n, magmaDoubleComplex_ptr v0, magmaDoubleComplex_ptr w0, magmaDoubleComplex_ptr v1, magmaDoubleComplex_ptr w1, magmaDoubleComplex_ptr v2, magmaDoubleComplex_ptr w2, magmaDoubleComplex_ptr v3, magmaDoubleComplex_ptr w3, magmaDoubleComplex_ptr d1, magmaDoubleComplex_ptr d2, magmaDoubleComplex_ptr skp, magma_queue_t queue ) { int local_block_size=256; dim3 Bs( local_block_size ); dim3 Gs( magma_ceildiv( n, local_block_size ) ); dim3 Gs_next; int Ms = 4 * (local_block_size) * sizeof( magmaDoubleComplex ); // 4 skp magmaDoubleComplex_ptr aux1 = d1, aux2 = d2; int b = 1; magma_zmdotc4_kernel_1<<< Gs, Bs, Ms, queue->cuda_stream() >>> ( Gs.x, n, v0, w0, v1, w1, v2, w2, v3, w3, d1 ); while( Gs.x > 1 ) { Gs_next.x = magma_ceildiv( Gs.x, Bs.x ); if ( Gs_next.x == 1 ) Gs_next.x = 2; magma_zmdotc4_kernel_2<<< Gs_next.x/2, Bs.x/2, Ms/2, queue->cuda_stream() >>> ( Gs.x, n, aux1, aux2 ); Gs_next.x = Gs_next.x /2; Gs.x = Gs_next.x; b = 1 - b; if ( b ) { aux1 = d1; aux2 = d2; } else { aux2 = d1; aux1 = d2; } } // copy vectors to host magma_zgetvector( 4 , aux1, n, skp, 1, queue ); return MAGMA_SUCCESS; }
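All of the magma_zmdotc* routines in the file above follow the same two-stage pattern: kernel_1 forms per-block partial dot products in shared memory, kernel_2 is applied repeatedly while ping-ponging between the d1/d2 workspaces until a single block of partial sums remains, and magma_zgetvector copies one value per dot product back to the host. The following is a minimal, self-contained CUDA sketch of that pattern for a single real-valued dot product; it is an illustration only (not part of the file pair), uses plain double instead of magmaDoubleComplex, and folds the per-block partial sums on the host instead of with a second kernel.

#include <cstdio>
#include <cuda_runtime.h>

// Stage 1: each block reduces its slice of v[i]*w[i] in shared memory.
__global__ void dot_partial(int n, const double *v, const double *w, double *partial)
{
    extern __shared__ double temp[];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + tid;
    temp[tid] = (i < n) ? v[i] * w[i] : 0.0;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {   // tree reduction, analogous to the
        if (tid < s) temp[tid] += temp[tid + s];     // 128/64/32/... steps in the kernels above
        __syncthreads();
    }
    if (tid == 0) partial[blockIdx.x] = temp[0];
}

int main()
{
    const int n = 1 << 20, block = 256;
    const int grid = (n + block - 1) / block;
    double *v, *w, *partial;
    cudaMallocManaged(&v, n * sizeof(double));
    cudaMallocManaged(&w, n * sizeof(double));
    cudaMallocManaged(&partial, grid * sizeof(double));
    for (int i = 0; i < n; i++) { v[i] = 1.0; w[i] = 2.0; }
    dot_partial<<<grid, block, block * sizeof(double)>>>(n, v, w, partial);
    cudaDeviceSynchronize();
    double dot = 0.0;                 // Stage 2 done on the host for brevity; the MAGMA code
    for (int i = 0; i < grid; i++)    // instead keeps reducing with kernel_2, swapping the
        dot += partial[i];            // d1/d2 buffers, until one block of sums remains.
    printf("dot = %.1f (expected %.1f)\n", dot, 2.0 * n);
    cudaFree(v); cudaFree(w); cudaFree(partial);
    return 0;
}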
b487df765e4b16e4c47f2722b0b1b2c188cba870.hip
// !!! This is a file automatically generated by hipify!!! #include "common.h" #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <device_launch_parameters.h> #include <iostream> #include <chrono> #include <stdio.h> #include <stdlib.h> #include <omp.h> using namespace std; #define matrixSize 1000 // Multiply Matrices in GPU __global__ void multMatrixGPU(long *MatA, long *MatB, long *MatC, int nx, int ny) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; // col unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y; // row unsigned int idx = iy * nx + ix; long sum = 0; if (ix < nx && iy < ny) { for (int i = 0; i < nx; i++) { sum += MatA[iy * nx + i] * MatB[i * ny + ix]; } MatC[idx] = sum; } } // Multiply Matrix in CPU void multMatrixCPU(long *A, long *B, long *C, const int nx, const int ny) { long *ia = A; long *ib = B; long *ic = C; long sum = 0; for (int i = 0; i < nx; i++) { for (int j = 0; j < nx; j++) { for (int k = 0; k < nx; k++) { ic[i * nx + j] += ia[i * nx + k] * ib[j + k * nx]; } } } return; } // Multiply Matrix in CPU Parallel void multMatrixCPUParallel(long *A, long *B, long *C, const int nx, const int ny) { long *ia = A; long *ib = B; long *ic = C; long sum = 0; int i, j, k; int nProcessors = omp_get_max_threads(); std::cout << "CPU processors available: " << nProcessors << std::endl; omp_set_num_threads(nProcessors/2); #pragma omp parallel for private(sum,i,j,k) shared(ia, ib, ic) for (i = 0; i < nx; i++) { for (j = 0; j < nx; j++) { sum = 0; for (k = 0; k < nx; k++) { sum += ia[i * nx + k] * ib[k * nx + j]; } ic[i * nx + j] = sum; } } return; } // Fill Matrix with random ints void initialData(long *ip, const int size) { int i; for (i = 0; i < size; i++) { ip[i] = rand() % 10; } return; } // Check if matrices match void checkResult(long *hostRef, long *gpuRef, const int N) { // double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < N; i++) { if (hostRef[i] != gpuRef[i]) { match = 0; printf("host %f gpu %f\n", hostRef[i], gpuRef[i]); break; } } if (match) printf("Arrays match.\n\n"); else printf("Arrays do not match.\n\n"); } // Print Matrix (for debug) void printMatrix(long *A, const int nx, const int ny) { long *ia = A; for (int iy = 0; iy < ny; iy++) { for (int ix = 0; ix < nx; ix++) { printf("%d ", ia[ix]); } printf("\n"); ia += nx; } return; } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); // set up device int dev = 0; hipDeviceProp_t deviceProp; SAFE_CALL(hipGetDeviceProperties(&deviceProp, dev), "Error device prop"); printf("Using Device %d: %s\n", dev, deviceProp.name); SAFE_CALL(hipSetDevice(dev), "Error setting device"); // set up data size of matrix int nx = matrixSize; int ny = matrixSize; int nxy = nx * ny; int nBytes = nxy * sizeof(long); printf("Matrix size: nx %d ny %d\n", nx, ny); // malloc host memory long *h_A, *h_B, *hostRef, *gpuRef, *hostRefParallel; h_A = (long *)malloc(nBytes); h_B = (long *)malloc(nBytes); hostRef = (long *)malloc(nBytes); gpuRef = (long *)malloc(nBytes); hostRefParallel = (long *)malloc(nBytes); // initialize data at host side initialData(h_A, nxy); initialData(h_B, nxy); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // malloc device global memory long *d_MatA, *d_MatB, *d_MatC; SAFE_CALL(hipMalloc((void **)&d_MatA, nBytes), "Error allocating d_MatA"); SAFE_CALL(hipMalloc((void **)&d_MatB, nBytes), "Error allocating d_MatB"); SAFE_CALL(hipMalloc((void **)&d_MatC, nBytes), "Error allocating d_MatC"); // transfer data from host to device SAFE_CALL(hipMemcpy(d_MatA, 
h_A, nBytes, hipMemcpyHostToDevice), "Error copying d_MatA"); SAFE_CALL(hipMemcpy(d_MatB, h_B, nBytes, hipMemcpyHostToDevice), "Error copying d_MatB"); // invoke kernel at host side dim3 block(8, 16); dim3 grid(256, 256); //dim3 grid((nx + block.x - 1) / block.x, (nx + block.y - 1) / block.y); // GPU multiplication auto start_cpu = chrono::high_resolution_clock::now(); multMatrixGPU << <grid, block >> > (d_MatA, d_MatB, d_MatC, nx, ny); SAFE_CALL(hipDeviceSynchronize(), "Error executing kernel"); auto end_cpu = chrono::high_resolution_clock::now(); chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu; printf("multMatrixGPU <<<(%d,%d), (%d,%d)>>> elapsed %f ms\n", grid.x, grid.y, block.x, block.y, duration_ms.count()); // SAFE_CALL kernel error SAFE_CALL(hipGetLastError(), "Error with last error"); // copy kernel result back to host side SAFE_CALL(hipMemcpy(gpuRef, d_MatC, nBytes, hipMemcpyDeviceToHost), "Error copying d_MatC"); // CPU parallel multiplication start_cpu = chrono::high_resolution_clock::now(); multMatrixCPUParallel(h_A, h_B, hostRefParallel, nx, ny); end_cpu = chrono::high_resolution_clock::now(); duration_ms = end_cpu - start_cpu; printf("multMatrixCPUParallel elapsed %f ms\n", duration_ms.count()); // check CPU parallel against GPU results cout << "\n" << "\n"; cout << "check CPU parallel against GPU results: "; checkResult(hostRefParallel, gpuRef, nxy); // CPU multiplication start_cpu = chrono::high_resolution_clock::now(); multMatrixCPU(h_A, h_B, hostRef, nx, ny); end_cpu = chrono::high_resolution_clock::now(); duration_ms = end_cpu - start_cpu; printf("multMatrixCPU elapsed %f ms\n", duration_ms.count()); cout << "\n" << "\n"; // check CPU against GPU results cout << "check CPU against GPU results: "; checkResult(hostRef, gpuRef, nxy); // check both CPU results cout << "check CPU parallel against CPU results: "; checkResult(hostRef, hostRefParallel, nxy); // Print Matrices // printMatrix(hostRef, nx, ny); // printf("\n"); // printMatrix(gpuRef, nx, ny); // printf("\n"); // printMatrix(hostRefParallel, nx, ny); // free device global memory SAFE_CALL(hipFree(d_MatA), "Error freeing memory"); SAFE_CALL(hipFree(d_MatB), "Error freeing memory"); SAFE_CALL(hipFree(d_MatC), "Error freeing memory"); // free host memory free(h_A); free(h_B); free(hostRef); free(gpuRef); free(hostRefParallel); // reset device SAFE_CALL(hipDeviceReset(), "Error reseting"); return (0); }
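One detail of the hipified half of this pair: hipify replaced the runtime API (cudaMalloc to hipMalloc, cudaDeviceProp to hipDeviceProp_t, cudaMemcpy* to hipMemcpy*, and so on) but left the kernel launch in triple-chevron form, which hipcc also accepts. The equivalent explicit-macro spelling, as used by the other hipified files in this dump, would be the fragment below (illustration only, not a patch to the record; the two extra 0 arguments are the dynamic shared-memory size in bytes and the stream).

// Explicit-macro form of the <<<grid, block>>> launch kept in the file above:
hipLaunchKernelGGL(multMatrixGPU, grid, block, 0, 0, d_MatA, d_MatB, d_MatC, nx, ny);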
b487df765e4b16e4c47f2722b0b1b2c188cba870.cu
#include "common.h" #include <cuda_runtime.h> #include <cuda_runtime_api.h> #include <device_launch_parameters.h> #include <iostream> #include <chrono> #include <stdio.h> #include <stdlib.h> #include <omp.h> using namespace std; #define matrixSize 1000 // Multiply Matrices in GPU __global__ void multMatrixGPU(long *MatA, long *MatB, long *MatC, int nx, int ny) { unsigned int ix = threadIdx.x + blockIdx.x * blockDim.x; // col unsigned int iy = threadIdx.y + blockIdx.y * blockDim.y; // row unsigned int idx = iy * nx + ix; long sum = 0; if (ix < nx && iy < ny) { for (int i = 0; i < nx; i++) { sum += MatA[iy * nx + i] * MatB[i * ny + ix]; } MatC[idx] = sum; } } // Multiply Matrix in CPU void multMatrixCPU(long *A, long *B, long *C, const int nx, const int ny) { long *ia = A; long *ib = B; long *ic = C; long sum = 0; for (int i = 0; i < nx; i++) { for (int j = 0; j < nx; j++) { for (int k = 0; k < nx; k++) { ic[i * nx + j] += ia[i * nx + k] * ib[j + k * nx]; } } } return; } // Multiply Matrix in CPU Parallel void multMatrixCPUParallel(long *A, long *B, long *C, const int nx, const int ny) { long *ia = A; long *ib = B; long *ic = C; long sum = 0; int i, j, k; int nProcessors = omp_get_max_threads(); std::cout << "CPU processors available: " << nProcessors << std::endl; omp_set_num_threads(nProcessors/2); #pragma omp parallel for private(sum,i,j,k) shared(ia, ib, ic) for (i = 0; i < nx; i++) { for (j = 0; j < nx; j++) { sum = 0; for (k = 0; k < nx; k++) { sum += ia[i * nx + k] * ib[k * nx + j]; } ic[i * nx + j] = sum; } } return; } // Fill Matrix with random ints void initialData(long *ip, const int size) { int i; for (i = 0; i < size; i++) { ip[i] = rand() % 10; } return; } // Check if matrices match void checkResult(long *hostRef, long *gpuRef, const int N) { // double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < N; i++) { if (hostRef[i] != gpuRef[i]) { match = 0; printf("host %f gpu %f\n", hostRef[i], gpuRef[i]); break; } } if (match) printf("Arrays match.\n\n"); else printf("Arrays do not match.\n\n"); } // Print Matrix (for debug) void printMatrix(long *A, const int nx, const int ny) { long *ia = A; for (int iy = 0; iy < ny; iy++) { for (int ix = 0; ix < nx; ix++) { printf("%d ", ia[ix]); } printf("\n"); ia += nx; } return; } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); // set up device int dev = 0; cudaDeviceProp deviceProp; SAFE_CALL(cudaGetDeviceProperties(&deviceProp, dev), "Error device prop"); printf("Using Device %d: %s\n", dev, deviceProp.name); SAFE_CALL(cudaSetDevice(dev), "Error setting device"); // set up data size of matrix int nx = matrixSize; int ny = matrixSize; int nxy = nx * ny; int nBytes = nxy * sizeof(long); printf("Matrix size: nx %d ny %d\n", nx, ny); // malloc host memory long *h_A, *h_B, *hostRef, *gpuRef, *hostRefParallel; h_A = (long *)malloc(nBytes); h_B = (long *)malloc(nBytes); hostRef = (long *)malloc(nBytes); gpuRef = (long *)malloc(nBytes); hostRefParallel = (long *)malloc(nBytes); // initialize data at host side initialData(h_A, nxy); initialData(h_B, nxy); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // malloc device global memory long *d_MatA, *d_MatB, *d_MatC; SAFE_CALL(cudaMalloc((void **)&d_MatA, nBytes), "Error allocating d_MatA"); SAFE_CALL(cudaMalloc((void **)&d_MatB, nBytes), "Error allocating d_MatB"); SAFE_CALL(cudaMalloc((void **)&d_MatC, nBytes), "Error allocating d_MatC"); // transfer data from host to device SAFE_CALL(cudaMemcpy(d_MatA, h_A, nBytes, cudaMemcpyHostToDevice), "Error copying d_MatA"); 
SAFE_CALL(cudaMemcpy(d_MatB, h_B, nBytes, cudaMemcpyHostToDevice), "Error copying d_MatB"); // invoke kernel at host side dim3 block(8, 16); dim3 grid(256, 256); //dim3 grid((nx + block.x - 1) / block.x, (nx + block.y - 1) / block.y); // GPU multiplication auto start_cpu = chrono::high_resolution_clock::now(); multMatrixGPU << <grid, block >> > (d_MatA, d_MatB, d_MatC, nx, ny); SAFE_CALL(cudaDeviceSynchronize(), "Error executing kernel"); auto end_cpu = chrono::high_resolution_clock::now(); chrono::duration<float, std::milli> duration_ms = end_cpu - start_cpu; printf("multMatrixGPU <<<(%d,%d), (%d,%d)>>> elapsed %f ms\n", grid.x, grid.y, block.x, block.y, duration_ms.count()); // SAFE_CALL kernel error SAFE_CALL(cudaGetLastError(), "Error with last error"); // copy kernel result back to host side SAFE_CALL(cudaMemcpy(gpuRef, d_MatC, nBytes, cudaMemcpyDeviceToHost), "Error copying d_MatC"); // CPU parallel multiplication start_cpu = chrono::high_resolution_clock::now(); multMatrixCPUParallel(h_A, h_B, hostRefParallel, nx, ny); end_cpu = chrono::high_resolution_clock::now(); duration_ms = end_cpu - start_cpu; printf("multMatrixCPUParallel elapsed %f ms\n", duration_ms.count()); // check CPU parallel against GPU results cout << "\n" << "\n"; cout << "check CPU parallel against GPU results: "; checkResult(hostRefParallel, gpuRef, nxy); // CPU multiplication start_cpu = chrono::high_resolution_clock::now(); multMatrixCPU(h_A, h_B, hostRef, nx, ny); end_cpu = chrono::high_resolution_clock::now(); duration_ms = end_cpu - start_cpu; printf("multMatrixCPU elapsed %f ms\n", duration_ms.count()); cout << "\n" << "\n"; // check CPU against GPU results cout << "check CPU against GPU results: "; checkResult(hostRef, gpuRef, nxy); // check both CPU results cout << "check CPU parallel against CPU results: "; checkResult(hostRef, hostRefParallel, nxy); // Print Matrices // printMatrix(hostRef, nx, ny); // printf("\n"); // printMatrix(gpuRef, nx, ny); // printf("\n"); // printMatrix(hostRefParallel, nx, ny); // free device global memory SAFE_CALL(cudaFree(d_MatA), "Error freeing memory"); SAFE_CALL(cudaFree(d_MatB), "Error freeing memory"); SAFE_CALL(cudaFree(d_MatC), "Error freeing memory"); // free host memory free(h_A); free(h_B); free(hostRef); free(gpuRef); free(hostRefParallel); // reset device SAFE_CALL(cudaDeviceReset(), "Error reseting"); return (0); }
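Both halves of this matrix-multiply pair launch with a hard-coded dim3 grid(256, 256) and an 8x16 block, i.e. a 2048 x 4096 thread lattice for a 1000 x 1000 matrix; the kernel's (ix < nx && iy < ny) guard discards the overhang, so the result is correct but many threads do no work. The commented-out line in the source derives the grid from the matrix size instead; a minimal sketch of that calculation, written with nx and ny handled separately (illustration only, the helper name is not from the file):

#include <cuda_runtime.h>

// Round the grid up so every matrix element is covered by exactly one thread;
// the kernel's bounds check handles the partial blocks at the right/bottom edges.
static dim3 makeGrid(int nx, int ny, dim3 block)
{
    return dim3((nx + block.x - 1) / block.x,
                (ny + block.y - 1) / block.y);
}

// e.g.  dim3 block(8, 16);  dim3 grid = makeGrid(matrixSize, matrixSize, block);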
eff11e11a3bf594c2933349de7673e952b0a37b2.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "gpu_stencil37_hack1_cp_slices.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *dst = NULL; hipMalloc(&dst, XSIZE*YSIZE); double *shared_rows = NULL; hipMalloc(&shared_rows, XSIZE*YSIZE); double *shared_cols = NULL; hipMalloc(&shared_cols, XSIZE*YSIZE); double *shared_slices = NULL; hipMalloc(&shared_slices, XSIZE*YSIZE); uint64_t n_rows = 1; uint64_t n_cols = 1; uint64_t n_slices = 1; int tile_x = 1; int tile_y = 1; int tile_z = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( gpu_stencil37_hack1_cp_slices), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,shared_rows,shared_cols,shared_slices,n_rows,n_cols,n_slices,tile_x,tile_y,tile_z); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( gpu_stencil37_hack1_cp_slices), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,shared_rows,shared_cols,shared_slices,n_rows,n_cols,n_slices,tile_x,tile_y,tile_z); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( gpu_stencil37_hack1_cp_slices), dim3(gridBlock),dim3(threadBlock), 0, 0, dst,shared_rows,shared_cols,shared_slices,n_rows,n_cols,n_slices,tile_x,tile_y,tile_z); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
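A note on the allocations in this generated benchmark harness (both halves of the pair): the buffers are declared double* but allocated with hipMalloc(&dst, XSIZE*YSIZE) / cudaMalloc(&dst, XSIZE*YSIZE), which requests XSIZE*YSIZE bytes rather than XSIZE*YSIZE doubles. Whether that matters in practice depends on how much memory gpu_stencil37_hack1_cp_slices actually touches (its source is included by the file but not shown here), so this is flagged as an observation only; a size-correct allocation would look like the sketch below.

// Size-correct allocation sketch (illustration only, not a patch to the record):
double *dst = NULL;
hipMalloc(&dst, (size_t)XSIZE * YSIZE * sizeof(double));   // bytes = element count * sizeof(double)
// (the CUDA half would use cudaMalloc with the same size expression)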
eff11e11a3bf594c2933349de7673e952b0a37b2.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "gpu_stencil37_hack1_cp_slices.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *dst = NULL; cudaMalloc(&dst, XSIZE*YSIZE); double *shared_rows = NULL; cudaMalloc(&shared_rows, XSIZE*YSIZE); double *shared_cols = NULL; cudaMalloc(&shared_cols, XSIZE*YSIZE); double *shared_slices = NULL; cudaMalloc(&shared_slices, XSIZE*YSIZE); uint64_t n_rows = 1; uint64_t n_cols = 1; uint64_t n_slices = 1; int tile_x = 1; int tile_y = 1; int tile_z = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); gpu_stencil37_hack1_cp_slices<<<gridBlock,threadBlock>>>(dst,shared_rows,shared_cols,shared_slices,n_rows,n_cols,n_slices,tile_x,tile_y,tile_z); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { gpu_stencil37_hack1_cp_slices<<<gridBlock,threadBlock>>>(dst,shared_rows,shared_cols,shared_slices,n_rows,n_cols,n_slices,tile_x,tile_y,tile_z); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { gpu_stencil37_hack1_cp_slices<<<gridBlock,threadBlock>>>(dst,shared_rows,shared_cols,shared_slices,n_rows,n_cols,n_slices,tile_x,tile_y,tile_z); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
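Both halves of this benchmark pair time the 1000-iteration launch loop with std::chrono but take the end timestamp without synchronizing first; since kernel launches are asynchronous, the reported microseconds reflect enqueue time plus however much queued work happened to complete, not necessarily the kernels' full execution time. A device-event variant of the same measurement, reusing the variables already defined in the file, might look like the sketch below (illustration only).

cudaEvent_t t0, t1;
cudaEventCreate(&t0);
cudaEventCreate(&t1);
cudaEventRecord(t0);
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    gpu_stencil37_hack1_cp_slices<<<gridBlock, threadBlock>>>(dst, shared_rows, shared_cols,
        shared_slices, n_rows, n_cols, n_slices, tile_x, tile_y, tile_z);
}
cudaEventRecord(t1);
cudaEventSynchronize(t1);            // wait until all 1000 launches have completed
float ms = 0.0f;
cudaEventElapsedTime(&ms, t0, t1);   // elapsed device time in milliseconds
cudaEventDestroy(t0);
cudaEventDestroy(t1);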
5fac8d4f0b45bd81d5d217db91eeab70be6ddc2d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 Note: [ds] precisions generated from zhemv.cu zsymv.cu is nearly identical to zhemv.cu, just change names and drop cuConj. @precisions normal z -> c @author Mark Gates */ #include "common_magma.h" #define PRECISION_z #define NB_X 64 #define NB_Y 4 #define bank_shift 33 #define quarter_NB_X 16 #define half_NB_X 32 /******************************************************************************* Lower case, compute block multiply, work = A*x, for any size n: [ A11*x1 A12*x2 A13*x3 ] [ A11 A12 A13 ] [ x1 ] [ --- (A21*x1 + A22*x2) A23*x3 ] = [ A21 A22 A23 ] * [ x2 ] [ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ] Uses a 64x4 thread block. For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed). For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles. In both cases, each thread multiplies 4 elements. For rows past the bottom of the matrix, the A pointer is adjusted to be the last valid row of A, which multiple threads will read. Extra rows are ignored when saving results to work. Columns past the right edge are explicitly ignored when loading. x values past the bottom are set to zero, thus, extra columns are zeroed when multiplying. ********************************************************************/ __global__ void zsymv_kernel_L( int n, const magmaDoubleComplex * __restrict__ A, int lda, const magmaDoubleComplex * __restrict__ x, int incx, magmaDoubleComplex * __restrict__ work) { #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200) // treats sA as 16x64 block #define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ] // treats sA as 32x32 block #define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)]) // 64x4 thread block const int tx = threadIdx.x; const int ty = threadIdx.y; const int blk = blockIdx.x; const int blk_ind = NB_X * blk; const int td = NB_X * ty + tx; // 32x8 thread block const int tx2 = td % half_NB_X; const int ty2 = td / half_NB_X; // If this blk has fewer than NB_X rows, partial is the number of valid rows, // so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid. // Else, partial == 0. const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0); magmaDoubleComplex psum, psum2; magmaDoubleComplex total = MAGMA_Z_ZERO; // sA is used as a 32x32 block, sA32(i,j), // and as a 16x64 block, sA16(i,j), in different parts of the code. // sA must be at least half_NB_X*bank_shift = 32x33 = 1056; // quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056 __shared__ magmaDoubleComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? 
*/ __shared__ magmaDoubleComplex sx [NB_X]; // for x[ blk ] __shared__ magmaDoubleComplex sx2[NB_X]; // for x[ blk2 ], which cycles over all blocks left of diag magmaDoubleComplex rA[4]; magmaDoubleComplex psums[4]; // -------------------- // load 64x1 block x(blk_ind + 0:63) into sx x += (blk_ind + tx)*incx; // x is x(blk_ind + tx) if ( ty == 0 ) { if ( partial && tx >= partial ) { sx[tx] = MAGMA_Z_ZERO; } else { sx[tx] = x[0]; } } // -------------------- // move to 32x32 diag block A += blk_ind * (lda + 1); // A is A(blk_ind, blk_ind) A += ty2*lda + tx2; // A is A(blk_ind + tx2, blk_ind + ty2) // load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - tx2 + (partial - 1); } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 >= partial ) { A = A + tx2 - (partial - 1); } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying lower to upper triangle, // as four 32x8 sections in parallel: // columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31 #pragma unroll for(int j=ty2*4; j < ty2*4 + 4; j++) { if ( j < tx2 ) sA32(j, tx2) = sA32(tx2, j); } __syncthreads(); // multiply 32x32 diag block * x // each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3) psum = MAGMA_Z_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx[ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 32x32 diag block, then repeat steps from first diag block A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2) // load 32x32 diag block A[block + 0:31, block + 0:31] into sA if ( partial ) { if ( tx2 + half_NB_X >= partial ) { A = A - (tx2 + half_NB_X) + (partial - 1); } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 + half_NB_X >= partial ) { A = A + (tx2 + half_NB_X) - (partial - 1); } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying lower to upper triangle #pragma unroll for(int j=ty2*4; j < ty2*4 + 4; j++) { if ( j < tx2 ) sA32(j, tx2) = sA32(tx2, j); } __syncthreads(); // multiply 32x32 diag block * x psum = MAGMA_Z_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to off-diag 32x32 block A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2) // load 32x32 block of A into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) 
{ if ( tx2 + half_NB_X >= partial ) { A = A - (tx2 + half_NB_X) + (partial - 1); } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 + half_NB_X >= partial ) { A = A + (tx2 + half_NB_X) - (partial - 1); } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // multiply 32x32 block (below diag) psum = MAGMA_Z_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += sA32(tx2, ty2 + j*8) * sx[j*8 + ty2]; } //__syncthreads(); // no sync needed here // multiply transposed 32x32 block (above diag) psum2 = MAGMA_Z_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum2 += sA32(ty2*4 + j, tx2) * sx[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial sums for non-transposed 32x32 block sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // store partial sums for transposed 32x32 block sA32(ty2, tx2) = psum2; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to left most 64x64 block in block row, and // switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2) A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind) A -= blk_ind*lda; // A is A(blk_ind, 0) A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty) if ( partial && tx >= partial ) { A = A - tx + (partial - 1); } x -= blk_ind * incx; // x is x(tx) // 16x16 thread block const int tx4 = td % quarter_NB_X; const int ty4 = td / quarter_NB_X; work += blk*lda + tx4; // work is work(tx4, blk) for(int blk2=0; blk2 < blk; ++blk2) { // load 64x1 block x(blk2_ind + 0:63) into sx2 // since this block is left of diagonal, x cannot be partial rows if ( ty == 0 ) { sx2[tx] = x[blk2*NB_X*incx]; } __syncthreads(); for( int k=0; k < 4; k++ ) { // load 64x16 block of A into rA, 4 elements per thread, // as four 64x4 sections in parallel: // columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15 // since this block is left of diagonal, it cannot be partial columns #pragma unroll for(int j=0; j < 4; j++) { rA[j] = A[j*lda]; } // 1) multiply 64x16 block A * x2 // each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k) // 2) multiply transposed 16x64 block A**H * x, // storing each product Aji*xi to sA(j,i) #pragma unroll for(int j=0; j < 4; j++) { total += rA[j] * sx2[quarter_NB_X*k + ty*4 + j]; sA16(ty*4 + j, tx) = rA[j] * sx[tx]; } __syncthreads(); // do partial row sums for transposed 16x64 result // use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty) // sum sixteen 16x4 sections in parallel: // columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63 psum2 = MAGMA_Z_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum2 += sA16(tx4, ty4*4 + j); } __syncthreads(); // store partial row sums (locally) psums[k] = psum2; // move to next 64x16 block A += lda * quarter_NB_X; // A is A(blk_ind + tx#, blk2*NB_x + k*NB_X/4 + 4*ty), # or partial } // store partial row sums #pragma unroll for(int k=0; k < 4; k++) { sA16(tx4, ty4 + quarter_NB_X*k) = 
psums[k]; } __syncthreads(); // sum up partial row sums and store final total to workspace // thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16 // since this is the transposed block above the diagonal, it cannot be partial rows if ( ty4 < 4 ) { int k = ty4*quarter_NB_X; psum2 = sA16(tx4, 0 + k) + sA16(tx4, 1 + k) + sA16(tx4, 2 + k) + sA16(tx4, 3 + k) + sA16(tx4, 4 + k) + sA16(tx4, 5 + k) + sA16(tx4, 6 + k) + sA16(tx4, 7 + k) + sA16(tx4, 8 + k) + sA16(tx4, 9 + k) + sA16(tx4, 10 + k) + sA16(tx4, 11 + k) + sA16(tx4, 12 + k) + sA16(tx4, 13 + k) + sA16(tx4, 14 + k) + sA16(tx4, 15 + k); work[blk2*NB_X + k] = psum2; // store at work( blk2*NB_X + tx4 + ty4*16, blk ) } __syncthreads(); } work -= tx4; // work is work(blk_ind) work += tx; // work is work(blk_ind + tx) // store row sums sA16(ty, tx) = total; __syncthreads(); // sum up final total for row tx if ( ty == 0 && (partial == 0 || tx < partial) ) { total = sA16(0, tx) + sA16(1, tx) + sA16(2, tx) + sA16(3, tx); work[blk*NB_X] = total; // store at work( blk*NB_X + tx, blk ) } #endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */ } /************************************************************** Lower case, sum up final results On input: [ A11*x1 A12*x2 A13*x3 ] work = [ --- (A21*x1 + A22*x2) A23*x3 ] [ --- --- (A31*x1 + A32*x2 + A33*x3) ] On output: [ A11*x1 + A12*x2 + A13*x3 ] y = alpha*[ A11*x1 + A22*x2 + A23*x3 ] + beta*y [ A21*x1 + A22*x2 + A33*x3 ] Previously: [ A11*x1 --- ] work = [ A12*x2 (A21*x1 + A22*x2) --- ] [ A13*x3 A23*x3 (A31*x1 + A32*x2 + A33*x3) ] which doesn't work as well because A13*x3 has 64 rows, while A31*x1 has only n % NB rows. This is why it used to need lwork = lda*(blocks + 1) instead of lda*blocks. ********************************************************************/ __global__ void zsymv_kernel_L_sum( int n, magmaDoubleComplex alpha, int lda, magmaDoubleComplex beta, magmaDoubleComplex * __restrict__ y, int incy, magmaDoubleComplex * __restrict__ work ) { int tx = threadIdx.x; int blk = blockIdx.x; int blk_ind = blk * NB_X; int ind = blk_ind + tx; if ( ind < n ) { work += ind + blk*lda; magmaDoubleComplex Ax = MAGMA_Z_ZERO; for(int i = blk_ind; i < n; i += NB_X) { Ax += work[0]; work += lda; } y[ind * incy] = beta*y[ind * incy] + alpha*Ax; } } /************************************************************** * Lower case, launch kernels */ extern "C" void magmablas_zsymv_L( magma_int_t n, magmaDoubleComplex alpha, const magmaDoubleComplex *A, magma_int_t lda, const magmaDoubleComplex *x, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex *y, magma_int_t incy, magmaDoubleComplex *dwork) { magma_int_t blocks = (n - 1)/NB_X + 1; dim3 grid( blocks, 1, 1 ); dim3 threads( NB_X, NB_Y, 1 ); hipLaunchKernelGGL(( zsymv_kernel_L), dim3(grid), dim3(threads), 0, magma_stream , n, A, lda, x, incx, dwork); dim3 threads_sum( NB_X, 1, 1 ); hipLaunchKernelGGL(( zsymv_kernel_L_sum), dim3(grid), dim3(threads_sum), 0, magma_stream , n, alpha, lda, beta, y, incy, dwork); } /** Purpose ------- magmablas_zsymv_work performs the matrix-vector operation: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n complex symmetric matrix. Arguments ---------- @param[in] uplo magma_uplo_t. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: - = MagmaUpper: Only the upper triangular part of A is to be referenced. - = MagmaLower: Only the lower triangular part of A is to be referenced. @param[in] n INTEGER. 
On entry, N specifies the order of the matrix A. N must be at least zero. @param[in] alpha COMPLEX*16. On entry, ALPHA specifies the scalar alpha. @param[in] A COMPLEX*16 array of DIMENSION ( LDA, n ). Before entry with UPLO = MagmaUpper, the leading n by n upper triangular part of the array A must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = MagmaLower, the leading n by n lower triangular part of the array A must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. @param[in] lda INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). It is recommended that lda is multiple of 16. Otherwise performance would be deteriorated as the memory accesses would not be fully coalescent. @param[in] x COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. @param[in] incx INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. @param[in] beta COMPLEX*16. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[in, out] y COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. @param[in] incy INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. @param[in] dwork (workspace) COMPLEX*16 array on the GPU, dimension (MAX(1, LWORK)), @param[in] lwork INTEGER. The dimension of the array DWORK. LWORK >= LDA * ceil( N / NB_X ), where NB_X = 64. MAGMA implements zsymv through two steps: 1) perform the multiplication in each thread block and put the intermediate value in dwork. 2) sum the intermediate values and store the final result in y. magamblas_zsymv_work requires users to provide a workspace, while magmablas_zsymv is a wrapper routine allocating the workspace inside the routine and provides the same interface as cublas. If users need to call zsymv frequently, we suggest using magmablas_zsymv_work instead of magmablas_zsymv. As the overhead to allocate and free in device memory in magmablas_zsymv would hurt performance. Our tests show that this penalty is about 10 Gflop/s when the matrix size is around 10000. @ingroup magma_zblas2 ********************************************************************/ extern "C" magma_int_t magmablas_zsymv_work( magma_uplo_t uplo, magma_int_t n, magmaDoubleComplex alpha, const magmaDoubleComplex *A, magma_int_t lda, const magmaDoubleComplex *x, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex *y, magma_int_t incy, magmaDoubleComplex *dwork, magma_int_t lwork) { #if defined(PRECISION_z) // z precision requires CUDA ARCH 2.x; call CUBLAS version instead. 
magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { //magma_zsymv( uplo, n, alpha, A, lda, x, incx, beta, y, incy ); //return MAGMA_SUCCESS; fprintf(stderr, "%s: %s\n", __func__, "not implemented on CUDA ARCH 1.x"); return MAGMA_ERR_NOT_SUPPORTED; } #endif // -------------------- // [sdc] precisions, or z precision with CUDA ARCH 2.x int upper = (uplo == MagmaUpper); magma_int_t blocks = (n - 1)/NB_X + 1; magma_int_t lwmin = lda*blocks; /* * Test the input parameters. */ magma_int_t info = 0; if ((! upper) && (uplo != MagmaLower)) { info = -1; } else if ( n < 0 ) { info = -2; } else if ( lda < max(1, n) ) { info = -5; } else if ( incx == 0 ) { info = -7; } else if ( incy == 0 ) { info = -10; } else if ( lwork < lwmin ) { info = -12; } if (info != 0) { magma_xerbla( __func__, -(info) ); return info; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) ) return info; /* TODO: Upper case is not implemented in MAGMA */ if ( upper ) { //magma_zsymv( uplo, n, alpha, A, lda, x, incx, beta, y, incy); fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented"); info = MAGMA_ERR_NOT_SUPPORTED; } else { magmablas_zsymv_L(n, alpha, A, lda, x, incx, beta, y, incy, dwork); } return info; } /** Purpose ------- magmablas_zsymv performs the matrix-vector operation: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n complex symmetric matrix. Arguments ---------- @param[in] uplo magma_uplo_t. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: - = MagmaUpper: Only the upper triangular part of A is to be referenced. - = MagmaLower: Only the lower triangular part of A is to be referenced. @param[in] n INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. @param[in] alpha COMPLEX*16. On entry, ALPHA specifies the scalar alpha. @param[in] A COMPLEX*16 array of DIMENSION ( LDA, n ). Before entry with UPLO = MagmaUpper, the leading n by n upper triangular part of the array A must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = MagmaLower, the leading n by n lower triangular part of the array A must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. @param[in] lda INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). It is recommended that lda is multiple of 16. Otherwise performance would be deteriorated as the memory accesses would not be fully coalescent. @param[in] x COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. @param[in] incx INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. @param[in] beta COMPLEX*16. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[in, out] y COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. @param[in] incy INTEGER. 
On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. @ingroup magma_zblas2 ********************************************************************/ extern "C" magma_int_t magmablas_zsymv( magma_uplo_t uplo, magma_int_t n, magmaDoubleComplex alpha, const magmaDoubleComplex *A, magma_int_t lda, const magmaDoubleComplex *x, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex *y, magma_int_t incy) { #if defined(PRECISION_z) // z precision requires CUDA ARCH 2.x; call CUBLAS version instead. magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { //magma_zsymv( uplo, n, alpha, A, lda, x, incx, beta, y, incy ); //return MAGMA_SUCCESS; fprintf(stderr, "%s: %s\n", __func__, "not implemented on CUDA ARCH 1.x"); return MAGMA_ERR_NOT_SUPPORTED; } #endif // -------------------- // [sdc] precisions, or z precision with CUDA ARCH 2.x int upper = (uplo == MagmaUpper); /* * Test the input parameters. */ magma_int_t info = 0; if ((! upper) && (uplo != MagmaLower)) { info = -1; } else if ( n < 0 ) { info = -2; } else if ( lda < max(1, n) ) { info = -5; } else if ( incx == 0 ) { info = -7; } else if ( incy == 0 ) { info = -10; } if (info != 0) { magma_xerbla( __func__, -(info) ); return info; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) ) return info; /* TODO: Upper case is not implemented in MAGMA */ if ( upper ) { //magma_zsymv( uplo, n, alpha, A, lda, x, incx, beta, y, incy); fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented"); info = MAGMA_ERR_NOT_SUPPORTED; } else { magmaDoubleComplex *dwork; magma_int_t blocks = (n - 1)/NB_X + 1; magma_int_t lwork = lda*blocks; // TODO deal with error magma_zmalloc( &dwork, lwork ); magmablas_zsymv_L(n, alpha, A, lda, x, incx, beta, y, incy, dwork); magma_free( dwork ); } return info; }
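The zsymv file above documents the workspace contract for magmablas_zsymv_work: lwork must be at least lda * ceil(n / NB_X) with NB_X = 64, and the plain magmablas_zsymv wrapper simply allocates that buffer itself before delegating. A minimal host-side sketch of sizing and supplying the workspace, using only names that appear in this file (the sizes are made-up example values, and surrounding MAGMA setup/teardown is omitted):

magma_int_t n = 1000, lda = 1024;               // example sizes (assumed)
magma_int_t blocks = (n - 1) / 64 + 1;          // ceil(n / NB_X) with NB_X = 64
magma_int_t lwork  = lda * blocks;
magmaDoubleComplex *dwork;
magma_zmalloc(&dwork, lwork);                   // device workspace, exactly as the wrapper does
// info = magmablas_zsymv_work(MagmaLower, n, alpha, dA, lda, dx, 1, beta, dy, 1, dwork, lwork);
magma_free(dwork);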
5fac8d4f0b45bd81d5d217db91eeab70be6ddc2d.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 Note: [ds] precisions generated from zhemv.cu zsymv.cu is nearly identical to zhemv.cu, just change names and drop cuConj. @precisions normal z -> c @author Mark Gates */ #include "common_magma.h" #define PRECISION_z #define NB_X 64 #define NB_Y 4 #define bank_shift 33 #define quarter_NB_X 16 #define half_NB_X 32 /******************************************************************************* Lower case, compute block multiply, work = A*x, for any size n: [ A11*x1 A12*x2 A13*x3 ] [ A11 A12 A13 ] [ x1 ] [ --- (A21*x1 + A22*x2) A23*x3 ] = [ A21 A22 A23 ] * [ x2 ] [ --- --- (A31*x1 + A32*x2 + A33*x3) ] [ A31 A32 A33 ] [ x3 ] Uses a 64x4 thread block. For diagonal tiles, covers a 64x64 tile using three 32x32 tiles (plus one gets transposed). For off-diagonal tiles, covers a 64x64 tile using four 64x16 tiles. In both cases, each thread multiplies 4 elements. For rows past the bottom of the matrix, the A pointer is adjusted to be the last valid row of A, which multiple threads will read. Extra rows are ignored when saving results to work. Columns past the right edge are explicitly ignored when loading. x values past the bottom are set to zero, thus, extra columns are zeroed when multiplying. ********************************************************************/ __global__ void zsymv_kernel_L( int n, const magmaDoubleComplex * __restrict__ A, int lda, const magmaDoubleComplex * __restrict__ x, int incx, magmaDoubleComplex * __restrict__ work) { #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) || (__CUDA_ARCH__ >= 200) // treats sA as 16x64 block #define sA16(i_, j_) (sA[(i_)][(j_)]) // i.e., sA[ (i_)*(NB_X+3) + (j_) ] // treats sA as 32x32 block #define sA32(i_, j_) (sA[0][(i_) + bank_shift*(j_)]) // 64x4 thread block const int tx = threadIdx.x; const int ty = threadIdx.y; const int blk = blockIdx.x; const int blk_ind = NB_X * blk; const int td = NB_X * ty + tx; // 32x8 thread block const int tx2 = td % half_NB_X; const int ty2 = td / half_NB_X; // If this blk has fewer than NB_X rows, partial is the number of valid rows, // so tx = 0, ..., partial-1 are valid rows, and tx >= partial are invalid. // Else, partial == 0. const int partial = (blk == gridDim.x - 1 ? (n % NB_X) : 0); magmaDoubleComplex psum, psum2; magmaDoubleComplex total = MAGMA_Z_ZERO; // sA is used as a 32x32 block, sA32(i,j), // and as a 16x64 block, sA16(i,j), in different parts of the code. // sA must be at least half_NB_X*bank_shift = 32x33 = 1056; // quarter_NB_X*(NB_X + 2) = 16*(64 + 2) = 1056 __shared__ magmaDoubleComplex sA [quarter_NB_X][NB_X + 3]; /* Why +3? seems it only needs +2. Does +3 reduce bank conflicts? 
*/ __shared__ magmaDoubleComplex sx [NB_X]; // for x[ blk ] __shared__ magmaDoubleComplex sx2[NB_X]; // for x[ blk2 ], which cycles over all blocks left of diag magmaDoubleComplex rA[4]; magmaDoubleComplex psums[4]; // -------------------- // load 64x1 block x(blk_ind + 0:63) into sx x += (blk_ind + tx)*incx; // x is x(blk_ind + tx) if ( ty == 0 ) { if ( partial && tx >= partial ) { sx[tx] = MAGMA_Z_ZERO; } else { sx[tx] = x[0]; } } // -------------------- // move to 32x32 diag block A += blk_ind * (lda + 1); // A is A(blk_ind, blk_ind) A += ty2*lda + tx2; // A is A(blk_ind + tx2, blk_ind + ty2) // load 32x32 diag block A(blk_ind + 0:31, blk_ind + 0:31) into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) { if ( tx2 >= partial ) { A = A - tx2 + (partial - 1); } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 >= partial ) { A = A + tx2 - (partial - 1); } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying lower to upper triangle, // as four 32x8 sections in parallel: // columns 0,4,8,12,16,20,24,28; then 1,5,...,29; then 2,6,...,30, then 3,7,...,31 #pragma unroll for(int j=ty2*4; j < ty2*4 + 4; j++) { if ( j < tx2 ) sA32(j, tx2) = sA32(tx2, j); } __syncthreads(); // multiply 32x32 diag block * x // each thread does partial row sA(tx2, ty2*4 : ty2*4 + 3) psum = MAGMA_Z_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx[ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to next 32x32 diag block, then repeat steps from first diag block A += half_NB_X + half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + NB/2 + ty2) // load 32x32 diag block A[block + 0:31, block + 0:31] into sA if ( partial ) { if ( tx2 + half_NB_X >= partial ) { A = A - (tx2 + half_NB_X) + (partial - 1); } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j + half_NB_X < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 + half_NB_X >= partial ) { A = A + (tx2 + half_NB_X) - (partial - 1); } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // symmetrize 32x32 diag block, copying lower to upper triangle #pragma unroll for(int j=ty2*4; j < ty2*4 + 4; j++) { if ( j < tx2 ) sA32(j, tx2) = sA32(tx2, j); } __syncthreads(); // multiply 32x32 diag block * x psum = MAGMA_Z_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += sA32(tx2, ty2*4 + j) * sx[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial row sums sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to off-diag 32x32 block A -= half_NB_X*lda; // A is A(blk_ind + NB/2 + tx2, blk_ind + ty2) // load 32x32 block of A into sA, // as four 32x8 sections one after another: // columns 0:7, then 8:15, then 16:23, then 24:31 if ( partial ) 
{ if ( tx2 + half_NB_X >= partial ) { A = A - (tx2 + half_NB_X) + (partial - 1); } #pragma unroll for(int j=0; j < half_NB_X; j += 8) { if ( ty2+j < partial ) { sA32(tx2, ty2 + j) = A[j*lda]; } } if ( tx2 + half_NB_X >= partial ) { A = A + (tx2 + half_NB_X) - (partial - 1); } } else { #pragma unroll for(int j=0; j < half_NB_X; j += 8) { sA32(tx2, ty2 + j) = A[j*lda]; } } __syncthreads(); // multiply 32x32 block (below diag) psum = MAGMA_Z_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum += sA32(tx2, ty2 + j*8) * sx[j*8 + ty2]; } //__syncthreads(); // no sync needed here // multiply transposed 32x32 block (above diag) psum2 = MAGMA_Z_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum2 += sA32(ty2*4 + j, tx2) * sx[half_NB_X + ty2*4 + j]; } __syncthreads(); // store partial sums for non-transposed 32x32 block sA32(ty2, tx2) = psum; __syncthreads(); // sum up partial row sums, so thread (tx2,1) has total for row (blk_ind + NB/2 + tx2) if ( ty2 == 1 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // store partial sums for transposed 32x32 block sA32(ty2, tx2) = psum2; __syncthreads(); // sum up partial row sums, so thread (tx2,0) has total for row (blk_ind + tx2) if ( ty2 == 0 ) { total = total + sA32(0, tx2) + sA32(1, tx2) + sA32(2, tx2) + sA32(3, tx2) + sA32(4, tx2) + sA32(5, tx2) + sA32(6, tx2) + sA32(7, tx2); } __syncthreads(); // -------------------- // move to left most 64x64 block in block row, and // switch thread offset from (tx2,ty2) 32x8 block to (tx,ty) 64x4 block A -= half_NB_X; // A is A(blk_ind + tx2, blk_ind + ty2) A -= ty2*lda + tx2; // A is A(blk_ind, blk_ind) A -= blk_ind*lda; // A is A(blk_ind, 0) A += 4*ty*lda + tx; // A is A(blk_ind + tx, 4*ty) if ( partial && tx >= partial ) { A = A - tx + (partial - 1); } x -= blk_ind * incx; // x is x(tx) // 16x16 thread block const int tx4 = td % quarter_NB_X; const int ty4 = td / quarter_NB_X; work += blk*lda + tx4; // work is work(tx4, blk) for(int blk2=0; blk2 < blk; ++blk2) { // load 64x1 block x(blk2_ind + 0:63) into sx2 // since this block is left of diagonal, x cannot be partial rows if ( ty == 0 ) { sx2[tx] = x[blk2*NB_X*incx]; } __syncthreads(); for( int k=0; k < 4; k++ ) { // load 64x16 block of A into rA, 4 elements per thread, // as four 64x4 sections in parallel: // columns 0,4,8,12; then 1,5,9,13; then 2,6,10,14; then 3,7,11,15 // since this block is left of diagonal, it cannot be partial columns #pragma unroll for(int j=0; j < 4; j++) { rA[j] = A[j*lda]; } // 1) multiply 64x16 block A * x2 // each thread does partial row rA(tx + 16*k, ty*4 + 16*k : ty*4 + 3 + 16*k) // 2) multiply transposed 16x64 block A**H * x, // storing each product Aji*xi to sA(j,i) #pragma unroll for(int j=0; j < 4; j++) { total += rA[j] * sx2[quarter_NB_X*k + ty*4 + j]; sA16(ty*4 + j, tx) = rA[j] * sx[tx]; } __syncthreads(); // do partial row sums for transposed 16x64 result // use 16x16 thread grid (tx4, ty4) instead of 64x4 (tx, ty) // sum sixteen 16x4 sections in parallel: // columns 0,4,8,...,60; then 1,5,...,61; then 2,6,...,62; then 3,7,...,63 psum2 = MAGMA_Z_ZERO; #pragma unroll for(int j=0; j < 4; j++) { psum2 += sA16(tx4, ty4*4 + j); } __syncthreads(); // store partial row sums (locally) psums[k] = psum2; // move to next 64x16 block A += lda * quarter_NB_X; // A is A(blk_ind + tx#, blk2*NB_x + k*NB_X/4 + 4*ty), # or partial } // store partial row sums #pragma unroll for(int k=0; k < 4; k++) { sA16(tx4, ty4 + quarter_NB_X*k) = 
psums[k]; } __syncthreads(); // sum up partial row sums and store final total to workspace // thread (tx4,ty4) where ty4 < 4 sums row tx4 + ty4*16 // since this is the transposed block above the diagonal, it cannot be partial rows if ( ty4 < 4 ) { int k = ty4*quarter_NB_X; psum2 = sA16(tx4, 0 + k) + sA16(tx4, 1 + k) + sA16(tx4, 2 + k) + sA16(tx4, 3 + k) + sA16(tx4, 4 + k) + sA16(tx4, 5 + k) + sA16(tx4, 6 + k) + sA16(tx4, 7 + k) + sA16(tx4, 8 + k) + sA16(tx4, 9 + k) + sA16(tx4, 10 + k) + sA16(tx4, 11 + k) + sA16(tx4, 12 + k) + sA16(tx4, 13 + k) + sA16(tx4, 14 + k) + sA16(tx4, 15 + k); work[blk2*NB_X + k] = psum2; // store at work( blk2*NB_X + tx4 + ty4*16, blk ) } __syncthreads(); } work -= tx4; // work is work(blk_ind) work += tx; // work is work(blk_ind + tx) // store row sums sA16(ty, tx) = total; __syncthreads(); // sum up final total for row tx if ( ty == 0 && (partial == 0 || tx < partial) ) { total = sA16(0, tx) + sA16(1, tx) + sA16(2, tx) + sA16(3, tx); work[blk*NB_X] = total; // store at work( blk*NB_X + tx, blk ) } #endif /* PRECISION_[sdc] || (__CUDA_ARCH__ >= 200) */ } /************************************************************** Lower case, sum up final results On input: [ A11*x1 A12*x2 A13*x3 ] work = [ --- (A21*x1 + A22*x2) A23*x3 ] [ --- --- (A31*x1 + A32*x2 + A33*x3) ] On output: [ A11*x1 + A12*x2 + A13*x3 ] y = alpha*[ A11*x1 + A22*x2 + A23*x3 ] + beta*y [ A21*x1 + A22*x2 + A33*x3 ] Previously: [ A11*x1 --- ] work = [ A12*x2 (A21*x1 + A22*x2) --- ] [ A13*x3 A23*x3 (A31*x1 + A32*x2 + A33*x3) ] which doesn't work as well because A13*x3 has 64 rows, while A31*x1 has only n % NB rows. This is why it used to need lwork = lda*(blocks + 1) instead of lda*blocks. ********************************************************************/ __global__ void zsymv_kernel_L_sum( int n, magmaDoubleComplex alpha, int lda, magmaDoubleComplex beta, magmaDoubleComplex * __restrict__ y, int incy, magmaDoubleComplex * __restrict__ work ) { int tx = threadIdx.x; int blk = blockIdx.x; int blk_ind = blk * NB_X; int ind = blk_ind + tx; if ( ind < n ) { work += ind + blk*lda; magmaDoubleComplex Ax = MAGMA_Z_ZERO; for(int i = blk_ind; i < n; i += NB_X) { Ax += work[0]; work += lda; } y[ind * incy] = beta*y[ind * incy] + alpha*Ax; } } /************************************************************** * Lower case, launch kernels */ extern "C" void magmablas_zsymv_L( magma_int_t n, magmaDoubleComplex alpha, const magmaDoubleComplex *A, magma_int_t lda, const magmaDoubleComplex *x, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex *y, magma_int_t incy, magmaDoubleComplex *dwork) { magma_int_t blocks = (n - 1)/NB_X + 1; dim3 grid( blocks, 1, 1 ); dim3 threads( NB_X, NB_Y, 1 ); zsymv_kernel_L<<< grid, threads, 0, magma_stream >>> (n, A, lda, x, incx, dwork); dim3 threads_sum( NB_X, 1, 1 ); zsymv_kernel_L_sum<<< grid, threads_sum, 0, magma_stream >>> (n, alpha, lda, beta, y, incy, dwork); } /** Purpose ------- magmablas_zsymv_work performs the matrix-vector operation: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n complex symmetric matrix. Arguments ---------- @param[in] uplo magma_uplo_t. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: - = MagmaUpper: Only the upper triangular part of A is to be referenced. - = MagmaLower: Only the lower triangular part of A is to be referenced. @param[in] n INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. 
@param[in] alpha COMPLEX*16. On entry, ALPHA specifies the scalar alpha. @param[in] A COMPLEX*16 array of DIMENSION ( LDA, n ). Before entry with UPLO = MagmaUpper, the leading n by n upper triangular part of the array A must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = MagmaLower, the leading n by n lower triangular part of the array A must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. @param[in] lda INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). It is recommended that lda is multiple of 16. Otherwise performance would be deteriorated as the memory accesses would not be fully coalescent. @param[in] x COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. @param[in] incx INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. @param[in] beta COMPLEX*16. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[in, out] y COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. @param[in] incy INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. @param[in] dwork (workspace) COMPLEX*16 array on the GPU, dimension (MAX(1, LWORK)), @param[in] lwork INTEGER. The dimension of the array DWORK. LWORK >= LDA * ceil( N / NB_X ), where NB_X = 64. MAGMA implements zsymv through two steps: 1) perform the multiplication in each thread block and put the intermediate value in dwork. 2) sum the intermediate values and store the final result in y. magamblas_zsymv_work requires users to provide a workspace, while magmablas_zsymv is a wrapper routine allocating the workspace inside the routine and provides the same interface as cublas. If users need to call zsymv frequently, we suggest using magmablas_zsymv_work instead of magmablas_zsymv. As the overhead to allocate and free in device memory in magmablas_zsymv would hurt performance. Our tests show that this penalty is about 10 Gflop/s when the matrix size is around 10000. @ingroup magma_zblas2 ********************************************************************/ extern "C" magma_int_t magmablas_zsymv_work( magma_uplo_t uplo, magma_int_t n, magmaDoubleComplex alpha, const magmaDoubleComplex *A, magma_int_t lda, const magmaDoubleComplex *x, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex *y, magma_int_t incy, magmaDoubleComplex *dwork, magma_int_t lwork) { #if defined(PRECISION_z) // z precision requires CUDA ARCH 2.x; call CUBLAS version instead. magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { //magma_zsymv( uplo, n, alpha, A, lda, x, incx, beta, y, incy ); //return MAGMA_SUCCESS; fprintf(stderr, "%s: %s\n", __func__, "not implemented on CUDA ARCH 1.x"); return MAGMA_ERR_NOT_SUPPORTED; } #endif // -------------------- // [sdc] precisions, or z precision with CUDA ARCH 2.x int upper = (uplo == MagmaUpper); magma_int_t blocks = (n - 1)/NB_X + 1; magma_int_t lwmin = lda*blocks; /* * Test the input parameters. 
*/ magma_int_t info = 0; if ((! upper) && (uplo != MagmaLower)) { info = -1; } else if ( n < 0 ) { info = -2; } else if ( lda < max(1, n) ) { info = -5; } else if ( incx == 0 ) { info = -7; } else if ( incy == 0 ) { info = -10; } else if ( lwork < lwmin ) { info = -12; } if (info != 0) { magma_xerbla( __func__, -(info) ); return info; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) ) return info; /* TODO: Upper case is not implemented in MAGMA */ if ( upper ) { //magma_zsymv( uplo, n, alpha, A, lda, x, incx, beta, y, incy); fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented"); info = MAGMA_ERR_NOT_SUPPORTED; } else { magmablas_zsymv_L(n, alpha, A, lda, x, incx, beta, y, incy, dwork); } return info; } /** Purpose ------- magmablas_zsymv performs the matrix-vector operation: y := alpha*A*x + beta*y, where alpha and beta are scalars, x and y are n element vectors and A is an n by n complex symmetric matrix. Arguments ---------- @param[in] uplo magma_uplo_t. On entry, UPLO specifies whether the upper or lower triangular part of the array A is to be referenced as follows: - = MagmaUpper: Only the upper triangular part of A is to be referenced. - = MagmaLower: Only the lower triangular part of A is to be referenced. @param[in] n INTEGER. On entry, N specifies the order of the matrix A. N must be at least zero. @param[in] alpha COMPLEX*16. On entry, ALPHA specifies the scalar alpha. @param[in] A COMPLEX*16 array of DIMENSION ( LDA, n ). Before entry with UPLO = MagmaUpper, the leading n by n upper triangular part of the array A must contain the upper triangular part of the symmetric matrix and the strictly lower triangular part of A is not referenced. Before entry with UPLO = MagmaLower, the leading n by n lower triangular part of the array A must contain the lower triangular part of the symmetric matrix and the strictly upper triangular part of A is not referenced. Note that the imaginary parts of the diagonal elements need not be set and are assumed to be zero. @param[in] lda INTEGER. On entry, LDA specifies the first dimension of A as declared in the calling (sub) program. LDA must be at least max( 1, n ). It is recommended that lda is multiple of 16. Otherwise performance would be deteriorated as the memory accesses would not be fully coalescent. @param[in] x COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCX ) ). Before entry, the incremented array X must contain the n element vector x. @param[in] incx INTEGER. On entry, INCX specifies the increment for the elements of X. INCX must not be zero. @param[in] beta COMPLEX*16. On entry, BETA specifies the scalar beta. When BETA is supplied as zero then Y need not be set on input. @param[in, out] y COMPLEX*16 array of dimension at least ( 1 + ( n - 1 )*abs( INCY ) ). Before entry, the incremented array Y must contain the n element vector y. On exit, Y is overwritten by the updated vector y. @param[in] incy INTEGER. On entry, INCY specifies the increment for the elements of Y. INCY must not be zero. @ingroup magma_zblas2 ********************************************************************/ extern "C" magma_int_t magmablas_zsymv( magma_uplo_t uplo, magma_int_t n, magmaDoubleComplex alpha, const magmaDoubleComplex *A, magma_int_t lda, const magmaDoubleComplex *x, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex *y, magma_int_t incy) { #if defined(PRECISION_z) // z precision requires CUDA ARCH 2.x; call CUBLAS version instead. 
magma_int_t arch = magma_getdevice_arch(); if ( arch < 200 ) { //magma_zsymv( uplo, n, alpha, A, lda, x, incx, beta, y, incy ); //return MAGMA_SUCCESS; fprintf(stderr, "%s: %s\n", __func__, "not implemented on CUDA ARCH 1.x"); return MAGMA_ERR_NOT_SUPPORTED; } #endif // -------------------- // [sdc] precisions, or z precision with CUDA ARCH 2.x int upper = (uplo == MagmaUpper); /* * Test the input parameters. */ magma_int_t info = 0; if ((! upper) && (uplo != MagmaLower)) { info = -1; } else if ( n < 0 ) { info = -2; } else if ( lda < max(1, n) ) { info = -5; } else if ( incx == 0 ) { info = -7; } else if ( incy == 0 ) { info = -10; } if (info != 0) { magma_xerbla( __func__, -(info) ); return info; } /* * Quick return if possible. */ if ( (n == 0) || ( MAGMA_Z_EQUAL(alpha, MAGMA_Z_ZERO) && MAGMA_Z_EQUAL(beta, MAGMA_Z_ONE) ) ) return info; /* TODO: Upper case is not implemented in MAGMA */ if ( upper ) { //magma_zsymv( uplo, n, alpha, A, lda, x, incx, beta, y, incy); fprintf(stderr, "%s: %s\n", __func__, "Upper case not implemented"); info = MAGMA_ERR_NOT_SUPPORTED; } else { magmaDoubleComplex *dwork; magma_int_t blocks = (n - 1)/NB_X + 1; magma_int_t lwork = lda*blocks; // TODO deal with error magma_zmalloc( &dwork, lwork ); magmablas_zsymv_L(n, alpha, A, lda, x, incx, beta, y, incy, dwork); magma_free( dwork ); } return info; }
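The documentation above recommends magmablas_zsymv_work when the operation is called repeatedly, so the workspace is allocated only once. A minimal sketch under that assumption; dA, dx, dy and the loop count niter are hypothetical placeholders supplied by the caller.

    // Hypothetical reuse pattern: lwork >= lda*ceil(n/NB_X) with NB_X = 64.
    magma_int_t blocks = (n - 1)/64 + 1;
    magma_int_t lwork  = ldda*blocks;
    magmaDoubleComplex *dwork;
    magma_zmalloc( &dwork, lwork );              // TODO: check for allocation failure
    for (int iter = 0; iter < niter; ++iter) {   // niter is illustrative
        magmablas_zsymv_work( MagmaLower, n,
                              MAGMA_Z_ONE,  dA, ldda,
                              dx, 1,
                              MAGMA_Z_ZERO, dy, 1,
                              dwork, lwork );
    }
    magma_free( dwork );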
ea2a749cfb67353d383ffc2db7fce6fc8f4f9ec6.hip
// !!! This is a file automatically generated by hipify!!! #include <math.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <hip/hip_runtime.h> #include <cusparse_v2.h> #include "rocblas.h" #include <hiprand/hiprand.h> #include <helper_functions.h> #include <helper_cuda.h> #include "mex.h" #include "kcDefs.h" //see for info on anything starting with KC_ #include "kcArrayFunctions.h" /* * log_p_y * log likelihood for a poisson * y = observed count * dt = length of observation */ //__device__ KC_FP_TYPE log_p_y( KC_FP_TYPE y, KC_FP_TYPE rate, KC_FP_TYPE dt, KC_FP_TYPE sh) { // return y*(KC_LOG(rate)+KC_LOG(dt)) - dt*rate;// - KC_GAMMALN(y+1) //} __device__ KC_FP_TYPE log_p_y(KC_FP_TYPE y, KC_FP_TYPE rate, KC_FP_TYPE dt, KC_FP_TYPE sh) { KC_FP_TYPE ex = rate*KC_MAX(KC_MIN(KC_EXP(sh),KC_MAXN),KC_MINN); return y*(KC_LOG(ex)+KC_LOG(dt)) - dt*ex - KC_GAMMALN(y+1.0); } /* kcSampleSMStates * kernel runs on each trial (not timebin) * outputs: * z = jump times per each trial * s = which state jumped to * sampleStats = (3,2,NT) array, spike counts observed in each hidden state (divided up by trial) * inputs * y = spike counts * trialIndex = index for y ( first spike count for trial i is y[trialIndex[i]] and the last spike count is y[trialIndex[i+1]-1] * y is indexed at 0. This array includes final value that should be length of y) * trialCoh = coherence level for each trial (coherence controls prior jump time distribution and jump to state probability) * coherence labels/indices begin at 0 instead of 1 to be consistent with C, unlike MATLAB * NT = number of trials * alpha = (3,1) array, spike rates * phi = (numCoherences,1) jump probabilities (p(s=3) = phi, p(s=2) = 1-phi), trial coherence dependent * delta_t = length of each timebin * maxJump = the longest to calculate out possible jump time values for * randU = (NT,1) array a set of uniform random numbers on [0,1] * * nbPDF = (maxJump,numberOfCoherences) array, negative binomial pdf values (up to some limit) for each of the parameters of coherences * * jumpToProbs = (maxJump*NT,2) preallocated space to do calculations over */ __global__ void kcSampleSMStates(KC_FP_TYPE * z, KC_FP_TYPE * s, KC_FP_TYPE * sampleStats, KC_FP_TYPE * y, int * trialIndex, int * trialCoh, int NT, KC_FP_TYPE * alphas, KC_FP_TYPE * phi, KC_FP_TYPE delta_t, int maxJump, KC_FP_TYPE * randU, KC_FP_TYPE * nbPDF, KC_FP_TYPE * jumpToProbs, KC_FP_TYPE * spe) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx < NT) { int T1 = trialIndex[idx]; int T = trialIndex[idx+1]-T1; //index in jumpToProbs for jumping to state 2 int jumpT1_2 = idx*(maxJump*2); //index in jumpToProbs for jumping to state 3 int jumpT1_3 = idx*(maxJump*2) + maxJump; int cohIndex = trialCoh[idx]*maxJump; KC_FP_TYPE p2 = (phi[trialCoh[idx]] < 1)?KC_LOG(1-phi[trialCoh[idx]]):0; KC_FP_TYPE p3 = KC_LOG(phi[trialCoh[idx]]); //calculate jump time probabilities for jump time happening within observed window (else model says jump happens after trial observations end) for(int ii = T-1; ii >= 0; ii--) { //taking a cumulative sum over p(y_{ii:end}|z=ii,s=2 or 3) jumpToProbs[jumpT1_2+ii] = ((ii < T-1)?(jumpToProbs[jumpT1_2+ii+1]):(0)) + log_p_y(y[T1+ii],alphas[1],delta_t,spe[T1+ii]) ; jumpToProbs[jumpT1_3+ii] = ((ii < T-1)?(jumpToProbs[jumpT1_3+ii+1]):(0)) + log_p_y(y[T1+ii],alphas[2],delta_t,spe[T1+ii]) ; } KC_FP_TYPE initStateCumsum = 0; KC_FP_TYPE maxLog = 0; for(int ii = 0; ii < maxJump; ii++) { // p (y_{1:t}|z==ii<=T), my comments are starting indexes at 1 while the code starts at 0 if(ii < T) { 
KC_FP_TYPE p_y_init = log_p_y(y[T1+ii],alphas[0],delta_t,spe[T1+ii]); initStateCumsum += p_y_init; if(ii < T-1) { jumpToProbs[jumpT1_2+ii+1] += initStateCumsum; jumpToProbs[jumpT1_3+ii+1] += initStateCumsum; } } else { jumpToProbs[jumpT1_2+ii] = initStateCumsum; jumpToProbs[jumpT1_3+ii] = initStateCumsum; } jumpToProbs[jumpT1_2+ii] = jumpToProbs[jumpT1_2+ii] + nbPDF[cohIndex+ii] + p2; jumpToProbs[jumpT1_3+ii] = jumpToProbs[jumpT1_3+ii] + nbPDF[cohIndex+ii] + p3; maxLog = KC_MAX(KC_MAX(maxLog,jumpToProbs[jumpT1_2+ii]),jumpToProbs[jumpT1_3+ii]); //maxLog = jumpToProbs[jumpT1_2+ii]+jumpToProbs[jumpT1_3+ii]; } //maxLog /= (maxJump*2.0); KC_FP_TYPE maxNumToExp = 8; KC_FP_TYPE minNumToExp = 2; KC_FP_TYPE extraConst = 0; //this helps numerical stability when going from log p to p (quick and dirty method) if(maxLog > maxNumToExp) { extraConst = maxLog-maxNumToExp; } else if(maxLog < minNumToExp) { extraConst = minNumToExp-maxLog; } KC_FP_TYPE totalProbCumsum = 0; for(int ii = 0; ii < maxJump; ii++) { jumpToProbs[jumpT1_3+ii] = KC_EXP(jumpToProbs[jumpT1_3+ii] + extraConst); if(phi[trialCoh[idx]] < 1.0) { jumpToProbs[jumpT1_2+ii] = KC_EXP(jumpToProbs[jumpT1_2+ii] + extraConst); totalProbCumsum += jumpToProbs[jumpT1_3+ii] + jumpToProbs[jumpT1_2+ii]; } else { totalProbCumsum += jumpToProbs[jumpT1_3+ii]; jumpToProbs[jumpT1_2+ii] = 0.0; } } //goes back through and finds a sampling time + sample to state KC_FP_TYPE post_cdf = 0; int switchFound = -1; int switchTime = 0; KC_FP_TYPE randn = randU[idx] * totalProbCumsum; for(int ii = 0; ii < maxJump && switchFound < 1; ii++) { post_cdf += jumpToProbs[jumpT1_2+ii]; if(post_cdf > randn && phi[trialCoh[idx]] < 1) { switchFound = 2; switchTime = ii; } else { post_cdf += jumpToProbs[jumpT1_3+ii]; if(post_cdf > randn) { switchFound = 3; switchTime = ii; } } } if(switchFound <= 0) { //just to make sure it doesn't crash switchFound = (KC_LOG(randU[idx])>p3)?2:3; switchTime = 101; } s[idx] = switchFound; z[idx] = switchTime; //sum up observed spike count info sampleStats[idx*6] = KC_MIN((KC_FP_TYPE)switchTime,(KC_FP_TYPE)T); sampleStats[idx*6+3] = 0; sampleStats[idx*6+4] = 0; sampleStats[idx*6+5] = 0; if(switchFound == 2) { sampleStats[idx*6+1] = ((KC_FP_TYPE)T)-sampleStats[idx*6] ; sampleStats[idx*6+2] = 0.0; for(int ii = 0; ii < T;ii++) { if(ii<switchTime) { sampleStats[idx*6+3] += y[T1+ii]; } else { sampleStats[idx*6+4] += y[T1+ii]; } } } else { sampleStats[idx*6+2] = ((KC_FP_TYPE)T)-sampleStats[idx*6] ; sampleStats[idx*6+1] = 0.0; for(int ii = 0; ii < T;ii++) { if(ii<switchTime) { sampleStats[idx*6+3] += y[T1+ii]; } else { sampleStats[idx*6+5] += y[T1+ii]; } } } } } /* * [SMSamples.z(:,ss) SMSamples.s(:,ss) SMSamples.spikeStats(:,:,ss)] = kcStepTimeSampler(gpu_y,gpu_trIndex,gpu_trCoh,SMSamples.alpha(:,ss-1),SMSamples.phi(:,ss-1),nbPDF,nbCDF,gpu_spe); * Inputs: * 0 = y (spikes) - one long vector of all the spike times for all trials (GPU array) * 1 = trial index - 0:end-1 are the trial start times (GPU array) * 2 = trial coherence - on GPU, coherence levels per each trial (GPU array) * 3 = alpha, firing rates per each state (MATLAB array) * 4 = phi, probability of switiching to state 3 for each coherence (MATLAB array) * 5 = nbPDF, negative binomial pdf values (up to some limit) for each of the parameters of coherences nbPDF(k,c) = P(z=k| p_c,r) (MATLAB array) * 6 = delta_t, length of each timebins * 7 = spe, spike history effect (GPU array) * * Outputs (all in MATLAB array form) * 0 = z, switching times per each trial, size (NT,1) * 1 = s, which state was 
switched to per each trial (either 2 or 3), size (NT,1) * 2 = spikeStats, summary statistics on how many spikes were fired per each state of the semi-markov model and how many observations per state, size (3,2) */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { //load up the GPU array inputs unsigned int TT = kcGetArrayNumEl(prhs[0]); KC_FP_TYPE * y = kcGetArrayData(prhs[0]); unsigned int NT = kcGetArrayNumEl(prhs[1])-1; int * trIndex = kcGetArrayDataInt(prhs[1]); int * cohIndex = kcGetArrayDataInt(prhs[2],NT); KC_FP_TYPE * spe = kcGetArrayData(prhs[7]); //put the precalculated negative binomial PDF, CDF values onto the GPU const mwSize * precalcSize = mxGetDimensions(prhs[5]); int maxJump = precalcSize[0]; int NC = precalcSize[1]; //mexPrintf("Sampling SM states. Max jump = %d, NC = %d, TT = %d, NT = %d\n",maxJump,NC,TT,NT); KC_FP_TYPE * nbPDF; checkCudaErrors(hipMalloc((void**)&nbPDF,sizeof(KC_FP_TYPE)*NC*maxJump)); checkCudaErrors(hipMemcpy(nbPDF,(KC_FP_TYPE*)mxGetPr(prhs[5]),sizeof(KC_FP_TYPE)*NC*maxJump,hipMemcpyHostToDevice)); KC_FP_TYPE dt = mxGetScalar(prhs[6]); //put model parameters onto the GPU KC_FP_TYPE * alphas; checkCudaErrors(hipMalloc((void**)&alphas,sizeof(KC_FP_TYPE)*3)); checkCudaErrors(hipMemcpy(alphas,(KC_FP_TYPE*)mxGetPr(prhs[3]),sizeof(KC_FP_TYPE)*3,hipMemcpyHostToDevice)); KC_FP_TYPE * phi; checkCudaErrors(hipMalloc((void**)&phi,sizeof(KC_FP_TYPE)*NC)); checkCudaErrors(hipMemcpy(phi,(KC_FP_TYPE*)mxGetPr(prhs[4]),sizeof(KC_FP_TYPE)*NC,hipMemcpyHostToDevice)); //setup space on GPU for sampling // z,s,sampleStats // log_post2 - size(TT,1) // log_post3 - size(TT,1) KC_FP_TYPE * log_post2; KC_FP_TYPE * log_post3; checkCudaErrors(hipMalloc((void**)&log_post2,sizeof(KC_FP_TYPE)*TT)); checkCudaErrors(hipMalloc((void**)&log_post3,sizeof(KC_FP_TYPE)*TT)); KC_FP_TYPE * z; checkCudaErrors(hipMalloc((void**)&z,sizeof(KC_FP_TYPE)*NT)); KC_FP_TYPE * s; checkCudaErrors(hipMalloc((void**)&s,sizeof(KC_FP_TYPE)*NT)); KC_FP_TYPE * sampleStats; checkCudaErrors(hipMalloc((void**)&sampleStats,sizeof(KC_FP_TYPE)*6*NT)); KC_FP_TYPE * calculationSpace; checkCudaErrors(hipMalloc((void**)&calculationSpace,sizeof(KC_FP_TYPE)*maxJump*NT*2)); //setup random number generator hiprandGenerator_t curandGen = 0; hiprandStatus_t hiprandStatus_t; hiprandStatus_t = hiprandCreateGenerator(&curandGen, HIPRAND_RNG_PSEUDO_DEFAULT); if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS ) { mexPrintf("CURAND-1 error %d\n",(int)hiprandStatus_t); mexErrMsgTxt("CUDA errors sampling semi markov "); } struct timeval now; gettimeofday(&now,NULL); unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec); hiprandStatus_t = hiprandSetPseudoRandomGeneratorSeed(curandGen, mySeed); if(hiprandStatus_t != HIPRAND_STATUS_SUCCESS ) { mexPrintf("CURAND-2 error %d\n",(int)hiprandStatus_t); mexErrMsgTxt("CUDA errors sampling semi markov"); } //generate a uniform random number set (size NT*2) KC_FP_TYPE * randU; int randSize = NT+((NT%2==0)?0:1); checkCudaErrors(hipMalloc((void**)&randU,sizeof(KC_FP_TYPE)*randSize)); hiprandStatus_t = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randU,randSize); hipDeviceSynchronize(); //sample the states hipLaunchKernelGGL(( kcSampleSMStates), dim3(NT),dim3(1), 0, 0, z, s, sampleStats, y, trIndex, cohIndex, NT, alphas, phi, dt, maxJump, randU, nbPDF, calculationSpace, spe); hipDeviceSynchronize(); //combine the sample stats KC_FP_TYPE * sampleStats_local; sampleStats_local = (KC_FP_TYPE*)malloc(sizeof(KC_FP_TYPE)*6*NT); 
checkCudaErrors(hipMemcpy((KC_FP_TYPE*)sampleStats_local,sampleStats,sizeof(KC_FP_TYPE)*6*NT,hipMemcpyDeviceToHost)); hipDeviceSynchronize(); plhs[2] = mxCreateNumericMatrix(3,2,KC_FP_TYPE_MATLAB,mxREAL); KC_FP_TYPE * sampleStats_sum = (KC_FP_TYPE*)mxGetPr(plhs[2]); for(int jj = 0; jj < 6; jj++) { sampleStats_sum[jj] = 0; for(int ii = 0; ii < NT; ii++) { sampleStats_sum[jj] += sampleStats_local[ii*6 + jj]; } } //move sampled values to MATLAB plhs[0] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL); plhs[1] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(plhs[0]),z,sizeof(KC_FP_TYPE)*NT,hipMemcpyDeviceToHost)); checkCudaErrors(hipMemcpy((KC_FP_TYPE*)mxGetPr(plhs[1]),s,sizeof(KC_FP_TYPE)*NT,hipMemcpyDeviceToHost)); //clear out random number generator checkCudaErrors(hipDeviceSynchronize()); checkCudaErrors(hiprandDestroyGenerator(curandGen)); //clear GPU values // negative binomial distribution items checkCudaErrors(hipFree(nbPDF)); // model params checkCudaErrors(hipFree(alphas)); checkCudaErrors(hipFree(phi)); // sampler stuff checkCudaErrors(hipFree(log_post2)); checkCudaErrors(hipFree(log_post3)); checkCudaErrors(hipFree(z)); checkCudaErrors(hipFree(s)); checkCudaErrors(hipFree(sampleStats)); free(sampleStats_local); checkCudaErrors(hipFree(calculationSpace)); // random nums checkCudaErrors(hipFree(randU)); }
ea2a749cfb67353d383ffc2db7fce6fc8f4f9ec6.cu
#include <math.h> #include <stdlib.h> #include <stdio.h> #include <string.h> #include <cuda_runtime.h> #include <cusparse_v2.h> #include "cublas_v2.h" #include <curand.h> #include <helper_functions.h> #include <helper_cuda.h> #include "mex.h" #include "kcDefs.h" //see for info on anything starting with KC_ #include "kcArrayFunctions.h" /* * log_p_y * log likelihood for a poisson * y = observed count * dt = length of observation */ //__device__ KC_FP_TYPE log_p_y( KC_FP_TYPE y, KC_FP_TYPE rate, KC_FP_TYPE dt, KC_FP_TYPE sh) { // return y*(KC_LOG(rate)+KC_LOG(dt)) - dt*rate;// - KC_GAMMALN(y+1) //} __device__ KC_FP_TYPE log_p_y(KC_FP_TYPE y, KC_FP_TYPE rate, KC_FP_TYPE dt, KC_FP_TYPE sh) { KC_FP_TYPE ex = rate*KC_MAX(KC_MIN(KC_EXP(sh),KC_MAXN),KC_MINN); return y*(KC_LOG(ex)+KC_LOG(dt)) - dt*ex - KC_GAMMALN(y+1.0); } /* kcSampleSMStates * kernel runs on each trial (not timebin) * outputs: * z = jump times per each trial * s = which state jumped to * sampleStats = (3,2,NT) array, spike counts observed in each hidden state (divided up by trial) * inputs * y = spike counts * trialIndex = index for y ( first spike count for trial i is y[trialIndex[i]] and the last spike count is y[trialIndex[i+1]-1] * y is indexed at 0. This array includes final value that should be length of y) * trialCoh = coherence level for each trial (coherence controls prior jump time distribution and jump to state probability) * coherence labels/indices begin at 0 instead of 1 to be consistent with C, unlike MATLAB * NT = number of trials * alpha = (3,1) array, spike rates * phi = (numCoherences,1) jump probabilities (p(s=3) = phi, p(s=2) = 1-phi), trial coherence dependent * delta_t = length of each timebin * maxJump = the longest to calculate out possible jump time values for * randU = (NT,1) array a set of uniform random numbers on [0,1] * * nbPDF = (maxJump,numberOfCoherences) array, negative binomial pdf values (up to some limit) for each of the parameters of coherences * * jumpToProbs = (maxJump*NT,2) preallocated space to do calculations over */ __global__ void kcSampleSMStates(KC_FP_TYPE * z, KC_FP_TYPE * s, KC_FP_TYPE * sampleStats, KC_FP_TYPE * y, int * trialIndex, int * trialCoh, int NT, KC_FP_TYPE * alphas, KC_FP_TYPE * phi, KC_FP_TYPE delta_t, int maxJump, KC_FP_TYPE * randU, KC_FP_TYPE * nbPDF, KC_FP_TYPE * jumpToProbs, KC_FP_TYPE * spe) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if(idx < NT) { int T1 = trialIndex[idx]; int T = trialIndex[idx+1]-T1; //index in jumpToProbs for jumping to state 2 int jumpT1_2 = idx*(maxJump*2); //index in jumpToProbs for jumping to state 3 int jumpT1_3 = idx*(maxJump*2) + maxJump; int cohIndex = trialCoh[idx]*maxJump; KC_FP_TYPE p2 = (phi[trialCoh[idx]] < 1)?KC_LOG(1-phi[trialCoh[idx]]):0; KC_FP_TYPE p3 = KC_LOG(phi[trialCoh[idx]]); //calculate jump time probabilities for jump time happening within observed window (else model says jump happens after trial observations end) for(int ii = T-1; ii >= 0; ii--) { //taking a cumulative sum over p(y_{ii:end}|z=ii,s=2 or 3) jumpToProbs[jumpT1_2+ii] = ((ii < T-1)?(jumpToProbs[jumpT1_2+ii+1]):(0)) + log_p_y(y[T1+ii],alphas[1],delta_t,spe[T1+ii]) ; jumpToProbs[jumpT1_3+ii] = ((ii < T-1)?(jumpToProbs[jumpT1_3+ii+1]):(0)) + log_p_y(y[T1+ii],alphas[2],delta_t,spe[T1+ii]) ; } KC_FP_TYPE initStateCumsum = 0; KC_FP_TYPE maxLog = 0; for(int ii = 0; ii < maxJump; ii++) { // p (y_{1:t}|z==ii<=T), my comments are starting indexes at 1 while the code starts at 0 if(ii < T) { KC_FP_TYPE p_y_init = log_p_y(y[T1+ii],alphas[0],delta_t,spe[T1+ii]); 
initStateCumsum += p_y_init; if(ii < T-1) { jumpToProbs[jumpT1_2+ii+1] += initStateCumsum; jumpToProbs[jumpT1_3+ii+1] += initStateCumsum; } } else { jumpToProbs[jumpT1_2+ii] = initStateCumsum; jumpToProbs[jumpT1_3+ii] = initStateCumsum; } jumpToProbs[jumpT1_2+ii] = jumpToProbs[jumpT1_2+ii] + nbPDF[cohIndex+ii] + p2; jumpToProbs[jumpT1_3+ii] = jumpToProbs[jumpT1_3+ii] + nbPDF[cohIndex+ii] + p3; maxLog = KC_MAX(KC_MAX(maxLog,jumpToProbs[jumpT1_2+ii]),jumpToProbs[jumpT1_3+ii]); //maxLog = jumpToProbs[jumpT1_2+ii]+jumpToProbs[jumpT1_3+ii]; } //maxLog /= (maxJump*2.0); KC_FP_TYPE maxNumToExp = 8; KC_FP_TYPE minNumToExp = 2; KC_FP_TYPE extraConst = 0; //this helps numerical stability when going from log p to p (quick and dirty method) if(maxLog > maxNumToExp) { extraConst = maxLog-maxNumToExp; } else if(maxLog < minNumToExp) { extraConst = minNumToExp-maxLog; } KC_FP_TYPE totalProbCumsum = 0; for(int ii = 0; ii < maxJump; ii++) { jumpToProbs[jumpT1_3+ii] = KC_EXP(jumpToProbs[jumpT1_3+ii] + extraConst); if(phi[trialCoh[idx]] < 1.0) { jumpToProbs[jumpT1_2+ii] = KC_EXP(jumpToProbs[jumpT1_2+ii] + extraConst); totalProbCumsum += jumpToProbs[jumpT1_3+ii] + jumpToProbs[jumpT1_2+ii]; } else { totalProbCumsum += jumpToProbs[jumpT1_3+ii]; jumpToProbs[jumpT1_2+ii] = 0.0; } } //goes back through and finds a sampling time + sample to state KC_FP_TYPE post_cdf = 0; int switchFound = -1; int switchTime = 0; KC_FP_TYPE randn = randU[idx] * totalProbCumsum; for(int ii = 0; ii < maxJump && switchFound < 1; ii++) { post_cdf += jumpToProbs[jumpT1_2+ii]; if(post_cdf > randn && phi[trialCoh[idx]] < 1) { switchFound = 2; switchTime = ii; } else { post_cdf += jumpToProbs[jumpT1_3+ii]; if(post_cdf > randn) { switchFound = 3; switchTime = ii; } } } if(switchFound <= 0) { //just to make sure it doesn't crash switchFound = (KC_LOG(randU[idx])>p3)?2:3; switchTime = 101; } s[idx] = switchFound; z[idx] = switchTime; //sum up observed spike count info sampleStats[idx*6] = KC_MIN((KC_FP_TYPE)switchTime,(KC_FP_TYPE)T); sampleStats[idx*6+3] = 0; sampleStats[idx*6+4] = 0; sampleStats[idx*6+5] = 0; if(switchFound == 2) { sampleStats[idx*6+1] = ((KC_FP_TYPE)T)-sampleStats[idx*6] ; sampleStats[idx*6+2] = 0.0; for(int ii = 0; ii < T;ii++) { if(ii<switchTime) { sampleStats[idx*6+3] += y[T1+ii]; } else { sampleStats[idx*6+4] += y[T1+ii]; } } } else { sampleStats[idx*6+2] = ((KC_FP_TYPE)T)-sampleStats[idx*6] ; sampleStats[idx*6+1] = 0.0; for(int ii = 0; ii < T;ii++) { if(ii<switchTime) { sampleStats[idx*6+3] += y[T1+ii]; } else { sampleStats[idx*6+5] += y[T1+ii]; } } } } } /* * [SMSamples.z(:,ss) SMSamples.s(:,ss) SMSamples.spikeStats(:,:,ss)] = kcStepTimeSampler(gpu_y,gpu_trIndex,gpu_trCoh,SMSamples.alpha(:,ss-1),SMSamples.phi(:,ss-1),nbPDF,nbCDF,gpu_spe); * Inputs: * 0 = y (spikes) - one long vector of all the spike times for all trials (GPU array) * 1 = trial index - 0:end-1 are the trial start times (GPU array) * 2 = trial coherence - on GPU, coherence levels per each trial (GPU array) * 3 = alpha, firing rates per each state (MATLAB array) * 4 = phi, probability of switiching to state 3 for each coherence (MATLAB array) * 5 = nbPDF, negative binomial pdf values (up to some limit) for each of the parameters of coherences nbPDF(k,c) = P(z=k| p_c,r) (MATLAB array) * 6 = delta_t, length of each timebins * 7 = spe, spike history effect (GPU array) * * Outputs (all in MATLAB array form) * 0 = z, switching times per each trial, size (NT,1) * 1 = s, which state was switched to per each trial (either 2 or 3), size (NT,1) * 2 = spikeStats, 
summary statistics on how many spikes were fired per each state of the semi-markov model and how many observations per state, size (3,2) */ void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[]) { //load up the GPU array inputs unsigned int TT = kcGetArrayNumEl(prhs[0]); KC_FP_TYPE * y = kcGetArrayData(prhs[0]); unsigned int NT = kcGetArrayNumEl(prhs[1])-1; int * trIndex = kcGetArrayDataInt(prhs[1]); int * cohIndex = kcGetArrayDataInt(prhs[2],NT); KC_FP_TYPE * spe = kcGetArrayData(prhs[7]); //put the precalculated negative binomial PDF, CDF values onto the GPU const mwSize * precalcSize = mxGetDimensions(prhs[5]); int maxJump = precalcSize[0]; int NC = precalcSize[1]; //mexPrintf("Sampling SM states. Max jump = %d, NC = %d, TT = %d, NT = %d\n",maxJump,NC,TT,NT); KC_FP_TYPE * nbPDF; checkCudaErrors(cudaMalloc((void**)&nbPDF,sizeof(KC_FP_TYPE)*NC*maxJump)); checkCudaErrors(cudaMemcpy(nbPDF,(KC_FP_TYPE*)mxGetPr(prhs[5]),sizeof(KC_FP_TYPE)*NC*maxJump,cudaMemcpyHostToDevice)); KC_FP_TYPE dt = mxGetScalar(prhs[6]); //put model parameters onto the GPU KC_FP_TYPE * alphas; checkCudaErrors(cudaMalloc((void**)&alphas,sizeof(KC_FP_TYPE)*3)); checkCudaErrors(cudaMemcpy(alphas,(KC_FP_TYPE*)mxGetPr(prhs[3]),sizeof(KC_FP_TYPE)*3,cudaMemcpyHostToDevice)); KC_FP_TYPE * phi; checkCudaErrors(cudaMalloc((void**)&phi,sizeof(KC_FP_TYPE)*NC)); checkCudaErrors(cudaMemcpy(phi,(KC_FP_TYPE*)mxGetPr(prhs[4]),sizeof(KC_FP_TYPE)*NC,cudaMemcpyHostToDevice)); //setup space on GPU for sampling // z,s,sampleStats // log_post2 - size(TT,1) // log_post3 - size(TT,1) KC_FP_TYPE * log_post2; KC_FP_TYPE * log_post3; checkCudaErrors(cudaMalloc((void**)&log_post2,sizeof(KC_FP_TYPE)*TT)); checkCudaErrors(cudaMalloc((void**)&log_post3,sizeof(KC_FP_TYPE)*TT)); KC_FP_TYPE * z; checkCudaErrors(cudaMalloc((void**)&z,sizeof(KC_FP_TYPE)*NT)); KC_FP_TYPE * s; checkCudaErrors(cudaMalloc((void**)&s,sizeof(KC_FP_TYPE)*NT)); KC_FP_TYPE * sampleStats; checkCudaErrors(cudaMalloc((void**)&sampleStats,sizeof(KC_FP_TYPE)*6*NT)); KC_FP_TYPE * calculationSpace; checkCudaErrors(cudaMalloc((void**)&calculationSpace,sizeof(KC_FP_TYPE)*maxJump*NT*2)); //setup random number generator curandGenerator_t curandGen = 0; curandStatus_t curandStatus; curandStatus = curandCreateGenerator(&curandGen, CURAND_RNG_PSEUDO_DEFAULT); if(curandStatus != CURAND_STATUS_SUCCESS ) { mexPrintf("CURAND-1 error %d\n",(int)curandStatus); mexErrMsgTxt("CUDA errors sampling semi markov "); } struct timeval now; gettimeofday(&now,NULL); unsigned long long mySeed = (unsigned long long)now.tv_usec+(unsigned long long)(1e7*(unsigned long long)now.tv_sec); curandStatus = curandSetPseudoRandomGeneratorSeed(curandGen, mySeed); if(curandStatus != CURAND_STATUS_SUCCESS ) { mexPrintf("CURAND-2 error %d\n",(int)curandStatus); mexErrMsgTxt("CUDA errors sampling semi markov"); } //generate a uniform random number set (size NT*2) KC_FP_TYPE * randU; int randSize = NT+((NT%2==0)?0:1); checkCudaErrors(cudaMalloc((void**)&randU,sizeof(KC_FP_TYPE)*randSize)); curandStatus = KC_RANDOM_UNIFORM_FUNCTION(curandGen,randU,randSize); cudaDeviceSynchronize(); //sample the states kcSampleSMStates<<<NT,1>>>(z, s, sampleStats, y, trIndex, cohIndex, NT, alphas, phi, dt, maxJump, randU, nbPDF, calculationSpace, spe); cudaDeviceSynchronize(); //combine the sample stats KC_FP_TYPE * sampleStats_local; sampleStats_local = (KC_FP_TYPE*)malloc(sizeof(KC_FP_TYPE)*6*NT); checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)sampleStats_local,sampleStats,sizeof(KC_FP_TYPE)*6*NT,cudaMemcpyDeviceToHost)); 
cudaDeviceSynchronize(); plhs[2] = mxCreateNumericMatrix(3,2,KC_FP_TYPE_MATLAB,mxREAL); KC_FP_TYPE * sampleStats_sum = (KC_FP_TYPE*)mxGetPr(plhs[2]); for(int jj = 0; jj < 6; jj++) { sampleStats_sum[jj] = 0; for(int ii = 0; ii < NT; ii++) { sampleStats_sum[jj] += sampleStats_local[ii*6 + jj]; } } //move sampled values to MATLAB plhs[0] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL); plhs[1] = mxCreateNumericMatrix(NT,1,KC_FP_TYPE_MATLAB,mxREAL); checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(plhs[0]),z,sizeof(KC_FP_TYPE)*NT,cudaMemcpyDeviceToHost)); checkCudaErrors(cudaMemcpy((KC_FP_TYPE*)mxGetPr(plhs[1]),s,sizeof(KC_FP_TYPE)*NT,cudaMemcpyDeviceToHost)); //clear out random number generator checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(curandDestroyGenerator(curandGen)); //clear GPU values // negative binomial distribution items checkCudaErrors(cudaFree(nbPDF)); // model params checkCudaErrors(cudaFree(alphas)); checkCudaErrors(cudaFree(phi)); // sampler stuff checkCudaErrors(cudaFree(log_post2)); checkCudaErrors(cudaFree(log_post3)); checkCudaErrors(cudaFree(z)); checkCudaErrors(cudaFree(s)); checkCudaErrors(cudaFree(sampleStats)); free(sampleStats_local); checkCudaErrors(cudaFree(calculationSpace)); // random nums checkCudaErrors(cudaFree(randU)); }
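The sampler above shifts all log-probabilities by a constant (extraConst) before exponentiating so that exp() stays in a representable range; the shift cancels when the cumulative sum is normalised by totalProbCumsum. A stripped-down, self-contained sketch of the same idea (illustrative only, not taken from the file):

    // Sketch: draw an index from unnormalised log-weights logw[0..k-1],
    // using the same shift-before-exp trick as kcSampleSMStates.
    __device__ int sampleFromLogWeights(const KC_FP_TYPE* logw, int k, KC_FP_TYPE u01) {
        KC_FP_TYPE maxLog = logw[0];
        for (int i = 1; i < k; i++) maxLog = KC_MAX(maxLog, logw[i]);
        KC_FP_TYPE total = 0;
        for (int i = 0; i < k; i++) total += KC_EXP(logw[i] - maxLog);  // shift cancels in the ratio
        KC_FP_TYPE target = u01 * total, cdf = 0;
        for (int i = 0; i < k; i++) {
            cdf += KC_EXP(logw[i] - maxLog);
            if (cdf > target) return i;
        }
        return k - 1;  // round-off guard, mirroring the kernel's fallback branch
    }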
b51e95b081ccd85de6cdbfb4ff81e06f4002d57c.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

using namespace std;

#ifndef MAP_FILE
#define MAP_FILE MAP_SHARED
#endif

__global__ void minusScalar(float* in, float* out, float minus, int size) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = gridDim.x * blockDim.x;
  for (; tid < size; tid += stride)
    if (tid < size)
      out[tid] = in[tid] - minus;
}
b51e95b081ccd85de6cdbfb4ff81e06f4002d57c.cu
#include "includes.h"

using namespace std;

#ifndef MAP_FILE
#define MAP_FILE MAP_SHARED
#endif

__global__ void minusScalar(float* in, float* out, float minus, int size) {
  int tid = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = gridDim.x * blockDim.x;
  for (; tid < size; tid += stride)
    if (tid < size)
      out[tid] = in[tid] - minus;
}
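Because minusScalar walks the array with a grid-stride loop, any launch configuration covers all size elements. A minimal hypothetical launcher follows; d_in and d_out are assumed to be device pointers prepared by the caller.

    // Hypothetical host-side sketch: subtract 1.5f from every element.
    void launchMinusScalar(float* d_in, float* d_out, int size) {
        int threads = 256;                             // illustrative block size
        int blocks  = (size + threads - 1) / threads;  // the grid-stride loop tolerates any grid
        minusScalar<<<blocks, threads>>>(d_in, d_out, 1.5f, size);
        cudaDeviceSynchronize();                       // or poll cudaGetLastError()
    }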
6f43426ff3eac366262fd685daaa9d71624b43b7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<iostream>

using namespace std;

__global__ void hello()
{
    printf("Hello world from device\n");
}

int main()
{
    hipLaunchKernelGGL(( hello), dim3(1), dim3(1), 0, 0, );
    cout << "Hello world from host" << endl;
    hipDeviceSynchronize();
    return 0;
}
6f43426ff3eac366262fd685daaa9d71624b43b7.cu
#include<iostream>

using namespace std;

__global__ void hello()
{
    printf("Hello world from device\n");
}

int main()
{
    hello<<<1, 1>>>();
    cout << "Hello world from host" << endl;
    cudaDeviceSynchronize();
    return 0;
}
95d291ffc3de7a677883a50dd48bbb4ab2f5f96b.hip
// !!! This is a file automatically generated by hipify!!! #include <algorithm> #include<bits/stdc++.h> #include <hip/hip_runtime.h> using namespace std; class Node{ public: int size=0; bool leaf; int keys[9]; int values[8][20]; Node* pointers[9]; Node* next=NULL;//leaf node //Node* parent=NULL; Node(){ size=0; leaf=false; } }; Node* findpar(Node* child,Node* root){ if(root->leaf==true||root->pointers[0]->leaf==true){ return NULL; } for(int i=0;i<root->size+1;i++){ if(root->pointers[i]==child){ return root; } else if(findpar(child,root->pointers[i])!=NULL){ return findpar(child,root->pointers[i]); } } return NULL; } Node* insertinternal(Node* parent,Node* newchild, Node* root,int val){ //parent may or may not have overflow if(parent->size<7){ int itr=0; while(val>parent->keys[itr]&&itr<parent->size){ itr++; } for(int i= parent->size-1;i>=itr;i--){ parent->keys[i+1] = parent->keys[i]; } for(int i=parent->size;i>itr;i--){ parent->pointers[i+1] = parent->pointers[i]; } parent->pointers[itr+1] = newchild; parent->size++; parent->keys[itr] = val; return root; } //parent has overflow,handle this and call its parent with a new child node vector <int> temparray ; for(int i=0;i<8;i++){ temparray.push_back(parent->keys[i]); } vector <Node*> temppointers ; for(int i=0;i<=8;i++){ temppointers.push_back(parent->pointers[i]); } int itr = 0; while(val>temparray[itr]&&itr<7){ itr++; } for(int i=6;i>=itr;i--){ temparray[i+1] = temparray[i]; } for(int i=7;i>itr;i--){ temppointers[i+1] = temppointers[i]; } temppointers[itr+1] = newchild; temparray[itr] = val; //define new node and distribute keys and links Node* internalnode = new Node(); internalnode->leaf=false; internalnode->size=3; parent->size = 4; for(int i=0;i<parent->size;i++){ parent->keys[i] = temparray[i]; } for(int i=0;i<5;i++){ parent->pointers[i] = temppointers[i]; } int transfer = temparray[4]; for(int i=0;i<3;i++){ internalnode->keys[i] = temparray[5+i]; } for(int i=0;i<4;i++){ internalnode->pointers[i] = temppointers[5+i]; } if(parent==root){ Node* root2 = new Node(); root2->keys[0] = transfer; //root2->pointers[0] = ne atomicAdd(&(ptr->values[i][attr-2]),upd);w Node(); root2->pointers[0] = parent; //root2->pointers[1] = new Node(); root2->pointers[1] = internalnode; root2->size=1; root2->leaf=false; return root2; } //write condition if it is root. 
return insertinternal(findpar(parent,root),internalnode,root,transfer); } Node* insert(Node* root,int val,int m){ if (root==NULL){ root = new Node(); for(int i=0;i<8;i++){ for(int j=0;j<m-1;j++){ root->values[i][j] = 0; } } root->leaf=true; root->keys[0]=val; root->size=1; return root; } Node* ptr = root; Node* parent; while(ptr->leaf==false){ parent = ptr; for(int i=0;i<ptr->size;i++){ if(val<ptr->keys[i]){ ptr = ptr->pointers[i]; break; } if(i==ptr->size-1){ ptr = ptr->pointers[i+1]; break; } } } if(ptr->size<7){ int i=0; while(val>ptr->keys[i]&&i<ptr->size){ i++; } for(int itr=ptr->size-1;itr>=i;itr--){ ptr->keys[itr+1] = ptr->keys[itr]; } ptr->keys[i] = val; ptr->size++; return root; } //overflow condition Node* leaf2 = new Node(); for(int i=0;i<8;i++){ for(int j=0;j<m-1;j++){ leaf2->values[i][j]=0; } } leaf2->leaf=true; vector <int> temparray; for(int i=0;i<8;i++){ temparray.push_back(ptr->keys[i]); } int itr = 0; while(val>temparray[itr]&&itr<7){ itr++; } for(int i=6;i>=itr;i--){ temparray[i+1] = temparray[i]; } temparray[itr] = val; //cout<<itr<<endl; ptr->size = 4; leaf2->size = 4; leaf2->next = ptr->next; ptr->next = leaf2; //after inserting x,there is possibility that keys of ptr may change //so we update keys of both ptr and leaf2 for(int i=0;i<ptr->size;i++){ ptr->keys[i] = temparray[i]; } for(int i=0;i<leaf2->size;i++){ leaf2->keys[i] = temparray[ptr->size+i]; //cout<<leaf2->keys[i]<<" "; } //cout<<endl; //updates done //now overflow mightve happened at root or internal node/leaf if(ptr==root){ Node* root2 = new Node(); root2->keys[0] = leaf2->keys[0]; //root2->pointers[0] = new Node(); root2->pointers[0] = ptr; //root2->pointers[1] = new Node(); root2->pointers[1] = leaf2; root2->size=1; root2->leaf=false; return root2; } //overflow happened at some leaf node which is not the root. 
return insertinternal(parent,leaf2,root,leaf2->keys[0]); } bool searchtree(Node* root,int val){ if(root==NULL){ return false; } Node* ptr = root; while(ptr->leaf==false){ for(int i=0;i<ptr->size;i++){ if(val<ptr->keys[i]){ ptr = ptr->pointers[i]; break; } if(i==ptr->size-1){ ptr=ptr->pointers[i+1]; break; } } } for(int i=0;i<ptr->size;i++){ if(ptr->keys[i]==val){ cout<<ptr->keys[i]<<" "; for(int j=0;j<ptr->size;j++){ cout<<ptr->values[i][j]<<" "; } cout<<endl; return true; } } return false; } __global__ void gpusearch(int n,int m,int num,int *gpuarray,int *gpuoutput, Node* gputree){ //printf("hi"); int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < num) { int val = gpuarray[id]; Node* ptr = gputree; for(int i=0;i<m;i++){ gpuoutput[id*m+i]=-1; } //printf("%d %d",ptr->keys[0],ptr->keys[1]); while(ptr->leaf==false){ for(int i=0;i<ptr->size;i++){ if(val<ptr->keys[i]){ ptr = ptr->pointers[i]; break; } if(i==ptr->size-1){ ptr=ptr->pointers[i+1]; break; } } } for(int i=0;i<ptr->size;i++){ if(ptr->keys[i]==val){ gpuoutput[id*m] = val; //printf("%d ",val); for(int j=0;j<m-1;j++){ gpuoutput[id*m+1+j] = ptr->values[i][j]; //printf("%d ",ptr->values[i][j]); } //printf("\n"); return; } } //printf("-1\n"); gpuoutput[id*m]=-1; } } __global__ void gpurange(int n,int m,int num,int *gpuarray,Node* gputree,Node **startpoint){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<num) { int a = gpuarray[2*id]; int b = gpuarray[2*id+1]; int itr=0; Node* ptr=gputree; if(ptr==NULL){ return; } while(ptr->leaf==false){ for(int i=0;i<ptr->size;i++){ if(a<ptr->keys[i]){ ptr = ptr->pointers[i]; break; } if(i==ptr->size-1){ ptr=ptr->pointers[i+1]; break; } } } startpoint[id] = ptr->next; } } __global__ void gpuadd(int n,int m,int num,int *gpuarray,Node* gputree,Node **leafpoint){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < num) { int key = gpuarray[id*3]; int attr = gpuarray[id*3+1]; int upd = gpuarray[id*3+2]; Node* ptr = gputree; while(ptr->leaf==false){ for(int i=0;i<ptr->size;i++){ if(key<ptr->keys[i]){ ptr = ptr->pointers[i]; break; } if(i==ptr->size-1){ ptr=ptr->pointers[i+1]; break; } } } for(int i=0;i<ptr->size;i++){ if(ptr->keys[i]==key){ //ptr->values[m*i+attr-2] = upd; atomicAdd(&(ptr->values[i][attr-2]),upd); } } leafpoint[id] = ptr->next; } } __global__ void gpupaths(int n,int m,int val,int *gpuoutput,Node* gputree){ int itr=0; Node* ptr = gputree; for(int i=0;i<n;i++){ gpuoutput[i]=-1; } gpuoutput[itr]=ptr->keys[0]; itr++; while(ptr->leaf==false){ for(int i=0;i<ptr->size;i++){ if(val<ptr->keys[i]){ ptr = ptr->pointers[i]; gpuoutput[itr]=ptr->keys[0]; itr++; //v.push_back(ptr->keys[0]); break; } if(i==ptr->size-1){ ptr=ptr->pointers[i+1]; gpuoutput[itr]=ptr->keys[0]; itr++; //v.push_back(ptr->keys[0]); break; } } } //v.push_bac } Node* gpucopy(Node* ptr){ Node* temp; hipMalloc(&temp,sizeof(Node)); hipMemcpy(temp,ptr,sizeof(Node),hipMemcpyHostToDevice); return temp; } Node* copy(Node* head,int m){ Node* temp = new Node(); temp->leaf = head->leaf; temp->size=head->size; for(int i=0;i<8;i++){ temp->keys[i] = head->keys[i]; } //temp->values=head->values; if(head->leaf==true){ for(int i=0;i<8;i++){ for(int j=0;j<m-1;j++){ temp->values[i][j] = head->values[i][j]; } } temp->next = head; return gpucopy(temp); } for(int i=0;i<=head->size;i++){ temp->pointers[i] = (copy(head->pointers[i],m)); } return gpucopy(temp); } void main2 ( int n, int m, int q, int *database, int **queries ,char* outputfilename) { ofstream fout; fout.open(outputfilename); Node* tree = NULL; for(int i=0;i<n;i++){ tree = 
insert(tree,database[i*m],m); } //cout<<tree->keys[0]<<" "<<tree->keys[1]<<endl; Node* gputree = copy(tree,m); //copy tree into gputree. for(int i=0;i<q;i++){ if(queries[i][0]==1){ //cout<<"main2"; int num = queries[i][1]; int *gpuarray,*gpuoutput,*array; array = (int *) malloc(num*sizeof(int)); hipMalloc(&gpuoutput,(num*m)*(sizeof(int))); hipMalloc(&gpuarray,num*(sizeof(int))); for(int j=0;j<num;j++){ //cout<<queries[i][2+j]<<" "; } //cout<<endl; for(int j=0;j<num;j++){ array[j] = queries[i][2+j]; //cout<<queries[i][2+j]<<" "; //bool temp = searchtree(tree,queries[i][2+j]); } // cout<<endl; hipMemcpy(gpuarray,array,num*sizeof(int),hipMemcpyHostToDevice); hipLaunchKernelGGL(( gpusearch), dim3(11),dim3(num/10 + 1), 0, 0, n,m,num,gpuarray,gpuoutput,gputree); hipDeviceSynchronize(); int *output; output = (int *) malloc((num*m)*sizeof(int)); hipMemcpy(output,gpuoutput,(num*m)*sizeof(int),hipMemcpyDeviceToHost); for(int j=0;j<num;j++){ //cout<<"a"; if(output[j*m]==-1){ fout<<"-1"<<endl; continue; } for(int k=0;k<m;k++){ fout<<output[j*m+k]<<" "; } fout<<endl; } } else if(queries[i][0]==2){ //continue; int num = queries[i][1]; int *gpuarray,*array; array = (int *) malloc((2*num)*sizeof(int)); hipMalloc(&gpuarray,(2*num)*sizeof(int)); for(int j=0;j<2*num;j++){ array[j] = queries[i][2+j]; } hipMemcpy(gpuarray,array,(2*num)*sizeof(int),hipMemcpyHostToDevice); Node **startpoint; hipMalloc(&startpoint,num*sizeof(Node*)); Node **cpustartpoint; hipLaunchKernelGGL(( gpurange), dim3(11),dim3(num/10 + 1), 0, 0, n,m,num,gpuarray,gputree,startpoint); cpustartpoint = (Node**)malloc(num*sizeof(Node*)); hipMemcpy(cpustartpoint,startpoint,num*sizeof(Node*),hipMemcpyDeviceToHost); for(int j=0;j<num;j++){ Node* ptr = cpustartpoint[j]; int a = array[2*j]; int b = array[2*j+1]; int itr=0; while(ptr!=NULL){ for(int k=0;k<ptr->size;k++){ if(ptr->keys[k]>=a&&ptr->keys[k]<=b){ itr++; fout<<ptr->keys[k]<<" "; for(int l=0;l<m-1;l++){ fout<<ptr->values[k][l]<<" "; } fout<<endl; } } ptr=ptr->next; } if(itr==0){ fout<<"-1"<<endl; } } } else if(queries[i][0]==3){ //no output reqd int num = queries[i][1]; int *gpuarray,*array; Node **leafpoint,**cpuleafpoint; hipMalloc(&leafpoint,num*sizeof(Node*)); cpuleafpoint = (Node**) malloc(num*sizeof(Node *)); array = (int *) malloc((3*num)*sizeof(int)); hipMalloc(&gpuarray,(3*num)*sizeof(int)); for(int j=0;j<3*num;j++){ array[j] = queries[i][2+j]; } hipMemcpy(gpuarray,array,(3*num)*sizeof(int),hipMemcpyHostToDevice); hipLaunchKernelGGL(( gpuadd), dim3(11),dim3(num/10 + 1), 0, 0, n,m,num,gpuarray,gputree,leafpoint); hipMemcpy(cpuleafpoint,leafpoint,num*sizeof(Node*),hipMemcpyDeviceToHost); for(int j=0;j<num;j++){ Node* ptr= cpuleafpoint[j]; int key,attr,upd; key = array[3*j]; attr = array[3*j+1]; upd = array[3*j+2]; for(int k=0;k<ptr->size;k++){ if(ptr->keys[k]==key){ ptr->values[k][attr-2] += upd; } } } } else{ int *gpuoutput; int val = queries[i][1]; hipMalloc(&gpuoutput,(n)*sizeof(int)); hipLaunchKernelGGL(( gpupaths), dim3(1),dim3(1), 0, 0, n,m,val,gpuoutput,gputree); int *output; output = (int *) malloc(n*sizeof(int)); hipMemcpy(output,gpuoutput,n*sizeof(int),hipMemcpyDeviceToHost); for(int i=0;i<n;i++){ if(output[i]==-1){ break; } fout<<output[i]<<" "; } fout<<endl; } } } int main(int argc,char **argv){ //variable declarations int n,m,q; //Input file pointer declaration FILE *inputfilepointer; //File Opening for read char *inputfilename = argv[1]; inputfilepointer = fopen( inputfilename , "r"); //Checking if file ptr is NULL if ( inputfilepointer == NULL ) { printf( "input.txt 
file failed to open." ); return 0; } fscanf( inputfilepointer, "%d", &n ); //scaning for number of rows fscanf( inputfilepointer, "%d", &m ); //scaning for number of columns int *database = (int *) malloc(n*m*sizeof(int)); for(int i=0;i<n;i++){ for(int j=0;j<m;j++){ fscanf( inputfilepointer, "%d", &database[i*m+j] ); } } fscanf( inputfilepointer, "%d", &q ); //scanning for number of queries int **queries = (int **) malloc(q*sizeof(int *)); for(int i=0;i<q;i++){ int typeop; fscanf( inputfilepointer, "%d", &typeop ); if(typeop==4){ //cout<<"a"<<endl; queries[i] = (int *) malloc (2*sizeof(int)); queries[i][0] = 4; fscanf( inputfilepointer, "%d", &queries[i][1]); } else if(typeop==3){ //cout<<"b"<<endl; int num; fscanf( inputfilepointer, "%d", &num ); queries[i] = (int *) malloc((2+3*num)*sizeof(int)); queries[i][0] = 3; queries[i][1] = num; for(int j=0;j<3*num;j++){ fscanf( inputfilepointer, "%d", &queries[i][2+j] ); } } else if(typeop==2){ //cout<<"c"<<endl; int num; fscanf( inputfilepointer, "%d", &num ); queries[i] = (int *) malloc((2+2*num)*sizeof(int)); queries[i][0] = 2; queries[i][1] = num; for(int j=0;j<2*num;j++){ fscanf( inputfilepointer, "%d", &queries[i][2+j] ); } } else { //cout<<"d"<<endl; int num; fscanf( inputfilepointer, "%d", &num ); queries[i] = (int *) malloc((2+num)*sizeof(int)); queries[i][0] = 1; queries[i][1] = num; for(int j=0;j<num;j++){ fscanf( inputfilepointer, "%d", &queries[i][2+j] ); } } } char *outputfilename = argv[2]; main2 ( n, m, q, database, queries, outputfilename); //cout<<"done"; fclose( inputfilepointer ); return 0; }
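In the gpuadd kernel above, several type-3 queries in one batch can target the same key, so the attribute update uses atomicAdd instead of a plain read-modify-write. A stripped-down illustration of that pattern (hypothetical kernel, not part of the file):

    // Sketch: thread id adds upd[id] to table[row[id]][col]; atomicAdd keeps
    // concurrent increments to the same cell from being lost.
    __global__ void addToCell(int* table, int m, int col, const int* row, const int* upd, int num) {
        int id = blockIdx.x * blockDim.x + threadIdx.x;
        if (id < num) {
            atomicAdd(&table[row[id] * m + col], upd[id]);
        }
    }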
95d291ffc3de7a677883a50dd48bbb4ab2f5f96b.cu
#include <algorithm> #include<bits/stdc++.h> #include <cuda.h> using namespace std; class Node{ public: int size=0; bool leaf; int keys[9]; int values[8][20]; Node* pointers[9]; Node* next=NULL;//leaf node //Node* parent=NULL; Node(){ size=0; leaf=false; } }; Node* findpar(Node* child,Node* root){ if(root->leaf==true||root->pointers[0]->leaf==true){ return NULL; } for(int i=0;i<root->size+1;i++){ if(root->pointers[i]==child){ return root; } else if(findpar(child,root->pointers[i])!=NULL){ return findpar(child,root->pointers[i]); } } return NULL; } Node* insertinternal(Node* parent,Node* newchild, Node* root,int val){ //parent may or may not have overflow if(parent->size<7){ int itr=0; while(val>parent->keys[itr]&&itr<parent->size){ itr++; } for(int i= parent->size-1;i>=itr;i--){ parent->keys[i+1] = parent->keys[i]; } for(int i=parent->size;i>itr;i--){ parent->pointers[i+1] = parent->pointers[i]; } parent->pointers[itr+1] = newchild; parent->size++; parent->keys[itr] = val; return root; } //parent has overflow,handle this and call its parent with a new child node vector <int> temparray ; for(int i=0;i<8;i++){ temparray.push_back(parent->keys[i]); } vector <Node*> temppointers ; for(int i=0;i<=8;i++){ temppointers.push_back(parent->pointers[i]); } int itr = 0; while(val>temparray[itr]&&itr<7){ itr++; } for(int i=6;i>=itr;i--){ temparray[i+1] = temparray[i]; } for(int i=7;i>itr;i--){ temppointers[i+1] = temppointers[i]; } temppointers[itr+1] = newchild; temparray[itr] = val; //define new node and distribute keys and links Node* internalnode = new Node(); internalnode->leaf=false; internalnode->size=3; parent->size = 4; for(int i=0;i<parent->size;i++){ parent->keys[i] = temparray[i]; } for(int i=0;i<5;i++){ parent->pointers[i] = temppointers[i]; } int transfer = temparray[4]; for(int i=0;i<3;i++){ internalnode->keys[i] = temparray[5+i]; } for(int i=0;i<4;i++){ internalnode->pointers[i] = temppointers[5+i]; } if(parent==root){ Node* root2 = new Node(); root2->keys[0] = transfer; //root2->pointers[0] = new Node(); root2->pointers[0] = parent; //root2->pointers[1] = new Node(); root2->pointers[1] = internalnode; root2->size=1; root2->leaf=false; return root2; } //write condition if it is root. 
return insertinternal(findpar(parent,root),internalnode,root,transfer); } Node* insert(Node* root,int val,int m){ if (root==NULL){ root = new Node(); for(int i=0;i<8;i++){ for(int j=0;j<m-1;j++){ root->values[i][j] = 0; } } root->leaf=true; root->keys[0]=val; root->size=1; return root; } Node* ptr = root; Node* parent; while(ptr->leaf==false){ parent = ptr; for(int i=0;i<ptr->size;i++){ if(val<ptr->keys[i]){ ptr = ptr->pointers[i]; break; } if(i==ptr->size-1){ ptr = ptr->pointers[i+1]; break; } } } if(ptr->size<7){ int i=0; while(val>ptr->keys[i]&&i<ptr->size){ i++; } for(int itr=ptr->size-1;itr>=i;itr--){ ptr->keys[itr+1] = ptr->keys[itr]; } ptr->keys[i] = val; ptr->size++; return root; } //overflow condition Node* leaf2 = new Node(); for(int i=0;i<8;i++){ for(int j=0;j<m-1;j++){ leaf2->values[i][j]=0; } } leaf2->leaf=true; vector <int> temparray; for(int i=0;i<8;i++){ temparray.push_back(ptr->keys[i]); } int itr = 0; while(val>temparray[itr]&&itr<7){ itr++; } for(int i=6;i>=itr;i--){ temparray[i+1] = temparray[i]; } temparray[itr] = val; //cout<<itr<<endl; ptr->size = 4; leaf2->size = 4; leaf2->next = ptr->next; ptr->next = leaf2; //after inserting x,there is possibility that keys of ptr may change //so we update keys of both ptr and leaf2 for(int i=0;i<ptr->size;i++){ ptr->keys[i] = temparray[i]; } for(int i=0;i<leaf2->size;i++){ leaf2->keys[i] = temparray[ptr->size+i]; //cout<<leaf2->keys[i]<<" "; } //cout<<endl; //updates done //now overflow mightve happened at root or internal node/leaf if(ptr==root){ Node* root2 = new Node(); root2->keys[0] = leaf2->keys[0]; //root2->pointers[0] = new Node(); root2->pointers[0] = ptr; //root2->pointers[1] = new Node(); root2->pointers[1] = leaf2; root2->size=1; root2->leaf=false; return root2; } //overflow happened at some leaf node which is not the root. 
return insertinternal(parent,leaf2,root,leaf2->keys[0]); } bool searchtree(Node* root,int val){ if(root==NULL){ return false; } Node* ptr = root; while(ptr->leaf==false){ for(int i=0;i<ptr->size;i++){ if(val<ptr->keys[i]){ ptr = ptr->pointers[i]; break; } if(i==ptr->size-1){ ptr=ptr->pointers[i+1]; break; } } } for(int i=0;i<ptr->size;i++){ if(ptr->keys[i]==val){ cout<<ptr->keys[i]<<" "; for(int j=0;j<ptr->size;j++){ cout<<ptr->values[i][j]<<" "; } cout<<endl; return true; } } return false; } __global__ void gpusearch(int n,int m,int num,int *gpuarray,int *gpuoutput, Node* gputree){ //printf("hi"); int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < num) { int val = gpuarray[id]; Node* ptr = gputree; for(int i=0;i<m;i++){ gpuoutput[id*m+i]=-1; } //printf("%d %d",ptr->keys[0],ptr->keys[1]); while(ptr->leaf==false){ for(int i=0;i<ptr->size;i++){ if(val<ptr->keys[i]){ ptr = ptr->pointers[i]; break; } if(i==ptr->size-1){ ptr=ptr->pointers[i+1]; break; } } } for(int i=0;i<ptr->size;i++){ if(ptr->keys[i]==val){ gpuoutput[id*m] = val; //printf("%d ",val); for(int j=0;j<m-1;j++){ gpuoutput[id*m+1+j] = ptr->values[i][j]; //printf("%d ",ptr->values[i][j]); } //printf("\n"); return; } } //printf("-1\n"); gpuoutput[id*m]=-1; } } __global__ void gpurange(int n,int m,int num,int *gpuarray,Node* gputree,Node **startpoint){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id<num) { int a = gpuarray[2*id]; int b = gpuarray[2*id+1]; int itr=0; Node* ptr=gputree; if(ptr==NULL){ return; } while(ptr->leaf==false){ for(int i=0;i<ptr->size;i++){ if(a<ptr->keys[i]){ ptr = ptr->pointers[i]; break; } if(i==ptr->size-1){ ptr=ptr->pointers[i+1]; break; } } } startpoint[id] = ptr->next; } } __global__ void gpuadd(int n,int m,int num,int *gpuarray,Node* gputree,Node **leafpoint){ int id = blockIdx.x*blockDim.x+threadIdx.x; if(id < num) { int key = gpuarray[id*3]; int attr = gpuarray[id*3+1]; int upd = gpuarray[id*3+2]; Node* ptr = gputree; while(ptr->leaf==false){ for(int i=0;i<ptr->size;i++){ if(key<ptr->keys[i]){ ptr = ptr->pointers[i]; break; } if(i==ptr->size-1){ ptr=ptr->pointers[i+1]; break; } } } for(int i=0;i<ptr->size;i++){ if(ptr->keys[i]==key){ //ptr->values[m*i+attr-2] = upd; atomicAdd(&(ptr->values[i][attr-2]),upd); } } leafpoint[id] = ptr->next; } } __global__ void gpupaths(int n,int m,int val,int *gpuoutput,Node* gputree){ int itr=0; Node* ptr = gputree; for(int i=0;i<n;i++){ gpuoutput[i]=-1; } gpuoutput[itr]=ptr->keys[0]; itr++; while(ptr->leaf==false){ for(int i=0;i<ptr->size;i++){ if(val<ptr->keys[i]){ ptr = ptr->pointers[i]; gpuoutput[itr]=ptr->keys[0]; itr++; //v.push_back(ptr->keys[0]); break; } if(i==ptr->size-1){ ptr=ptr->pointers[i+1]; gpuoutput[itr]=ptr->keys[0]; itr++; //v.push_back(ptr->keys[0]); break; } } } //v.push_bac } Node* gpucopy(Node* ptr){ Node* temp; cudaMalloc(&temp,sizeof(Node)); cudaMemcpy(temp,ptr,sizeof(Node),cudaMemcpyHostToDevice); return temp; } Node* copy(Node* head,int m){ Node* temp = new Node(); temp->leaf = head->leaf; temp->size=head->size; for(int i=0;i<8;i++){ temp->keys[i] = head->keys[i]; } //temp->values=head->values; if(head->leaf==true){ for(int i=0;i<8;i++){ for(int j=0;j<m-1;j++){ temp->values[i][j] = head->values[i][j]; } } temp->next = head; return gpucopy(temp); } for(int i=0;i<=head->size;i++){ temp->pointers[i] = (copy(head->pointers[i],m)); } return gpucopy(temp); } void main2 ( int n, int m, int q, int *database, int **queries ,char* outputfilename) { ofstream fout; fout.open(outputfilename); Node* tree = NULL; for(int i=0;i<n;i++){ tree = 
insert(tree,database[i*m],m); } //cout<<tree->keys[0]<<" "<<tree->keys[1]<<endl; Node* gputree = copy(tree,m); //copy tree into gputree. for(int i=0;i<q;i++){ if(queries[i][0]==1){ //cout<<"main2"; int num = queries[i][1]; int *gpuarray,*gpuoutput,*array; array = (int *) malloc(num*sizeof(int)); cudaMalloc(&gpuoutput,(num*m)*(sizeof(int))); cudaMalloc(&gpuarray,num*(sizeof(int))); for(int j=0;j<num;j++){ //cout<<queries[i][2+j]<<" "; } //cout<<endl; for(int j=0;j<num;j++){ array[j] = queries[i][2+j]; //cout<<queries[i][2+j]<<" "; //bool temp = searchtree(tree,queries[i][2+j]); } // cout<<endl; cudaMemcpy(gpuarray,array,num*sizeof(int),cudaMemcpyHostToDevice); gpusearch<<<11,num/10 + 1>>>(n,m,num,gpuarray,gpuoutput,gputree); cudaDeviceSynchronize(); int *output; output = (int *) malloc((num*m)*sizeof(int)); cudaMemcpy(output,gpuoutput,(num*m)*sizeof(int),cudaMemcpyDeviceToHost); for(int j=0;j<num;j++){ //cout<<"a"; if(output[j*m]==-1){ fout<<"-1"<<endl; continue; } for(int k=0;k<m;k++){ fout<<output[j*m+k]<<" "; } fout<<endl; } } else if(queries[i][0]==2){ //continue; int num = queries[i][1]; int *gpuarray,*array; array = (int *) malloc((2*num)*sizeof(int)); cudaMalloc(&gpuarray,(2*num)*sizeof(int)); for(int j=0;j<2*num;j++){ array[j] = queries[i][2+j]; } cudaMemcpy(gpuarray,array,(2*num)*sizeof(int),cudaMemcpyHostToDevice); Node **startpoint; cudaMalloc(&startpoint,num*sizeof(Node*)); Node **cpustartpoint; gpurange<<<11,num/10 + 1>>>(n,m,num,gpuarray,gputree,startpoint); cpustartpoint = (Node**)malloc(num*sizeof(Node*)); cudaMemcpy(cpustartpoint,startpoint,num*sizeof(Node*),cudaMemcpyDeviceToHost); for(int j=0;j<num;j++){ Node* ptr = cpustartpoint[j]; int a = array[2*j]; int b = array[2*j+1]; int itr=0; while(ptr!=NULL){ for(int k=0;k<ptr->size;k++){ if(ptr->keys[k]>=a&&ptr->keys[k]<=b){ itr++; fout<<ptr->keys[k]<<" "; for(int l=0;l<m-1;l++){ fout<<ptr->values[k][l]<<" "; } fout<<endl; } } ptr=ptr->next; } if(itr==0){ fout<<"-1"<<endl; } } } else if(queries[i][0]==3){ //no output reqd int num = queries[i][1]; int *gpuarray,*array; Node **leafpoint,**cpuleafpoint; cudaMalloc(&leafpoint,num*sizeof(Node*)); cpuleafpoint = (Node**) malloc(num*sizeof(Node *)); array = (int *) malloc((3*num)*sizeof(int)); cudaMalloc(&gpuarray,(3*num)*sizeof(int)); for(int j=0;j<3*num;j++){ array[j] = queries[i][2+j]; } cudaMemcpy(gpuarray,array,(3*num)*sizeof(int),cudaMemcpyHostToDevice); gpuadd<<<11,num/10 + 1>>>(n,m,num,gpuarray,gputree,leafpoint); cudaMemcpy(cpuleafpoint,leafpoint,num*sizeof(Node*),cudaMemcpyDeviceToHost); for(int j=0;j<num;j++){ Node* ptr= cpuleafpoint[j]; int key,attr,upd; key = array[3*j]; attr = array[3*j+1]; upd = array[3*j+2]; for(int k=0;k<ptr->size;k++){ if(ptr->keys[k]==key){ ptr->values[k][attr-2] += upd; } } } } else{ int *gpuoutput; int val = queries[i][1]; cudaMalloc(&gpuoutput,(n)*sizeof(int)); gpupaths<<<1,1>>>(n,m,val,gpuoutput,gputree); int *output; output = (int *) malloc(n*sizeof(int)); cudaMemcpy(output,gpuoutput,n*sizeof(int),cudaMemcpyDeviceToHost); for(int i=0;i<n;i++){ if(output[i]==-1){ break; } fout<<output[i]<<" "; } fout<<endl; } } } int main(int argc,char **argv){ //variable declarations int n,m,q; //Input file pointer declaration FILE *inputfilepointer; //File Opening for read char *inputfilename = argv[1]; inputfilepointer = fopen( inputfilename , "r"); //Checking if file ptr is NULL if ( inputfilepointer == NULL ) { printf( "input.txt file failed to open." 
); return 0; } fscanf( inputfilepointer, "%d", &n ); //scaning for number of rows fscanf( inputfilepointer, "%d", &m ); //scaning for number of columns int *database = (int *) malloc(n*m*sizeof(int)); for(int i=0;i<n;i++){ for(int j=0;j<m;j++){ fscanf( inputfilepointer, "%d", &database[i*m+j] ); } } fscanf( inputfilepointer, "%d", &q ); //scanning for number of queries int **queries = (int **) malloc(q*sizeof(int *)); for(int i=0;i<q;i++){ int typeop; fscanf( inputfilepointer, "%d", &typeop ); if(typeop==4){ //cout<<"a"<<endl; queries[i] = (int *) malloc (2*sizeof(int)); queries[i][0] = 4; fscanf( inputfilepointer, "%d", &queries[i][1]); } else if(typeop==3){ //cout<<"b"<<endl; int num; fscanf( inputfilepointer, "%d", &num ); queries[i] = (int *) malloc((2+3*num)*sizeof(int)); queries[i][0] = 3; queries[i][1] = num; for(int j=0;j<3*num;j++){ fscanf( inputfilepointer, "%d", &queries[i][2+j] ); } } else if(typeop==2){ //cout<<"c"<<endl; int num; fscanf( inputfilepointer, "%d", &num ); queries[i] = (int *) malloc((2+2*num)*sizeof(int)); queries[i][0] = 2; queries[i][1] = num; for(int j=0;j<2*num;j++){ fscanf( inputfilepointer, "%d", &queries[i][2+j] ); } } else { //cout<<"d"<<endl; int num; fscanf( inputfilepointer, "%d", &num ); queries[i] = (int *) malloc((2+num)*sizeof(int)); queries[i][0] = 1; queries[i][1] = num; for(int j=0;j<num;j++){ fscanf( inputfilepointer, "%d", &queries[i][2+j] ); } } } char *outputfilename = argv[2]; main2 ( n, m, q, database, queries, outputfilename); //cout<<"done"; fclose( inputfilepointer ); return 0; }
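// Illustrative sketch, not part of the file pair above: the gpuadd kernel updates record
// attributes with atomicAdd so that two type-3 queries hitting the same key do not lose
// increments. A minimal standalone demonstration of that pattern on a plain int counter
// (the kernel name bump and the counter variable are made-up names for this sketch).
#include <cstdio>
#include <cuda_runtime.h>

__global__ void bump(int *counter, int upd) {
    // every launched thread adds to the same address; atomicAdd serializes the writes
    atomicAdd(counter, upd);
}

int main() {
    int h_counter = 0, *d_counter;
    cudaMalloc(&d_counter, sizeof(int));
    cudaMemcpy(d_counter, &h_counter, sizeof(int), cudaMemcpyHostToDevice);
    bump<<<11, 32>>>(d_counter, 1);    // 11 blocks of 32 threads, mirroring the <<<11, ...>>> launches above
    cudaMemcpy(&h_counter, d_counter, sizeof(int), cudaMemcpyDeviceToHost);
    printf("counter = %d (expected 352)\n", h_counter);
    cudaFree(d_counter);
    return 0;
}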
5eb3ddc42d74919be26027d2ec871a3411e6f63f.hip
// !!! This is a file automatically generated by hipify!!! #include "debug_macros.h" #include <assert.h> using namespace std; namespace popart { static bool cuda_only_sync_calls = false; void pop_cuda_only_sync_calls( bool on ) { cuda_only_sync_calls = on; } void pop_cuda_checkerror_ifsync( const char* file, size_t line ) { if( not cuda_only_sync_calls ) return; hipDeviceSynchronize(); hipError_t err = hipGetLastError( ); if( err != hipSuccess ) { std::cerr << __FILE__ << ":" << __LINE__ << std::endl << " called from " << file << ":" << line << std::endl << " hipGetLastError failed: " << hipGetErrorString(err) << std::endl; exit( -__LINE__ ); } } void pop_info_gridsize( bool silent, dim3& grid,dim3& block, const string& kernel, const char* file, size_t line ) { if( silent ) return; // std::cerr << __FILE__ << ":" << __LINE__ << std::endl // << " called from " << file << ":" << line << std::endl; std::cerr << " " << kernel << " started with " << grid.x*grid.y*grid.z*block.x*block.y*block.z << " threads ("; if( grid.z == 1 && grid.y == 1 ) std::cerr << grid.x; else if( grid.z == 1 ) std::cerr << "{" << grid.x << "," << grid.y << ")"; else std::cerr << "{" << grid.x << "," << grid.y << "," << grid.z << ")"; std::cerr << " blocks a "; if( block.z == 1 && block.y == 1 ) std::cerr << block.x; else if( block.z == 1 ) std::cerr << "{" << block.x << "," << block.y << ")"; else std::cerr << "{" << block.x << "," << block.y << "," << block.z << ")"; std::cerr << " threads)" << endl; } void pop_stream_synchronize( hipStream_t stream, const char* file, size_t line ) { hipError_t err = hipStreamSynchronize( stream ); if( err != hipSuccess ) { std::cerr << __FILE__ << ":" << __LINE__ << std::endl << " called from " << file << ":" << line << std::endl << " hipStreamSynchronize failed: " << hipGetErrorString(err) << std::endl; exit( -__LINE__ ); } } void pop_check_last_error( const char* file, size_t line ) { hipError_t err = hipGetLastError( ); if( err != hipSuccess ) { std::cerr << __FILE__ << ":" << __LINE__ << std::endl << " called from " << file << ":" << line << std::endl << " hipGetLastError failed: " << hipGetErrorString(err) << std::endl; exit( -__LINE__ ); } } void pop_sync_and_check_last_error( const char* file, size_t line ) { hipDeviceSynchronize(); hipError_t err = hipGetLastError( ); if( err != hipSuccess ) { std::cerr << __FILE__ << ":" << __LINE__ << std::endl << " called from " << file << ":" << line << std::endl << " hipGetLastError failed: " << hipGetErrorString(err) << std::endl; exit( -__LINE__ ); } } void pop_cuda_malloc( void** ptr, uint32_t byte_size, const char* file, uint32_t line ) { hipError_t err; err = hipMalloc( ptr, byte_size ); POP_CUDA_FATAL_TEST_FL( err, "hipMalloc failed to allocate device memory: ", file, line ); #ifndef NDEBUG pop_cuda_memset_sync( *ptr, 255, byte_size, file, line ); #endif // NDEBUG } void pop_cuda_malloc_pitch( void** ptr, size_t* byte_pitch, uint32_t byte_width, uint32_t byte_height, const char* file, uint32_t line ) { hipError_t err; err = hipMallocPitch( ptr, byte_pitch, byte_width, byte_height ); POP_CUDA_FATAL_TEST_FL( err, "hipMallocPitch failed to allocate device memory: ", file, line ); #ifndef NDEBUG pop_cuda_memset_sync( *ptr, 255, (*byte_pitch)*byte_height, file, line ); #endif // NDEBUG } void pop_cuda_free( void* ptr, const char* file, uint32_t line ) { hipError_t err; err = hipFree( ptr ); POP_CUDA_FATAL_TEST_FL( err, "hipFree failed to release device memory: ", file, line ); } void pop_cuda_free_host( void* ptr, const char* file, 
uint32_t line ) { hipError_t err; err = hipHostFree( ptr ); POP_CUDA_FATAL_TEST_FL( err, "hipFree failed to release device memory: ", file, line ); } /************************************************************* * Group: memcpy *************************************************************/ namespace cuda { namespace toHost { void memcpy( void* dst, const void* src, size_t sz, const char* file, size_t line ) { POP_CHECK_NON_NULL_FL( dst, "Dest ptr in memcpy async is null.", file, line ); POP_CHECK_NON_NULL_FL( src, "Source ptr in memcpy async is null.", file, line ); POP_CHECK_NON_NULL_FL( sz, "Size in memcpy async is null.", file, line ); hipError_t err; err = hipMemcpy( dst, src, sz, hipMemcpyDeviceToHost ); if( err != hipSuccess ) { cerr << file << ":" << line << endl << " " << "Failed to copy device-to-host: " << hipGetErrorString(err) << endl << " src ptr=" << hex << (size_t)src << dec << endl << " dst ptr=" << hex << (size_t)dst << dec << endl; exit( -__LINE__ ); } POP_CUDA_FATAL_TEST( err, "Failed to copy device-to-host: " ); } void memcpy( void* dst, const void* src, size_t sz, hipStream_t stream, const char* file, size_t line ) { POP_CHECK_NON_NULL_FL( dst, "Dest ptr in memcpy async is null.", file, line ); POP_CHECK_NON_NULL_FL( src, "Source ptr in memcpy async is null.", file, line ); POP_CHECK_NON_NULL_FL( sz, "Size in memcpy async is null.", file, line ); hipError_t err; err = hipMemcpyAsync( dst, src, sz, hipMemcpyDeviceToHost, stream ); if( err != hipSuccess ) { cerr << file << ":" << line << endl << " " << "Failed to copy device-to-host: " << hipGetErrorString(err) << endl << " src ptr=" << hex << (size_t)src << dec << endl << " dst ptr=" << hex << (size_t)dst << dec << endl; exit( -__LINE__ ); } POP_CUDA_FATAL_TEST( err, "Failed to copy device-to-host: " ); } void memcpy2D( void* dst, size_t dpitch, const void* src, size_t spitch, size_t width, size_t height, const char* file, size_t line ) { hipError_t err; err = hipMemcpy2D( dst, dpitch, src, spitch, width, height, hipMemcpyDeviceToHost, stream ); if( err != hipSuccess ) { cerr << file << ":" << line << endl << " hipMemcpy2DAsync failed to copy device-to-host: " << hipGetErrorString(err) << endl << " src ptr=" << hex << (size_t)src << dec << endl << " dst ptr=" << hex << (size_t)dst << dec << endl; exit( -__LINE__ ); } POP_CUDA_FATAL_TEST( err, "Failed to copy host-to-device: " ); } void memcpy2D( void* dst, size_t dpitch, const void* src, size_t spitch, size_t width, size_t height, hipStream_t stream, const char* file, size_t line ) { hipError_t err; err = hipMemcpy2DAsync( dst, dpitch, src, spitch, width, height, hipMemcpyDeviceToHost, stream ); if( err != hipSuccess ) { cerr << file << ":" << line << endl << " hipMemcpy2DAsync failed to copy device-to-host: " << hipGetErrorString(err) << endl << " src ptr=" << hex << (size_t)src << dec << endl << " dst ptr=" << hex << (size_t)dst << dec << endl; exit( -__LINE__ ); } POP_CUDA_FATAL_TEST( err, "Failed to copy host-to-device: " ); } } // namespace toHost } // namespace cuda namespace cuda { namespace toDev { void memcpy( void* dst, const void* src, size_t sz, const char* file, size_t line ) { POP_CHECK_NON_NULL_FL( dst, "Dest ptr in memcpy async is null.", file, line ); POP_CHECK_NON_NULL_FL( src, "Source ptr in memcpy async is null.", file, line ); POP_CHECK_NON_NULL_FL( sz, "Size in memcpy async is null.", file, line ); hipError_t err; err = hipMemcpy( dst, src, sz, hipMemcpyHostToDevice ); if( err != hipSuccess ) { cerr << file << ":" << line << endl << " " << "Failed 
to copy host-to-device: " << hipGetErrorString(err) << endl << " src ptr=" << hex << (size_t)src << dec << endl << " dst ptr=" << hex << (size_t)dst << dec << endl; exit( -__LINE__ ); } POP_CUDA_FATAL_TEST( err, "Failed to copy host-to-device: " ); } void memcpy( void* dst, const void* src, size_t sz, hipStream_t stream, const char* file, size_t line ) { POP_CHECK_NON_NULL_FL( dst, "Dest ptr in memcpy async is null.", file, line ); POP_CHECK_NON_NULL_FL( src, "Source ptr in memcpy async is null.", file, line ); POP_CHECK_NON_NULL_FL( sz, "Size in memcpy async is null.", file, line ); hipError_t err; err = hipMemcpyAsync( dst, src, sz, hipMemcpyHostToDevice, stream ); if( err != hipSuccess ) { cerr << file << ":" << line << endl << " " << "Failed to copy host-to-device: " << hipGetErrorString(err) << endl << " src ptr=" << hex << (size_t)src << dec << endl << " dst ptr=" << hex << (size_t)dst << dec << endl; exit( -__LINE__ ); } POP_CUDA_FATAL_TEST( err, "Failed to copy host-to-device: " ); } void memcpy2D( void* dst, size_t dpitch, const void* src, size_t spitch, size_t width, size_t height, const char* file, size_t line ) { hipError_t err; err = hipMemcpy2D( dst, dpitch, src, spitch, width, height, hipMemcpyHostToDevice, stream ); if( err != hipSuccess ) { cerr << file << ":" << line << endl << " hipMemcpy2DAsync failed to copy host-to-device: " << hipGetErrorString(err) << endl << " src ptr=" << hex << (size_t)src << dec << endl << " dst ptr=" << hex << (size_t)dst << dec << endl; exit( -__LINE__ ); } POP_CUDA_FATAL_TEST( err, "Failed to copy host-to-device: " ); } void memcpy2D( void* dst, size_t dpitch, const void* src, size_t spitch, size_t width, size_t height, hipStream_t stream, const char* file, size_t line ) { hipError_t err; err = hipMemcpy2DAsync( dst, dpitch, src, spitch, width, height, hipMemcpyHostToDevice, stream ); if( err != hipSuccess ) { cerr << file << ":" << line << endl << " hipMemcpy2DAsync failed to copy host-to-device: " << hipGetErrorString(err) << endl << " src ptr=" << hex << (size_t)src << dec << endl << " dst ptr=" << hex << (size_t)dst << dec << endl; exit( -__LINE__ ); } POP_CUDA_FATAL_TEST( err, "Failed to copy host-to-device: " ); } } // namespace toDev } // namespace cuda void pop_cuda_memcpy_to_symbol_async( const void* symbol, const void* src, size_t sz, size_t offset, hipMemcpyKind type, hipStream_t stream, const char* file, size_t line ) { POP_CHECK_NON_NULL( src, "Source ptr in memcpy async is null." ); POP_CHECK_NON_NULL( sz, "Size in memcpy async is null." ); hipError_t err; err = hipMemcpyToSymbolAsync( symbol, src, sz, offset, type, stream ); if( err != hipSuccess ) { cerr << file << ":" << line << endl << " " << "Failed to copy to symbol " << (type==hipMemcpyHostToDevice?"host-to-device":"device-to-host") << ": "; cerr << hipGetErrorString(err) << endl; cerr << " src ptr=" << hex << (size_t)src << dec << endl << " dst ptr=" << hex << (size_t)symbol << dec << endl; exit( -__LINE__ ); } POP_CUDA_FATAL_TEST( err, "Failed to copy host-to-device: " ); } void pop_cuda_memcpy_to_symbol_sync( const void* symbol, const void* src, size_t sz, size_t offset, hipMemcpyKind type, const char* file, size_t line ) { POP_CHECK_NON_NULL( src, "Source ptr in memcpy async is null." ); POP_CHECK_NON_NULL( sz, "Size in memcpy async is null." 
); hipError_t err; err = hipMemcpyToSymbol( symbol, src, sz, offset, type ); if( err != hipSuccess ) { cerr << file << ":" << line << endl << " " << "Failed to copy to symbol " << (type==hipMemcpyHostToDevice?"host-to-device":"device-to-host") << ": "; cerr << hipGetErrorString(err) << endl; cerr << " src ptr=" << hex << (size_t)src << dec << endl << " dst ptr=" << hex << (size_t)symbol << dec << endl; exit( -__LINE__ ); } POP_CUDA_FATAL_TEST( err, "Failed to copy host-to-device: " ); } /************************************************************* * Group: memset *************************************************************/ void pop_cuda_memset_async( void* ptr, int value, size_t bytes, hipStream_t stream, const char* file, size_t line ) { if( cuda_only_sync_calls ) { pop_cuda_memset_sync( ptr, value, bytes, file, line ); return; } hipError_t err; err = hipMemsetAsync( ptr, value, bytes, stream ); POP_CUDA_FATAL_TEST_FL( err, "hipMemsetAsync failed: ", file, line ); } void pop_cuda_memset_sync( void* ptr, int value, size_t bytes, const char* file, size_t line ) { hipError_t err; err = hipMemset( ptr, value, bytes ); POP_CUDA_FATAL_TEST_FL( err, "hipMemset failed: ", file, line ); } void pop_cuda_stream_create( hipStream_t* stream, const char* file, uint32_t line ) { hipError_t err; err = hipStreamCreate( stream ); POP_CUDA_FATAL_TEST_FL( err, "hipStreamCreate failed: ", file, line ); } void pop_cuda_stream_destroy( hipStream_t stream, const char* file, uint32_t line ) { hipError_t err; err = hipStreamDestroy( stream ); POP_CUDA_FATAL_TEST_FL( err, "hipStreamDestroy failed: ", file, line ); } }; // namespace popart
5eb3ddc42d74919be26027d2ec871a3411e6f63f.cu
#include "debug_macros.h" #include <assert.h> using namespace std; namespace popart { static bool cuda_only_sync_calls = false; void pop_cuda_only_sync_calls( bool on ) { cuda_only_sync_calls = on; } void pop_cuda_checkerror_ifsync( const char* file, size_t line ) { if( not cuda_only_sync_calls ) return; cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError( ); if( err != cudaSuccess ) { std::cerr << __FILE__ << ":" << __LINE__ << std::endl << " called from " << file << ":" << line << std::endl << " cudaGetLastError failed: " << cudaGetErrorString(err) << std::endl; exit( -__LINE__ ); } } void pop_info_gridsize( bool silent, dim3& grid,dim3& block, const string& kernel, const char* file, size_t line ) { if( silent ) return; // std::cerr << __FILE__ << ":" << __LINE__ << std::endl // << " called from " << file << ":" << line << std::endl; std::cerr << " " << kernel << " started with " << grid.x*grid.y*grid.z*block.x*block.y*block.z << " threads ("; if( grid.z == 1 && grid.y == 1 ) std::cerr << grid.x; else if( grid.z == 1 ) std::cerr << "{" << grid.x << "," << grid.y << ")"; else std::cerr << "{" << grid.x << "," << grid.y << "," << grid.z << ")"; std::cerr << " blocks a "; if( block.z == 1 && block.y == 1 ) std::cerr << block.x; else if( block.z == 1 ) std::cerr << "{" << block.x << "," << block.y << ")"; else std::cerr << "{" << block.x << "," << block.y << "," << block.z << ")"; std::cerr << " threads)" << endl; } void pop_stream_synchronize( cudaStream_t stream, const char* file, size_t line ) { cudaError_t err = cudaStreamSynchronize( stream ); if( err != cudaSuccess ) { std::cerr << __FILE__ << ":" << __LINE__ << std::endl << " called from " << file << ":" << line << std::endl << " cudaStreamSynchronize failed: " << cudaGetErrorString(err) << std::endl; exit( -__LINE__ ); } } void pop_check_last_error( const char* file, size_t line ) { cudaError_t err = cudaGetLastError( ); if( err != cudaSuccess ) { std::cerr << __FILE__ << ":" << __LINE__ << std::endl << " called from " << file << ":" << line << std::endl << " cudaGetLastError failed: " << cudaGetErrorString(err) << std::endl; exit( -__LINE__ ); } } void pop_sync_and_check_last_error( const char* file, size_t line ) { cudaDeviceSynchronize(); cudaError_t err = cudaGetLastError( ); if( err != cudaSuccess ) { std::cerr << __FILE__ << ":" << __LINE__ << std::endl << " called from " << file << ":" << line << std::endl << " cudaGetLastError failed: " << cudaGetErrorString(err) << std::endl; exit( -__LINE__ ); } } void pop_cuda_malloc( void** ptr, uint32_t byte_size, const char* file, uint32_t line ) { cudaError_t err; err = cudaMalloc( ptr, byte_size ); POP_CUDA_FATAL_TEST_FL( err, "cudaMalloc failed to allocate device memory: ", file, line ); #ifndef NDEBUG pop_cuda_memset_sync( *ptr, 255, byte_size, file, line ); #endif // NDEBUG } void pop_cuda_malloc_pitch( void** ptr, size_t* byte_pitch, uint32_t byte_width, uint32_t byte_height, const char* file, uint32_t line ) { cudaError_t err; err = cudaMallocPitch( ptr, byte_pitch, byte_width, byte_height ); POP_CUDA_FATAL_TEST_FL( err, "cudaMallocPitch failed to allocate device memory: ", file, line ); #ifndef NDEBUG pop_cuda_memset_sync( *ptr, 255, (*byte_pitch)*byte_height, file, line ); #endif // NDEBUG } void pop_cuda_free( void* ptr, const char* file, uint32_t line ) { cudaError_t err; err = cudaFree( ptr ); POP_CUDA_FATAL_TEST_FL( err, "cudaFree failed to release device memory: ", file, line ); } void pop_cuda_free_host( void* ptr, const char* file, uint32_t line ) { cudaError_t 
err; err = cudaFreeHost( ptr ); POP_CUDA_FATAL_TEST_FL( err, "cudaFree failed to release device memory: ", file, line ); } /************************************************************* * Group: memcpy *************************************************************/ namespace cuda { namespace toHost { void memcpy( void* dst, const void* src, size_t sz, const char* file, size_t line ) { POP_CHECK_NON_NULL_FL( dst, "Dest ptr in memcpy async is null.", file, line ); POP_CHECK_NON_NULL_FL( src, "Source ptr in memcpy async is null.", file, line ); POP_CHECK_NON_NULL_FL( sz, "Size in memcpy async is null.", file, line ); cudaError_t err; err = cudaMemcpy( dst, src, sz, cudaMemcpyDeviceToHost ); if( err != cudaSuccess ) { cerr << file << ":" << line << endl << " " << "Failed to copy device-to-host: " << cudaGetErrorString(err) << endl << " src ptr=" << hex << (size_t)src << dec << endl << " dst ptr=" << hex << (size_t)dst << dec << endl; exit( -__LINE__ ); } POP_CUDA_FATAL_TEST( err, "Failed to copy device-to-host: " ); } void memcpy( void* dst, const void* src, size_t sz, cudaStream_t stream, const char* file, size_t line ) { POP_CHECK_NON_NULL_FL( dst, "Dest ptr in memcpy async is null.", file, line ); POP_CHECK_NON_NULL_FL( src, "Source ptr in memcpy async is null.", file, line ); POP_CHECK_NON_NULL_FL( sz, "Size in memcpy async is null.", file, line ); cudaError_t err; err = cudaMemcpyAsync( dst, src, sz, cudaMemcpyDeviceToHost, stream ); if( err != cudaSuccess ) { cerr << file << ":" << line << endl << " " << "Failed to copy device-to-host: " << cudaGetErrorString(err) << endl << " src ptr=" << hex << (size_t)src << dec << endl << " dst ptr=" << hex << (size_t)dst << dec << endl; exit( -__LINE__ ); } POP_CUDA_FATAL_TEST( err, "Failed to copy device-to-host: " ); } void memcpy2D( void* dst, size_t dpitch, const void* src, size_t spitch, size_t width, size_t height, const char* file, size_t line ) { cudaError_t err; err = cudaMemcpy2D( dst, dpitch, src, spitch, width, height, cudaMemcpyDeviceToHost, stream ); if( err != cudaSuccess ) { cerr << file << ":" << line << endl << " cudaMemcpy2DAsync failed to copy device-to-host: " << cudaGetErrorString(err) << endl << " src ptr=" << hex << (size_t)src << dec << endl << " dst ptr=" << hex << (size_t)dst << dec << endl; exit( -__LINE__ ); } POP_CUDA_FATAL_TEST( err, "Failed to copy host-to-device: " ); } void memcpy2D( void* dst, size_t dpitch, const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream, const char* file, size_t line ) { cudaError_t err; err = cudaMemcpy2DAsync( dst, dpitch, src, spitch, width, height, cudaMemcpyDeviceToHost, stream ); if( err != cudaSuccess ) { cerr << file << ":" << line << endl << " cudaMemcpy2DAsync failed to copy device-to-host: " << cudaGetErrorString(err) << endl << " src ptr=" << hex << (size_t)src << dec << endl << " dst ptr=" << hex << (size_t)dst << dec << endl; exit( -__LINE__ ); } POP_CUDA_FATAL_TEST( err, "Failed to copy host-to-device: " ); } } // namespace toHost } // namespace cuda namespace cuda { namespace toDev { void memcpy( void* dst, const void* src, size_t sz, const char* file, size_t line ) { POP_CHECK_NON_NULL_FL( dst, "Dest ptr in memcpy async is null.", file, line ); POP_CHECK_NON_NULL_FL( src, "Source ptr in memcpy async is null.", file, line ); POP_CHECK_NON_NULL_FL( sz, "Size in memcpy async is null.", file, line ); cudaError_t err; err = cudaMemcpy( dst, src, sz, cudaMemcpyHostToDevice ); if( err != cudaSuccess ) { cerr << file << ":" << line << endl << " " << 
"Failed to copy host-to-device: " << cudaGetErrorString(err) << endl << " src ptr=" << hex << (size_t)src << dec << endl << " dst ptr=" << hex << (size_t)dst << dec << endl; exit( -__LINE__ ); } POP_CUDA_FATAL_TEST( err, "Failed to copy host-to-device: " ); } void memcpy( void* dst, const void* src, size_t sz, cudaStream_t stream, const char* file, size_t line ) { POP_CHECK_NON_NULL_FL( dst, "Dest ptr in memcpy async is null.", file, line ); POP_CHECK_NON_NULL_FL( src, "Source ptr in memcpy async is null.", file, line ); POP_CHECK_NON_NULL_FL( sz, "Size in memcpy async is null.", file, line ); cudaError_t err; err = cudaMemcpyAsync( dst, src, sz, cudaMemcpyHostToDevice, stream ); if( err != cudaSuccess ) { cerr << file << ":" << line << endl << " " << "Failed to copy host-to-device: " << cudaGetErrorString(err) << endl << " src ptr=" << hex << (size_t)src << dec << endl << " dst ptr=" << hex << (size_t)dst << dec << endl; exit( -__LINE__ ); } POP_CUDA_FATAL_TEST( err, "Failed to copy host-to-device: " ); } void memcpy2D( void* dst, size_t dpitch, const void* src, size_t spitch, size_t width, size_t height, const char* file, size_t line ) { cudaError_t err; err = cudaMemcpy2D( dst, dpitch, src, spitch, width, height, cudaMemcpyHostToDevice, stream ); if( err != cudaSuccess ) { cerr << file << ":" << line << endl << " cudaMemcpy2DAsync failed to copy host-to-device: " << cudaGetErrorString(err) << endl << " src ptr=" << hex << (size_t)src << dec << endl << " dst ptr=" << hex << (size_t)dst << dec << endl; exit( -__LINE__ ); } POP_CUDA_FATAL_TEST( err, "Failed to copy host-to-device: " ); } void memcpy2D( void* dst, size_t dpitch, const void* src, size_t spitch, size_t width, size_t height, cudaStream_t stream, const char* file, size_t line ) { cudaError_t err; err = cudaMemcpy2DAsync( dst, dpitch, src, spitch, width, height, cudaMemcpyHostToDevice, stream ); if( err != cudaSuccess ) { cerr << file << ":" << line << endl << " cudaMemcpy2DAsync failed to copy host-to-device: " << cudaGetErrorString(err) << endl << " src ptr=" << hex << (size_t)src << dec << endl << " dst ptr=" << hex << (size_t)dst << dec << endl; exit( -__LINE__ ); } POP_CUDA_FATAL_TEST( err, "Failed to copy host-to-device: " ); } } // namespace toDev } // namespace cuda void pop_cuda_memcpy_to_symbol_async( const void* symbol, const void* src, size_t sz, size_t offset, cudaMemcpyKind type, cudaStream_t stream, const char* file, size_t line ) { POP_CHECK_NON_NULL( src, "Source ptr in memcpy async is null." ); POP_CHECK_NON_NULL( sz, "Size in memcpy async is null." ); cudaError_t err; err = cudaMemcpyToSymbolAsync( symbol, src, sz, offset, type, stream ); if( err != cudaSuccess ) { cerr << file << ":" << line << endl << " " << "Failed to copy to symbol " << (type==cudaMemcpyHostToDevice?"host-to-device":"device-to-host") << ": "; cerr << cudaGetErrorString(err) << endl; cerr << " src ptr=" << hex << (size_t)src << dec << endl << " dst ptr=" << hex << (size_t)symbol << dec << endl; exit( -__LINE__ ); } POP_CUDA_FATAL_TEST( err, "Failed to copy host-to-device: " ); } void pop_cuda_memcpy_to_symbol_sync( const void* symbol, const void* src, size_t sz, size_t offset, cudaMemcpyKind type, const char* file, size_t line ) { POP_CHECK_NON_NULL( src, "Source ptr in memcpy async is null." ); POP_CHECK_NON_NULL( sz, "Size in memcpy async is null." 
); cudaError_t err; err = cudaMemcpyToSymbol( symbol, src, sz, offset, type ); if( err != cudaSuccess ) { cerr << file << ":" << line << endl << " " << "Failed to copy to symbol " << (type==cudaMemcpyHostToDevice?"host-to-device":"device-to-host") << ": "; cerr << cudaGetErrorString(err) << endl; cerr << " src ptr=" << hex << (size_t)src << dec << endl << " dst ptr=" << hex << (size_t)symbol << dec << endl; exit( -__LINE__ ); } POP_CUDA_FATAL_TEST( err, "Failed to copy host-to-device: " ); } /************************************************************* * Group: memset *************************************************************/ void pop_cuda_memset_async( void* ptr, int value, size_t bytes, cudaStream_t stream, const char* file, size_t line ) { if( cuda_only_sync_calls ) { pop_cuda_memset_sync( ptr, value, bytes, file, line ); return; } cudaError_t err; err = cudaMemsetAsync( ptr, value, bytes, stream ); POP_CUDA_FATAL_TEST_FL( err, "cudaMemsetAsync failed: ", file, line ); } void pop_cuda_memset_sync( void* ptr, int value, size_t bytes, const char* file, size_t line ) { cudaError_t err; err = cudaMemset( ptr, value, bytes ); POP_CUDA_FATAL_TEST_FL( err, "cudaMemset failed: ", file, line ); } void pop_cuda_stream_create( cudaStream_t* stream, const char* file, uint32_t line ) { cudaError_t err; err = cudaStreamCreate( stream ); POP_CUDA_FATAL_TEST_FL( err, "cudaStreamCreate failed: ", file, line ); } void pop_cuda_stream_destroy( cudaStream_t stream, const char* file, uint32_t line ) { cudaError_t err; err = cudaStreamDestroy( stream ); POP_CUDA_FATAL_TEST_FL( err, "cudaStreamDestroy failed: ", file, line ); } }; // namespace popart
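// Illustrative sketch, not taken from the debug_macros pair above: every wrapper in it follows
// the same shape -- run a CUDA call, compare the result with cudaSuccess, and report the
// caller's file and line before exiting. A stripped-down version of that idea as a macro;
// CUDA_CHECK is a hypothetical name for this sketch, not one the popart code defines.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

#define CUDA_CHECK(call)                                                     \
    do {                                                                     \
        cudaError_t e = (call);                                              \
        if (e != cudaSuccess) {                                              \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,               \
                    cudaGetErrorString(e));                                  \
            exit(EXIT_FAILURE);                                              \
        }                                                                    \
    } while (0)

int main() {
    void *p = nullptr;
    CUDA_CHECK(cudaMalloc(&p, 1024));   // aborts with file/line information on failure
    CUDA_CHECK(cudaFree(p));
    return 0;
}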
9487b59b629c436116618ca93153f6326152ce92.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. hipError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != hipSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = hipDeviceReset(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. hipError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; hipError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = hipMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } cudaStatus = hipMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = hipMemcpy(dev_a, a, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } cudaStatus = hipMemcpy(dev_b, b, size * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. hipLaunchKernelGGL(( addKernel), dim3(1), dim3(size), 0, 0, dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", hipGetErrorString(cudaStatus)); goto Error; } // hipDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = hipDeviceSynchronize(); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = hipMemcpy(c, dev_c, size * sizeof(int), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); goto Error; } Error: hipFree(dev_c); hipFree(dev_a); hipFree(dev_b); return cudaStatus; }
9487b59b629c436116618ca93153f6326152ce92.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size); __global__ void addKernel(int *c, const int *a, const int *b) { int i = threadIdx.x; c[i] = a[i] + b[i]; } int main() { const int arraySize = 5; const int a[arraySize] = { 1, 2, 3, 4, 5 }; const int b[arraySize] = { 10, 20, 30, 40, 50 }; int c[arraySize] = { 0 }; // Add vectors in parallel. cudaError_t cudaStatus = addWithCuda(c, a, b, arraySize); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addWithCuda failed!"); return 1; } printf("{1,2,3,4,5} + {10,20,30,40,50} = {%d,%d,%d,%d,%d}\n", c[0], c[1], c[2], c[3], c[4]); // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaStatus = cudaDeviceReset(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceReset failed!"); return 1; } return 0; } // Helper function for using CUDA to add vectors in parallel. cudaError_t addWithCuda(int *c, const int *a, const int *b, unsigned int size) { int *dev_a = 0; int *dev_b = 0; int *dev_c = 0; cudaError_t cudaStatus; // Choose which GPU to run on, change this on a multi-GPU system. cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaSetDevice failed! Do you have a CUDA-capable GPU installed?"); goto Error; } // Allocate GPU buffers for three vectors (two input, one output) . cudaStatus = cudaMalloc((void**)&dev_c, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_a, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } cudaStatus = cudaMalloc((void**)&dev_b, size * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); goto Error; } // Copy input vectors from host memory to GPU buffers. cudaStatus = cudaMemcpy(dev_a, a, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } cudaStatus = cudaMemcpy(dev_b, b, size * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } // Launch a kernel on the GPU with one thread for each element. addKernel<<<1, size>>>(dev_c, dev_a, dev_b); // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "addKernel launch failed: %s\n", cudaGetErrorString(cudaStatus)); goto Error; } // cudaDeviceSynchronize waits for the kernel to finish, and returns // any errors encountered during the launch. cudaStatus = cudaDeviceSynchronize(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaDeviceSynchronize returned error code %d after launching addKernel!\n", cudaStatus); goto Error; } // Copy output vector from GPU buffer to host memory. cudaStatus = cudaMemcpy(c, dev_c, size * sizeof(int), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); goto Error; } Error: cudaFree(dev_c); cudaFree(dev_a); cudaFree(dev_b); return cudaStatus; }
311ae94fdccdbdc121dfc131daf3ea155b5d59f6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel1_t1; int xdim0_update_halo_kernel1_t1_h = -1; int ydim0_update_halo_kernel1_t1_h = -1; __constant__ int xdim1_update_halo_kernel1_t1; int xdim1_update_halo_kernel1_t1_h = -1; int ydim1_update_halo_kernel1_t1_h = -1; __constant__ int xdim2_update_halo_kernel1_t1; int xdim2_update_halo_kernel1_t1_h = -1; int ydim2_update_halo_kernel1_t1_h = -1; __constant__ int xdim3_update_halo_kernel1_t1; int xdim3_update_halo_kernel1_t1_h = -1; int ydim3_update_halo_kernel1_t1_h = -1; __constant__ int xdim4_update_halo_kernel1_t1; int xdim4_update_halo_kernel1_t1_h = -1; int ydim4_update_halo_kernel1_t1_h = -1; __constant__ int xdim5_update_halo_kernel1_t1; int xdim5_update_halo_kernel1_t1_h = -1; int ydim5_update_halo_kernel1_t1_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #define OPS_ACC0(x,y) (x+xdim0_update_halo_kernel1_t1*(y)) #define OPS_ACC1(x,y) (x+xdim1_update_halo_kernel1_t1*(y)) #define OPS_ACC2(x,y) (x+xdim2_update_halo_kernel1_t1*(y)) #define OPS_ACC3(x,y) (x+xdim3_update_halo_kernel1_t1*(y)) #define OPS_ACC4(x,y) (x+xdim4_update_halo_kernel1_t1*(y)) #define OPS_ACC5(x,y) (x+xdim5_update_halo_kernel1_t1*(y)) //user function __device__ inline void update_halo_kernel1_t1_gpu(double *density0, double *energy0, double *energy1, double *u, double *p, double *sd , const int* fields) { if(fields[FIELD_DENSITY] == 1) density0[OPS_ACC0(0,0)] = density0[OPS_ACC0(0,-1)]; if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC1(0,0)] = energy0[OPS_ACC1(0,-1)]; if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC2(0,0)] = energy1[OPS_ACC2(0,-1)]; if(fields[FIELD_U] == 1) u[OPS_ACC3(0,0)] = u[OPS_ACC3(0,-1)]; if(fields[FIELD_P] == 1) p[OPS_ACC4(0,0)] = p[OPS_ACC4(0,-1)]; if(fields[FIELD_SD] == 1) sd[OPS_ACC5(0,0)] = sd[OPS_ACC5(0,-1)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 __global__ void ops_update_halo_kernel1_t1( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, double* __restrict arg5, const int* __restrict arg6, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel1_t1; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel1_t1; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_update_halo_kernel1_t1; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_update_halo_kernel1_t1; arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_update_halo_kernel1_t1; arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_update_halo_kernel1_t1; if (idx_x < size0 && idx_y < size1) { update_halo_kernel1_t1_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6); } } // host stub function void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) { //Timing double t1,t2,c1,c2; ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args,7,range,52)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(52,"update_halo_kernel1_t1"); OPS_kernels[52].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #ifdef OPS_MPI sub_block_list sb = 
OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<2; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; int xdim3 = args[3].dat->size[0]; int xdim4 = args[4].dat->size[0]; int xdim5 = args[5].dat->size[0]; if (xdim0 != xdim0_update_halo_kernel1_t1_h || xdim1 != xdim1_update_halo_kernel1_t1_h || xdim2 != xdim2_update_halo_kernel1_t1_h || xdim3 != xdim3_update_halo_kernel1_t1_h || xdim4 != xdim4_update_halo_kernel1_t1_h || xdim5 != xdim5_update_halo_kernel1_t1_h) { hipMemcpyToSymbol( xdim0_update_halo_kernel1_t1, &xdim0, sizeof(int) ); xdim0_update_halo_kernel1_t1_h = xdim0; hipMemcpyToSymbol( xdim1_update_halo_kernel1_t1, &xdim1, sizeof(int) ); xdim1_update_halo_kernel1_t1_h = xdim1; hipMemcpyToSymbol( xdim2_update_halo_kernel1_t1, &xdim2, sizeof(int) ); xdim2_update_halo_kernel1_t1_h = xdim2; hipMemcpyToSymbol( xdim3_update_halo_kernel1_t1, &xdim3, sizeof(int) ); xdim3_update_halo_kernel1_t1_h = xdim3; hipMemcpyToSymbol( xdim4_update_halo_kernel1_t1, &xdim4, sizeof(int) ); xdim4_update_halo_kernel1_t1_h = xdim4; hipMemcpyToSymbol( xdim5_update_halo_kernel1_t1, &xdim5, sizeof(int) ); xdim5_update_halo_kernel1_t1_h = xdim5; } int *arg6h = (int *)arg6.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg6.data = OPS_consts_h + consts_bytes; arg6.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg6.data)[d] = arg6h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; char *p_a[7]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); 
p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); p_a[5] = (char *)args[5].data_d + base5; ops_H_D_exchanges_device(args, 7); ops_halo_exchanges(args,7,range); if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[52].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel1_t1), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (int *)arg6.data_d,x_size, y_size); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[52].time += t1-t2; } ops_set_dirtybit_device(args, 7); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); ops_set_halo_dirtybit3(&args[3],range); ops_set_halo_dirtybit3(&args[4],range); ops_set_halo_dirtybit3(&args[5],range); if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[52].mpi_time += t2-t1; OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg5); } }
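// Illustrative sketch, not part of the generated OPS file above: the OPS_ACC macros turn a
// relative (x,y) stencil offset into a flat index by multiplying y with a row pitch that the
// host stub pushes into __constant__ memory through cudaMemcpyToSymbol whenever it changes.
// A reduced version of that mechanism; xdim_demo, ACC_DEMO and fill2d are made-up names.
#include <cstdio>
#include <cuda_runtime.h>

__constant__ int xdim_demo;                      // row pitch, set from the host

#define ACC_DEMO(x, y) ((x) + xdim_demo * (y))   // same flattening idea as OPS_ACC0

__global__ void fill2d(double *buf, int size0, int size1) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < size0 && y < size1)                  // same 2D bounds guard as the generated kernel
        buf[ACC_DEMO(x, y)] = 100.0 * y + x;
}

int main() {
    const int xdim = 8, ydim = 4;
    cudaMemcpyToSymbol(xdim_demo, &xdim, sizeof(int));   // mirrors the host stub's cudaMemcpyToSymbol call
    double *d_buf;
    cudaMalloc(&d_buf, xdim * ydim * sizeof(double));
    dim3 block(8, 4), grid(1, 1);
    fill2d<<<grid, block>>>(d_buf, xdim, ydim);
    double h[xdim * ydim];
    cudaMemcpy(h, d_buf, sizeof(h), cudaMemcpyDeviceToHost);
    printf("element (3,2) = %.0f (expected 203)\n", h[3 + xdim * 2]);
    cudaFree(d_buf);
    return 0;
}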
311ae94fdccdbdc121dfc131daf3ea155b5d59f6.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel1_t1; int xdim0_update_halo_kernel1_t1_h = -1; int ydim0_update_halo_kernel1_t1_h = -1; __constant__ int xdim1_update_halo_kernel1_t1; int xdim1_update_halo_kernel1_t1_h = -1; int ydim1_update_halo_kernel1_t1_h = -1; __constant__ int xdim2_update_halo_kernel1_t1; int xdim2_update_halo_kernel1_t1_h = -1; int ydim2_update_halo_kernel1_t1_h = -1; __constant__ int xdim3_update_halo_kernel1_t1; int xdim3_update_halo_kernel1_t1_h = -1; int ydim3_update_halo_kernel1_t1_h = -1; __constant__ int xdim4_update_halo_kernel1_t1; int xdim4_update_halo_kernel1_t1_h = -1; int ydim4_update_halo_kernel1_t1_h = -1; __constant__ int xdim5_update_halo_kernel1_t1; int xdim5_update_halo_kernel1_t1_h = -1; int ydim5_update_halo_kernel1_t1_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 #define OPS_ACC0(x,y) (x+xdim0_update_halo_kernel1_t1*(y)) #define OPS_ACC1(x,y) (x+xdim1_update_halo_kernel1_t1*(y)) #define OPS_ACC2(x,y) (x+xdim2_update_halo_kernel1_t1*(y)) #define OPS_ACC3(x,y) (x+xdim3_update_halo_kernel1_t1*(y)) #define OPS_ACC4(x,y) (x+xdim4_update_halo_kernel1_t1*(y)) #define OPS_ACC5(x,y) (x+xdim5_update_halo_kernel1_t1*(y)) //user function __device__ inline void update_halo_kernel1_t1_gpu(double *density0, double *energy0, double *energy1, double *u, double *p, double *sd , const int* fields) { if(fields[FIELD_DENSITY] == 1) density0[OPS_ACC0(0,0)] = density0[OPS_ACC0(0,-1)]; if(fields[FIELD_ENERGY0] == 1) energy0[OPS_ACC1(0,0)] = energy0[OPS_ACC1(0,-1)]; if(fields[FIELD_ENERGY1] == 1) energy1[OPS_ACC2(0,0)] = energy1[OPS_ACC2(0,-1)]; if(fields[FIELD_U] == 1) u[OPS_ACC3(0,0)] = u[OPS_ACC3(0,-1)]; if(fields[FIELD_P] == 1) p[OPS_ACC4(0,0)] = p[OPS_ACC4(0,-1)]; if(fields[FIELD_SD] == 1) sd[OPS_ACC5(0,0)] = sd[OPS_ACC5(0,-1)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #undef OPS_ACC5 __global__ void ops_update_halo_kernel1_t1( double* __restrict arg0, double* __restrict arg1, double* __restrict arg2, double* __restrict arg3, double* __restrict arg4, double* __restrict arg5, const int* __restrict arg6, int size0, int size1 ){ int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_update_halo_kernel1_t1; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_update_halo_kernel1_t1; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_update_halo_kernel1_t1; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_update_halo_kernel1_t1; arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_update_halo_kernel1_t1; arg5 += idx_x * 1*1 + idx_y * 1*1 * xdim5_update_halo_kernel1_t1; if (idx_x < size0 && idx_y < size1) { update_halo_kernel1_t1_gpu(arg0, arg1, arg2, arg3, arg4, arg5, arg6); } } // host stub function void ops_par_loop_update_halo_kernel1_t1(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) { //Timing double t1,t2,c1,c2; ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args,7,range,52)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(52,"update_halo_kernel1_t1"); OPS_kernels[52].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[2]; int end[2]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<2; n++ ){ start[n] = 
sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<2; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int xdim0 = args[0].dat->size[0]; int xdim1 = args[1].dat->size[0]; int xdim2 = args[2].dat->size[0]; int xdim3 = args[3].dat->size[0]; int xdim4 = args[4].dat->size[0]; int xdim5 = args[5].dat->size[0]; if (xdim0 != xdim0_update_halo_kernel1_t1_h || xdim1 != xdim1_update_halo_kernel1_t1_h || xdim2 != xdim2_update_halo_kernel1_t1_h || xdim3 != xdim3_update_halo_kernel1_t1_h || xdim4 != xdim4_update_halo_kernel1_t1_h || xdim5 != xdim5_update_halo_kernel1_t1_h) { cudaMemcpyToSymbol( xdim0_update_halo_kernel1_t1, &xdim0, sizeof(int) ); xdim0_update_halo_kernel1_t1_h = xdim0; cudaMemcpyToSymbol( xdim1_update_halo_kernel1_t1, &xdim1, sizeof(int) ); xdim1_update_halo_kernel1_t1_h = xdim1; cudaMemcpyToSymbol( xdim2_update_halo_kernel1_t1, &xdim2, sizeof(int) ); xdim2_update_halo_kernel1_t1_h = xdim2; cudaMemcpyToSymbol( xdim3_update_halo_kernel1_t1, &xdim3, sizeof(int) ); xdim3_update_halo_kernel1_t1_h = xdim3; cudaMemcpyToSymbol( xdim4_update_halo_kernel1_t1, &xdim4, sizeof(int) ); xdim4_update_halo_kernel1_t1_h = xdim4; cudaMemcpyToSymbol( xdim5_update_halo_kernel1_t1, &xdim5, sizeof(int) ); xdim5_update_halo_kernel1_t1_h = xdim5; } int *arg6h = (int *)arg6.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, 1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg6.data = OPS_consts_h + consts_bytes; arg6.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg6.data)[d] = arg6h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; int dat2 = args[2].dat->elem_size; int dat3 = args[3].dat->elem_size; int dat4 = args[4].dat->elem_size; int dat5 = args[5].dat->elem_size; char *p_a[7]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); p_a[1] = (char *)args[1].data_d + base1; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] 
= args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[2].dat->d_m[d]; #endif int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] - args[2].dat->base[0] - d_m[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] - args[2].dat->base[1] - d_m[1]); p_a[2] = (char *)args[2].data_d + base2; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d] + OPS_sub_dat_list[args[3].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[3].dat->d_m[d]; #endif int base3 = dat3 * 1 * (start[0] * args[3].stencil->stride[0] - args[3].dat->base[0] - d_m[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1] - args[3].dat->base[1] - d_m[1]); p_a[3] = (char *)args[3].data_d + base3; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d] + OPS_sub_dat_list[args[4].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[4].dat->d_m[d]; #endif int base4 = dat4 * 1 * (start[0] * args[4].stencil->stride[0] - args[4].dat->base[0] - d_m[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1] - args[4].dat->base[1] - d_m[1]); p_a[4] = (char *)args[4].data_d + base4; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d] + OPS_sub_dat_list[args[5].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[5].dat->d_m[d]; #endif int base5 = dat5 * 1 * (start[0] * args[5].stencil->stride[0] - args[5].dat->base[0] - d_m[0]); base5 = base5+ dat5 * args[5].dat->size[0] * (start[1] * args[5].stencil->stride[1] - args[5].dat->base[1] - d_m[1]); p_a[5] = (char *)args[5].data_d + base5; ops_H_D_exchanges_device(args, 7); ops_halo_exchanges(args,7,range); if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[52].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data ops_update_halo_kernel1_t1<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], (double *)p_a[5], (int *)arg6.data_d,x_size, y_size); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[52].time += t1-t2; } ops_set_dirtybit_device(args, 7); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); ops_set_halo_dirtybit3(&args[2],range); ops_set_halo_dirtybit3(&args[3],range); ops_set_halo_dirtybit3(&args[4],range); ops_set_halo_dirtybit3(&args[5],range); if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[52].mpi_time += t2-t1; OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg4); OPS_kernels[52].transfer += ops_compute_transfer(dim, start, end, &arg5); } }
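// A minimal stand-alone sketch of the constant-memory caching pattern used by the host stub
// above: each __constant__ extent has a host-side shadow copy, and cudaMemcpyToSymbol is only
// issued when the value actually changes.  The names xdim0_example, scale_rows and
// launch_scale_rows are illustrative stand-ins, not part of the OPS API.
#include <cuda_runtime.h>

__constant__ int xdim0_example;        // padded row stride, visible to all kernels
static int xdim0_example_h = -1;       // host-side cache of the last uploaded value

__global__ void scale_rows(double *a, int size0, int size1) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x < size0 && y < size1)
    a[x + y * xdim0_example] *= 2.0;   // stride comes from constant memory
}

static void launch_scale_rows(double *d_a, int xdim0, int size0, int size1) {
  if (xdim0 != xdim0_example_h) {      // upload only when the stride changed
    cudaMemcpyToSymbol(xdim0_example, &xdim0, sizeof(int));
    xdim0_example_h = xdim0;
  }
  dim3 tblock(16, 16);
  dim3 grid((size0 + tblock.x - 1) / tblock.x, (size1 + tblock.y - 1) / tblock.y);
  scale_rows<<<grid, tblock>>>(d_a, size0, size1);
}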
38b4b8bc02d5fc2840d30bb9e9496e1e0b86c829.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2018-2019 by Contributors * \author Rory Mitchell */ #include <thrust/execution_policy.h> #include <thrust/inner_product.h> #include <xgboost/data.h> #include <xgboost/linear_updater.h> #include "xgboost/span.h" #include "coordinate_common.h" #include "../common/common.h" #include "../common/device_helpers.cuh" #include "../common/timer.h" #include "./param.h" namespace xgboost { namespace linear { DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate); /** * \class GPUCoordinateUpdater * * \brief Coordinate descent algorithm that updates one feature per iteration */ class GPUCoordinateUpdater : public LinearUpdater { // NOLINT public: // set training parameter void Configure(Args const& args) override { tparam_.UpdateAllowUnknown(args); coord_param_.UpdateAllowUnknown(args); selector_.reset(FeatureSelector::Create(tparam_.feature_selector, ctx_->Threads())); monitor_.Init("GPUCoordinateUpdater"); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); FromJson(config.at("linear_train_param"), &tparam_); FromJson(config.at("coordinate_param"), &coord_param_); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["linear_train_param"] = ToJson(tparam_); out["coordinate_param"] = ToJson(coord_param_); } void LazyInitDevice(DMatrix *p_fmat, const LearnerModelParam &model_param) { if (ctx_->gpu_id < 0) return; num_row_ = static_cast<size_t>(p_fmat->Info().num_row_); CHECK(p_fmat->SingleColBlock()); SparsePage const& batch = *(p_fmat->GetBatches<CSCPage>().begin()); auto page = batch.GetView(); if (IsEmpty()) { return; } dh::safe_cuda(hipSetDevice(ctx_->gpu_id)); // The begin and end indices for the section of each column associated with // this device std::vector<std::pair<bst_uint, bst_uint>> column_segments; row_ptr_ = {0}; // iterate through columns for (size_t fidx = 0; fidx < batch.Size(); fidx++) { common::Span<Entry const> col = page[fidx]; auto cmp = [](Entry e1, Entry e2) { return e1.index < e2.index; }; auto column_begin = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(0, 0.0f), cmp); auto column_end = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(num_row_, 0.0f), cmp); column_segments.emplace_back( std::make_pair(column_begin - col.cbegin(), column_end - col.cbegin())); row_ptr_.push_back(row_ptr_.back() + (column_end - column_begin)); } data_.resize(row_ptr_.back()); gpair_.resize(num_row_ * model_param.num_output_group); for (size_t fidx = 0; fidx < batch.Size(); fidx++) { auto col = page[fidx]; auto seg = column_segments[fidx]; dh::safe_cuda(hipMemcpy( data_.data().get() + row_ptr_[fidx], col.data() + seg.first, sizeof(Entry) * (seg.second - seg.first), hipMemcpyHostToDevice)); } } void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat, gbm::GBLinearModel *model, double sum_instance_weight) override { tparam_.DenormalizePenalties(sum_instance_weight); monitor_.Start("LazyInitDevice"); this->LazyInitDevice(p_fmat, *(model->learner_model_param)); monitor_.Stop("LazyInitDevice"); monitor_.Start("UpdateGpair"); auto &in_gpair_host = in_gpair->ConstHostVector(); // Update gpair if (ctx_->gpu_id >= 0) { this->UpdateGpair(in_gpair_host); } monitor_.Stop("UpdateGpair"); monitor_.Start("UpdateBias"); this->UpdateBias(p_fmat, model); monitor_.Stop("UpdateBias"); // prepare for updating the weights selector_->Setup(*model, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm, coord_param_.top_k); 
monitor_.Start("UpdateFeature"); for (auto group_idx = 0; group_idx < model->learner_model_param->num_output_group; ++group_idx) { for (auto i = 0U; i < model->learner_model_param->num_feature; i++) { auto fidx = selector_->NextFeature( i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm); if (fidx < 0) break; this->UpdateFeature(fidx, group_idx, &in_gpair->HostVector(), model); } } monitor_.Stop("UpdateFeature"); } void UpdateBias(DMatrix *p_fmat, gbm::GBLinearModel *model) { for (int group_idx = 0; group_idx < model->learner_model_param->num_output_group; ++group_idx) { // Get gradient auto grad = GradientPair(0, 0); if (ctx_->gpu_id >= 0) { grad = GetBiasGradient(group_idx, model->learner_model_param->num_output_group); } auto dbias = static_cast<float>( tparam_.learning_rate * CoordinateDeltaBias(grad.GetGrad(), grad.GetHess())); model->Bias()[group_idx] += dbias; // Update residual if (ctx_->gpu_id >= 0) { UpdateBiasResidual(dbias, group_idx, model->learner_model_param->num_output_group); } } } void UpdateFeature(int fidx, int group_idx, std::vector<GradientPair> *in_gpair, gbm::GBLinearModel *model) { bst_float &w = (*model)[fidx][group_idx]; // Get gradient auto grad = GradientPair(0, 0); if (ctx_->gpu_id >= 0) { grad = GetGradient(group_idx, model->learner_model_param->num_output_group, fidx); } auto dw = static_cast<float>(tparam_.learning_rate * CoordinateDelta(grad.GetGrad(), grad.GetHess(), w, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm)); w += dw; if (ctx_->gpu_id >= 0) { UpdateResidual(dw, group_idx, model->learner_model_param->num_output_group, fidx); } } // This needs to be public because of the __device__ lambda. GradientPair GetBiasGradient(int group_idx, int num_group) { dh::safe_cuda(hipSetDevice(ctx_->gpu_id)); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { return idx * num_group + group_idx; }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip( counting, f); auto perm = thrust::make_permutation_iterator(gpair_.data(), skip); return dh::SumReduction(perm, num_row_); } // This needs to be public because of the __device__ lambda. void UpdateBiasResidual(float dbias, int group_idx, int num_groups) { if (dbias == 0.0f) return; auto d_gpair = dh::ToSpan(gpair_); dh::LaunchN(num_row_, [=] __device__(size_t idx) { auto &g = d_gpair[idx * num_groups + group_idx]; g += GradientPair(g.GetHess() * dbias, 0); }); } // This needs to be public because of the __device__ lambda. GradientPair GetGradient(int group_idx, int num_group, int fidx) { dh::safe_cuda(hipSetDevice(ctx_->gpu_id)); common::Span<xgboost::Entry> d_col = dh::ToSpan(data_).subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; common::Span<GradientPair> d_gpair = dh::ToSpan(gpair_); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { auto entry = d_col[idx]; auto g = d_gpair[entry.index * num_group + group_idx]; return GradientPair(g.GetGrad() * entry.fvalue, g.GetHess() * entry.fvalue * entry.fvalue); }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), GradientPair> multiply_iterator(counting, f); return dh::SumReduction(multiply_iterator, col_size); } // This needs to be public because of the __device__ lambda. 
void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) { common::Span<GradientPair> d_gpair = dh::ToSpan(gpair_); common::Span<Entry> d_col = dh::ToSpan(data_).subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; dh::LaunchN(col_size, [=] __device__(size_t idx) { auto entry = d_col[idx]; auto &g = d_gpair[entry.index * num_groups + group_idx]; g += GradientPair(g.GetHess() * dw * entry.fvalue, 0); }); } private: bool IsEmpty() { return num_row_ == 0; } void UpdateGpair(const std::vector<GradientPair> &host_gpair) { dh::safe_cuda(hipMemcpyAsync( gpair_.data().get(), host_gpair.data(), gpair_.size() * sizeof(GradientPair), hipMemcpyHostToDevice)); } // training parameter LinearTrainParam tparam_; CoordinateParam coord_param_; std::unique_ptr<FeatureSelector> selector_; common::Monitor monitor_; std::vector<size_t> row_ptr_; dh::device_vector<xgboost::Entry> data_; dh::caching_device_vector<GradientPair> gpair_; size_t num_row_; }; XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent") .describe( "Update linear model according to coordinate descent algorithm. GPU " "accelerated.") .set_body([]() { return new GPUCoordinateUpdater(); }); } // namespace linear } // namespace xgboost
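// A minimal host-side sketch of the column-segment search done in LazyInitDevice above: each
// CSC column is sorted by row index, and two std::lower_bound calls delimit the rows that
// belong to this device before the entries are copied over.  CSCEntry and find_segment are
// illustrative stand-ins for the xgboost types.
#include <algorithm>
#include <cstddef>
#include <utility>
#include <vector>

struct CSCEntry { unsigned index; float fvalue; };

std::pair<std::size_t, std::size_t> find_segment(const std::vector<CSCEntry> &col,
                                                 unsigned row_begin, unsigned row_end) {
  auto cmp = [](const CSCEntry &a, const CSCEntry &b) { return a.index < b.index; };
  auto first = std::lower_bound(col.begin(), col.end(), CSCEntry{row_begin, 0.0f}, cmp);
  auto last  = std::lower_bound(col.begin(), col.end(), CSCEntry{row_end, 0.0f}, cmp);
  return {static_cast<std::size_t>(first - col.begin()),
          static_cast<std::size_t>(last - col.begin())};
}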
38b4b8bc02d5fc2840d30bb9e9496e1e0b86c829.cu
/*! * Copyright 2018-2019 by Contributors * \author Rory Mitchell */ #include <thrust/execution_policy.h> #include <thrust/inner_product.h> #include <xgboost/data.h> #include <xgboost/linear_updater.h> #include "xgboost/span.h" #include "coordinate_common.h" #include "../common/common.h" #include "../common/device_helpers.cuh" #include "../common/timer.h" #include "./param.h" namespace xgboost { namespace linear { DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate); /** * \class GPUCoordinateUpdater * * \brief Coordinate descent algorithm that updates one feature per iteration */ class GPUCoordinateUpdater : public LinearUpdater { // NOLINT public: // set training parameter void Configure(Args const& args) override { tparam_.UpdateAllowUnknown(args); coord_param_.UpdateAllowUnknown(args); selector_.reset(FeatureSelector::Create(tparam_.feature_selector, ctx_->Threads())); monitor_.Init("GPUCoordinateUpdater"); } void LoadConfig(Json const& in) override { auto const& config = get<Object const>(in); FromJson(config.at("linear_train_param"), &tparam_); FromJson(config.at("coordinate_param"), &coord_param_); } void SaveConfig(Json* p_out) const override { auto& out = *p_out; out["linear_train_param"] = ToJson(tparam_); out["coordinate_param"] = ToJson(coord_param_); } void LazyInitDevice(DMatrix *p_fmat, const LearnerModelParam &model_param) { if (ctx_->gpu_id < 0) return; num_row_ = static_cast<size_t>(p_fmat->Info().num_row_); CHECK(p_fmat->SingleColBlock()); SparsePage const& batch = *(p_fmat->GetBatches<CSCPage>().begin()); auto page = batch.GetView(); if (IsEmpty()) { return; } dh::safe_cuda(cudaSetDevice(ctx_->gpu_id)); // The begin and end indices for the section of each column associated with // this device std::vector<std::pair<bst_uint, bst_uint>> column_segments; row_ptr_ = {0}; // iterate through columns for (size_t fidx = 0; fidx < batch.Size(); fidx++) { common::Span<Entry const> col = page[fidx]; auto cmp = [](Entry e1, Entry e2) { return e1.index < e2.index; }; auto column_begin = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(0, 0.0f), cmp); auto column_end = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(num_row_, 0.0f), cmp); column_segments.emplace_back( std::make_pair(column_begin - col.cbegin(), column_end - col.cbegin())); row_ptr_.push_back(row_ptr_.back() + (column_end - column_begin)); } data_.resize(row_ptr_.back()); gpair_.resize(num_row_ * model_param.num_output_group); for (size_t fidx = 0; fidx < batch.Size(); fidx++) { auto col = page[fidx]; auto seg = column_segments[fidx]; dh::safe_cuda(cudaMemcpy( data_.data().get() + row_ptr_[fidx], col.data() + seg.first, sizeof(Entry) * (seg.second - seg.first), cudaMemcpyHostToDevice)); } } void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat, gbm::GBLinearModel *model, double sum_instance_weight) override { tparam_.DenormalizePenalties(sum_instance_weight); monitor_.Start("LazyInitDevice"); this->LazyInitDevice(p_fmat, *(model->learner_model_param)); monitor_.Stop("LazyInitDevice"); monitor_.Start("UpdateGpair"); auto &in_gpair_host = in_gpair->ConstHostVector(); // Update gpair if (ctx_->gpu_id >= 0) { this->UpdateGpair(in_gpair_host); } monitor_.Stop("UpdateGpair"); monitor_.Start("UpdateBias"); this->UpdateBias(p_fmat, model); monitor_.Stop("UpdateBias"); // prepare for updating the weights selector_->Setup(*model, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm, coord_param_.top_k); monitor_.Start("UpdateFeature"); for (auto group_idx = 0; 
group_idx < model->learner_model_param->num_output_group; ++group_idx) { for (auto i = 0U; i < model->learner_model_param->num_feature; i++) { auto fidx = selector_->NextFeature( i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm); if (fidx < 0) break; this->UpdateFeature(fidx, group_idx, &in_gpair->HostVector(), model); } } monitor_.Stop("UpdateFeature"); } void UpdateBias(DMatrix *p_fmat, gbm::GBLinearModel *model) { for (int group_idx = 0; group_idx < model->learner_model_param->num_output_group; ++group_idx) { // Get gradient auto grad = GradientPair(0, 0); if (ctx_->gpu_id >= 0) { grad = GetBiasGradient(group_idx, model->learner_model_param->num_output_group); } auto dbias = static_cast<float>( tparam_.learning_rate * CoordinateDeltaBias(grad.GetGrad(), grad.GetHess())); model->Bias()[group_idx] += dbias; // Update residual if (ctx_->gpu_id >= 0) { UpdateBiasResidual(dbias, group_idx, model->learner_model_param->num_output_group); } } } void UpdateFeature(int fidx, int group_idx, std::vector<GradientPair> *in_gpair, gbm::GBLinearModel *model) { bst_float &w = (*model)[fidx][group_idx]; // Get gradient auto grad = GradientPair(0, 0); if (ctx_->gpu_id >= 0) { grad = GetGradient(group_idx, model->learner_model_param->num_output_group, fidx); } auto dw = static_cast<float>(tparam_.learning_rate * CoordinateDelta(grad.GetGrad(), grad.GetHess(), w, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm)); w += dw; if (ctx_->gpu_id >= 0) { UpdateResidual(dw, group_idx, model->learner_model_param->num_output_group, fidx); } } // This needs to be public because of the __device__ lambda. GradientPair GetBiasGradient(int group_idx, int num_group) { dh::safe_cuda(cudaSetDevice(ctx_->gpu_id)); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { return idx * num_group + group_idx; }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip( counting, f); auto perm = thrust::make_permutation_iterator(gpair_.data(), skip); return dh::SumReduction(perm, num_row_); } // This needs to be public because of the __device__ lambda. void UpdateBiasResidual(float dbias, int group_idx, int num_groups) { if (dbias == 0.0f) return; auto d_gpair = dh::ToSpan(gpair_); dh::LaunchN(num_row_, [=] __device__(size_t idx) { auto &g = d_gpair[idx * num_groups + group_idx]; g += GradientPair(g.GetHess() * dbias, 0); }); } // This needs to be public because of the __device__ lambda. GradientPair GetGradient(int group_idx, int num_group, int fidx) { dh::safe_cuda(cudaSetDevice(ctx_->gpu_id)); common::Span<xgboost::Entry> d_col = dh::ToSpan(data_).subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; common::Span<GradientPair> d_gpair = dh::ToSpan(gpair_); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { auto entry = d_col[idx]; auto g = d_gpair[entry.index * num_group + group_idx]; return GradientPair(g.GetGrad() * entry.fvalue, g.GetHess() * entry.fvalue * entry.fvalue); }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), GradientPair> multiply_iterator(counting, f); return dh::SumReduction(multiply_iterator, col_size); } // This needs to be public because of the __device__ lambda. 
void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) { common::Span<GradientPair> d_gpair = dh::ToSpan(gpair_); common::Span<Entry> d_col = dh::ToSpan(data_).subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; dh::LaunchN(col_size, [=] __device__(size_t idx) { auto entry = d_col[idx]; auto &g = d_gpair[entry.index * num_groups + group_idx]; g += GradientPair(g.GetHess() * dw * entry.fvalue, 0); }); } private: bool IsEmpty() { return num_row_ == 0; } void UpdateGpair(const std::vector<GradientPair> &host_gpair) { dh::safe_cuda(cudaMemcpyAsync( gpair_.data().get(), host_gpair.data(), gpair_.size() * sizeof(GradientPair), cudaMemcpyHostToDevice)); } // training parameter LinearTrainParam tparam_; CoordinateParam coord_param_; std::unique_ptr<FeatureSelector> selector_; common::Monitor monitor_; std::vector<size_t> row_ptr_; dh::device_vector<xgboost::Entry> data_; dh::caching_device_vector<GradientPair> gpair_; size_t num_row_; }; XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent") .describe( "Update linear model according to coordinate descent algorithm. GPU " "accelerated.") .set_body([]() { return new GPUCoordinateUpdater(); }); } // namespace linear } // namespace xgboost
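// A minimal stand-alone sketch of the strided reduction behind GetBiasGradient above: gradients
// for all output groups are interleaved in one buffer, and a counting iterator composed with a
// transform iterator selects every num_group-th element for one group before the reduction.
// SumGroupGradient and the use of plain thrust::reduce (instead of dh::SumReduction) are
// illustrative simplifications, not the xgboost implementation.
#include <thrust/device_vector.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/permutation_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/reduce.h>

struct StrideMap {
  int num_group, group_idx;
  __host__ __device__ size_t operator()(size_t idx) const {
    return idx * num_group + group_idx;   // position of this group's gradient inside row idx
  }
};

float SumGroupGradient(const thrust::device_vector<float> &gpair,
                       int group_idx, int num_group, size_t num_row) {
  thrust::counting_iterator<size_t> counting(0);
  auto skip = thrust::make_transform_iterator(counting, StrideMap{num_group, group_idx});
  auto perm = thrust::make_permutation_iterator(gpair.begin(), skip);
  return thrust::reduce(perm, perm + num_row, 0.0f);
}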
ae23cea4ba25dfdb6cde91040931d02406a899f9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <assert.h> #include <time.h> #define BLOCK_SIZE 16 #define CONVERGENCE_CHECK 1 __global__ void convoluteBlock(unsigned char *src, unsigned char *dst, int x, int y, int multiplier) { int x_dim = blockIdx.x * blockDim.x + threadIdx.x; int y_dim = blockIdx.y * blockDim.y + threadIdx.y; float h[3][3] = {{1/16.0, 2/16.0, 1/16.0}, {2/16.0, 4/16.0, 2/16.0}, {1/16.0, 2/16.0, 1/16.0}}; float red = 0.0, green = 0.0, blue = 0.0; if (x_dim >= 0 && x_dim < y && y_dim >= 0 && y_dim < x) { if (multiplier == 1) { dst[x_dim* x + y_dim] = h[0][0] * src[(x_dim- 1) * x + y_dim-1] + h[0][1] * src[(x_dim- 1) * x + y_dim] + h[0][2] * src[(x_dim- 1) * x + y_dim+1] + h[1][0] * src[x_dim* x + y_dim-1] + h[1][1] * src[x_dim* x + y_dim] + h[1][2] * src[x_dim* x + y_dim+1] + h[2][0] * src[(x_dim+ 1) * x + y_dim-1] + h[2][1] * src[(x_dim+ 1) * x + y_dim] + h[2][2] * src[(x_dim+ 1) * x + y_dim+1]; } else { red = h[0][0] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier - multiplier] + h[0][1] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier] + h[0][2] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier + multiplier] + h[1][0] * src[x_dim * x * multiplier + y_dim * multiplier - multiplier] + h[1][1] * src[x_dim * x * multiplier + y_dim * multiplier] + h[1][2] * src[x_dim * x * multiplier + y_dim * multiplier + multiplier] + h[2][0] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier - multiplier] + h[2][1] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier] + h[2][2] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier + multiplier]; green = h[0][0] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier - multiplier + 1] + h[0][1] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier + 1] + h[0][2] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier + multiplier + 1] + h[1][0] * src[x_dim * x * multiplier + y_dim * multiplier - multiplier + 1] + h[1][1] * src[x_dim * x * multiplier + y_dim * multiplier + 1] + h[1][2] * src[x_dim * x * multiplier + y_dim * multiplier + multiplier + 1] + h[2][0] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier - multiplier + 1] + h[2][1] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier + 1] + h[2][2] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier + multiplier + 1]; blue = h[0][0] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier - multiplier + 2] + h[0][1] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier + 2] + h[0][2] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier + multiplier + 2] + h[1][0] * src[x_dim * x * multiplier + y_dim * multiplier - multiplier + 2] + h[1][1] * src[x_dim * x * multiplier + y_dim * multiplier + 2] + h[1][2] * src[x_dim * x * multiplier + y_dim * multiplier + multiplier + 2] + h[2][0] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier - multiplier + 2] + h[2][1] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier + 2] + h[2][2] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier + multiplier + 2]; dst[x_dim * x * multiplier + y_dim * multiplier] = red; dst[x_dim * x * multiplier + y_dim * multiplier + 1] = green; dst[x_dim * x * multiplier + y_dim * multiplier + 2] = blue; } } } __global__ void convergence_grey(unsigned char *src, unsigned char *dst, int x, int y, char *convbool, int multiplier) { int x_dim = blockIdx.x * blockDim.x + threadIdx.x; int y_dim = blockIdx.y * blockDim.y + threadIdx.y; int blockId = blockIdx.x + blockIdx.y * 
gridDim.x; /*Use of shared memory for the convergence check of the current thread's block*/ __shared__ char blockconvalues[BLOCK_SIZE][BLOCK_SIZE]; if (0 <= x_dim && x_dim < y && 0 <= y_dim && y_dim < x) { if (dst[x_dim * x + y_dim] != src[x_dim * x + y_dim]) blockconvalues[threadIdx.x][threadIdx.y] = 0; else blockconvalues[threadIdx.x][threadIdx.y] = 1; __syncthreads(); /*First thread of the block, checks if every thread of the block converges*/ if (threadIdx.x == 0 && threadIdx.y == 0) { int blockconv = 1; for (int i = 0; i < BLOCK_SIZE; i++) { for (int j = 0; j < BLOCK_SIZE; j++) { if (blockconvalues[i][j] != 1) { blockconv = 0; break; } } if (blockconv == 0) break; } if (blockconv == 1) convbool[blockId] = 1; else convbool[blockId] = 0; } } } __global__ void convergence_rgb(unsigned char *src, unsigned char *dst, int x, int y, char *convbool, int multiplier) { int x_dim = blockIdx.x * blockDim.x + threadIdx.x; int y_dim = blockIdx.y * blockDim.y + threadIdx.y; int blockId = blockIdx.x + blockIdx.y * gridDim.x; /*Use of shared memory for the convergence check of the current thread's block*/ __shared__ char blockconvalues[BLOCK_SIZE][BLOCK_SIZE * 3]; if (0 <= x_dim && x_dim < x && 0 <= y_dim && y_dim < y) { if (dst[x_dim * x * multiplier + y_dim * multiplier] != src[x_dim * x * multiplier + y_dim * multiplier]) blockconvalues[threadIdx.x][threadIdx.y] = 0; else blockconvalues[threadIdx.x][threadIdx.y] = 1; __syncthreads(); /*First thread of the block, checks if every thread of the block converges*/ if (threadIdx.x == 0 && threadIdx.y == 0) { int blockconv = 1; for (int i = 0; i < BLOCK_SIZE; i++) { for (int j = 0; j < BLOCK_SIZE * 3; j += 3) { if (blockconvalues[i][j] != 1 || blockconvalues[i][j+1] != 1 || blockconvalues[i][j+2] != 1) { blockconv = 0; break; } } if (blockconv == 0) break; } if (blockconv == 1) convbool[blockId] = 1; else convbool[blockId] = 0; } } } extern "C" void convolute(unsigned char *vector, int x, int y, int multiplier, int loops) { unsigned char *vector_a, *vector_b, *temp; char *convbool, *convboolhost; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); // Initialize arrays //printf("%d %d %d\n", x, y, x * y); convboolhost = (char *)calloc((x * y * multiplier) / BLOCK_SIZE, sizeof(char)); assert(convboolhost != NULL); hipMalloc(&vector_a, x * y * multiplier * sizeof(unsigned char)); hipMalloc(&vector_b, x * y * multiplier * sizeof(unsigned char)); assert(vector_a != NULL); assert(vector_b != NULL); hipMalloc(&convbool, sizeof(char) * ((x * y * multiplier) / BLOCK_SIZE)); assert(convbool != NULL); hipMemcpy(vector_a, vector, x * y * multiplier * sizeof(unsigned char), hipMemcpyHostToDevice); hipMemset(vector_b, 0, x * y * multiplier * sizeof(unsigned char)); int blocksperlinex = (int)ceil((double)((x * multiplier)/ BLOCK_SIZE)); int blocksperliney = (int)ceil((double)(y / BLOCK_SIZE)); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(blocksperliney, blocksperlinex); int i = 0; int totalconv = 0, first_conv = -1;; hipEventRecord(start); for (i = 0; i < loops; i++) { if (i > 0) { temp = vector_a; vector_a = vector_b; vector_b = temp; } hipLaunchKernelGGL(( convoluteBlock), dim3(dimGrid), dim3(dimBlock), 0, 0, vector_a, vector_b, x, y, multiplier); if (i % CONVERGENCE_CHECK == 0 & i > 0) { for (int j = 0; j < (x * y * multiplier) / BLOCK_SIZE; j++) convboolhost[i] = 0; hipMemcpy(convbool, convboolhost, sizeof(char) * ((x * y * multiplier) / BLOCK_SIZE), hipMemcpyHostToDevice); if (multiplier == 1) hipLaunchKernelGGL(( convergence_grey), 
dim3(dimGrid), dim3(dimBlock), 0, 0, vector_a, vector_b, x, y, convbool, multiplier);
            else
                hipLaunchKernelGGL(( convergence_rgb), dim3(dimGrid), dim3(dimBlock), 0, 0, vector_a, vector_b, x, y, convbool, multiplier);
            hipMemcpy(convboolhost, convbool, sizeof(char) * ((x * y * multiplier) / BLOCK_SIZE), hipMemcpyDeviceToHost);
            /* every per-block flag must be set for the image to have converged */
            for (int j = 0; j < (x * y * multiplier) / BLOCK_SIZE; j++) {
                if (convboolhost[j] != 0)
                    totalconv = 1;
                else {
                    totalconv = 0;
                    break;
                }
            }
        }
        if (totalconv == 1 && first_conv == -1) {
            first_conv = i;
        }
        hipDeviceSynchronize();
    }
    hipEventRecord(stop);
    hipMemcpy(vector, vector_a, x * y * multiplier * sizeof(unsigned char), hipMemcpyDeviceToHost);
    if (first_conv >= 0)
        printf("Convergence at %d\n", first_conv);
    else
        printf("No convergence\n");
    hipFree(vector_a);
    hipFree(vector_b);
    hipFree(convbool);
    free(convboolhost);
    float msecs = 0.0f;
    hipEventSynchronize(stop);   /* wait for the stop event before reading the timer */
    hipEventElapsedTime(&msecs, start, stop);
    printf("Elapsed time = %3.2lf secs\n", msecs / 1000.0);
    hipEventDestroy(start);
    hipEventDestroy(stop);
}
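// A minimal sketch of the hipLaunchKernelGGL launch form used throughout the hipified file
// above; invert_kernel and launch_invert are illustrative names.  The macro takes the kernel,
// grid and block dimensions, dynamic shared-memory bytes and a stream, followed by the kernel
// arguments - it corresponds to the triple-chevron launch in the .cu counterpart.
#include <hip/hip_runtime.h>

__global__ void invert_kernel(unsigned char *buf, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) buf[i] = 255 - buf[i];
}

void launch_invert(unsigned char *d_buf, int n) {
  dim3 block(256);
  dim3 grid((n + block.x - 1) / block.x);   // integer ceiling division
  hipLaunchKernelGGL(invert_kernel, grid, block, 0, 0, d_buf, n);
  hipDeviceSynchronize();
}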
ae23cea4ba25dfdb6cde91040931d02406a899f9.cu
#include <stdio.h> #include <stdlib.h> #include <assert.h> #include <time.h> #define BLOCK_SIZE 16 #define CONVERGENCE_CHECK 1 __global__ void convoluteBlock(unsigned char *src, unsigned char *dst, int x, int y, int multiplier) { int x_dim = blockIdx.x * blockDim.x + threadIdx.x; int y_dim = blockIdx.y * blockDim.y + threadIdx.y; float h[3][3] = {{1/16.0, 2/16.0, 1/16.0}, {2/16.0, 4/16.0, 2/16.0}, {1/16.0, 2/16.0, 1/16.0}}; float red = 0.0, green = 0.0, blue = 0.0; if (x_dim >= 0 && x_dim < y && y_dim >= 0 && y_dim < x) { if (multiplier == 1) { dst[x_dim* x + y_dim] = h[0][0] * src[(x_dim- 1) * x + y_dim-1] + h[0][1] * src[(x_dim- 1) * x + y_dim] + h[0][2] * src[(x_dim- 1) * x + y_dim+1] + h[1][0] * src[x_dim* x + y_dim-1] + h[1][1] * src[x_dim* x + y_dim] + h[1][2] * src[x_dim* x + y_dim+1] + h[2][0] * src[(x_dim+ 1) * x + y_dim-1] + h[2][1] * src[(x_dim+ 1) * x + y_dim] + h[2][2] * src[(x_dim+ 1) * x + y_dim+1]; } else { red = h[0][0] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier - multiplier] + h[0][1] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier] + h[0][2] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier + multiplier] + h[1][0] * src[x_dim * x * multiplier + y_dim * multiplier - multiplier] + h[1][1] * src[x_dim * x * multiplier + y_dim * multiplier] + h[1][2] * src[x_dim * x * multiplier + y_dim * multiplier + multiplier] + h[2][0] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier - multiplier] + h[2][1] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier] + h[2][2] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier + multiplier]; green = h[0][0] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier - multiplier + 1] + h[0][1] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier + 1] + h[0][2] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier + multiplier + 1] + h[1][0] * src[x_dim * x * multiplier + y_dim * multiplier - multiplier + 1] + h[1][1] * src[x_dim * x * multiplier + y_dim * multiplier + 1] + h[1][2] * src[x_dim * x * multiplier + y_dim * multiplier + multiplier + 1] + h[2][0] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier - multiplier + 1] + h[2][1] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier + 1] + h[2][2] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier + multiplier + 1]; blue = h[0][0] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier - multiplier + 2] + h[0][1] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier + 2] + h[0][2] * src[(x_dim - 1) * x * multiplier + y_dim * multiplier + multiplier + 2] + h[1][0] * src[x_dim * x * multiplier + y_dim * multiplier - multiplier + 2] + h[1][1] * src[x_dim * x * multiplier + y_dim * multiplier + 2] + h[1][2] * src[x_dim * x * multiplier + y_dim * multiplier + multiplier + 2] + h[2][0] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier - multiplier + 2] + h[2][1] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier + 2] + h[2][2] * src[(x_dim + 1) * x * multiplier + y_dim * multiplier + multiplier + 2]; dst[x_dim * x * multiplier + y_dim * multiplier] = red; dst[x_dim * x * multiplier + y_dim * multiplier + 1] = green; dst[x_dim * x * multiplier + y_dim * multiplier + 2] = blue; } } } __global__ void convergence_grey(unsigned char *src, unsigned char *dst, int x, int y, char *convbool, int multiplier) { int x_dim = blockIdx.x * blockDim.x + threadIdx.x; int y_dim = blockIdx.y * blockDim.y + threadIdx.y; int blockId = blockIdx.x + blockIdx.y * gridDim.x; /*Use of shared memory for the convergence check of the current thread's 
block*/ __shared__ char blockconvalues[BLOCK_SIZE][BLOCK_SIZE]; if (0 <= x_dim && x_dim < y && 0 <= y_dim && y_dim < x) { if (dst[x_dim * x + y_dim] != src[x_dim * x + y_dim]) blockconvalues[threadIdx.x][threadIdx.y] = 0; else blockconvalues[threadIdx.x][threadIdx.y] = 1; __syncthreads(); /*First thread of the block, checks if every thread of the block converges*/ if (threadIdx.x == 0 && threadIdx.y == 0) { int blockconv = 1; for (int i = 0; i < BLOCK_SIZE; i++) { for (int j = 0; j < BLOCK_SIZE; j++) { if (blockconvalues[i][j] != 1) { blockconv = 0; break; } } if (blockconv == 0) break; } if (blockconv == 1) convbool[blockId] = 1; else convbool[blockId] = 0; } } } __global__ void convergence_rgb(unsigned char *src, unsigned char *dst, int x, int y, char *convbool, int multiplier) { int x_dim = blockIdx.x * blockDim.x + threadIdx.x; int y_dim = blockIdx.y * blockDim.y + threadIdx.y; int blockId = blockIdx.x + blockIdx.y * gridDim.x; /*Use of shared memory for the convergence check of the current thread's block*/ __shared__ char blockconvalues[BLOCK_SIZE][BLOCK_SIZE * 3]; if (0 <= x_dim && x_dim < x && 0 <= y_dim && y_dim < y) { if (dst[x_dim * x * multiplier + y_dim * multiplier] != src[x_dim * x * multiplier + y_dim * multiplier]) blockconvalues[threadIdx.x][threadIdx.y] = 0; else blockconvalues[threadIdx.x][threadIdx.y] = 1; __syncthreads(); /*First thread of the block, checks if every thread of the block converges*/ if (threadIdx.x == 0 && threadIdx.y == 0) { int blockconv = 1; for (int i = 0; i < BLOCK_SIZE; i++) { for (int j = 0; j < BLOCK_SIZE * 3; j += 3) { if (blockconvalues[i][j] != 1 || blockconvalues[i][j+1] != 1 || blockconvalues[i][j+2] != 1) { blockconv = 0; break; } } if (blockconv == 0) break; } if (blockconv == 1) convbool[blockId] = 1; else convbool[blockId] = 0; } } } extern "C" void convolute(unsigned char *vector, int x, int y, int multiplier, int loops) { unsigned char *vector_a, *vector_b, *temp; char *convbool, *convboolhost; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); // Initialize arrays //printf("%d %d %d\n", x, y, x * y); convboolhost = (char *)calloc((x * y * multiplier) / BLOCK_SIZE, sizeof(char)); assert(convboolhost != NULL); cudaMalloc(&vector_a, x * y * multiplier * sizeof(unsigned char)); cudaMalloc(&vector_b, x * y * multiplier * sizeof(unsigned char)); assert(vector_a != NULL); assert(vector_b != NULL); cudaMalloc(&convbool, sizeof(char) * ((x * y * multiplier) / BLOCK_SIZE)); assert(convbool != NULL); cudaMemcpy(vector_a, vector, x * y * multiplier * sizeof(unsigned char), cudaMemcpyHostToDevice); cudaMemset(vector_b, 0, x * y * multiplier * sizeof(unsigned char)); int blocksperlinex = (int)ceil((double)((x * multiplier)/ BLOCK_SIZE)); int blocksperliney = (int)ceil((double)(y / BLOCK_SIZE)); dim3 dimBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 dimGrid(blocksperliney, blocksperlinex); int i = 0; int totalconv = 0, first_conv = -1;; cudaEventRecord(start); for (i = 0; i < loops; i++) { if (i > 0) { temp = vector_a; vector_a = vector_b; vector_b = temp; } convoluteBlock<<<dimGrid, dimBlock>>>(vector_a, vector_b, x, y, multiplier); if (i % CONVERGENCE_CHECK == 0 & i > 0) { for (int j = 0; j < (x * y * multiplier) / BLOCK_SIZE; j++) convboolhost[i] = 0; cudaMemcpy(convbool, convboolhost, sizeof(char) * ((x * y * multiplier) / BLOCK_SIZE), cudaMemcpyHostToDevice); if (multiplier == 1) convergence_grey<<<dimGrid, dimBlock>>>(vector_a, vector_b, x, y, convbool, multiplier); else convergence_rgb<<<dimGrid, dimBlock>>>(vector_a, vector_b, x, 
y, convbool, multiplier);
            cudaMemcpy(convboolhost, convbool, sizeof(char) * ((x * y * multiplier) / BLOCK_SIZE), cudaMemcpyDeviceToHost);
            /* every per-block flag must be set for the image to have converged */
            for (int j = 0; j < (x * y * multiplier) / BLOCK_SIZE; j++) {
                if (convboolhost[j] != 0)
                    totalconv = 1;
                else {
                    totalconv = 0;
                    break;
                }
            }
        }
        if (totalconv == 1 && first_conv == -1) {
            first_conv = i;
        }
        cudaDeviceSynchronize();
    }
    cudaEventRecord(stop);
    cudaMemcpy(vector, vector_a, x * y * multiplier * sizeof(unsigned char), cudaMemcpyDeviceToHost);
    if (first_conv >= 0)
        printf("Convergence at %d\n", first_conv);
    else
        printf("No convergence\n");
    cudaFree(vector_a);
    cudaFree(vector_b);
    cudaFree(convbool);
    free(convboolhost);
    float msecs = 0.0f;
    cudaEventSynchronize(stop);   /* wait for the stop event before reading the timer */
    cudaEventElapsedTime(&msecs, start, stop);
    printf("Elapsed time = %3.2lf secs\n", msecs / 1000.0);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
}
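// A minimal stand-alone sketch of the host-side timing and grid-sizing pattern used by
// convolute() above, written as an assumed simplification: dummy_kernel, timed_launch and the
// loop counts are illustrative.  Two details matter in practice: the grid size needs a proper
// integer ceiling division rather than a cast of an already-truncated quotient, and
// cudaEventSynchronize(stop) has to run before cudaEventElapsedTime so the stop event has
// actually completed.
#include <cuda_runtime.h>
#include <stdio.h>

__global__ void dummy_kernel(unsigned char *buf, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) buf[i] = 255 - buf[i];
}

void timed_launch(unsigned char *d_buf, int n, int loops) {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  int block = 256;
  int grid = (n + block - 1) / block;        // integer ceiling division

  cudaEventRecord(start);
  for (int i = 0; i < loops; i++) {
    dummy_kernel<<<grid, block>>>(d_buf, n);
    if (i % 10 == 0 && i > 0) {              // periodic check uses logical &&
      cudaDeviceSynchronize();               // e.g. before copying convergence flags back
    }
  }
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);                // wait for the stop event to complete
  float msecs = 0.0f;
  cudaEventElapsedTime(&msecs, start, stop);
  printf("Elapsed time = %.2f secs\n", msecs / 1000.0f);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
}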