Dataset schema (each column is a string; the two numbers give the minimum and maximum string length observed):

  hip_filename    string,  5 to 84 characters
  hip_content     string,  79 to 9.69M characters
  cuda_filename   string,  4 to 83 characters
  cuda_content    string,  19 to 9.69M characters
a2f49c32b851c449daf356088215daf29d8c55e4.hip
// !!! This is a file automatically generated by hipify!!! /** * GPU FFT Example * * This example showcases the FFT capabilities of a P2P NVIDIA GPU with a * FlexRIO device. In this case, the FlexRIO module is creating a simulated CW * tone and can perform Additive White Gaussian Noise (AWGN) and/or a Finite * Impulse Response (FIR) Low-Pass Filter (LPF) on the generated signal. The * signal is then sent to the GPU where a parallelized FFT takes place using * NVIDIA's CUFFT library and some logarithmic conversion to calculate the * power spectrum of the signal. This resulting signal is then written to a file * and/or a GNUplot host application for data logging and plotting. * * For more information on NI FPGA functions, see the NI FPGA Interface C API * Help. For more information on NVIDIA CUDA functions and operation, see the * help files included with the NVIDIA CUDA Driver. * * Date: 1/10/2016 * Author: John Gentile */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <sys/time.h> #include <sys/times.h> #include <ctype.h> #include <unistd.h> #include <hip/hip_runtime.h> #include <hipfft.h> #include <hipfftXt.h> #include "NiFpga_FPGA_Main.h" // NI FPGA C API Generated .h file for bitfile #define HELP_STRING \ "Usage: GPU_FFT [OPTIONS]\n" \ "GPU FFT Example with NI FlexRIO device\n\n" \ "\t-H,\tTransfer data from FPGA to host memory before transferring to GPU\n" \ "\t-l,\tPass simulated signal through digital Low-Pass FIR Filter on FPGA\n" \ "\t-a,\tAdd White Gaussian Noise to simulated signal on FPGA\n" \ "\t-t,\tWrite generated time-domain signal from FlexRIO to file (must be used with -H option)\n" \ "\t-b [./bitfile],\tPath to *.lvbitx bitfile\n" \ "\t-s [signature],\tSignature of the bitfile\n" \ "\t-r [RIO0],\tRIO resource string to open (e.g. 
RIO0 or rio://mysystem/RIO)\n" #define SAMPLES 1048576*4 #define COMPLEX_SIZE (SAMPLES*2 + 1) #define MAX_STR_LEN 256 // use inline method for error checking to allow easy app exit #define checkStatus(val) checkStatus__ ( (val), #val, __FILE__, __LINE__ ) template <typename T> // Templated to allow for different CUDA/NiFpga error types inline void checkStatus__(T code, const char *func, const char *file, int line) { if (code) { fprintf(stderr, "Error at %s:%d code=%d \"%s\" \n", file, line, (unsigned int)code, func); hipDeviceReset(); NiFpga_Finalize(); exit(EXIT_FAILURE); } } __device__ __host__ inline hipComplex ComplexMul(hipComplex a, hipComplex b) { hipComplex c; c.x = a.x * b.x - a.y * b.y; c.y = a.x * b.y + a.y * b.x; return c; } // Used to convert int32_t data input from FlexRIO to hipfftReal // Other scaling can occur here as well __global__ void ConvertInputToComplex( const int32_t * __restrict__ dataIn, hipfftReal * __restrict__ dataOut) { const int numThreads = blockDim.x * gridDim.x; const int threadId = blockIdx.x * blockDim.x + threadIdx.x; for (size_t offset = threadId; offset < SAMPLES; offset += numThreads) dataOut[offset] = (hipfftReal)((float)dataIn[offset]/127.0f); } /* * Main Program Execution */ int main(int argc, char **argv) { /* * Initialization of program values, NI FPGA and CUDA */ int viahostflag = 0, // -H, do transfers and operations via host lpfflag = 0, // -l, pass signal through Low-Pass FIR on FlexRIO awgnflag = 0, // -a, add white gaussian noise to signal on FlexRIO timeflag = 0, // -t, write time-domain signal (only if -H is set) opt; char bitPath [MAX_STR_LEN], // -b [./bitfile], path to *.lvbitx bitfile bitSig [MAX_STR_LEN], // -s [signature], signature of the bitfile rioDev [MAX_STR_LEN]; // -r [RIO0], RIO resource string to open (e.g. RIO0 or rio://mysystem/RIO) // Process command line arguments and set above flags while ((opt = getopt(argc, argv, "Hlathb:s:r:")) != -1) { switch (opt) { case 'H': viahostflag = 1; break; case 'l': lpfflag = 1; break; case 'a': awgnflag = 1; break; case 't': timeflag = 1; break; case 'h': fprintf(stderr, HELP_STRING); exit(EXIT_SUCCESS); case 'b': strcpy(bitPath, optarg); break; case 's': strcpy(bitSig, optarg); break; case 'r': strcpy(rioDev, optarg); break; default: abort(); } } // initialize NI FPGA interfaces; use status variable for error handling fprintf(stderr, "Initializing NI FPGA: "); checkStatus(NiFpga_Initialize()); NiFpga_Session session; // Download bitfile to target; get path to bitfile // TODO: change to full path to bitfile as necessary fprintf(stderr, "Downloading bitfile "); checkStatus(NiFpga_Open(bitPath, bitSig, rioDev, 0, &session)); fprintf(stderr, "DONE\n"); struct hipDeviceProp_t d_props; int device = 0; // device specifies which GPU should be used. 
Change this to the index of the desired GPU, if necessary checkStatus(hipGetDevice(&device)); checkStatus(hipGetDeviceProperties(&d_props, device)); if (d_props.major < 2) { fprintf(stderr, "CUDA Error: This example requires a CUDA device with architecture SM2.0 or higher\n"); exit(EXIT_FAILURE); } /* * Allocate CUDA Memory for FFT and log scaling operations * As well, allocate complex CUDA Memory for R2C result */ fprintf(stderr, "Allocating CUDA and Host Memory: \n"); int32_t *init_signal; // initial storage for non-scaled data input hipfftReal *tmp_result1; // temp storage for scaling data input hipComplex *gpu_result; // Where CUFFT will be stored hipComplex *hcomp_data; // Where host memory will recieve complex data result checkStatus(hipMalloc((void **)&init_signal, sizeof(*init_signal)*(16*SAMPLES))); checkStatus(hipMalloc((void **)&tmp_result1, sizeof(hipfftReal)*SAMPLES)); checkStatus(hipMalloc((void **)&gpu_result, sizeof(hipComplex)*COMPLEX_SIZE)); hcomp_data = (hipComplex*) malloc(sizeof(hipComplex)*COMPLEX_SIZE); if (hcomp_data == NULL) { fprintf(stderr, "Host Error: Host failed to allocate memory\n"); return -1; } /* * Make CUFFT plan for 1D Real-to-Complex FFT * also link data path to CUDA device */ hipfftHandle plan; checkStatus(hipfftCreate(&plan)); checkStatus(hipfftPlan1d(&plan, SAMPLES, HIPFFT_R2C, 1)); // Configure P2P FIFO between FlexRIO and GPU using NVIDIA GPU Direct if (viahostflag == 1) { /* Host transfer */ checkStatus(NiFpga_ConfigureFifo(session, NiFpga_FPGA_Main_TargetToHostFifoI32_T2HDMAFIFO, (size_t)SAMPLES)); } else { /* P2P via RDMA */ checkStatus(NiFpga_ConfigureFifoBuffer(session, NiFpga_FPGA_Main_TargetToHostFifoI32_T2HDMAFIFO, (uint64_t)init_signal, SAMPLES, NULL, NiFpga_DmaBufferType_NvidiaGpuDirectRdma)); } /* * Set NI FPGA Control/Indicator Values */ // Set RMS Noise value of AWGN Algorithm on FlexRIO (out of i16 full scale, here set as 2048) NiFpga_WriteU16(session, NiFpga_FPGA_Main_ControlU16_RMSNoise, 2048); // Reset FPGA algorithms and clear FIFOs NiFpga_WriteBool(session, NiFpga_FPGA_Main_ControlBool_aReset, NiFpga_True); NiFpga_WriteBool(session, NiFpga_FPGA_Main_ControlBool_aReset, NiFpga_False); if (lpfflag == 1) { fprintf(stderr, "Enabling Low-Pass FIR Filter on FlexRIO\n"); NiFpga_WriteBool(session, NiFpga_FPGA_Main_ControlBool_LPFEnable, NiFpga_True); } else NiFpga_WriteBool(session, NiFpga_FPGA_Main_ControlBool_LPFEnable, NiFpga_False); if (awgnflag == 1) { fprintf(stderr, "Adding White Gaussian Noise to Signal\n"); NiFpga_WriteBool(session, NiFpga_FPGA_Main_ControlBool_AWGNEnable, NiFpga_True); } else NiFpga_WriteBool(session, NiFpga_FPGA_Main_ControlBool_AWGNEnable, NiFpga_False); /* * DMA (or copy from host) signal to GPU and execute FFT plan */ if (viahostflag == 1) { int32_t * h_data = NULL; // ptr for when transferring data to host first fprintf(stderr, "Copy host memory signal to CUDA Device\n"); h_data = (int32_t *)malloc(SAMPLES * sizeof(int32_t)); checkStatus(NiFpga_ReadFifoI32(session, NiFpga_FPGA_Main_TargetToHostFifoI32_T2HDMAFIFO, h_data, (size_t)SAMPLES, 5000, NULL)); if (timeflag == 1) { fprintf(stderr, "Writing time-domain signal to TimeSignal.dat\n"); FILE *f = fopen("TimeSignal.dat", "w"); for(int i = 0; i < SAMPLES; i++) { if (i == 0) fprintf(f, "# X Y\n"); // Write real component to file fprintf(f, "%d %d\n", i, h_data[i]); } fclose(f); } if (hipMemcpy(init_signal, h_data, (SAMPLES*sizeof(int32_t)), hipMemcpyHostToDevice) != hipSuccess) { fprintf(stderr, "CUDA Error: Device failed to copy host memory to device\n"); 
return -1; } free(h_data); } else { fprintf(stderr, "DMA'ing FlexRIO data to GPU\n"); size_t elemsAcquired, elemsRemaining; checkStatus(NiFpga_AcquireFifoReadElementsI32(session, NiFpga_FPGA_Main_TargetToHostFifoI32_T2HDMAFIFO, &init_signal, SAMPLES, 5000, &elemsAcquired, &elemsRemaining)); fprintf(stderr, "%d samples acquired with %d elements remaining in FIFO\n", elemsAcquired, elemsRemaining); } /* * Start FFT on GPU */ fprintf(stderr, "Executing CUFFT Plan...\n"); // create timers hipEvent_t start, end; hipEventCreate(&start); hipEventCreate(&end); float elapsedTime=0; checkStatus(hipEventRecord(start, 0)); // Convert input signal from real to complex hipLaunchKernelGGL(( ConvertInputToComplex), dim3(32), dim3(128), 0, 0, init_signal, tmp_result1); checkStatus(hipGetLastError()); // Execute FFT on data checkStatus(hipfftExecR2C(plan, tmp_result1, gpu_result)); checkStatus(hipGetLastError()); // Stop and record time taken checkStatus(hipEventRecord(end, 0)); // Sync and wait for completion checkStatus(hipDeviceSynchronize()); checkStatus(hipEventSynchronize(end)); checkStatus(hipEventElapsedTime(&elapsedTime, start, end)); fprintf(stderr, "FFT took %fms to complete on GPU\n", elapsedTime); // Copy FFT result back to host) fprintf(stderr, "Copying CUFFT result back to host memory...\n"); checkStatus(hipMemcpy(hcomp_data, gpu_result, SAMPLES, hipMemcpyDeviceToHost)); /* * Write resulting data to file */ fprintf(stderr, "Writing signal to stdout:\n\n"); for(int32_t i = 0; i < COMPLEX_SIZE; i++) { if (i == 0) fprintf(stdout, "# X Y\n"); // Write real component to file // TODO: Implement power conversion as CUFFT Callback fprintf(stdout, "%d %f\n", i, 20.0f*log(hcomp_data[i].y)); } // Close out CUFFT plan(s) and free CUDA memory fprintf(stderr, "Closing CUFFT and freeing CUDA memory...\n"); checkStatus(hipfftDestroy(plan)); checkStatus(hipFree(init_signal)); checkStatus(hipFree(tmp_result1)); checkStatus(hipFree(gpu_result)); free(hcomp_data); // Clean up NVIDIA Driver state hipDeviceReset(); // Close NI FPGA References; must be last NiFpga calls fprintf(stderr, "Stopping NI FPGA...\n"); checkStatus(NiFpga_Close(session, 0)); checkStatus(NiFpga_Finalize()); return EXIT_SUCCESS; }
a2f49c32b851c449daf356088215daf29d8c55e4.cu
/** * GPU FFT Example * * This example showcases the FFT capabilities of a P2P NVIDIA GPU with a * FlexRIO device. In this case, the FlexRIO module is creating a simulated CW * tone and can perform Additive White Gaussian Noise (AWGN) and/or a Finite * Impulse Response (FIR) Low-Pass Filter (LPF) on the generated signal. The * signal is then sent to the GPU where a parallelized FFT takes place using * NVIDIA's CUFFT library and some logarithmic conversion to calculate the * power spectrum of the signal. This resulting signal is then written to a file * and/or a GNUplot host application for data logging and plotting. * * For more information on NI FPGA functions, see the NI FPGA Interface C API * Help. For more information on NVIDIA CUDA functions and operation, see the * help files included with the NVIDIA CUDA Driver. * * Date: 1/10/2016 * Author: John Gentile */ #include <stdlib.h> #include <stdio.h> #include <math.h> #include <sys/time.h> #include <sys/times.h> #include <ctype.h> #include <unistd.h> #include <cuda_runtime.h> #include <cufft.h> #include <cufftXt.h> #include "NiFpga_FPGA_Main.h" // NI FPGA C API Generated .h file for bitfile #define HELP_STRING \ "Usage: GPU_FFT [OPTIONS]\n" \ "GPU FFT Example with NI FlexRIO device\n\n" \ "\t-H,\tTransfer data from FPGA to host memory before transferring to GPU\n" \ "\t-l,\tPass simulated signal through digital Low-Pass FIR Filter on FPGA\n" \ "\t-a,\tAdd White Gaussian Noise to simulated signal on FPGA\n" \ "\t-t,\tWrite generated time-domain signal from FlexRIO to file (must be used with -H option)\n" \ "\t-b [./bitfile],\tPath to *.lvbitx bitfile\n" \ "\t-s [signature],\tSignature of the bitfile\n" \ "\t-r [RIO0],\tRIO resource string to open (e.g. RIO0 or rio://mysystem/RIO)\n" #define SAMPLES 1048576*4 #define COMPLEX_SIZE (SAMPLES*2 + 1) #define MAX_STR_LEN 256 // use inline method for error checking to allow easy app exit #define checkStatus(val) checkStatus__ ( (val), #val, __FILE__, __LINE__ ) template <typename T> // Templated to allow for different CUDA/NiFpga error types inline void checkStatus__(T code, const char *func, const char *file, int line) { if (code) { fprintf(stderr, "Error at %s:%d code=%d \"%s\" \n", file, line, (unsigned int)code, func); cudaDeviceReset(); NiFpga_Finalize(); exit(EXIT_FAILURE); } } __device__ __host__ inline cuComplex ComplexMul(cuComplex a, cuComplex b) { cuComplex c; c.x = a.x * b.x - a.y * b.y; c.y = a.x * b.y + a.y * b.x; return c; } // Used to convert int32_t data input from FlexRIO to cufftReal // Other scaling can occur here as well __global__ void ConvertInputToComplex( const int32_t * __restrict__ dataIn, cufftReal * __restrict__ dataOut) { const int numThreads = blockDim.x * gridDim.x; const int threadId = blockIdx.x * blockDim.x + threadIdx.x; for (size_t offset = threadId; offset < SAMPLES; offset += numThreads) dataOut[offset] = (cufftReal)((float)dataIn[offset]/127.0f); } /* * Main Program Execution */ int main(int argc, char **argv) { /* * Initialization of program values, NI FPGA and CUDA */ int viahostflag = 0, // -H, do transfers and operations via host lpfflag = 0, // -l, pass signal through Low-Pass FIR on FlexRIO awgnflag = 0, // -a, add white gaussian noise to signal on FlexRIO timeflag = 0, // -t, write time-domain signal (only if -H is set) opt; char bitPath [MAX_STR_LEN], // -b [./bitfile], path to *.lvbitx bitfile bitSig [MAX_STR_LEN], // -s [signature], signature of the bitfile rioDev [MAX_STR_LEN]; // -r [RIO0], RIO resource string to open (e.g. 
RIO0 or rio://mysystem/RIO) // Process command line arguments and set above flags while ((opt = getopt(argc, argv, "Hlathb:s:r:")) != -1) { switch (opt) { case 'H': viahostflag = 1; break; case 'l': lpfflag = 1; break; case 'a': awgnflag = 1; break; case 't': timeflag = 1; break; case 'h': fprintf(stderr, HELP_STRING); exit(EXIT_SUCCESS); case 'b': strcpy(bitPath, optarg); break; case 's': strcpy(bitSig, optarg); break; case 'r': strcpy(rioDev, optarg); break; default: abort(); } } // initialize NI FPGA interfaces; use status variable for error handling fprintf(stderr, "Initializing NI FPGA: "); checkStatus(NiFpga_Initialize()); NiFpga_Session session; // Download bitfile to target; get path to bitfile // TODO: change to full path to bitfile as necessary fprintf(stderr, "Downloading bitfile "); checkStatus(NiFpga_Open(bitPath, bitSig, rioDev, 0, &session)); fprintf(stderr, "DONE\n"); struct cudaDeviceProp d_props; int device = 0; // device specifies which GPU should be used. Change this to the index of the desired GPU, if necessary checkStatus(cudaGetDevice(&device)); checkStatus(cudaGetDeviceProperties(&d_props, device)); if (d_props.major < 2) { fprintf(stderr, "CUDA Error: This example requires a CUDA device with architecture SM2.0 or higher\n"); exit(EXIT_FAILURE); } /* * Allocate CUDA Memory for FFT and log scaling operations * As well, allocate complex CUDA Memory for R2C result */ fprintf(stderr, "Allocating CUDA and Host Memory: \n"); int32_t *init_signal; // initial storage for non-scaled data input cufftReal *tmp_result1; // temp storage for scaling data input cuComplex *gpu_result; // Where CUFFT will be stored cuComplex *hcomp_data; // Where host memory will recieve complex data result checkStatus(cudaMalloc((void **)&init_signal, sizeof(*init_signal)*(16*SAMPLES))); checkStatus(cudaMalloc((void **)&tmp_result1, sizeof(cufftReal)*SAMPLES)); checkStatus(cudaMalloc((void **)&gpu_result, sizeof(cuComplex)*COMPLEX_SIZE)); hcomp_data = (cuComplex*) malloc(sizeof(cuComplex)*COMPLEX_SIZE); if (hcomp_data == NULL) { fprintf(stderr, "Host Error: Host failed to allocate memory\n"); return -1; } /* * Make CUFFT plan for 1D Real-to-Complex FFT * also link data path to CUDA device */ cufftHandle plan; checkStatus(cufftCreate(&plan)); checkStatus(cufftPlan1d(&plan, SAMPLES, CUFFT_R2C, 1)); // Configure P2P FIFO between FlexRIO and GPU using NVIDIA GPU Direct if (viahostflag == 1) { /* Host transfer */ checkStatus(NiFpga_ConfigureFifo(session, NiFpga_FPGA_Main_TargetToHostFifoI32_T2HDMAFIFO, (size_t)SAMPLES)); } else { /* P2P via RDMA */ checkStatus(NiFpga_ConfigureFifoBuffer(session, NiFpga_FPGA_Main_TargetToHostFifoI32_T2HDMAFIFO, (uint64_t)init_signal, SAMPLES, NULL, NiFpga_DmaBufferType_NvidiaGpuDirectRdma)); } /* * Set NI FPGA Control/Indicator Values */ // Set RMS Noise value of AWGN Algorithm on FlexRIO (out of i16 full scale, here set as 2048) NiFpga_WriteU16(session, NiFpga_FPGA_Main_ControlU16_RMSNoise, 2048); // Reset FPGA algorithms and clear FIFOs NiFpga_WriteBool(session, NiFpga_FPGA_Main_ControlBool_aReset, NiFpga_True); NiFpga_WriteBool(session, NiFpga_FPGA_Main_ControlBool_aReset, NiFpga_False); if (lpfflag == 1) { fprintf(stderr, "Enabling Low-Pass FIR Filter on FlexRIO\n"); NiFpga_WriteBool(session, NiFpga_FPGA_Main_ControlBool_LPFEnable, NiFpga_True); } else NiFpga_WriteBool(session, NiFpga_FPGA_Main_ControlBool_LPFEnable, NiFpga_False); if (awgnflag == 1) { fprintf(stderr, "Adding White Gaussian Noise to Signal\n"); NiFpga_WriteBool(session, 
NiFpga_FPGA_Main_ControlBool_AWGNEnable, NiFpga_True); } else NiFpga_WriteBool(session, NiFpga_FPGA_Main_ControlBool_AWGNEnable, NiFpga_False); /* * DMA (or copy from host) signal to GPU and execute FFT plan */ if (viahostflag == 1) { int32_t * h_data = NULL; // ptr for when transferring data to host first fprintf(stderr, "Copy host memory signal to CUDA Device\n"); h_data = (int32_t *)malloc(SAMPLES * sizeof(int32_t)); checkStatus(NiFpga_ReadFifoI32(session, NiFpga_FPGA_Main_TargetToHostFifoI32_T2HDMAFIFO, h_data, (size_t)SAMPLES, 5000, NULL)); if (timeflag == 1) { fprintf(stderr, "Writing time-domain signal to TimeSignal.dat\n"); FILE *f = fopen("TimeSignal.dat", "w"); for(int i = 0; i < SAMPLES; i++) { if (i == 0) fprintf(f, "# X Y\n"); // Write real component to file fprintf(f, "%d %d\n", i, h_data[i]); } fclose(f); } if (cudaMemcpy(init_signal, h_data, (SAMPLES*sizeof(int32_t)), cudaMemcpyHostToDevice) != cudaSuccess) { fprintf(stderr, "CUDA Error: Device failed to copy host memory to device\n"); return -1; } free(h_data); } else { fprintf(stderr, "DMA'ing FlexRIO data to GPU\n"); size_t elemsAcquired, elemsRemaining; checkStatus(NiFpga_AcquireFifoReadElementsI32(session, NiFpga_FPGA_Main_TargetToHostFifoI32_T2HDMAFIFO, &init_signal, SAMPLES, 5000, &elemsAcquired, &elemsRemaining)); fprintf(stderr, "%d samples acquired with %d elements remaining in FIFO\n", elemsAcquired, elemsRemaining); } /* * Start FFT on GPU */ fprintf(stderr, "Executing CUFFT Plan...\n"); // create timers cudaEvent_t start, end; cudaEventCreate(&start); cudaEventCreate(&end); float elapsedTime=0; checkStatus(cudaEventRecord(start, 0)); // Convert input signal from real to complex ConvertInputToComplex<<<32, 128>>>(init_signal, tmp_result1); checkStatus(cudaGetLastError()); // Execute FFT on data checkStatus(cufftExecR2C(plan, tmp_result1, gpu_result)); checkStatus(cudaGetLastError()); // Stop and record time taken checkStatus(cudaEventRecord(end, 0)); // Sync and wait for completion checkStatus(cudaDeviceSynchronize()); checkStatus(cudaEventSynchronize(end)); checkStatus(cudaEventElapsedTime(&elapsedTime, start, end)); fprintf(stderr, "FFT took %fms to complete on GPU\n", elapsedTime); // Copy FFT result back to host) fprintf(stderr, "Copying CUFFT result back to host memory...\n"); checkStatus(cudaMemcpy(hcomp_data, gpu_result, SAMPLES, cudaMemcpyDeviceToHost)); /* * Write resulting data to file */ fprintf(stderr, "Writing signal to stdout:\n\n"); for(int32_t i = 0; i < COMPLEX_SIZE; i++) { if (i == 0) fprintf(stdout, "# X Y\n"); // Write real component to file // TODO: Implement power conversion as CUFFT Callback fprintf(stdout, "%d %f\n", i, 20.0f*log(hcomp_data[i].y)); } // Close out CUFFT plan(s) and free CUDA memory fprintf(stderr, "Closing CUFFT and freeing CUDA memory...\n"); checkStatus(cufftDestroy(plan)); checkStatus(cudaFree(init_signal)); checkStatus(cudaFree(tmp_result1)); checkStatus(cudaFree(gpu_result)); free(hcomp_data); // Clean up NVIDIA Driver state cudaDeviceReset(); // Close NI FPGA References; must be last NiFpga calls fprintf(stderr, "Stopping NI FPGA...\n"); checkStatus(NiFpga_Close(session, 0)); checkStatus(NiFpga_Finalize()); return EXIT_SUCCESS; }
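The GPU_FFT pair above centers on cuFFT's 1D real-to-complex path (cufftPlan1d / cufftExecR2C). As a reference point, a minimal self-contained sketch of that flow follows; it is not part of the dataset, the buffer names are illustrative, and the input is only zero-filled so the API sequence stays visible. An N-point R2C transform produces N/2 + 1 complex bins.

// Minimal cuFFT R2C sketch (illustrative, not from the dataset).
#include <cuda_runtime.h>
#include <cufft.h>

int main(void) {
    const int N = 1 << 20;                          // transform length
    cufftReal    *d_real = NULL;                    // N real input samples
    cufftComplex *d_spectrum = NULL;                // R2C output: N/2 + 1 bins
    cudaMalloc((void **)&d_real, sizeof(cufftReal) * N);
    cudaMalloc((void **)&d_spectrum, sizeof(cufftComplex) * (N / 2 + 1));
    cudaMemset(d_real, 0, sizeof(cufftReal) * N);   // placeholder input

    cufftHandle plan;
    cufftPlan1d(&plan, N, CUFFT_R2C, 1);            // 1D, single batch
    cufftExecR2C(plan, d_real, d_spectrum);         // asynchronous launch
    cudaDeviceSynchronize();                        // wait before using results

    cufftDestroy(plan);
    cudaFree(d_real);
    cudaFree(d_spectrum);
    return 0;
}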
6101420b5273e913a8a5295fec8b9c8bfa678a52.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__device__ int sum = 1;

__global__ void degreeCalc (int *array){
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i>=1000000){
    return;
  }
  sum+=array[i];
  // if (i==999999){
  //   printf("%d", sum);
  // }
}

__global__ void degreeCalc (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m){
  int i= blockDim.x * blockIdx.x + threadIdx.x;
  if (i>=n){
    return;
  }
  int start = -1, stop = -1;
  int diff=0;
  start = vertexArray[i];
  stop = vertexArray[i+1];
  diff = stop-start;
  degreeCount[i]=diff;
}
6101420b5273e913a8a5295fec8b9c8bfa678a52.cu
#include "includes.h" __device__ int sum = 1; __global__ void degreeCalc (int *array){ int i = blockDim.x * blockIdx.x + threadIdx.x; if (i>=1000000){ return; } sum+=array[i]; // if (i==999999){ // printf("%d", sum); // } } __global__ void degreeCalc (int *vertexArray, int *neighbourArray, int *degreeCount, int n, int m){ int i= blockDim.x * blockIdx.x + threadIdx.x; if (i>=n){ return; } int start = -1, stop = -1; int diff=0; start = vertexArray[i]; stop = vertexArray[i+1]; diff = stop-start; degreeCount[i]=diff; }
9f51a842cc941177ed738f4f33b2ada12beb0f8a.hip
// !!! This is a file automatically generated by hipify!!! #include "AARect.h" #include "Box.h" #include "Camera.h" #include "Canvas.h" #include "HittableList.h" #include "Instance.h" #include "Material.h" #include "MovingSphere.h" #include "Perlin.h" #include "Random.h" #include "Ray.h" #include "Sphere.h" #include "Vec3.h" #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #define STB_IMAGE_IMPLEMENTATION #include <stb_image.h> __device__ Color RayColor( hiprandState_t* randState, const Ray& r, HittableList** world, Color background, int depth ) { HitRecord rec; Color totalAttenuation( 1.0, 1.0, 1.0 ); Ray curRay = r; for ( int i = 0; i < depth; ++i ) { if ( ( *world )->Hit( curRay, 0.001, DBL_MAX, rec ) ) { Color attenuation( 1.0, 1.0, 1.0 ); Color emitted = rec.m_material->Emitted( rec.m_u, rec.m_v, rec.m_hitPosition ); Ray scattered; if ( rec.m_material->Scatter( randState, curRay, rec, attenuation, scattered ) ) { totalAttenuation = totalAttenuation * attenuation + emitted; curRay = scattered; } else { return totalAttenuation * emitted; } } else { return background; } } return Color( 0, 0, 0 ); } __global__ void FillCanvas( Pixel* devPixels, HittableList** world, std::size_t width, std::size_t height, Color background, Camera cam, int numSample, int maxRayDepth ) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * width; y = height - y - 1; // invert y if ( x < width && y < height ) { hiprandState_t randState; hiprand_init( offset, 0, 0, &randState ); Color pixelColor( 0, 0, 0 ); for ( int i = 0; i < numSample; ++i ) { double u = double( x + RandomDouble( &randState ) ) / ( width - 1 ); double v = double( y + RandomDouble( &randState ) ) / ( height - 1 ); Ray r = cam.GetRay( &randState, u, v ); pixelColor += RayColor( &randState, r, world, background, maxRayDepth ); } WriteColor( devPixels[offset].m_color, pixelColor, numSample ); } } __global__ void CreateWorld( HittableList** world ) { *world = new HittableList( ); //( *world )->Add( new Sphere( Point3( 0, -100.5, -1 ), 100, new Lambertian( Color( 0.8, 0.8, 0.0 ) ) ) ); //( *world )->Add( new Sphere( Point3( 0, 0, -1 ), 0.5, new Lambertian( Color( 0.1, 0.2, 0.5 ) ) ) ); //( *world )->Add( new Sphere( Point3( -1, 0, -1 ), 0.5, new Dielectric( 1.5 ) ) ); //( *world )->Add( new Sphere( Point3( -1, 0, -1 ), -0.45, new Dielectric( 1.5 ) ) ); //( *world )->Add( new Sphere( Point3( 1, 0, -1 ), 0.5, new Metal( Color( 0.8, 0.6, 0.2 ), 0.0 ) ) ); //double R = cos( CUDART_PI / 4 ); //( *world )->Add( new Sphere( Point3( -R, 0, -1 ), R, new Lambertian( Color( 0, 0, 1 ) ) ) ); //( *world )->Add( new Sphere( Point3( R, 0, -1 ), R, new Lambertian( Color( 1, 0, 0 ) ) ) ); ( *world )->Add( new Sphere( Point3( 0, -1000, 0 ), 1000, new Lambertian( new CheckerTexture( Color( 0.2, 0.3, 0.1 ), Color( 0.9, 0.9, 0.9 ) ) ) ) ); hiprandState_t randState; hiprand_init( 1024, 768, 0, &randState ); for ( int i = -11; i < 11; ++i ) { for ( int j = -11; j < 11; ++j ) { double chooseMaterial = RandomDouble( &randState ); Point3 center( i + 0.9 * RandomDouble( &randState ), 0.2, j + 0.9 * RandomDouble( &randState ) ); if ( ( center - Point3( 4, 0.2, 0 ) ).Length( ) > 0.9 ) { if ( chooseMaterial < 0.8 ) { // diffuse Color albedo = Random( &randState ) * Random( &randState ); Point3 center2 = center + Vec3( 0, RandomDouble( &randState, 0, 0.5 ), 0 ); ( *world )->Add( new MovingSphere( center, center2, 0.0, 1.0, 0.2, new Lambertian( albedo ) ) ); } else if ( chooseMaterial < 0.95 ) { // 
metal Color albedo = Random( &randState, 0.5, 1 ); double fuzz = RandomDouble( &randState, 0, 0.5 ); ( *world )->Add( new Sphere( center, 0.2, new Metal( albedo, fuzz ) ) ); } else { // glass ( *world )->Add( new Sphere( center, 0.2, new Dielectric( 1.5 ) ) ); } } } } ( *world )->Add( new Sphere( Point3( 0, 1, 0 ), 1.0, new Dielectric( 1.5 ) ) ); ( *world )->Add( new Sphere( Point3( -4, 1, 0 ), 1.0, new Lambertian( Color( 0.4, 0.2, 0.1 ) ) ) ); ( *world )->Add( new Sphere( Point3( 4, 1, 0 ), 1.0, new Metal( Color( 0.7, 0.6, 0.5 ), 0 ) ) ); } __global__ void CreateTwoSpheresWorld( HittableList** world ) { *world = new HittableList( ); ( *world )->Add( new Sphere( Point3( 0, -10, 0 ), 10, new Lambertian( new CheckerTexture( Color( 0.2, 0.3, 0.1 ), Color( 0.9, 0.9, 0.9 ) ) ) ) ); ( *world )->Add( new Sphere( Point3( 0, 10, 0 ), 10, new Lambertian( new CheckerTexture( Color( 0.2, 0.3, 0.1 ), Color( 0.9, 0.9, 0.9 ) ) ) ) ); } __global__ void CreatePerlinTextureWorld( HittableList** world, Perlin* perlin ) { *world = new HittableList( ); ( *world )->Add( new Sphere( Point3( 0, -1000, 0 ), 1000, new Lambertian( new NoiseTexture( perlin, 4 ) ) ) ); ( *world )->Add( new Sphere( Point3( 0, 2, 0 ), 2, new Lambertian( new NoiseTexture( perlin, 4 ) ) ) ); } texture<uchar4, 2> g_earth; __global__ void CreateEarthWorld( HittableList** world, int width, int height ) { *world = new HittableList( ); ( *world )->Add( new Sphere( Point3( 0, 0, 0 ), 2, new Lambertian( new ImageTexture( g_earth, width, height ) ) ) ); } __global__ void CreateSimpleLightWorld( HittableList** world, Perlin* perlin ) { *world = new HittableList( ); ( *world )->Add( new Sphere( Point3( 0, -1000, 0 ), 1000, new Lambertian( new NoiseTexture( perlin, 4 ) ) ) ); ( *world )->Add( new Sphere( Point3( 0, 2, 0 ), 2, new Lambertian( new NoiseTexture( perlin, 4 ) ) ) ); ( *world )->Add( new XYRect( 3, 5, 1, 3, -2, new DiffuseLight( Color( 4, 4, 4 ) ) ) ); ( *world )->Add( new Sphere( Point3( 0, 7, 0 ), 2, new DiffuseLight( Color( 4, 4, 4 ) ) ) ); } __global__ void CreateCornellBoxWorld( HittableList** world ) { *world = new HittableList( ); ( *world )->Add( new YZRect( 0, 555, 0, 555, 555, new Lambertian( Color( 0.12, 0.45, 0.15 ) ) ) ); ( *world )->Add( new YZRect( 0, 555, 0, 555, 0, new Lambertian( Color( 0.65, 0.05, 0.05 ) ) ) ); ( *world )->Add( new XZRect( 213, 343, 227, 332, 554, new DiffuseLight( Color( 15, 15, 15 ) ) ) ); ( *world )->Add( new XZRect( 0, 555, 0, 555, 0, new Lambertian( Color( 0.73, 0.73, 0.73 ) ) ) ); ( *world )->Add( new XZRect( 0, 555, 0, 555, 555, new Lambertian( Color( 0.73, 0.73, 0.73 ) ) ) ); ( *world )->Add( new XYRect( 0, 555, 0, 555, 555, new Lambertian( Color( 0.73, 0.73, 0.73 ) ) ) ); Hittable* box1 = new Box( Point3( 0, 0, 0 ), Point3( 165, 330, 165 ), new Lambertian( Color( 0.73, 0.73, 0.73 ) ) ); box1 = new RotateY( box1, 15 ); box1 = new Translate( box1, Vec3( 265, 0, 295 ) ); ( *world )->Add( box1 ); Hittable* box2 = new Box( Point3( 0, 0, 0 ), Point3( 165, 165, 165 ), new Lambertian( Color( 0.73, 0.73, 0.73 ) ) ); box2 = new RotateY( box2, -18 ); box2 = new Translate( box2, Vec3( 130, 0, 65 ) ); ( *world )->Add( box2 ); } __global__ void DestroyWorld( HittableList** world ) { (*world)->Clear( ); delete *world; } int main( ) { Point3 lookFrom( 13, 2, 3 ); Point3 lookAt( 0, 0, 0 ); double fov = 40.0; double aperture = 0.0; int samplesPerPixel = 100; Color background( 0.7, 0.8, 1.0 ); double aspectRatio = 16.0 / 9.0; int canvasWidth = 400; Perlin* perlinTexture = nullptr; HittableList** world = nullptr; 
hipMalloc( (void**)&world, sizeof( HittableList* ) ); float3* deviceEarth = nullptr; switch ( 0 ) { case 1: hipLaunchKernelGGL(( CreateWorld), dim3(1), dim3(1), 0, 0, world ); fov = 20.0; aperture = 0.1; break; case 2: hipLaunchKernelGGL(( CreateTwoSpheresWorld), dim3(1), dim3(1), 0, 0, world ); fov = 20.0; break; case 3: hipMalloc( &perlinTexture, sizeof( Perlin ) ); hipLaunchKernelGGL(( GeneratePerlinTexture), dim3(16), dim3(16), 0, 0, perlinTexture ); hipLaunchKernelGGL(( CreatePerlinTextureWorld), dim3(1), dim3(1), 0, 0, world, perlinTexture ); fov = 20.0; break; case 4: { int width = 0; int height = 0; int componentPerPixel = 4; unsigned char* data = stbi_load( "earthmap.jpg", &width, &height, &componentPerPixel, componentPerPixel ); hipMalloc( (void**)&deviceEarth, sizeof( uchar4 ) * width * height ); hipMemcpy( deviceEarth, data, sizeof( uchar4 ) * width * height, hipMemcpyHostToDevice ); delete[] data; hipChannelFormatDesc desc = hipCreateChannelDesc<uchar4>( ); hipBindTexture2D( nullptr, &g_earth, deviceEarth, &desc, width, height, sizeof( uchar4 ) * width ); hipLaunchKernelGGL(( CreateEarthWorld), dim3(1), dim3(1), 0, 0, world, width, height ); fov = 20.0; } break; case 5: hipMalloc( &perlinTexture, sizeof( Perlin ) ); hipLaunchKernelGGL(( GeneratePerlinTexture), dim3(16), dim3(16), 0, 0, perlinTexture ); hipLaunchKernelGGL(( CreateSimpleLightWorld), dim3(1), dim3(1), 0, 0, world, perlinTexture ); samplesPerPixel = 400; background = Color( 0, 0, 0 ); lookFrom = Point3( 26, 3, 6 ); lookAt = Point3( 0, 2, 0 ); fov = 20.0; break; case 6: default: hipLaunchKernelGGL(( CreateCornellBoxWorld), dim3(1), dim3(1), 0, 0, world ); aspectRatio = 1; canvasWidth = 600; samplesPerPixel = 200; background = Color( 0, 0, 0 ); lookFrom = Point3( 278, 278, -800 ); lookAt = Point3( 278, 278, 0 ); fov = 40.0; break; } // camera Camera cam( lookFrom, lookAt, fov, aspectRatio, aperture, 10, 0.0, 1.0 ); // canvas const int canvasHeight = static_cast<int>( canvasWidth / aspectRatio ); Canvas canvas( canvasWidth, canvasHeight ); Pixel* devPixels = nullptr; hipMalloc( (void**)&devPixels, canvas.Size( ) ); int curDevice = 0; hipGetDevice( &curDevice ); hipDeviceProp_t prop; hipGetDeviceProperties( &prop, curDevice ); dim3 grids( static_cast<unsigned int>( ( canvas.Width() + 7 ) / 8 ) , static_cast<unsigned int>( ( canvas.Height( ) + 7 ) / 8 ) ); dim3 threads( 8, 8 ); hipLaunchKernelGGL(( FillCanvas), dim3(grids), dim3(threads), 0, 0, devPixels, world, canvas.Width(), canvas.Height(), background, cam, samplesPerPixel, 50 ); hipLaunchKernelGGL(( DestroyWorld), dim3(1), dim3(1), 0, 0, world ); hipMemcpy( canvas.Pixels(), devPixels, canvas.Size( ), hipMemcpyDeviceToHost ); hipFree( devPixels ); hipFree( world ); hipFree( perlinTexture ); hipFree( deviceEarth ); hipUnbindTexture( &g_earth ); canvas.WriteFile( "./image6_2.ppm" ); }
9f51a842cc941177ed738f4f33b2ada12beb0f8a.cu
#include "AARect.h" #include "Box.h" #include "Camera.h" #include "Canvas.h" #include "HittableList.h" #include "Instance.h" #include "Material.h" #include "MovingSphere.h" #include "Perlin.h" #include "Random.h" #include "Ray.h" #include "Sphere.h" #include "Vec3.h" #include <cuda_runtime.h> #include <curand_kernel.h> #define STB_IMAGE_IMPLEMENTATION #include <stb_image.h> __device__ Color RayColor( curandState_t* randState, const Ray& r, HittableList** world, Color background, int depth ) { HitRecord rec; Color totalAttenuation( 1.0, 1.0, 1.0 ); Ray curRay = r; for ( int i = 0; i < depth; ++i ) { if ( ( *world )->Hit( curRay, 0.001, DBL_MAX, rec ) ) { Color attenuation( 1.0, 1.0, 1.0 ); Color emitted = rec.m_material->Emitted( rec.m_u, rec.m_v, rec.m_hitPosition ); Ray scattered; if ( rec.m_material->Scatter( randState, curRay, rec, attenuation, scattered ) ) { totalAttenuation = totalAttenuation * attenuation + emitted; curRay = scattered; } else { return totalAttenuation * emitted; } } else { return background; } } return Color( 0, 0, 0 ); } __global__ void FillCanvas( Pixel* devPixels, HittableList** world, std::size_t width, std::size_t height, Color background, Camera cam, int numSample, int maxRayDepth ) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * width; y = height - y - 1; // invert y if ( x < width && y < height ) { curandState_t randState; curand_init( offset, 0, 0, &randState ); Color pixelColor( 0, 0, 0 ); for ( int i = 0; i < numSample; ++i ) { double u = double( x + RandomDouble( &randState ) ) / ( width - 1 ); double v = double( y + RandomDouble( &randState ) ) / ( height - 1 ); Ray r = cam.GetRay( &randState, u, v ); pixelColor += RayColor( &randState, r, world, background, maxRayDepth ); } WriteColor( devPixels[offset].m_color, pixelColor, numSample ); } } __global__ void CreateWorld( HittableList** world ) { *world = new HittableList( ); //( *world )->Add( new Sphere( Point3( 0, -100.5, -1 ), 100, new Lambertian( Color( 0.8, 0.8, 0.0 ) ) ) ); //( *world )->Add( new Sphere( Point3( 0, 0, -1 ), 0.5, new Lambertian( Color( 0.1, 0.2, 0.5 ) ) ) ); //( *world )->Add( new Sphere( Point3( -1, 0, -1 ), 0.5, new Dielectric( 1.5 ) ) ); //( *world )->Add( new Sphere( Point3( -1, 0, -1 ), -0.45, new Dielectric( 1.5 ) ) ); //( *world )->Add( new Sphere( Point3( 1, 0, -1 ), 0.5, new Metal( Color( 0.8, 0.6, 0.2 ), 0.0 ) ) ); //double R = cos( CUDART_PI / 4 ); //( *world )->Add( new Sphere( Point3( -R, 0, -1 ), R, new Lambertian( Color( 0, 0, 1 ) ) ) ); //( *world )->Add( new Sphere( Point3( R, 0, -1 ), R, new Lambertian( Color( 1, 0, 0 ) ) ) ); ( *world )->Add( new Sphere( Point3( 0, -1000, 0 ), 1000, new Lambertian( new CheckerTexture( Color( 0.2, 0.3, 0.1 ), Color( 0.9, 0.9, 0.9 ) ) ) ) ); curandState_t randState; curand_init( 1024, 768, 0, &randState ); for ( int i = -11; i < 11; ++i ) { for ( int j = -11; j < 11; ++j ) { double chooseMaterial = RandomDouble( &randState ); Point3 center( i + 0.9 * RandomDouble( &randState ), 0.2, j + 0.9 * RandomDouble( &randState ) ); if ( ( center - Point3( 4, 0.2, 0 ) ).Length( ) > 0.9 ) { if ( chooseMaterial < 0.8 ) { // diffuse Color albedo = Random( &randState ) * Random( &randState ); Point3 center2 = center + Vec3( 0, RandomDouble( &randState, 0, 0.5 ), 0 ); ( *world )->Add( new MovingSphere( center, center2, 0.0, 1.0, 0.2, new Lambertian( albedo ) ) ); } else if ( chooseMaterial < 0.95 ) { // metal Color albedo = Random( &randState, 0.5, 1 ); double fuzz = RandomDouble( 
&randState, 0, 0.5 ); ( *world )->Add( new Sphere( center, 0.2, new Metal( albedo, fuzz ) ) ); } else { // glass ( *world )->Add( new Sphere( center, 0.2, new Dielectric( 1.5 ) ) ); } } } } ( *world )->Add( new Sphere( Point3( 0, 1, 0 ), 1.0, new Dielectric( 1.5 ) ) ); ( *world )->Add( new Sphere( Point3( -4, 1, 0 ), 1.0, new Lambertian( Color( 0.4, 0.2, 0.1 ) ) ) ); ( *world )->Add( new Sphere( Point3( 4, 1, 0 ), 1.0, new Metal( Color( 0.7, 0.6, 0.5 ), 0 ) ) ); } __global__ void CreateTwoSpheresWorld( HittableList** world ) { *world = new HittableList( ); ( *world )->Add( new Sphere( Point3( 0, -10, 0 ), 10, new Lambertian( new CheckerTexture( Color( 0.2, 0.3, 0.1 ), Color( 0.9, 0.9, 0.9 ) ) ) ) ); ( *world )->Add( new Sphere( Point3( 0, 10, 0 ), 10, new Lambertian( new CheckerTexture( Color( 0.2, 0.3, 0.1 ), Color( 0.9, 0.9, 0.9 ) ) ) ) ); } __global__ void CreatePerlinTextureWorld( HittableList** world, Perlin* perlin ) { *world = new HittableList( ); ( *world )->Add( new Sphere( Point3( 0, -1000, 0 ), 1000, new Lambertian( new NoiseTexture( perlin, 4 ) ) ) ); ( *world )->Add( new Sphere( Point3( 0, 2, 0 ), 2, new Lambertian( new NoiseTexture( perlin, 4 ) ) ) ); } texture<uchar4, 2> g_earth; __global__ void CreateEarthWorld( HittableList** world, int width, int height ) { *world = new HittableList( ); ( *world )->Add( new Sphere( Point3( 0, 0, 0 ), 2, new Lambertian( new ImageTexture( g_earth, width, height ) ) ) ); } __global__ void CreateSimpleLightWorld( HittableList** world, Perlin* perlin ) { *world = new HittableList( ); ( *world )->Add( new Sphere( Point3( 0, -1000, 0 ), 1000, new Lambertian( new NoiseTexture( perlin, 4 ) ) ) ); ( *world )->Add( new Sphere( Point3( 0, 2, 0 ), 2, new Lambertian( new NoiseTexture( perlin, 4 ) ) ) ); ( *world )->Add( new XYRect( 3, 5, 1, 3, -2, new DiffuseLight( Color( 4, 4, 4 ) ) ) ); ( *world )->Add( new Sphere( Point3( 0, 7, 0 ), 2, new DiffuseLight( Color( 4, 4, 4 ) ) ) ); } __global__ void CreateCornellBoxWorld( HittableList** world ) { *world = new HittableList( ); ( *world )->Add( new YZRect( 0, 555, 0, 555, 555, new Lambertian( Color( 0.12, 0.45, 0.15 ) ) ) ); ( *world )->Add( new YZRect( 0, 555, 0, 555, 0, new Lambertian( Color( 0.65, 0.05, 0.05 ) ) ) ); ( *world )->Add( new XZRect( 213, 343, 227, 332, 554, new DiffuseLight( Color( 15, 15, 15 ) ) ) ); ( *world )->Add( new XZRect( 0, 555, 0, 555, 0, new Lambertian( Color( 0.73, 0.73, 0.73 ) ) ) ); ( *world )->Add( new XZRect( 0, 555, 0, 555, 555, new Lambertian( Color( 0.73, 0.73, 0.73 ) ) ) ); ( *world )->Add( new XYRect( 0, 555, 0, 555, 555, new Lambertian( Color( 0.73, 0.73, 0.73 ) ) ) ); Hittable* box1 = new Box( Point3( 0, 0, 0 ), Point3( 165, 330, 165 ), new Lambertian( Color( 0.73, 0.73, 0.73 ) ) ); box1 = new RotateY( box1, 15 ); box1 = new Translate( box1, Vec3( 265, 0, 295 ) ); ( *world )->Add( box1 ); Hittable* box2 = new Box( Point3( 0, 0, 0 ), Point3( 165, 165, 165 ), new Lambertian( Color( 0.73, 0.73, 0.73 ) ) ); box2 = new RotateY( box2, -18 ); box2 = new Translate( box2, Vec3( 130, 0, 65 ) ); ( *world )->Add( box2 ); } __global__ void DestroyWorld( HittableList** world ) { (*world)->Clear( ); delete *world; } int main( ) { Point3 lookFrom( 13, 2, 3 ); Point3 lookAt( 0, 0, 0 ); double fov = 40.0; double aperture = 0.0; int samplesPerPixel = 100; Color background( 0.7, 0.8, 1.0 ); double aspectRatio = 16.0 / 9.0; int canvasWidth = 400; Perlin* perlinTexture = nullptr; HittableList** world = nullptr; cudaMalloc( (void**)&world, sizeof( HittableList* ) ); float3* deviceEarth = 
nullptr; switch ( 0 ) { case 1: CreateWorld<<<1, 1>>>( world ); fov = 20.0; aperture = 0.1; break; case 2: CreateTwoSpheresWorld<<<1, 1>>>( world ); fov = 20.0; break; case 3: cudaMalloc( &perlinTexture, sizeof( Perlin ) ); GeneratePerlinTexture<<<16, 16>>>( perlinTexture ); CreatePerlinTextureWorld<<<1, 1>>>( world, perlinTexture ); fov = 20.0; break; case 4: { int width = 0; int height = 0; int componentPerPixel = 4; unsigned char* data = stbi_load( "earthmap.jpg", &width, &height, &componentPerPixel, componentPerPixel ); cudaMalloc( (void**)&deviceEarth, sizeof( uchar4 ) * width * height ); cudaMemcpy( deviceEarth, data, sizeof( uchar4 ) * width * height, cudaMemcpyHostToDevice ); delete[] data; cudaChannelFormatDesc desc = cudaCreateChannelDesc<uchar4>( ); cudaBindTexture2D( nullptr, &g_earth, deviceEarth, &desc, width, height, sizeof( uchar4 ) * width ); CreateEarthWorld<<<1, 1>>>( world, width, height ); fov = 20.0; } break; case 5: cudaMalloc( &perlinTexture, sizeof( Perlin ) ); GeneratePerlinTexture<<<16, 16>>>( perlinTexture ); CreateSimpleLightWorld<<<1, 1>>>( world, perlinTexture ); samplesPerPixel = 400; background = Color( 0, 0, 0 ); lookFrom = Point3( 26, 3, 6 ); lookAt = Point3( 0, 2, 0 ); fov = 20.0; break; case 6: default: CreateCornellBoxWorld<<<1, 1>>>( world ); aspectRatio = 1; canvasWidth = 600; samplesPerPixel = 200; background = Color( 0, 0, 0 ); lookFrom = Point3( 278, 278, -800 ); lookAt = Point3( 278, 278, 0 ); fov = 40.0; break; } // camera Camera cam( lookFrom, lookAt, fov, aspectRatio, aperture, 10, 0.0, 1.0 ); // canvas const int canvasHeight = static_cast<int>( canvasWidth / aspectRatio ); Canvas canvas( canvasWidth, canvasHeight ); Pixel* devPixels = nullptr; cudaMalloc( (void**)&devPixels, canvas.Size( ) ); int curDevice = 0; cudaGetDevice( &curDevice ); cudaDeviceProp prop; cudaGetDeviceProperties( &prop, curDevice ); dim3 grids( static_cast<unsigned int>( ( canvas.Width() + 7 ) / 8 ) , static_cast<unsigned int>( ( canvas.Height( ) + 7 ) / 8 ) ); dim3 threads( 8, 8 ); FillCanvas<<<grids, threads>>>( devPixels, world, canvas.Width(), canvas.Height(), background, cam, samplesPerPixel, 50 ); DestroyWorld<<<1, 1>>>( world ); cudaMemcpy( canvas.Pixels(), devPixels, canvas.Size( ), cudaMemcpyDeviceToHost ); cudaFree( devPixels ); cudaFree( world ); cudaFree( perlinTexture ); cudaFree( deviceEarth ); cudaUnbindTexture( &g_earth ); canvas.WriteFile( "./image6_2.ppm" ); }
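The FillCanvas kernel in the ray-tracer pair above draws its samples from cuRAND's device API, initializing one generator state per pixel from that pixel's offset. A minimal standalone sketch of the per-thread pattern follows; it is not taken from the dataset, and the kernel name and output buffer are illustrative.

// Per-thread cuRAND pattern (illustrative), mirroring the seeding scheme
// used by FillCanvas above: one state per pixel, seeded by its offset.
#include <cuda_runtime.h>
#include <curand_kernel.h>

__global__ void fillRandom(float *out, int width, int height) {
    int x = threadIdx.x + blockIdx.x * blockDim.x;
    int y = threadIdx.y + blockIdx.y * blockDim.y;
    if (x >= width || y >= height)
        return;

    int offset = x + y * width;
    curandState_t randState;
    curand_init(offset, 0, 0, &randState);     // seed, subsequence, offset
    out[offset] = curand_uniform(&randState);  // uniform float in (0, 1]
}

// Launch mirrors the grids/threads computation in main() above:
//   dim3 threads(8, 8);
//   dim3 grids((width + 7) / 8, (height + 7) / 8);
//   fillRandom<<<grids, threads>>>(d_out, width, height);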
d4b85c0bc370a08a2ac724570b6ccebd20a992f5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" { #include "lua.h" #include "lualib.h" #include "lauxlib.h" } #include "luaT.h" #include "THH.h" #include <stdio.h> #include <assert.h> #include <math_constants.h> #include <stdint.h> #include <unistd.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <limits.h> #define TB 128 // PARAMETERS #define FB_R_VAR 0.01 #define FB_V_INCR 1 #define FB_V_DECR 0.1 #define FB_T_INCR 0.5 #define FB_T_DECR 0.25 #define FB_T_LOWER 2 #define FB_T_UPPER 255 #define UNSTABLE_REG_RATIO_MIN 0.1 #define UNSTABLE_REG_RDIST_MIN 3.0 #define Rd_0 0.4 #define Rc_0 50 #define Rc_ofs Rc_0/5 #define Rd_ofs 0.6 #define max( a, b ) ( ((a) > (b)) ? (a) : (b) ) #define min( a, b ) ( ((a) < (b)) ? (a) : (b) ) THCState* getCutorchState(lua_State* L) { lua_getglobal(L, "cutorch"); lua_getfield(L, -1, "getState"); lua_call(L, 0, 1); THCState *state = (THCState*) lua_touserdata(L, -1); lua_pop(L, 2); return state; } void checkCudaError(lua_State *L) { hipError_t status = hipPeekAtLastError(); if (status != hipSuccess) { luaL_error(L, hipGetErrorString(status)); } } __global__ void update_model_(float *modelD, float *modelC, float *sampleD, float *sampleC, float *lR, int sizeHW, int sizeFHW, int sizeF, int sizeN, float *FG, float* r_smp, float* r_prb, int trigger){ int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < sizeHW){ int lr = (FG[id])?FB_T_LOWER:(trigger)?1:ceil(lR[id]); int prb = ceil(r_prb[id]); if ( prb % lr == 0 ){ int smp = ceil(r_smp[id]); int smp_idx_d = smp * sizeFHW; int smp_idx_c = smp * sizeHW; // copy color feature modelC[smp_idx_c + id] = sampleC[id]; // copy embedding for (int i = 0; i < sizeF; i++){ modelD[smp_idx_d + i*sizeHW + id] = sampleD[i*sizeHW + id]; } } } } int update_model(lua_State *L){ THCState *state = getCutorchState(L); THCudaTensor *modelD = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *modelC = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *sampleD = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *sampleC = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); THCudaTensor *learningRate = (THCudaTensor*)luaT_checkudata(L, 5, "torch.CudaTensor"); THCudaTensor *FG = (THCudaTensor*)luaT_checkudata(L, 6, "torch.CudaTensor"); THCudaTensor *r_smp = (THCudaTensor*)luaT_checkudata(L, 7, "torch.CudaTensor"); THCudaTensor *r_prb = (THCudaTensor*)luaT_checkudata(L, 8, "torch.CudaTensor"); int trigger = luaL_checkinteger(L, 9); int sizeHW = THCudaTensor_size(state, modelD, 3) * THCudaTensor_size(state, modelD, 4); int sizeFHW = THCudaTensor_size(state, modelD, 2) * THCudaTensor_size(state, modelD, 3) * THCudaTensor_size(state, modelD, 4); int sizeF = THCudaTensor_size(state, modelD, 2); int sizeN = THCudaTensor_size(state, modelD, 0); hipLaunchKernelGGL(( update_model_), dim3((sizeHW - 1)/ TB + 1), dim3(TB) , 0, 0, THCudaTensor_data(state, modelD), THCudaTensor_data(state, modelC), THCudaTensor_data(state, sampleD), THCudaTensor_data(state, sampleC), THCudaTensor_data(state, learningRate), sizeHW, sizeFHW, sizeF, sizeN, THCudaTensor_data(state, FG), THCudaTensor_data(state, r_smp), THCudaTensor_data(state, r_prb), trigger ); checkCudaError(L); return 1; } __device__ void sort(float *x, int n){ for (int i = 0; i < n - 1; i++) { int min = i; for (int j = i + 1; j < n; j++) { if (x[j] < x[min]) { min = j; } } float tmp = x[min]; x[min] = x[i]; x[i] = tmp; } } __global__ void median2d(float *img, 
float *out, int size, int dim2, int dim3, int kernel_radius){ int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % dim3; int y = id / dim3; float xs[11 * 11]; int xs_size = 0; for (int xx = x - kernel_radius; xx <= x + kernel_radius; xx++) { for (int yy = y - kernel_radius; yy <= y + kernel_radius; yy++) { if (0 <= xx && xx < dim3 && 0 <= yy && yy < dim2) { xs[xs_size++] = img[yy * dim3 + xx]; } } } sort(xs, xs_size); out[id] = xs[xs_size / 2]; } } int median2d(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *img = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *out = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); int kernel_size = luaL_checkinteger(L, 3); assert(kernel_size % 2 == 1); assert(kernel_size <= 11); hipLaunchKernelGGL(( median2d), dim3((THCudaTensor_nElement(state, out) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, img), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, out, 2), THCudaTensor_size(state, out, 3), kernel_size / 2); checkCudaError(L); return 0; } __global__ void binary_dilate(float* img, float* out, int size, int dim2, int dim3, int r){ int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % dim3; int y = id / dim3; bool flag = 0; for (int xx = x - r; xx <= x + r; xx++) { for (int yy = y - r; yy<= y + r; yy++){ if (0 <= xx && xx < dim3 && 0 <= yy && yy < dim2){ if (img[yy*dim3 + xx] == 1){ flag = 1; break; } } } if (flag) break; } out[id] = (flag)?1:0; } } int binary_dilate(lua_State *L){ THCState *state = getCutorchState(L); THCudaTensor *img = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *out = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); int kernel_size = luaL_checkinteger(L, 3); assert(kernel_size % 2 == 1); hipLaunchKernelGGL(( binary_dilate), dim3((THCudaTensor_nElement(state, out) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, img), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, img, 2), THCudaTensor_size(state, img, 3), kernel_size / 2); checkCudaError(L); return 1; } __global__ void binary_erode_(float* img, float* out, int size, int dim2, int dim3, int r){ int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % dim3; int y = id / dim3; bool flag = 0; for (int xx = x - r; xx <= x + r; xx++) { for (int yy = y - r; yy<= y + r; yy++){ if (0 <= xx && xx < dim3 && 0 <= yy && yy < dim2){ if (img[yy*dim3 + xx] == 0){ flag = 1; break; } } } if (flag) break; } out[id] = (flag)?0:1; } } int binary_erode(lua_State *L){ THCState *state = getCutorchState(L); THCudaTensor *img = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *out = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); int kernel_size = luaL_checkinteger(L, 3); assert(kernel_size % 2 == 1); hipLaunchKernelGGL(( binary_erode_), dim3((THCudaTensor_nElement(state, out) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, img), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, img, 2), THCudaTensor_size(state, img, 3), kernel_size / 2); checkCudaError(L); return 1; } __global__ void update_params_(float a_lt, float a_st, float* R, float* T, float* v, float* D_LT, float* D_ST, float* rSgm_LT, float* rSgm_ST, float* Sgm_LT, float* Sgm_ST, float* US, float* curFG, float* lastFG, float* blink, float h, float w, float sizeHW, float* d_m){ int id = blockIdx.x * blockDim.x + 
threadIdx.x; if (id < sizeHW) { if (curFG[id]) { // Update D_m D_LT[id] = D_LT[id] * (1.0 - a_lt) + d_m[id]*a_lt; D_ST[id] = D_ST[id] * (1.0 - a_st) + d_m[id]*a_st; // Update mean raw segmentation rSgm_LT[id] = rSgm_LT[id] * (1.0 - a_lt) + a_lt; rSgm_ST[id] = rSgm_ST[id] * (1.0 - a_st) + a_st; }else{ // Update D_m D_LT[id] = D_LT[id] * (1.0 - a_lt) + d_m[id]*a_lt; D_ST[id] = D_ST[id] * (1.0 - a_st) + d_m[id]*a_st; // Update mean raw segmentation rSgm_LT[id] = rSgm_LT[id] * (1.0 - a_lt); rSgm_ST[id] = rSgm_ST[id] * (1.0 - a_st); } // Update learning rate T if (lastFG[id] || (min(D_LT[id], D_ST[id]) < UNSTABLE_REG_RATIO_MIN) && curFG[id] ) { T[id] += FB_T_INCR/(max(D_LT[id], D_ST[id]) * v[id]); }else{ T[id] -= FB_T_DECR * v[id] / (max(D_LT[id], D_ST[id])); } if (T[id] > FB_T_UPPER) T[id] = FB_T_UPPER; else if (T[id] < FB_T_LOWER) T[id] = FB_T_LOWER; // Update v if (max(D_LT[id], D_ST[id]) > UNSTABLE_REG_RATIO_MIN && blink[id]) v[id] += FB_V_INCR; else if (v[id] > FB_V_DECR){ v[id] -= lastFG[id]?FB_V_DECR/4:US[id]?FB_V_DECR/2:FB_V_DECR; if (v[id] < FB_V_DECR) v[id] = FB_V_DECR; } // Update R float min_D = 2*min(D_LT[id], D_ST[id]) + 1; min_D *=min_D; if (R[id] < min_D) R[id] += FB_R_VAR*(v[id] - FB_V_DECR); else{ R[id] -= FB_R_VAR/(v[id]); } if (R[id] < 1.0) R[id] = 1.0; } } int update_params(lua_State *L){ THCState *state = getCutorchState(L); float a_lt = luaL_checknumber(L, 1); float a_st = luaL_checknumber(L, 2); THCudaTensor *R = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *T = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); THCudaTensor *v = (THCudaTensor*)luaT_checkudata(L, 5, "torch.CudaTensor"); THCudaTensor *D_LT = (THCudaTensor*)luaT_checkudata(L, 6, "torch.CudaTensor"); THCudaTensor *D_ST = (THCudaTensor*)luaT_checkudata(L, 7, "torch.CudaTensor"); THCudaTensor *rSgm_LT = (THCudaTensor*)luaT_checkudata(L, 8, "torch.CudaTensor"); THCudaTensor *rSgm_ST = (THCudaTensor*)luaT_checkudata(L, 9, "torch.CudaTensor"); THCudaTensor *Sgm_LT = (THCudaTensor*)luaT_checkudata(L, 10, "torch.CudaTensor"); THCudaTensor *Sgm_ST = (THCudaTensor*)luaT_checkudata(L, 11, "torch.CudaTensor"); THCudaTensor *US = (THCudaTensor*)luaT_checkudata(L, 12, "torch.CudaTensor"); THCudaTensor *curFG = (THCudaTensor*)luaT_checkudata(L, 13, "torch.CudaTensor"); THCudaTensor *lastFG = (THCudaTensor*)luaT_checkudata(L, 14, "torch.CudaTensor"); THCudaTensor *blink = (THCudaTensor*)luaT_checkudata(L, 15, "torch.CudaTensor"); THCudaTensor *d_m = (THCudaTensor*)luaT_checkudata(L, 16, "torch.CudaTensor"); hipLaunchKernelGGL(( update_params_), dim3((THCudaTensor_nElement(state, R) - 1) / TB + 1), dim3(TB), 0, 0, a_lt, a_st, THCudaTensor_data(state, R), THCudaTensor_data(state, T), THCudaTensor_data(state, v), THCudaTensor_data(state, D_LT), THCudaTensor_data(state, D_ST), THCudaTensor_data(state, rSgm_LT), THCudaTensor_data(state, rSgm_ST), THCudaTensor_data(state, Sgm_LT), THCudaTensor_data(state, Sgm_ST), THCudaTensor_data(state, US), THCudaTensor_data(state, curFG), THCudaTensor_data(state, lastFG), THCudaTensor_data(state, blink), THCudaTensor_size(state, R, 0), //height THCudaTensor_size(state, R, 1), //weight THCudaTensor_nElement(state, R), //h*w THCudaTensor_data(state, d_m) ); checkCudaError(L); return 1; } __global__ void update_threshold_(float* R, float* R_c, float* R_d, float* US, int size01){ int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size01){ R_c[id] = ((R[id] * Rc_0) - (!US[id] * Rc_ofs))/2; R_d[id] = (R[id] * Rd_0) + (US[id] * Rd_ofs); } } int 
update_threshold(lua_State *L){ THCState *state = getCutorchState(L); THCudaTensor *R = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *R_c = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *R_d = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *US = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); hipLaunchKernelGGL(( update_threshold_), dim3((THCudaTensor_nElement(state, R_c) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, R), THCudaTensor_data(state, R_c), THCudaTensor_data(state, R_d), THCudaTensor_data(state, US), THCudaTensor_size(state, R_c, 0) * THCudaTensor_size(state, R_c, 1) ); checkCudaError(L); return 1; } __global__ void check_unstable_(float *US, float *R, float* rSgm_LT, float *rSgm_ST, float *Sgm_LT, float *Sgm_ST, int sizeHW){ int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < sizeHW){ US[id] = ( (R[id] > UNSTABLE_REG_RDIST_MIN) || ((rSgm_LT[id] - Sgm_LT[id]) > UNSTABLE_REG_RATIO_MIN) || ((rSgm_ST[id] - Sgm_ST[id]) > UNSTABLE_REG_RATIO_MIN) ) ? 1 : 0; } } int check_unstable(lua_State *L){ THCState *state = getCutorchState(L); THCudaTensor *US = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *R = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *rSgm_LT = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *rSgm_ST = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); THCudaTensor *Sgm_LT = (THCudaTensor*)luaT_checkudata(L, 5, "torch.CudaTensor"); THCudaTensor *Sgm_ST = (THCudaTensor*)luaT_checkudata(L, 6, "torch.CudaTensor"); hipLaunchKernelGGL(( check_unstable_), dim3((THCudaTensor_nElement(state, R) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, US), THCudaTensor_data(state, R), THCudaTensor_data(state, rSgm_LT), THCudaTensor_data(state, rSgm_ST), THCudaTensor_data(state, Sgm_LT), THCudaTensor_data(state, Sgm_ST), THCudaTensor_size(state, R, 0) * THCudaTensor_size(state, R, 1) ); checkCudaError(L); return 1; } __global__ void Normalize_get_norm_(float *input, float *norm, int size1, int size23, int size023){ int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size023) { int dim23 = id % size23; int dim0 = id / size23; float sum = 0.0; for (int dim1 = 0; dim1 < size1; dim1++) { float x = input[(dim0 * size1 + dim1) * size23 + dim23]; sum += x * x; } norm[dim0 * size23 + dim23] = sum + 1e-5; } } __global__ void Normalize_forward_(float *input, float *norm, float *output, int size23, int size123, int size0123){ int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size0123) { int dim23 = id % size23; int dim0 = (id / size123); output[id] = input[id] / sqrtf(norm[dim0 * size23 + dim23]); } } int Normalize_forward(lua_State *L){ THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *norm = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); hipLaunchKernelGGL(( Normalize_get_norm_), dim3((THCudaTensor_nElement(state, norm) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, input), THCudaTensor_data(state, norm), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), THCudaTensor_nElement(state, norm)); hipLaunchKernelGGL(( Normalize_forward_), dim3((THCudaTensor_nElement(state, output) - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, input), THCudaTensor_data(state, 
norm), THCudaTensor_data(state, output), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), THCudaTensor_size(state, input, 1) * THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), THCudaTensor_nElement(state, output)); checkCudaError(L); return 0; } __global__ void computeDescDist_(float *input_L, float *input_R, float *output_L, int size1_input, int size1, int size3, int size23){ int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size23) { int dim3 = id % size3; assert(size1_input <= 128); float L_cache[128]; for (int i = 0; i < size1_input; i++) { L_cache[i] = input_L[i * size23 + id]; } if (dim3 >= 0) { float sum = 0; for (int i = 0; i < size1_input; i++) { sum += L_cache[i] * input_R[i * size23 + id]; } output_L[id] = 1 - sum; } } } int computeDescDist(lua_State *L){ THCState *state = getCutorchState(L); THCudaTensor *input_L = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *input_R = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output_L = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); int size23 = THCudaTensor_size(state, output_L, 2) * THCudaTensor_size(state, output_L, 3); hipLaunchKernelGGL(( computeDescDist_), dim3((size23 - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, input_L), THCudaTensor_data(state, input_R), THCudaTensor_data(state, output_L), THCudaTensor_size(state, input_L, 1), THCudaTensor_size(state, output_L, 1), THCudaTensor_size(state, output_L, 3), size23); checkCudaError(L); return 0; } __global__ void update_seg_(float *Sgm_LT, float *Sgm_ST, float *FG, float a_LT, float a_ST, int sizeHW){ int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < sizeHW) { Sgm_LT[id] = Sgm_LT[id] * (1.0 - a_LT) + FG[id] * a_LT; Sgm_ST[id] = Sgm_ST[id] * (1.0 - a_ST) + FG[id] * a_ST; } } int update_seg(lua_State *L){ THCState *state = getCutorchState(L); THCudaTensor *Sgm_LT = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *Sgm_ST = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *FG = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); float a_LT = luaL_checknumber(L, 4); float a_ST = luaL_checknumber(L, 5); int sizeHW = THCudaTensor_size(state, FG, 2) * THCudaTensor_size(state, FG, 3); hipLaunchKernelGGL(( update_seg_), dim3((sizeHW - 1) / TB + 1), dim3(TB), 0, 0, THCudaTensor_data(state, Sgm_LT), THCudaTensor_data(state, Sgm_ST), THCudaTensor_data(state, FG), a_LT, a_ST, sizeHW); checkCudaError(L); return 0; } static const struct luaL_Reg funcs[] = { {"update_model", update_model}, {"median2d", median2d}, {"binary_dilate", binary_dilate}, {"update_params", update_params}, {"Normalize_forward", Normalize_forward}, {"computeDescDist", computeDescDist}, {"update_threshold", update_threshold}, {"check_unstable", check_unstable}, {"update_seg", update_seg}, {"binary_erode", binary_erode}, {NULL, NULL} }; extern "C" int luaopen_libcutils(lua_State *L) { luaL_openlib(L, "cutils", funcs, 0); return 1; }
d4b85c0bc370a08a2ac724570b6ccebd20a992f5.cu
extern "C" { #include "lua.h" #include "lualib.h" #include "lauxlib.h" } #include "luaT.h" #include "THC.h" #include <stdio.h> #include <assert.h> #include <math_constants.h> #include <stdint.h> #include <unistd.h> #include <curand.h> #include <curand_kernel.h> #include <limits.h> #define TB 128 // PARAMETERS #define FB_R_VAR 0.01 #define FB_V_INCR 1 #define FB_V_DECR 0.1 #define FB_T_INCR 0.5 #define FB_T_DECR 0.25 #define FB_T_LOWER 2 #define FB_T_UPPER 255 #define UNSTABLE_REG_RATIO_MIN 0.1 #define UNSTABLE_REG_RDIST_MIN 3.0 #define Rd_0 0.4 #define Rc_0 50 #define Rc_ofs Rc_0/5 #define Rd_ofs 0.6 #define max( a, b ) ( ((a) > (b)) ? (a) : (b) ) #define min( a, b ) ( ((a) < (b)) ? (a) : (b) ) THCState* getCutorchState(lua_State* L) { lua_getglobal(L, "cutorch"); lua_getfield(L, -1, "getState"); lua_call(L, 0, 1); THCState *state = (THCState*) lua_touserdata(L, -1); lua_pop(L, 2); return state; } void checkCudaError(lua_State *L) { cudaError_t status = cudaPeekAtLastError(); if (status != cudaSuccess) { luaL_error(L, cudaGetErrorString(status)); } } __global__ void update_model_(float *modelD, float *modelC, float *sampleD, float *sampleC, float *lR, int sizeHW, int sizeFHW, int sizeF, int sizeN, float *FG, float* r_smp, float* r_prb, int trigger){ int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < sizeHW){ int lr = (FG[id])?FB_T_LOWER:(trigger)?1:ceil(lR[id]); int prb = ceil(r_prb[id]); if ( prb % lr == 0 ){ int smp = ceil(r_smp[id]); int smp_idx_d = smp * sizeFHW; int smp_idx_c = smp * sizeHW; // copy color feature modelC[smp_idx_c + id] = sampleC[id]; // copy embedding for (int i = 0; i < sizeF; i++){ modelD[smp_idx_d + i*sizeHW + id] = sampleD[i*sizeHW + id]; } } } } int update_model(lua_State *L){ THCState *state = getCutorchState(L); THCudaTensor *modelD = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *modelC = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *sampleD = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *sampleC = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); THCudaTensor *learningRate = (THCudaTensor*)luaT_checkudata(L, 5, "torch.CudaTensor"); THCudaTensor *FG = (THCudaTensor*)luaT_checkudata(L, 6, "torch.CudaTensor"); THCudaTensor *r_smp = (THCudaTensor*)luaT_checkudata(L, 7, "torch.CudaTensor"); THCudaTensor *r_prb = (THCudaTensor*)luaT_checkudata(L, 8, "torch.CudaTensor"); int trigger = luaL_checkinteger(L, 9); int sizeHW = THCudaTensor_size(state, modelD, 3) * THCudaTensor_size(state, modelD, 4); int sizeFHW = THCudaTensor_size(state, modelD, 2) * THCudaTensor_size(state, modelD, 3) * THCudaTensor_size(state, modelD, 4); int sizeF = THCudaTensor_size(state, modelD, 2); int sizeN = THCudaTensor_size(state, modelD, 0); update_model_<<< (sizeHW - 1)/ TB + 1, TB >>>( THCudaTensor_data(state, modelD), THCudaTensor_data(state, modelC), THCudaTensor_data(state, sampleD), THCudaTensor_data(state, sampleC), THCudaTensor_data(state, learningRate), sizeHW, sizeFHW, sizeF, sizeN, THCudaTensor_data(state, FG), THCudaTensor_data(state, r_smp), THCudaTensor_data(state, r_prb), trigger ); checkCudaError(L); return 1; } __device__ void sort(float *x, int n){ for (int i = 0; i < n - 1; i++) { int min = i; for (int j = i + 1; j < n; j++) { if (x[j] < x[min]) { min = j; } } float tmp = x[min]; x[min] = x[i]; x[i] = tmp; } } __global__ void median2d(float *img, float *out, int size, int dim2, int dim3, int kernel_radius){ int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % 
dim3; int y = id / dim3; float xs[11 * 11]; int xs_size = 0; for (int xx = x - kernel_radius; xx <= x + kernel_radius; xx++) { for (int yy = y - kernel_radius; yy <= y + kernel_radius; yy++) { if (0 <= xx && xx < dim3 && 0 <= yy && yy < dim2) { xs[xs_size++] = img[yy * dim3 + xx]; } } } sort(xs, xs_size); out[id] = xs[xs_size / 2]; } } int median2d(lua_State *L) { THCState *state = getCutorchState(L); THCudaTensor *img = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *out = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); int kernel_size = luaL_checkinteger(L, 3); assert(kernel_size % 2 == 1); assert(kernel_size <= 11); median2d<<<(THCudaTensor_nElement(state, out) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, img), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, out, 2), THCudaTensor_size(state, out, 3), kernel_size / 2); checkCudaError(L); return 0; } __global__ void binary_dilate(float* img, float* out, int size, int dim2, int dim3, int r){ int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % dim3; int y = id / dim3; bool flag = 0; for (int xx = x - r; xx <= x + r; xx++) { for (int yy = y - r; yy<= y + r; yy++){ if (0 <= xx && xx < dim3 && 0 <= yy && yy < dim2){ if (img[yy*dim3 + xx] == 1){ flag = 1; break; } } } if (flag) break; } out[id] = (flag)?1:0; } } int binary_dilate(lua_State *L){ THCState *state = getCutorchState(L); THCudaTensor *img = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *out = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); int kernel_size = luaL_checkinteger(L, 3); assert(kernel_size % 2 == 1); binary_dilate<<<(THCudaTensor_nElement(state, out) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, img), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, img, 2), THCudaTensor_size(state, img, 3), kernel_size / 2); checkCudaError(L); return 1; } __global__ void binary_erode_(float* img, float* out, int size, int dim2, int dim3, int r){ int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size) { int x = id % dim3; int y = id / dim3; bool flag = 0; for (int xx = x - r; xx <= x + r; xx++) { for (int yy = y - r; yy<= y + r; yy++){ if (0 <= xx && xx < dim3 && 0 <= yy && yy < dim2){ if (img[yy*dim3 + xx] == 0){ flag = 1; break; } } } if (flag) break; } out[id] = (flag)?0:1; } } int binary_erode(lua_State *L){ THCState *state = getCutorchState(L); THCudaTensor *img = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *out = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); int kernel_size = luaL_checkinteger(L, 3); assert(kernel_size % 2 == 1); binary_erode_<<<(THCudaTensor_nElement(state, out) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, img), THCudaTensor_data(state, out), THCudaTensor_nElement(state, out), THCudaTensor_size(state, img, 2), THCudaTensor_size(state, img, 3), kernel_size / 2); checkCudaError(L); return 1; } __global__ void update_params_(float a_lt, float a_st, float* R, float* T, float* v, float* D_LT, float* D_ST, float* rSgm_LT, float* rSgm_ST, float* Sgm_LT, float* Sgm_ST, float* US, float* curFG, float* lastFG, float* blink, float h, float w, float sizeHW, float* d_m){ int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < sizeHW) { if (curFG[id]) { // Update D_m D_LT[id] = D_LT[id] * (1.0 - a_lt) + d_m[id]*a_lt; D_ST[id] = D_ST[id] * (1.0 - a_st) + d_m[id]*a_st; // Update mean raw segmentation rSgm_LT[id] = rSgm_LT[id] * (1.0 - a_lt) + a_lt; 
rSgm_ST[id] = rSgm_ST[id] * (1.0 - a_st) + a_st; }else{ // Update D_m D_LT[id] = D_LT[id] * (1.0 - a_lt) + d_m[id]*a_lt; D_ST[id] = D_ST[id] * (1.0 - a_st) + d_m[id]*a_st; // Update mean raw segmentation rSgm_LT[id] = rSgm_LT[id] * (1.0 - a_lt); rSgm_ST[id] = rSgm_ST[id] * (1.0 - a_st); } // Update learning rate T if (lastFG[id] || (min(D_LT[id], D_ST[id]) < UNSTABLE_REG_RATIO_MIN) && curFG[id] ) { T[id] += FB_T_INCR/(max(D_LT[id], D_ST[id]) * v[id]); }else{ T[id] -= FB_T_DECR * v[id] / (max(D_LT[id], D_ST[id])); } if (T[id] > FB_T_UPPER) T[id] = FB_T_UPPER; else if (T[id] < FB_T_LOWER) T[id] = FB_T_LOWER; // Update v if (max(D_LT[id], D_ST[id]) > UNSTABLE_REG_RATIO_MIN && blink[id]) v[id] += FB_V_INCR; else if (v[id] > FB_V_DECR){ v[id] -= lastFG[id]?FB_V_DECR/4:US[id]?FB_V_DECR/2:FB_V_DECR; if (v[id] < FB_V_DECR) v[id] = FB_V_DECR; } // Update R float min_D = 2*min(D_LT[id], D_ST[id]) + 1; min_D *=min_D; if (R[id] < min_D) R[id] += FB_R_VAR*(v[id] - FB_V_DECR); else{ R[id] -= FB_R_VAR/(v[id]); } if (R[id] < 1.0) R[id] = 1.0; } } int update_params(lua_State *L){ THCState *state = getCutorchState(L); float a_lt = luaL_checknumber(L, 1); float a_st = luaL_checknumber(L, 2); THCudaTensor *R = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *T = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); THCudaTensor *v = (THCudaTensor*)luaT_checkudata(L, 5, "torch.CudaTensor"); THCudaTensor *D_LT = (THCudaTensor*)luaT_checkudata(L, 6, "torch.CudaTensor"); THCudaTensor *D_ST = (THCudaTensor*)luaT_checkudata(L, 7, "torch.CudaTensor"); THCudaTensor *rSgm_LT = (THCudaTensor*)luaT_checkudata(L, 8, "torch.CudaTensor"); THCudaTensor *rSgm_ST = (THCudaTensor*)luaT_checkudata(L, 9, "torch.CudaTensor"); THCudaTensor *Sgm_LT = (THCudaTensor*)luaT_checkudata(L, 10, "torch.CudaTensor"); THCudaTensor *Sgm_ST = (THCudaTensor*)luaT_checkudata(L, 11, "torch.CudaTensor"); THCudaTensor *US = (THCudaTensor*)luaT_checkudata(L, 12, "torch.CudaTensor"); THCudaTensor *curFG = (THCudaTensor*)luaT_checkudata(L, 13, "torch.CudaTensor"); THCudaTensor *lastFG = (THCudaTensor*)luaT_checkudata(L, 14, "torch.CudaTensor"); THCudaTensor *blink = (THCudaTensor*)luaT_checkudata(L, 15, "torch.CudaTensor"); THCudaTensor *d_m = (THCudaTensor*)luaT_checkudata(L, 16, "torch.CudaTensor"); update_params_<<<(THCudaTensor_nElement(state, R) - 1) / TB + 1, TB>>>( a_lt, a_st, THCudaTensor_data(state, R), THCudaTensor_data(state, T), THCudaTensor_data(state, v), THCudaTensor_data(state, D_LT), THCudaTensor_data(state, D_ST), THCudaTensor_data(state, rSgm_LT), THCudaTensor_data(state, rSgm_ST), THCudaTensor_data(state, Sgm_LT), THCudaTensor_data(state, Sgm_ST), THCudaTensor_data(state, US), THCudaTensor_data(state, curFG), THCudaTensor_data(state, lastFG), THCudaTensor_data(state, blink), THCudaTensor_size(state, R, 0), //height THCudaTensor_size(state, R, 1), //weight THCudaTensor_nElement(state, R), //h*w THCudaTensor_data(state, d_m) ); checkCudaError(L); return 1; } __global__ void update_threshold_(float* R, float* R_c, float* R_d, float* US, int size01){ int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size01){ R_c[id] = ((R[id] * Rc_0) - (!US[id] * Rc_ofs))/2; R_d[id] = (R[id] * Rd_0) + (US[id] * Rd_ofs); } } int update_threshold(lua_State *L){ THCState *state = getCutorchState(L); THCudaTensor *R = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *R_c = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *R_d = (THCudaTensor*)luaT_checkudata(L, 3, 
"torch.CudaTensor"); THCudaTensor *US = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); update_threshold_<<<(THCudaTensor_nElement(state, R_c) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, R), THCudaTensor_data(state, R_c), THCudaTensor_data(state, R_d), THCudaTensor_data(state, US), THCudaTensor_size(state, R_c, 0) * THCudaTensor_size(state, R_c, 1) ); checkCudaError(L); return 1; } __global__ void check_unstable_(float *US, float *R, float* rSgm_LT, float *rSgm_ST, float *Sgm_LT, float *Sgm_ST, int sizeHW){ int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < sizeHW){ US[id] = ( (R[id] > UNSTABLE_REG_RDIST_MIN) || ((rSgm_LT[id] - Sgm_LT[id]) > UNSTABLE_REG_RATIO_MIN) || ((rSgm_ST[id] - Sgm_ST[id]) > UNSTABLE_REG_RATIO_MIN) ) ? 1 : 0; } } int check_unstable(lua_State *L){ THCState *state = getCutorchState(L); THCudaTensor *US = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *R = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *rSgm_LT = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); THCudaTensor *rSgm_ST = (THCudaTensor*)luaT_checkudata(L, 4, "torch.CudaTensor"); THCudaTensor *Sgm_LT = (THCudaTensor*)luaT_checkudata(L, 5, "torch.CudaTensor"); THCudaTensor *Sgm_ST = (THCudaTensor*)luaT_checkudata(L, 6, "torch.CudaTensor"); check_unstable_<<<(THCudaTensor_nElement(state, R) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, US), THCudaTensor_data(state, R), THCudaTensor_data(state, rSgm_LT), THCudaTensor_data(state, rSgm_ST), THCudaTensor_data(state, Sgm_LT), THCudaTensor_data(state, Sgm_ST), THCudaTensor_size(state, R, 0) * THCudaTensor_size(state, R, 1) ); checkCudaError(L); return 1; } __global__ void Normalize_get_norm_(float *input, float *norm, int size1, int size23, int size023){ int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size023) { int dim23 = id % size23; int dim0 = id / size23; float sum = 0.0; for (int dim1 = 0; dim1 < size1; dim1++) { float x = input[(dim0 * size1 + dim1) * size23 + dim23]; sum += x * x; } norm[dim0 * size23 + dim23] = sum + 1e-5; } } __global__ void Normalize_forward_(float *input, float *norm, float *output, int size23, int size123, int size0123){ int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size0123) { int dim23 = id % size23; int dim0 = (id / size123); output[id] = input[id] / sqrtf(norm[dim0 * size23 + dim23]); } } int Normalize_forward(lua_State *L){ THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *norm = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); Normalize_get_norm_<<<(THCudaTensor_nElement(state, norm) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, input), THCudaTensor_data(state, norm), THCudaTensor_size(state, input, 1), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), THCudaTensor_nElement(state, norm)); Normalize_forward_<<<(THCudaTensor_nElement(state, output) - 1) / TB + 1, TB>>>( THCudaTensor_data(state, input), THCudaTensor_data(state, norm), THCudaTensor_data(state, output), THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), THCudaTensor_size(state, input, 1) * THCudaTensor_size(state, input, 2) * THCudaTensor_size(state, input, 3), THCudaTensor_nElement(state, output)); checkCudaError(L); return 0; } __global__ void computeDescDist_(float *input_L, float *input_R, float *output_L, int size1_input, int size1, int size3, int 
size23){ int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < size23) { int dim3 = id % size3; assert(size1_input <= 128); float L_cache[128]; for (int i = 0; i < size1_input; i++) { L_cache[i] = input_L[i * size23 + id]; } if (dim3 >= 0) { float sum = 0; for (int i = 0; i < size1_input; i++) { sum += L_cache[i] * input_R[i * size23 + id]; } output_L[id] = 1 - sum; } } } int computeDescDist(lua_State *L){ THCState *state = getCutorchState(L); THCudaTensor *input_L = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *input_R = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output_L = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); int size23 = THCudaTensor_size(state, output_L, 2) * THCudaTensor_size(state, output_L, 3); computeDescDist_<<<(size23 - 1) / TB + 1, TB>>>( THCudaTensor_data(state, input_L), THCudaTensor_data(state, input_R), THCudaTensor_data(state, output_L), THCudaTensor_size(state, input_L, 1), THCudaTensor_size(state, output_L, 1), THCudaTensor_size(state, output_L, 3), size23); checkCudaError(L); return 0; } __global__ void update_seg_(float *Sgm_LT, float *Sgm_ST, float *FG, float a_LT, float a_ST, int sizeHW){ int id = blockIdx.x * blockDim.x + threadIdx.x; if (id < sizeHW) { Sgm_LT[id] = Sgm_LT[id] * (1.0 - a_LT) + FG[id] * a_LT; Sgm_ST[id] = Sgm_ST[id] * (1.0 - a_ST) + FG[id] * a_ST; } } int update_seg(lua_State *L){ THCState *state = getCutorchState(L); THCudaTensor *Sgm_LT = (THCudaTensor*)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *Sgm_ST = (THCudaTensor*)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *FG = (THCudaTensor*)luaT_checkudata(L, 3, "torch.CudaTensor"); float a_LT = luaL_checknumber(L, 4); float a_ST = luaL_checknumber(L, 5); int sizeHW = THCudaTensor_size(state, FG, 2) * THCudaTensor_size(state, FG, 3); update_seg_<<<(sizeHW - 1) / TB + 1, TB>>>( THCudaTensor_data(state, Sgm_LT), THCudaTensor_data(state, Sgm_ST), THCudaTensor_data(state, FG), a_LT, a_ST, sizeHW); checkCudaError(L); return 0; } static const struct luaL_Reg funcs[] = { {"update_model", update_model}, {"median2d", median2d}, {"binary_dilate", binary_dilate}, {"update_params", update_params}, {"Normalize_forward", Normalize_forward}, {"computeDescDist", computeDescDist}, {"update_threshold", update_threshold}, {"check_unstable", check_unstable}, {"update_seg", update_seg}, {"binary_erode", binary_erode}, {NULL, NULL} }; extern "C" int luaopen_libcutils(lua_State *L) { luaL_openlib(L, "cutils", funcs, 0); return 1; }
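The pair above differs from its CUDA original mostly in launch syntax: every triple-chevron kernel launch in the .cu file becomes a hipLaunchKernelGGL call with explicit grid, block, shared-memory and stream arguments in the hipified file, while the kernel bodies stay identical. A minimal, self-contained sketch of that mapping follows; the kernel name scale_ and the vector length are hypothetical stand-ins rather than code from the files above, and the (n - 1) / TB + 1 grid computation mirrors the idiom used there.

#include <hip/hip_runtime.h>

#define TB 128

__global__ void scale_(float *x, float a, int n) {
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  if (id < n) x[id] *= a;
}

int main() {
  int n = 1 << 20;
  float *d_x = nullptr;
  hipMalloc(&d_x, n * sizeof(float));
  hipMemset(d_x, 0, n * sizeof(float));
  // CUDA form:  scale_<<<(n - 1) / TB + 1, TB>>>(d_x, 2.0f, n);
  // HIP form produced by hipify:
  hipLaunchKernelGGL(scale_, dim3((n - 1) / TB + 1), dim3(TB), 0, 0, d_x, 2.0f, n);
  hipDeviceSynchronize();
  hipFree(d_x);
  return 0;
}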
abf749a1cfb1ae03f80f4bf5f6034a71a6aae627.hip
// !!! This is a file automatically generated by hipify!!! #include <blas_magma.h> #include <string.h> #include <vector> #include <algorithm> #include <util_quda.h> #include <quda_internal.h> #ifdef MAGMA_LIB #include <magma.h> #define _cV MagmaVec #define _cU MagmaUpper #define _cR MagmaRight #define _cL MagmaLeft #define _cC MagmaConjTrans #define _cN MagmaNoTrans #define _cNV MagmaNoVec #endif #ifdef MAGMA_LIB template<typename magmaFloat> void magma_gesv(void *sol, const int ldn, const int n, void *Mat, const int ldm) { hipPointerAttribute_t ptr_attr; if(hipPointerGetAttributes(&ptr_attr, Mat) == hipErrorInvalidValue) errorQuda("In magma_gesv, a pointer was not allocated in, mapped by or registered with current CUDA context.\n"); magma_int_t *ipiv; magma_int_t err, info; magma_imalloc_pinned(&ipiv, n); void *tmp; magma_malloc_pinned((void**)&tmp, ldm*n*sizeof(magmaFloat)); memcpy(tmp, Mat, ldm*n*sizeof(magmaFloat)); if ( ptr_attr.memoryType == hipMemoryTypeDevice ) { if(sizeof(magmaFloat) == sizeof(magmaFloatComplex)) { err = magma_cgesv_gpu(n, 1, static_cast<magmaFloatComplex* >(tmp), ldm, ipiv, static_cast<magmaFloatComplex* >(sol), ldn, &info); if(err != 0) errorQuda("\nError in SolveGPUProjMatrix (magma_cgesv_gpu), exit ...\n"); } else { err = magma_zgesv_gpu(n, 1, static_cast<magmaDoubleComplex*>(tmp), ldm, ipiv, static_cast<magmaDoubleComplex*>(sol), ldn, &info); if(err != 0) errorQuda("\nError in SolveGPUProjMatrix (magma_zgesv_gpu), exit ...\n"); } } else if ( ptr_attr.memoryType == hipMemoryTypeHost ) { if(sizeof(magmaFloat) == sizeof(magmaFloatComplex)) { err = magma_cgesv(n, 1, static_cast<magmaFloatComplex* >(tmp), ldm, ipiv, static_cast<magmaFloatComplex* >(sol), ldn, &info); if(err != 0) errorQuda("\nError in SolveGPUProjMatrix (magma_cgesv), exit ...\n"); } else { err = magma_zgesv(n, 1, static_cast<magmaDoubleComplex*>(tmp), ldm, ipiv, static_cast<magmaDoubleComplex*>(sol), ldn, &info); if(err != 0) errorQuda("\nError in SolveGPUProjMatrix (magma_zgesv), exit ...\n"); } } magma_free_pinned(ipiv); magma_free_pinned(tmp); return; } ///// template<typename magmaFloat> void magma_geev(void *Mat, const int m, const int ldm, void *vr, void *evalues, const int ldv) { hipPointerAttribute_t ptr_attr; if(hipPointerGetAttributes(&ptr_attr, Mat) == hipErrorInvalidValue) errorQuda("In magma_geev, a pointer was not allocated in, mapped by or registered with current CUDA context.\n"); magma_int_t err, info; void *work_ = nullptr, *rwork_ = nullptr; if ( ptr_attr.memoryType == hipMemoryTypeDevice ) { errorQuda("\nGPU version is not supported.\n"); } else if ( ptr_attr.memoryType == hipMemoryTypeHost ) { if(sizeof(magmaFloat) == sizeof(magmaFloatComplex)) { magmaFloatComplex qwork; magmaFloatComplex *work = static_cast<magmaFloatComplex*>(work_); float *rwork = static_cast<float*>(rwork_); err = magma_cgeev(_cNV, _cV, m, nullptr, ldm, nullptr, nullptr, ldv, nullptr, ldv, &qwork, -1, nullptr, &info); if( err != 0 ) errorQuda( "Error: CGEEVX, info %d\n",info); magma_int_t lwork = static_cast<magma_int_t>( MAGMA_C_REAL(qwork)); magma_smalloc_pinned(&rwork, 2*m); magma_cmalloc_pinned(&work, lwork); err = magma_cgeev(_cNV, _cV, m, static_cast<magmaFloatComplex*>(Mat), ldm, static_cast<magmaFloatComplex*>(evalues), nullptr, ldv, static_cast<magmaFloatComplex*>(vr), ldv, work, lwork, rwork, &info); if( err != 0 ) errorQuda( "Error: CGEEVX, info %d\n",info); } else { magmaDoubleComplex qwork; magmaDoubleComplex *work = static_cast<magmaDoubleComplex*>(work_); double *rwork = 
static_cast<double*>(rwork_); err = magma_zgeev(_cNV, _cV, m, nullptr, ldm, nullptr, nullptr, ldv, nullptr, ldv, &qwork, -1, nullptr, &info); if( err != 0 ) errorQuda( "Error: ZGEEVX, info %d\n",info); magma_int_t lwork = static_cast<magma_int_t>( MAGMA_Z_REAL(qwork)); magma_dmalloc_pinned(&rwork, 2*m); magma_zmalloc_pinned(&work, lwork); err = magma_zgeev(_cNV, _cV, m, static_cast<magmaDoubleComplex*>(Mat), ldm, static_cast<magmaDoubleComplex*>(evalues), nullptr, ldv, static_cast<magmaDoubleComplex*>(vr), ldv, work, lwork, rwork, &info); if( err != 0 ) errorQuda( "Error: ZGEEVX, info %d\n",info); } } if(rwork_) magma_free_pinned(rwork_); if(work_ ) magma_free_pinned(work_); return; } ///// template<typename magmaFloat> void magma_gels(void *Mat, void *c, int rows, int cols, int ldm) { hipPointerAttribute_t ptr_attr; if(hipPointerGetAttributes(&ptr_attr, Mat) == hipErrorInvalidValue) errorQuda("In magma_gels, a pointer was not allocated in, mapped by or registered with current CUDA context.\n"); magma_int_t err, info, lwork; void *hwork_ = nullptr; if ( ptr_attr.memoryType == hipMemoryTypeDevice ) { if(sizeof(magmaFloat) == sizeof(magmaFloatComplex)) { magma_int_t nb = magma_get_cgeqrf_nb( rows, cols ); lwork = ::max( cols*nb, 2*nb*nb ); magmaFloatComplex *hwork = static_cast<magmaFloatComplex*>(hwork_); magma_cmalloc_cpu( &hwork, lwork); err = magma_cgels_gpu( _cN, rows, cols, 1, static_cast<magmaFloatComplex*>(Mat), ldm, static_cast<magmaFloatComplex*>(c), ldm, hwork, lwork, &info ); if (err != 0) errorQuda("\nError in magma_cgels_gpu, %d, exit ...\n", info); } else { magma_int_t nb = magma_get_zgeqrf_nb( rows, cols ); lwork = ::max( cols*nb, 2*nb*nb ); magmaDoubleComplex *hwork = static_cast<magmaDoubleComplex*>(hwork_); magma_zmalloc_cpu( &hwork, lwork); err = magma_zgels_gpu( _cN, rows, cols, 1, static_cast<magmaDoubleComplex*>(Mat), ldm, static_cast<magmaDoubleComplex*>(c), ldm, hwork, lwork, &info ); if (err != 0) errorQuda("\nError in magma_zgels_gpu, %d, exit ...\n", info); } } else if ( ptr_attr.memoryType == hipMemoryTypeHost ) { if(sizeof(magmaFloat) == sizeof(magmaFloatComplex)) { magma_int_t nb = magma_get_cgeqrf_nb( rows, cols ); lwork = ::max( cols*nb, 2*nb*nb ); magmaFloatComplex *hwork = static_cast<magmaFloatComplex*>(hwork_); magma_cmalloc_cpu( &hwork, lwork); err = magma_cgels( _cN, rows, cols, 1, static_cast<magmaFloatComplex*>(Mat), ldm, static_cast<magmaFloatComplex*>(c), ldm, hwork, lwork, &info ); if (err != 0) errorQuda("\nError in magma_cgels_cpu, %d, exit ...\n", info); } else { magma_int_t nb = magma_get_zgeqrf_nb( rows, cols ); lwork = ::max( cols*nb, 2*nb*nb ); magmaDoubleComplex *hwork = static_cast<magmaDoubleComplex*>(hwork_); magma_zmalloc_cpu( &hwork, lwork); err = magma_zgels( _cN, rows, cols, 1, static_cast<magmaDoubleComplex*>(Mat), ldm, static_cast<magmaDoubleComplex*>(c), ldm, hwork, lwork, &info ); if (err != 0) errorQuda("\nError in magma_zgels_cpu, %d, exit ...\n", info); } } if(hwork_) magma_free_cpu(hwork_); return; } template<typename magmaFloat> void magma_heev(void *Mat, const int m, const int ldm, void *evalues) { hipPointerAttribute_t ptr_attr; if(hipPointerGetAttributes(&ptr_attr, Mat) == hipErrorInvalidValue) errorQuda("In magma_heev, a pointer was not allocated in, mapped by or registered with current CUDA context.\n"); magma_int_t err, info; void *work_ = nullptr, *rwork_ = nullptr; int *iwork = nullptr; int qiwork; if ( ptr_attr.memoryType == hipMemoryTypeDevice ) { errorQuda("\nGPU version is not supported.\n"); } else if ( 
ptr_attr.memoryType == hipMemoryTypeHost ) { if(sizeof(magmaFloat) == sizeof(magmaFloatComplex)) { magmaFloatComplex qwork; float qrwork; magmaFloatComplex *work = static_cast<magmaFloatComplex*>(work_); float *rwork = static_cast<float*>(rwork_); err = magma_cheevd(_cV, _cU, m, nullptr, ldm, nullptr, &qwork, -1, &qrwork, -1, &qiwork, -1, &info); if( err != 0 ) errorQuda( "Error: CHEEVD, info %d\n",info); magma_int_t lwork = static_cast<magma_int_t>( MAGMA_C_REAL(qwork)); magma_int_t lrwork = static_cast<magma_int_t>( qrwork ); magma_int_t liwork = static_cast<magma_int_t>( qiwork ); magma_cmalloc_pinned(&work, lwork); magma_smalloc_pinned(&rwork, lrwork); magma_imalloc_pinned(&iwork, liwork); err = magma_cheevd(_cV, _cU, m, static_cast<magmaFloatComplex*>(Mat), ldm, static_cast<float*>(evalues), work, lwork, rwork, lrwork, iwork, liwork, &info); if( err != 0 ) errorQuda( "Error: CHEEVD, info %d\n",info); } else { magmaDoubleComplex qwork; double qrwork; magmaDoubleComplex *work = static_cast<magmaDoubleComplex*>(work_); double *rwork = static_cast<double*>(rwork_); err = magma_zheevd(_cV, _cU, m, nullptr, ldm, nullptr, &qwork, -1, &qrwork, -1, &qiwork, -1, &info); if( err != 0 ) errorQuda( "Error: ZHEEVD, info %d\n",info); magma_int_t lwork = static_cast<magma_int_t>( MAGMA_Z_REAL(qwork)); magma_int_t lrwork = static_cast<magma_int_t>( qrwork ); magma_int_t liwork = static_cast<magma_int_t>( qiwork ); magma_zmalloc_pinned(&work, lwork); magma_dmalloc_pinned(&rwork, lrwork); magma_imalloc_pinned(&iwork, liwork); err = magma_zheevd(_cV, _cU, m, static_cast<magmaDoubleComplex*>(Mat), ldm, static_cast<double*>(evalues), work, lwork, rwork, lrwork, iwork, liwork, &info); if( err != 0 ) errorQuda( "Error: ZHEEVD, info %d\n",info); } } if(rwork_) magma_free_pinned(rwork_); if(work_ ) magma_free_pinned(work_); if(iwork ) magma_free_pinned(iwork); return; } #endif // MAGMA_LIB void magma_Xgesv(void* sol, const int ldn, const int n, void* Mat, const int ldm, const int prec) { #ifdef MAGMA_LIB if (prec == sizeof(std::complex< double >)) magma_gesv<magmaDoubleComplex>(sol, ldn, n, Mat, ldm); else if (prec == sizeof(std::complex< float >)) magma_gesv<magmaFloatComplex >(sol, ldn, n, Mat, ldm); else errorQuda("\nPrecision is not supported.\n"); #endif return; } void magma_Xgeev(void *Mat, const int m, const int ldm, void *vr, void *evalues, const int ldv, const int prec) { #ifdef MAGMA_LIB if (prec == sizeof(std::complex< double >)) magma_geev<magmaDoubleComplex>(Mat, m, ldm, vr, evalues, ldv); else if (prec == sizeof(std::complex< float >)) magma_geev<magmaFloatComplex >(Mat, m, ldm, vr, evalues, ldv); else errorQuda("\nPrecision is not supported.\n"); #endif return; } void magma_Xgels(void *Mat, void *c, int rows, int cols, int ldm, const int prec) { #ifdef MAGMA_LIB if (prec == sizeof(std::complex< double >)) magma_gels<magmaDoubleComplex>(Mat, c, rows, cols, ldm); else if (prec == sizeof(std::complex< float >)) magma_gels<magmaFloatComplex >(Mat, c, rows, cols, ldm); else errorQuda("\nPrecision is not supported.\n"); #endif return; } void magma_Xheev(void *Mat, const int m, const int ldm, void *evalues, const int prec) { #ifdef MAGMA_LIB if (prec == sizeof(std::complex< double >)) magma_heev<magmaDoubleComplex>(Mat, m, ldm, evalues); else if (prec == sizeof(std::complex< float >)) magma_heev<magmaFloatComplex >(Mat, m, ldm, evalues); else errorQuda("\nPrecision is not supported.\n"); #endif return; } void OpenMagma(){ #ifdef MAGMA_LIB magma_int_t err = magma_init(); if(err != MAGMA_SUCCESS) 
errorQuda("\nError: cannot initialize MAGMA library\n"); int major, minor, micro; magma_version( &major, &minor, &micro); printfQuda("\nMAGMA library version: %d.%d\n\n", major, minor); #else errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n"); #endif return; } void CloseMagma(){ #ifdef MAGMA_LIB if(magma_finalize() != MAGMA_SUCCESS) errorQuda("\nError: cannot close MAGMA library\n"); #else errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n"); #endif return; } #ifdef MAGMA_LIB #undef _cV #undef _cU #undef _cR #undef _cL #undef _cC #undef _cN #undef _cNV #endif
abf749a1cfb1ae03f80f4bf5f6034a71a6aae627.cu
#include <blas_magma.h> #include <string.h> #include <vector> #include <algorithm> #include <util_quda.h> #include <quda_internal.h> #ifdef MAGMA_LIB #include <magma.h> #define _cV MagmaVec #define _cU MagmaUpper #define _cR MagmaRight #define _cL MagmaLeft #define _cC MagmaConjTrans #define _cN MagmaNoTrans #define _cNV MagmaNoVec #endif #ifdef MAGMA_LIB template<typename magmaFloat> void magma_gesv(void *sol, const int ldn, const int n, void *Mat, const int ldm) { cudaPointerAttributes ptr_attr; if(cudaPointerGetAttributes(&ptr_attr, Mat) == cudaErrorInvalidValue) errorQuda("In magma_gesv, a pointer was not allocated in, mapped by or registered with current CUDA context.\n"); magma_int_t *ipiv; magma_int_t err, info; magma_imalloc_pinned(&ipiv, n); void *tmp; magma_malloc_pinned((void**)&tmp, ldm*n*sizeof(magmaFloat)); memcpy(tmp, Mat, ldm*n*sizeof(magmaFloat)); if ( ptr_attr.memoryType == cudaMemoryTypeDevice ) { if(sizeof(magmaFloat) == sizeof(magmaFloatComplex)) { err = magma_cgesv_gpu(n, 1, static_cast<magmaFloatComplex* >(tmp), ldm, ipiv, static_cast<magmaFloatComplex* >(sol), ldn, &info); if(err != 0) errorQuda("\nError in SolveGPUProjMatrix (magma_cgesv_gpu), exit ...\n"); } else { err = magma_zgesv_gpu(n, 1, static_cast<magmaDoubleComplex*>(tmp), ldm, ipiv, static_cast<magmaDoubleComplex*>(sol), ldn, &info); if(err != 0) errorQuda("\nError in SolveGPUProjMatrix (magma_zgesv_gpu), exit ...\n"); } } else if ( ptr_attr.memoryType == cudaMemoryTypeHost ) { if(sizeof(magmaFloat) == sizeof(magmaFloatComplex)) { err = magma_cgesv(n, 1, static_cast<magmaFloatComplex* >(tmp), ldm, ipiv, static_cast<magmaFloatComplex* >(sol), ldn, &info); if(err != 0) errorQuda("\nError in SolveGPUProjMatrix (magma_cgesv), exit ...\n"); } else { err = magma_zgesv(n, 1, static_cast<magmaDoubleComplex*>(tmp), ldm, ipiv, static_cast<magmaDoubleComplex*>(sol), ldn, &info); if(err != 0) errorQuda("\nError in SolveGPUProjMatrix (magma_zgesv), exit ...\n"); } } magma_free_pinned(ipiv); magma_free_pinned(tmp); return; } ///// template<typename magmaFloat> void magma_geev(void *Mat, const int m, const int ldm, void *vr, void *evalues, const int ldv) { cudaPointerAttributes ptr_attr; if(cudaPointerGetAttributes(&ptr_attr, Mat) == cudaErrorInvalidValue) errorQuda("In magma_geev, a pointer was not allocated in, mapped by or registered with current CUDA context.\n"); magma_int_t err, info; void *work_ = nullptr, *rwork_ = nullptr; if ( ptr_attr.memoryType == cudaMemoryTypeDevice ) { errorQuda("\nGPU version is not supported.\n"); } else if ( ptr_attr.memoryType == cudaMemoryTypeHost ) { if(sizeof(magmaFloat) == sizeof(magmaFloatComplex)) { magmaFloatComplex qwork; magmaFloatComplex *work = static_cast<magmaFloatComplex*>(work_); float *rwork = static_cast<float*>(rwork_); err = magma_cgeev(_cNV, _cV, m, nullptr, ldm, nullptr, nullptr, ldv, nullptr, ldv, &qwork, -1, nullptr, &info); if( err != 0 ) errorQuda( "Error: CGEEVX, info %d\n",info); magma_int_t lwork = static_cast<magma_int_t>( MAGMA_C_REAL(qwork)); magma_smalloc_pinned(&rwork, 2*m); magma_cmalloc_pinned(&work, lwork); err = magma_cgeev(_cNV, _cV, m, static_cast<magmaFloatComplex*>(Mat), ldm, static_cast<magmaFloatComplex*>(evalues), nullptr, ldv, static_cast<magmaFloatComplex*>(vr), ldv, work, lwork, rwork, &info); if( err != 0 ) errorQuda( "Error: CGEEVX, info %d\n",info); } else { magmaDoubleComplex qwork; magmaDoubleComplex *work = static_cast<magmaDoubleComplex*>(work_); double *rwork = static_cast<double*>(rwork_); err = magma_zgeev(_cNV, _cV, m, nullptr, 
ldm, nullptr, nullptr, ldv, nullptr, ldv, &qwork, -1, nullptr, &info); if( err != 0 ) errorQuda( "Error: ZGEEVX, info %d\n",info); magma_int_t lwork = static_cast<magma_int_t>( MAGMA_Z_REAL(qwork)); magma_dmalloc_pinned(&rwork, 2*m); magma_zmalloc_pinned(&work, lwork); err = magma_zgeev(_cNV, _cV, m, static_cast<magmaDoubleComplex*>(Mat), ldm, static_cast<magmaDoubleComplex*>(evalues), nullptr, ldv, static_cast<magmaDoubleComplex*>(vr), ldv, work, lwork, rwork, &info); if( err != 0 ) errorQuda( "Error: ZGEEVX, info %d\n",info); } } if(rwork_) magma_free_pinned(rwork_); if(work_ ) magma_free_pinned(work_); return; } ///// template<typename magmaFloat> void magma_gels(void *Mat, void *c, int rows, int cols, int ldm) { cudaPointerAttributes ptr_attr; if(cudaPointerGetAttributes(&ptr_attr, Mat) == cudaErrorInvalidValue) errorQuda("In magma_gels, a pointer was not allocated in, mapped by or registered with current CUDA context.\n"); magma_int_t err, info, lwork; void *hwork_ = nullptr; if ( ptr_attr.memoryType == cudaMemoryTypeDevice ) { if(sizeof(magmaFloat) == sizeof(magmaFloatComplex)) { magma_int_t nb = magma_get_cgeqrf_nb( rows, cols ); lwork = std::max( cols*nb, 2*nb*nb ); magmaFloatComplex *hwork = static_cast<magmaFloatComplex*>(hwork_); magma_cmalloc_cpu( &hwork, lwork); err = magma_cgels_gpu( _cN, rows, cols, 1, static_cast<magmaFloatComplex*>(Mat), ldm, static_cast<magmaFloatComplex*>(c), ldm, hwork, lwork, &info ); if (err != 0) errorQuda("\nError in magma_cgels_gpu, %d, exit ...\n", info); } else { magma_int_t nb = magma_get_zgeqrf_nb( rows, cols ); lwork = std::max( cols*nb, 2*nb*nb ); magmaDoubleComplex *hwork = static_cast<magmaDoubleComplex*>(hwork_); magma_zmalloc_cpu( &hwork, lwork); err = magma_zgels_gpu( _cN, rows, cols, 1, static_cast<magmaDoubleComplex*>(Mat), ldm, static_cast<magmaDoubleComplex*>(c), ldm, hwork, lwork, &info ); if (err != 0) errorQuda("\nError in magma_zgels_gpu, %d, exit ...\n", info); } } else if ( ptr_attr.memoryType == cudaMemoryTypeHost ) { if(sizeof(magmaFloat) == sizeof(magmaFloatComplex)) { magma_int_t nb = magma_get_cgeqrf_nb( rows, cols ); lwork = std::max( cols*nb, 2*nb*nb ); magmaFloatComplex *hwork = static_cast<magmaFloatComplex*>(hwork_); magma_cmalloc_cpu( &hwork, lwork); err = magma_cgels( _cN, rows, cols, 1, static_cast<magmaFloatComplex*>(Mat), ldm, static_cast<magmaFloatComplex*>(c), ldm, hwork, lwork, &info ); if (err != 0) errorQuda("\nError in magma_cgels_cpu, %d, exit ...\n", info); } else { magma_int_t nb = magma_get_zgeqrf_nb( rows, cols ); lwork = std::max( cols*nb, 2*nb*nb ); magmaDoubleComplex *hwork = static_cast<magmaDoubleComplex*>(hwork_); magma_zmalloc_cpu( &hwork, lwork); err = magma_zgels( _cN, rows, cols, 1, static_cast<magmaDoubleComplex*>(Mat), ldm, static_cast<magmaDoubleComplex*>(c), ldm, hwork, lwork, &info ); if (err != 0) errorQuda("\nError in magma_zgels_cpu, %d, exit ...\n", info); } } if(hwork_) magma_free_cpu(hwork_); return; } template<typename magmaFloat> void magma_heev(void *Mat, const int m, const int ldm, void *evalues) { cudaPointerAttributes ptr_attr; if(cudaPointerGetAttributes(&ptr_attr, Mat) == cudaErrorInvalidValue) errorQuda("In magma_heev, a pointer was not allocated in, mapped by or registered with current CUDA context.\n"); magma_int_t err, info; void *work_ = nullptr, *rwork_ = nullptr; int *iwork = nullptr; int qiwork; if ( ptr_attr.memoryType == cudaMemoryTypeDevice ) { errorQuda("\nGPU version is not supported.\n"); } else if ( ptr_attr.memoryType == cudaMemoryTypeHost ) { 
if(sizeof(magmaFloat) == sizeof(magmaFloatComplex)) { magmaFloatComplex qwork; float qrwork; magmaFloatComplex *work = static_cast<magmaFloatComplex*>(work_); float *rwork = static_cast<float*>(rwork_); err = magma_cheevd(_cV, _cU, m, nullptr, ldm, nullptr, &qwork, -1, &qrwork, -1, &qiwork, -1, &info); if( err != 0 ) errorQuda( "Error: CHEEVD, info %d\n",info); magma_int_t lwork = static_cast<magma_int_t>( MAGMA_C_REAL(qwork)); magma_int_t lrwork = static_cast<magma_int_t>( qrwork ); magma_int_t liwork = static_cast<magma_int_t>( qiwork ); magma_cmalloc_pinned(&work, lwork); magma_smalloc_pinned(&rwork, lrwork); magma_imalloc_pinned(&iwork, liwork); err = magma_cheevd(_cV, _cU, m, static_cast<magmaFloatComplex*>(Mat), ldm, static_cast<float*>(evalues), work, lwork, rwork, lrwork, iwork, liwork, &info); if( err != 0 ) errorQuda( "Error: CHEEVD, info %d\n",info); } else { magmaDoubleComplex qwork; double qrwork; magmaDoubleComplex *work = static_cast<magmaDoubleComplex*>(work_); double *rwork = static_cast<double*>(rwork_); err = magma_zheevd(_cV, _cU, m, nullptr, ldm, nullptr, &qwork, -1, &qrwork, -1, &qiwork, -1, &info); if( err != 0 ) errorQuda( "Error: ZHEEVD, info %d\n",info); magma_int_t lwork = static_cast<magma_int_t>( MAGMA_Z_REAL(qwork)); magma_int_t lrwork = static_cast<magma_int_t>( qrwork ); magma_int_t liwork = static_cast<magma_int_t>( qiwork ); magma_zmalloc_pinned(&work, lwork); magma_dmalloc_pinned(&rwork, lrwork); magma_imalloc_pinned(&iwork, liwork); err = magma_zheevd(_cV, _cU, m, static_cast<magmaDoubleComplex*>(Mat), ldm, static_cast<double*>(evalues), work, lwork, rwork, lrwork, iwork, liwork, &info); if( err != 0 ) errorQuda( "Error: ZHEEVD, info %d\n",info); } } if(rwork_) magma_free_pinned(rwork_); if(work_ ) magma_free_pinned(work_); if(iwork ) magma_free_pinned(iwork); return; } #endif // MAGMA_LIB void magma_Xgesv(void* sol, const int ldn, const int n, void* Mat, const int ldm, const int prec) { #ifdef MAGMA_LIB if (prec == sizeof(std::complex< double >)) magma_gesv<magmaDoubleComplex>(sol, ldn, n, Mat, ldm); else if (prec == sizeof(std::complex< float >)) magma_gesv<magmaFloatComplex >(sol, ldn, n, Mat, ldm); else errorQuda("\nPrecision is not supported.\n"); #endif return; } void magma_Xgeev(void *Mat, const int m, const int ldm, void *vr, void *evalues, const int ldv, const int prec) { #ifdef MAGMA_LIB if (prec == sizeof(std::complex< double >)) magma_geev<magmaDoubleComplex>(Mat, m, ldm, vr, evalues, ldv); else if (prec == sizeof(std::complex< float >)) magma_geev<magmaFloatComplex >(Mat, m, ldm, vr, evalues, ldv); else errorQuda("\nPrecision is not supported.\n"); #endif return; } void magma_Xgels(void *Mat, void *c, int rows, int cols, int ldm, const int prec) { #ifdef MAGMA_LIB if (prec == sizeof(std::complex< double >)) magma_gels<magmaDoubleComplex>(Mat, c, rows, cols, ldm); else if (prec == sizeof(std::complex< float >)) magma_gels<magmaFloatComplex >(Mat, c, rows, cols, ldm); else errorQuda("\nPrecision is not supported.\n"); #endif return; } void magma_Xheev(void *Mat, const int m, const int ldm, void *evalues, const int prec) { #ifdef MAGMA_LIB if (prec == sizeof(std::complex< double >)) magma_heev<magmaDoubleComplex>(Mat, m, ldm, evalues); else if (prec == sizeof(std::complex< float >)) magma_heev<magmaFloatComplex >(Mat, m, ldm, evalues); else errorQuda("\nPrecision is not supported.\n"); #endif return; } void OpenMagma(){ #ifdef MAGMA_LIB magma_int_t err = magma_init(); if(err != MAGMA_SUCCESS) errorQuda("\nError: cannot initialize MAGMA 
library\n"); int major, minor, micro; magma_version( &major, &minor, &micro); printfQuda("\nMAGMA library version: %d.%d\n\n", major, minor); #else errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n"); #endif return; } void CloseMagma(){ #ifdef MAGMA_LIB if(magma_finalize() != MAGMA_SUCCESS) errorQuda("\nError: cannot close MAGMA library\n"); #else errorQuda("\nError: MAGMA library was not compiled, check your compilation options...\n"); #endif return; } #ifdef MAGMA_LIB #undef _cV #undef _cU #undef _cR #undef _cL #undef _cC #undef _cN #undef _cNV #endif
41ddb8f619f3e996a7b74875a9dbdaa85bf905db.hip
// !!! This is a file automatically generated by hipify!!! #include <cassert> #include <cfloat> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <iostream> #include <stdio.h> #include <list> #include <map> #include <math.h> #include <stdlib.h> #include <vector> #include <set> #include <algorithm> #include <iterator> #include <fstream> #include "../include/common.h" #define K 1 using namespace std; #define spmv_NBLOCKS 12*8*21 //*22 #define spmv_BLOCK_SIZE 256 #define WARP_SIZE 32 __constant__ int yy[spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE)+1]; static const double MAX_RELATIVE_ERROR = .02; static const int PAD_FACTOR = 16; void fill(float *A, const int n, const float maxi) { for (int j = 0; j < n; j++) { A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f))); } } void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim) { int nnzAssigned = 0; // Figure out the probability that a nonzero should be assigned to a given // spot in the matrix double prob = (double)n / ((double)dim * (double)dim); // Seed random number generator srand48(2013); // Randomly decide whether entry i,j gets a value, but ensure n values // are assigned bool fillRemaining = false; for (int i = 0; i < dim; i++) { rowDelimiters[i] = nnzAssigned; for (int j = 0; j < dim; j++) { int numEntriesLeft = (dim * dim) - ((i * dim) + j); int needToAssign = n - nnzAssigned; if (numEntriesLeft <= needToAssign) { fillRemaining = true; } if ((nnzAssigned < n && drand48() <= prob) || fillRemaining) { // Assign (i,j) a value cols[nnzAssigned] = j; nnzAssigned++; } } } // Observe the convention to put the number of non zeroes at the end of the // row delimiters array rowDelimiters[dim] = n; assert(nnzAssigned == n); } void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters, float **newA_ptr, int **newcols_ptr, int *newIndices, int *newSize) { // determine total padded size and new row indices int paddedSize = 0; int rowSize; for (int i=0; i<dim; i++) { newIndices[i] = paddedSize; rowSize = rowDelimiters[i+1] - rowDelimiters[i]; if (rowSize % PAD_FACTOR != 0) { rowSize += PAD_FACTOR - rowSize % PAD_FACTOR; } paddedSize += rowSize; } *newSize = paddedSize; newIndices[dim] = paddedSize; hipHostMalloc(newA_ptr, paddedSize * sizeof(float)); hipHostMalloc(newcols_ptr, paddedSize * sizeof(int)); float *newA = *newA_ptr; int *newcols = *newcols_ptr; memset(newA, 0, paddedSize * sizeof(float)); // fill newA and newcols for (int i=0; i<dim; i++) { for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1]; j++, k++) { newA[k] = A[j]; newcols[k] = cols[j]; } } } void spmvCpu(const float *val, const int *cols, const int *rowDelimiters, const float *vec, int dim, float *out) { for (int i=0; i<dim; i++) { float t = 0; for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++) { int col = cols[j]; t += val[j] * vec[col]; } out[i] = t; } } void spmv_verifyResults(const float *cpuResults, const float *gpuResults, const int size) { bool passed = true; for (int i = 0; i < size; i++) { if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i] > MAX_RELATIVE_ERROR) { cout << "Failed! 
Mismatch at i: "<< i << " ref: " << cpuResults[i] << " dev: " << gpuResults[i] << endl; return; } } cout << "spmv passed" << endl; } __global__ void spmv_kernel(const float* val, const int * cols, // const int * rowDelimiters, const float * vec, const int dim, float * out) { // Thread ID in block int t = threadIdx.x; // Thread ID within warp int id = t & (WARP_SIZE-1); int warpsPerBlock = blockDim.x / WARP_SIZE; // One row per warp int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE); __shared__ volatile float partialSums[spmv_BLOCK_SIZE]; if (myRow < dim) { int warpStart = yy[myRow];//rowDelimiters[myRow]; int warpEnd = yy[myRow+1];//rowDelimiters[myRow+1]; float mySum = 0; for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE) { int col = cols[j]; mySum += val[j] * vec[col]; } partialSums[t] = mySum; // Reduce partial sums if (id < 16) partialSums[t] += partialSums[t+16]; if (id < 8) partialSums[t] += partialSums[t+ 8]; if (id < 4) partialSums[t] += partialSums[t+ 4]; if (id < 2) partialSums[t] += partialSums[t+ 2]; if (id < 1) partialSums[t] += partialSums[t+ 1]; // Write result if (id == 0) { out[myRow] = partialSums[t]; } } } int main(int argc, char **argv) { hipSetDevice(2); srand(2013); float *h_spmv_val, *h_spmv_valPad; int *h_spmv_cols, *h_spmv_colsPad; int *h_rowDelimiters, *h_rowDelimitersPad; float *h_spmv_vec, *h_spmv_out, *spmv_refOut; int spmv_nItems, nItemsPadded, spmv_numRows; spmv_numRows = spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE); spmv_nItems = spmv_numRows * spmv_numRows / 50; // 1% of entries will be non-zero float maxval = 200.0; hipHostMalloc(&h_spmv_val, spmv_nItems * sizeof(float)); hipHostMalloc(&h_spmv_cols, spmv_nItems * sizeof(int)); hipHostMalloc(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_val, spmv_nItems, maxval); initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows); // Set up remaining host data int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR); hipHostMalloc(&h_spmv_vec, spmv_numRows * sizeof(float)) ; spmv_refOut = new float[spmv_numRows]; hipHostMalloc(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_vec, spmv_numRows, maxval); hipHostMalloc(&h_spmv_out, paddedSize * sizeof(float)); convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad, &h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded); // Compute reference solution spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut); float *d_spmv_val, *d_spmv_vec, *d_spmv_out; int *d_spmv_cols, *d_rowDelimiters; // Allocate device memory hipMalloc(&d_spmv_val, spmv_nItems * sizeof(float)); hipMalloc(&d_spmv_cols, spmv_nItems * sizeof(int)); hipMalloc(&d_spmv_vec, spmv_numRows * sizeof(float)); hipMalloc(&d_spmv_out, spmv_numRows * sizeof(float)); hipMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int)); // Transfer data to device hipMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), hipMemcpyHostToDevice); // hipMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), hipMemcpyHostToDevice); hipMemcpyToSymbol(yy,h_rowDelimiters,(spmv_numRows+1)*sizeof(int)); hipEvent_t kernel_start, kernel_stop; hipEventCreate(&kernel_start); hipEventCreate(&kernel_stop); float kernel_time = 0.0f; hipEventRecord(kernel_start, 0); // Setup thread configuration int spmv_grid 
= (int) ceil(spmv_numRows / (float)(spmv_BLOCK_SIZE / WARP_SIZE)); hipLaunchKernelGGL(( spmv_kernel) , dim3(spmv_grid), dim3(spmv_BLOCK_SIZE), 0, 0, d_spmv_val, d_spmv_cols, //d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out); hipDeviceSynchronize(); hipEventRecord(kernel_stop, 0); hipEventSynchronize(kernel_stop); // get elapsed time kernel_time = 0.0f; hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time *= 1.e-3; // Convert to seconds cout << "kernel exe time: " << kernel_time << endl; hipMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), hipMemcpyDeviceToHost); spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows); return 0; }
41ddb8f619f3e996a7b74875a9dbdaa85bf905db.cu
#include <cassert> #include <cfloat> #include <cuda_runtime_api.h> #include <cuda.h> #include <iostream> #include <stdio.h> #include <list> #include <map> #include <math.h> #include <stdlib.h> #include <vector> #include <set> #include <algorithm> #include <iterator> #include <fstream> #include "../include/common.h" #define K 1 using namespace std; #define spmv_NBLOCKS 12*8*21 //*22 #define spmv_BLOCK_SIZE 256 #define WARP_SIZE 32 __constant__ int yy[spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE)+1]; static const double MAX_RELATIVE_ERROR = .02; static const int PAD_FACTOR = 16; void fill(float *A, const int n, const float maxi) { for (int j = 0; j < n; j++) { A[j] = ((float) maxi * (rand() / (RAND_MAX + 1.0f))); } } void initRandomMatrix(int *cols, int *rowDelimiters, const int n, const int dim) { int nnzAssigned = 0; // Figure out the probability that a nonzero should be assigned to a given // spot in the matrix double prob = (double)n / ((double)dim * (double)dim); // Seed random number generator srand48(2013); // Randomly decide whether entry i,j gets a value, but ensure n values // are assigned bool fillRemaining = false; for (int i = 0; i < dim; i++) { rowDelimiters[i] = nnzAssigned; for (int j = 0; j < dim; j++) { int numEntriesLeft = (dim * dim) - ((i * dim) + j); int needToAssign = n - nnzAssigned; if (numEntriesLeft <= needToAssign) { fillRemaining = true; } if ((nnzAssigned < n && drand48() <= prob) || fillRemaining) { // Assign (i,j) a value cols[nnzAssigned] = j; nnzAssigned++; } } } // Observe the convention to put the number of non zeroes at the end of the // row delimiters array rowDelimiters[dim] = n; assert(nnzAssigned == n); } void convertToPadded(float *A, int *cols, int dim, int *rowDelimiters, float **newA_ptr, int **newcols_ptr, int *newIndices, int *newSize) { // determine total padded size and new row indices int paddedSize = 0; int rowSize; for (int i=0; i<dim; i++) { newIndices[i] = paddedSize; rowSize = rowDelimiters[i+1] - rowDelimiters[i]; if (rowSize % PAD_FACTOR != 0) { rowSize += PAD_FACTOR - rowSize % PAD_FACTOR; } paddedSize += rowSize; } *newSize = paddedSize; newIndices[dim] = paddedSize; cudaMallocHost(newA_ptr, paddedSize * sizeof(float)); cudaMallocHost(newcols_ptr, paddedSize * sizeof(int)); float *newA = *newA_ptr; int *newcols = *newcols_ptr; memset(newA, 0, paddedSize * sizeof(float)); // fill newA and newcols for (int i=0; i<dim; i++) { for (int j=rowDelimiters[i], k=newIndices[i]; j<rowDelimiters[i+1]; j++, k++) { newA[k] = A[j]; newcols[k] = cols[j]; } } } void spmvCpu(const float *val, const int *cols, const int *rowDelimiters, const float *vec, int dim, float *out) { for (int i=0; i<dim; i++) { float t = 0; for (int j = rowDelimiters[i]; j < rowDelimiters[i + 1]; j++) { int col = cols[j]; t += val[j] * vec[col]; } out[i] = t; } } void spmv_verifyResults(const float *cpuResults, const float *gpuResults, const int size) { bool passed = true; for (int i = 0; i < size; i++) { if (fabs(cpuResults[i] - gpuResults[i]) / cpuResults[i] > MAX_RELATIVE_ERROR) { cout << "Failed! 
Mismatch at i: "<< i << " ref: " << cpuResults[i] << " dev: " << gpuResults[i] << endl; return; } } cout << "spmv passed" << endl; } __global__ void spmv_kernel(const float* val, const int * cols, // const int * rowDelimiters, const float * vec, const int dim, float * out) { // Thread ID in block int t = threadIdx.x; // Thread ID within warp int id = t & (WARP_SIZE-1); int warpsPerBlock = blockDim.x / WARP_SIZE; // One row per warp int myRow = (blockIdx.x * warpsPerBlock) + (t / WARP_SIZE); __shared__ volatile float partialSums[spmv_BLOCK_SIZE]; if (myRow < dim) { int warpStart = yy[myRow];//rowDelimiters[myRow]; int warpEnd = yy[myRow+1];//rowDelimiters[myRow+1]; float mySum = 0; for (int j = warpStart + id; j < warpEnd; j += WARP_SIZE) { int col = cols[j]; mySum += val[j] * vec[col]; } partialSums[t] = mySum; // Reduce partial sums if (id < 16) partialSums[t] += partialSums[t+16]; if (id < 8) partialSums[t] += partialSums[t+ 8]; if (id < 4) partialSums[t] += partialSums[t+ 4]; if (id < 2) partialSums[t] += partialSums[t+ 2]; if (id < 1) partialSums[t] += partialSums[t+ 1]; // Write result if (id == 0) { out[myRow] = partialSums[t]; } } } int main(int argc, char **argv) { cudaSetDevice(2); srand(2013); float *h_spmv_val, *h_spmv_valPad; int *h_spmv_cols, *h_spmv_colsPad; int *h_rowDelimiters, *h_rowDelimitersPad; float *h_spmv_vec, *h_spmv_out, *spmv_refOut; int spmv_nItems, nItemsPadded, spmv_numRows; spmv_numRows = spmv_NBLOCKS * (spmv_BLOCK_SIZE/WARP_SIZE); spmv_nItems = spmv_numRows * spmv_numRows / 50; // 1% of entries will be non-zero float maxval = 200.0; cudaMallocHost(&h_spmv_val, spmv_nItems * sizeof(float)); cudaMallocHost(&h_spmv_cols, spmv_nItems * sizeof(int)); cudaMallocHost(&h_rowDelimiters, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_val, spmv_nItems, maxval); initRandomMatrix(h_spmv_cols, h_rowDelimiters, spmv_nItems, spmv_numRows); // Set up remaining host data int paddedSize = spmv_numRows + (PAD_FACTOR - spmv_numRows % PAD_FACTOR); cudaMallocHost(&h_spmv_vec, spmv_numRows * sizeof(float)) ; spmv_refOut = new float[spmv_numRows]; cudaMallocHost(&h_rowDelimitersPad, (spmv_numRows + 1) * sizeof(int)); fill(h_spmv_vec, spmv_numRows, maxval); cudaMallocHost(&h_spmv_out, paddedSize * sizeof(float)); convertToPadded(h_spmv_val, h_spmv_cols, spmv_numRows, h_rowDelimiters, &h_spmv_valPad, &h_spmv_colsPad, h_rowDelimitersPad, &nItemsPadded); // Compute reference solution spmvCpu(h_spmv_val, h_spmv_cols, h_rowDelimiters, h_spmv_vec, spmv_numRows, spmv_refOut); float *d_spmv_val, *d_spmv_vec, *d_spmv_out; int *d_spmv_cols, *d_rowDelimiters; // Allocate device memory cudaMalloc(&d_spmv_val, spmv_nItems * sizeof(float)); cudaMalloc(&d_spmv_cols, spmv_nItems * sizeof(int)); cudaMalloc(&d_spmv_vec, spmv_numRows * sizeof(float)); cudaMalloc(&d_spmv_out, spmv_numRows * sizeof(float)); cudaMalloc(&d_rowDelimiters, (spmv_numRows+1) * sizeof(int)); // Transfer data to device cudaMemcpy(d_spmv_val, h_spmv_val, spmv_nItems * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_spmv_cols, h_spmv_cols, spmv_nItems * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_spmv_vec, h_spmv_vec, spmv_numRows * sizeof(float), cudaMemcpyHostToDevice); // cudaMemcpy(d_rowDelimiters, h_rowDelimiters, (spmv_numRows+1) * sizeof(int), cudaMemcpyHostToDevice); cudaMemcpyToSymbol(yy,h_rowDelimiters,(spmv_numRows+1)*sizeof(int)); cudaEvent_t kernel_start, kernel_stop; cudaEventCreate(&kernel_start); cudaEventCreate(&kernel_stop); float kernel_time = 0.0f; cudaEventRecord(kernel_start, 0); // Setup thread 
configuration int spmv_grid = (int) ceil(spmv_numRows / (float)(spmv_BLOCK_SIZE / WARP_SIZE)); spmv_kernel <<<spmv_grid, spmv_BLOCK_SIZE>>> (d_spmv_val, d_spmv_cols, //d_rowDelimiters, d_spmv_vec, spmv_numRows, d_spmv_out); cudaDeviceSynchronize(); cudaEventRecord(kernel_stop, 0); cudaEventSynchronize(kernel_stop); // get elapsed time kernel_time = 0.0f; cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time *= 1.e-3; // Convert to seconds cout << "kernel exe time: " << kernel_time << endl; cudaMemcpy(h_spmv_out, d_spmv_out, spmv_numRows * sizeof(float), cudaMemcpyDeviceToHost); spmv_verifyResults(spmv_refOut, h_spmv_out, spmv_numRows); return 0; }
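The SpMV pair above is a compact catalogue of the runtime renames hipify applies: cudaMallocHost, cudaMalloc and cudaMemcpy become hipHostMalloc, hipMalloc and hipMemcpy, the cudaMemcpy* direction enums and cudaEvent_t timing calls get hip prefixes, and the launch again switches to hipLaunchKernelGGL. The following self-contained round trip exercises just those calls; the kernel twice_ and the problem size are hypothetical stand-ins, with the CUDA spellings noted in comments.

#include <hip/hip_runtime.h>
#include <cstdio>

__global__ void twice_(float *x, int n) {
  int id = blockIdx.x * blockDim.x + threadIdx.x;
  if (id < n) x[id] *= 2.0f;
}

int main() {
  const int n = 1024;
  float *h = nullptr, *d = nullptr;
  hipHostMalloc(&h, n * sizeof(float));                         // cudaMallocHost
  hipMalloc(&d, n * sizeof(float));                             // cudaMalloc
  for (int i = 0; i < n; i++) h[i] = 1.0f;

  hipEvent_t start, stop;                                       // cudaEvent_t
  hipEventCreate(&start);
  hipEventCreate(&stop);

  hipMemcpy(d, h, n * sizeof(float), hipMemcpyHostToDevice);    // cudaMemcpy, cudaMemcpyHostToDevice
  hipEventRecord(start, 0);
  hipLaunchKernelGGL(twice_, dim3((n - 1) / 256 + 1), dim3(256), 0, 0, d, n);
  hipEventRecord(stop, 0);
  hipEventSynchronize(stop);                                    // cudaEventSynchronize

  float ms = 0.0f;
  hipEventElapsedTime(&ms, start, stop);                        // cudaEventElapsedTime
  hipMemcpy(h, d, n * sizeof(float), hipMemcpyDeviceToHost);    // cudaMemcpyDeviceToHost
  printf("kernel time %f ms, h[0] = %f\n", ms, h[0]);

  hipEventDestroy(start);
  hipEventDestroy(stop);
  hipFree(d);
  hipHostFree(h);                                               // cudaFreeHost
  return 0;
}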
38457708afc08de252bc40820edd5bc0760add30.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "saber/funcs/impl/cuda/saber_concat.h" namespace anakin{ namespace saber{ const int BLOCK_SIZE = 32; template <typename dtype> __global__ void concat_impl_cuda(const int nthreads, const dtype* in_data, const int num_concats, const int concat_size, const int top_concat_axis, const int bottom_concat_axis, const int offset_concat_axis, dtype* out_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int total_concat_size = concat_size * bottom_concat_axis; const int concat_num = index / total_concat_size; const int concat_index = index % total_concat_size; const int top_index = concat_index + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; out_data[top_index] = in_data[index]; } } template <typename dtype> __global__ void concat_impl_2d_impl(const int inner_size, const int num_concats, const dtype* in_data, const int concat_size, const int out_concat_axis, const int offset_concat_axis, dtype* out_data) { int idx_inner = threadIdx.x + blockIdx.x * blockDim.x; int idx_outer = threadIdx.y + blockIdx.y * blockDim.y; if (idx_inner < inner_size && idx_outer < num_concats) { int idx_input = idx_outer * inner_size + idx_inner; int idx_output = (idx_outer * out_concat_axis + offset_concat_axis) * \ concat_size + idx_inner; out_data[idx_output] = in_data[idx_input]; } } template <> SaberStatus SaberConcat<NV, AK_FLOAT>::dispatch(const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, ConcatParam<NV>& param) { hipStream_t stream = this->_ctx->get_compute_stream(); int input_size = inputs.size(); //! get output data, valid shape and stride shape OpDataType* out_data = (OpDataType*)outputs[0]->mutable_data(); int offset_concat_axis = 0; Shape out_shape = outputs[0]->valid_shape(); const int out_concat_axis = out_shape[param.axis]; bool out_cont_flag = outputs[0]->is_continue_mem(); bool in_cont_flag = inputs[0]->is_continue_mem(); for (int i = 1; i < input_size; ++i) { in_cont_flag &= inputs[i]->is_continue_mem(); } //! inputs and outputs are all with continuous memory if (in_cont_flag && out_cont_flag){ for (int i = 0; i < input_size; ++i) { Shape in_shape = inputs[i]->valid_shape(); //std::vector<int> bottom_shape = {tmp[3], tmp[2], tmp[1], tmp[0]}; const OpDataType* in_data = (const OpDataType*)inputs[i]->data(); const int in_concat_axis = in_shape[param.axis]; const int in_concat_size = in_concat_axis * _concat_input_size; const int nthreads = in_concat_size * _num_concats; float ratio = (float)in_concat_size / _num_concats; bool is_balance = (ratio > 0.1 && ratio < 10); if (is_balance){ int block_x = BLOCK_SIZE; int block_y = BLOCK_SIZE; int grid_x = (in_concat_size + block_x - 1) / block_x; int grid_y = (_num_concats + block_y - 1) / block_y; dim3 block(block_x, block_y); dim3 grid(grid_x, grid_y); hipLaunchKernelGGL(( concat_impl_2d_impl<OpDataType>), dim3(grid), dim3(block), 0, stream, in_concat_size, _num_concats, in_data, _concat_input_size, out_concat_axis, offset_concat_axis, out_data ); } else { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( concat_impl_cuda<OpDataType>), dim3(CUDA_GET_BLOCKS(nthreads)), dim3(CUDA_NUM_THREADS), 0, stream, \ nthreads, in_data, _num_concats, _concat_input_size, \ out_concat_axis, in_concat_axis, offset_concat_axis, out_data); } offset_concat_axis += in_concat_axis; } } else { //! 
inputs or outputs memory is not continuous Shape offset_out = outputs[0]->offset(); Tensor<NV> tsub; for (int i = 0; i < input_size; ++i) { Shape in_shape = inputs[i]->valid_shape(); tsub.share_sub_buffer(*outputs[0], in_shape, offset_out); offset_out[param.axis] += in_shape[param.axis]; tsub.async_copy_from(*inputs[i], stream); } } return SaberSuccess; } DEFINE_OP_TEMPLATE(SaberConcat, ConcatParam, NV, AK_INT8); DEFINE_OP_TEMPLATE(SaberConcat, ConcatParam, NV, AK_HALF); } //namespace anakin } //namespace anakin
38457708afc08de252bc40820edd5bc0760add30.cu
#include "saber/funcs/impl/cuda/saber_concat.h" namespace anakin{ namespace saber{ const int BLOCK_SIZE = 32; template <typename dtype> __global__ void concat_impl_cuda(const int nthreads, const dtype* in_data, const int num_concats, const int concat_size, const int top_concat_axis, const int bottom_concat_axis, const int offset_concat_axis, dtype* out_data) { CUDA_KERNEL_LOOP(index, nthreads) { const int total_concat_size = concat_size * bottom_concat_axis; const int concat_num = index / total_concat_size; const int concat_index = index % total_concat_size; const int top_index = concat_index + (concat_num * top_concat_axis + offset_concat_axis) * concat_size; out_data[top_index] = in_data[index]; } } template <typename dtype> __global__ void concat_impl_2d_impl(const int inner_size, const int num_concats, const dtype* in_data, const int concat_size, const int out_concat_axis, const int offset_concat_axis, dtype* out_data) { int idx_inner = threadIdx.x + blockIdx.x * blockDim.x; int idx_outer = threadIdx.y + blockIdx.y * blockDim.y; if (idx_inner < inner_size && idx_outer < num_concats) { int idx_input = idx_outer * inner_size + idx_inner; int idx_output = (idx_outer * out_concat_axis + offset_concat_axis) * \ concat_size + idx_inner; out_data[idx_output] = in_data[idx_input]; } } template <> SaberStatus SaberConcat<NV, AK_FLOAT>::dispatch(const std::vector<Tensor<NV> *>& inputs, std::vector<Tensor<NV> *>& outputs, ConcatParam<NV>& param) { cudaStream_t stream = this->_ctx->get_compute_stream(); int input_size = inputs.size(); //! get output data, valid shape and stride shape OpDataType* out_data = (OpDataType*)outputs[0]->mutable_data(); int offset_concat_axis = 0; Shape out_shape = outputs[0]->valid_shape(); const int out_concat_axis = out_shape[param.axis]; bool out_cont_flag = outputs[0]->is_continue_mem(); bool in_cont_flag = inputs[0]->is_continue_mem(); for (int i = 1; i < input_size; ++i) { in_cont_flag &= inputs[i]->is_continue_mem(); } //! inputs and outputs are all with continuous memory if (in_cont_flag && out_cont_flag){ for (int i = 0; i < input_size; ++i) { Shape in_shape = inputs[i]->valid_shape(); //std::vector<int> bottom_shape = {tmp[3], tmp[2], tmp[1], tmp[0]}; const OpDataType* in_data = (const OpDataType*)inputs[i]->data(); const int in_concat_axis = in_shape[param.axis]; const int in_concat_size = in_concat_axis * _concat_input_size; const int nthreads = in_concat_size * _num_concats; float ratio = (float)in_concat_size / _num_concats; bool is_balance = (ratio > 0.1 && ratio < 10); if (is_balance){ int block_x = BLOCK_SIZE; int block_y = BLOCK_SIZE; int grid_x = (in_concat_size + block_x - 1) / block_x; int grid_y = (_num_concats + block_y - 1) / block_y; dim3 block(block_x, block_y); dim3 grid(grid_x, grid_y); concat_impl_2d_impl<OpDataType><<<grid, block, 0, stream>>>( in_concat_size, _num_concats, in_data, _concat_input_size, out_concat_axis, offset_concat_axis, out_data ); } else { // NOLINT_NEXT_LINE(whitespace/operators) concat_impl_cuda<OpDataType><<<CUDA_GET_BLOCKS(nthreads), CUDA_NUM_THREADS, 0, stream>>>( \ nthreads, in_data, _num_concats, _concat_input_size, \ out_concat_axis, in_concat_axis, offset_concat_axis, out_data); } offset_concat_axis += in_concat_axis; } } else { //! 
inputs or outputs memory is not continuous Shape offset_out = outputs[0]->offset(); Tensor<NV> tsub; for (int i = 0; i < input_size; ++i) { Shape in_shape = inputs[i]->valid_shape(); tsub.share_sub_buffer(*outputs[0], in_shape, offset_out); offset_out[param.axis] += in_shape[param.axis]; tsub.async_copy_from(*inputs[i], stream); } } return SaberSuccess; } DEFINE_OP_TEMPLATE(SaberConcat, ConcatParam, NV, AK_INT8); DEFINE_OP_TEMPLATE(SaberConcat, ConcatParam, NV, AK_HALF); } //namespace anakin } //namespace anakin
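This pair is a clean example of the main syntactic change hipify makes to kernel launches: the CUDA triple-chevron launch in the .cu file becomes a hipLaunchKernelGGL call with explicit dim3 arguments in the .hip file, while the kernel bodies stay identical. A minimal sketch of the same transformation on a toy kernel (the kernel and its arguments here are illustrative, not part of this record):

__global__ void scale(float* data, float alpha, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= alpha;
}

// CUDA form, as written in the .cu file:
//     scale<<<grid, block, 0, stream>>>(d_data, 2.0f, n);
// HIP form emitted by hipify, as seen in the .hip file:
//     hipLaunchKernelGGL((scale), dim3(grid), dim3(block), 0, stream, d_data, 2.0f, n);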
b9652a19361fa252b516c656433fd1557b6d7680.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

__global__ void add(int *a, int *b) {
    int tid = threadIdx.x;
    if (tid % 2 == 0) {
        if (a[tid+1] != NULL) {
            b[tid+1] = a[tid];
            b[tid] = a[tid+1];
        }
    }
}

int main(void) {
    int n, i, size, *d_a, *d_b;
    int a[1000], b[1000];
    printf("Enter no. of elements:\n");
    scanf("%d", &n);
    for (i = 0; i < n; i++) {
        a[i] = i;
    }
    size = sizeof(int);
    hipMalloc((void **)&d_a, size*n);
    hipMalloc((void **)&d_b, size*n);
    hipMemcpy(d_a, a, size*n, hipMemcpyHostToDevice);
    hipLaunchKernelGGL((add), dim3(1), dim3(n), 0, 0, d_a, d_b);
    hipMemcpy(b, d_b, size*n, hipMemcpyDeviceToHost);
    for (i = 0; i < n; i++)
        printf("%d\t", b[i]);
    printf("\n");
    hipFree(d_a);
    hipFree(d_b);
    return 0;
}
b9652a19361fa252b516c656433fd1557b6d7680.cu
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>

__global__ void add(int *a, int *b) {
    int tid = threadIdx.x;
    if (tid % 2 == 0) {
        if (a[tid+1] != NULL) {
            b[tid+1] = a[tid];
            b[tid] = a[tid+1];
        }
    }
}

int main(void) {
    int n, i, size, *d_a, *d_b;
    int a[1000], b[1000];
    printf("Enter no. of elements:\n");
    scanf("%d", &n);
    for (i = 0; i < n; i++) {
        a[i] = i;
    }
    size = sizeof(int);
    cudaMalloc((void **)&d_a, size*n);
    cudaMalloc((void **)&d_b, size*n);
    cudaMemcpy(d_a, a, size*n, cudaMemcpyHostToDevice);
    add <<<1, n>>> (d_a, d_b);
    cudaMemcpy(b, d_b, size*n, cudaMemcpyDeviceToHost);
    for (i = 0; i < n; i++)
        printf("%d\t", b[i]);
    printf("\n");
    cudaFree(d_a);
    cudaFree(d_b);
    return 0;
}
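One caveat on this record: the guard if (a[tid+1] != NULL) compares an int value against NULL (i.e. against 0) rather than checking array bounds, so when n is odd the last even thread reads a[tid+1] past the initialized range and past the n-element device allocation. A bounds-checked variant would pass the element count into the kernel; the sketch below is illustrative and not part of the dataset:

// Pair-swap kernel with an explicit bounds check (assumes n elements in a and b).
__global__ void add_checked(const int *a, int *b, int n) {
    int tid = threadIdx.x;
    if (tid % 2 == 0 && tid + 1 < n) {  // swap only when the partner element exists
        b[tid + 1] = a[tid];
        b[tid]     = a[tid + 1];
    }
}
// CUDA launch: add_checked<<<1, n>>>(d_a, d_b, n);
// HIP launch:  hipLaunchKernelGGL((add_checked), dim3(1), dim3(n), 0, 0, d_a, d_b, n);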
9cee331ee8cb564c64a979e5343172f482743dad.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2017 Zheyong Fan, Ville Vierimaa, and Ari Harju This file is part of GPUQT. GPUQT is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. GPUQT is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GPUQT. If not, see <http://www.gnu.org/licenses/>. */ #include "vector.h" #include <string.h> // memcpy #define BLOCK_SIZE 256 #ifndef CPU_ONLY __global__ void gpu_set_zero(int number_of_elements, real* __restrict__ g_state_real, real* __restrict__ g_state_imag) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_elements) { g_state_real[n] = 0; g_state_imag[n] = 0; } } #else void cpu_set_zero(int number_of_elements, real* g_state_real, real* g_state_imag) { for (int n = 0; n < number_of_elements; ++n) { g_state_real[n] = 0; g_state_imag[n] = 0; } } #endif #ifndef CPU_ONLY void Vector::initialize_gpu(int n) { this->n = n; array_size = n * sizeof(real); CHECK(hipMalloc((void**)&real_part, array_size)); CHECK(hipMalloc((void**)&imag_part, array_size)); } #else void Vector::initialize_cpu(int n) { this->n = n; array_size = n * sizeof(real); real_part = new real[n]; imag_part = new real[n]; } #endif Vector::Vector(int n) { #ifndef CPU_ONLY initialize_gpu(n); hipLaunchKernelGGL(( gpu_set_zero), dim3((n - 1) / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0, n, real_part, imag_part); CHECK(hipGetLastError()); #else initialize_cpu(n); cpu_set_zero(n, real_part, imag_part); #endif } #ifndef CPU_ONLY __global__ void gpu_copy_state( const int N, const real* __restrict__ in_real, const real* __restrict__ in_imag, real* __restrict__ out_real, real* __restrict__ out_imag) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < N) { out_real[n] = in_real[n]; out_imag[n] = in_imag[n]; } } #else void cpu_copy_state(int N, real* in_real, real* in_imag, real* out_real, real* out_imag) { for (int n = 0; n < N; ++n) { out_real[n] = in_real[n]; out_imag[n] = in_imag[n]; } } #endif Vector::Vector(Vector& original) { // Just teach myself: one can access private members of another instance // of the class from within the class #ifndef CPU_ONLY initialize_gpu(original.n); hipLaunchKernelGGL(( gpu_copy_state), dim3((n - 1) / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0, n, original.real_part, original.imag_part, real_part, imag_part); CHECK(hipGetLastError()); #else initialize_cpu(original.n); cpu_copy_state(n, original.real_part, original.imag_part, real_part, imag_part); #endif } Vector::~Vector() { #ifndef CPU_ONLY CHECK(hipFree(real_part)); CHECK(hipFree(imag_part)); #else delete[] real_part; delete[] imag_part; #endif } #ifndef CPU_ONLY __global__ void gpu_add_state( const int n, const real*__restrict__ in_real, const real*__restrict__ in_imag, real*__restrict__ out_real, real*__restrict__ out_imag) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { out_real[i] += in_real[i]; out_imag[i] += in_imag[i]; } } #else void cpu_add_state(int n, real* in_real, real* in_imag, real* out_real, real* out_imag) { for (int i = 0; i < n; ++i) { out_real[i] += in_real[i]; out_imag[i] += in_imag[i]; } } #endif 
void Vector::add(Vector& other) { #ifndef CPU_ONLY hipLaunchKernelGGL(( gpu_add_state), dim3((n - 1) / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0, n, other.real_part, other.imag_part, real_part, imag_part); CHECK(hipGetLastError()); #else cpu_add_state(n, other.real_part, other.imag_part, real_part, imag_part); #endif } void Vector::copy(Vector& other) { #ifndef CPU_ONLY hipLaunchKernelGGL(( gpu_copy_state), dim3((n - 1) / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0, n, other.real_part, other.imag_part, real_part, imag_part); CHECK(hipGetLastError()); #else cpu_copy_state(n, other.real_part, other.imag_part, real_part, imag_part); #endif } #ifndef CPU_ONLY __global__ void gpu_apply_sz( const int n, const real* __restrict__ in_real, const real* __restrict__ in_imag, real* __restrict__ out_real, real* __restrict__ out_imag) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { if (i % 2 == 0) { out_real[i] = in_real[i]; out_imag[i] = in_imag[i]; } else { out_real[i] = -in_real[i]; out_imag[i] = -in_imag[i]; } } } #else void cpu_apply_sz(int n, real* in_real, real* in_imag, real* out_real, real* out_imag) { for (int i = 0; i < n; ++i) { if (i % 2 == 0) { out_real[i] = in_real[i]; out_imag[i] = in_imag[i]; } else { out_real[i] = -in_real[i]; out_imag[i] = -in_imag[i]; } } } #endif void Vector::apply_sz(Vector& other) { #ifndef CPU_ONLY hipLaunchKernelGGL(( gpu_apply_sz), dim3((n - 1) / BLOCK_SIZE + 1), dim3(BLOCK_SIZE), 0, 0, n, other.real_part, other.imag_part, real_part, imag_part); CHECK(hipGetLastError()); #else cpu_apply_sz(n, other.real_part, other.imag_part, real_part, imag_part); #endif } void Vector::copy_from_host(real* other_real, real* other_imag) { #ifndef CPU_ONLY CHECK(hipMemcpy(real_part, other_real, array_size, hipMemcpyHostToDevice)); CHECK(hipMemcpy(imag_part, other_imag, array_size, hipMemcpyHostToDevice)); #else memcpy(real_part, other_real, array_size); memcpy(imag_part, other_imag, array_size); #endif } void Vector::copy_to_host(real* target_real, real* target_imag) { #ifndef CPU_ONLY CHECK(hipMemcpy(target_real, real_part, array_size, hipMemcpyDeviceToHost)); CHECK(hipMemcpy(target_imag, imag_part, array_size, hipMemcpyDeviceToHost)); #else memcpy(target_real, real_part, array_size); memcpy(target_imag, imag_part, array_size); #endif } void Vector::swap(Vector& other) { real* tmp_real = real_part; real* tmp_imag = imag_part; real_part = other.real_part, imag_part = other.imag_part; other.real_part = tmp_real; other.imag_part = tmp_imag; } #ifndef CPU_ONLY __device__ void warp_reduce(volatile real* s, int t) { s[t] += s[t + 32]; s[t] += s[t + 16]; s[t] += s[t + 8]; s[t] += s[t + 4]; s[t] += s[t + 2]; s[t] += s[t + 1]; } #endif #ifndef CPU_ONLY __global__ void gpu_find_inner_product_1( const int number_of_atoms, const real* __restrict__ g_final_state_real, const real* __restrict__ g_final_state_imag, const real* __restrict__ g_random_state_real, const real* __restrict__ g_random_state_imag, real* __restrict__ g_inner_product_real, real* __restrict__ g_inner_product_imag, const int g_offset) { int tid = threadIdx.x; int n = blockIdx.x * blockDim.x + tid; int m; real a, b, c, d; __shared__ real s_data_real[BLOCK_SIZE]; __shared__ real s_data_imag[BLOCK_SIZE]; s_data_real[tid] = 0.0; s_data_imag[tid] = 0.0; if (n < number_of_atoms) { a = g_final_state_real[n]; b = g_final_state_imag[n]; c = g_random_state_real[n]; d = g_random_state_imag[n]; s_data_real[tid] = (a * c + b * d); s_data_imag[tid] = (b * c - a * d); } __syncthreads(); /* if (tid < 256) { m = tid + 256; 
s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); */ if (tid < 128) { m = tid + 128; s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); if (tid < 64) { m = tid + 64; s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); if (tid < 32) { warp_reduce(s_data_real, tid); warp_reduce(s_data_imag, tid); } if (tid == 0) { g_inner_product_real[blockIdx.x + g_offset] = s_data_real[0]; g_inner_product_imag[blockIdx.x + g_offset] = s_data_imag[0]; } } #else void cpu_find_inner_product_1( int grid_size, int number_of_atoms, real* g_final_state_real, real* g_final_state_imag, real* g_random_state_real, real* g_random_state_imag, real* g_inner_product_real, real* g_inner_product_imag, int g_offset) { for (int m = 0; m < grid_size; ++m) { real s_data_real = 0.0; real s_data_imag = 0.0; for (int k = 0; k < BLOCK_SIZE; ++k) { int n = m * BLOCK_SIZE + k; if (n < number_of_atoms) { real a = g_final_state_real[n]; real b = g_final_state_imag[n]; real c = g_random_state_real[n]; real d = g_random_state_imag[n]; s_data_real += (a * c + b * d); s_data_imag += (b * c - a * d); } } g_inner_product_real[m + g_offset] = s_data_real; g_inner_product_imag[m + g_offset] = s_data_imag; } } #endif void Vector::inner_product_1(int number_of_atoms, Vector& other, Vector& target, int offset) { int grid_size = (number_of_atoms - 1) / BLOCK_SIZE + 1; #ifndef CPU_ONLY hipLaunchKernelGGL(( gpu_find_inner_product_1), dim3(grid_size), dim3(BLOCK_SIZE), 0, 0, number_of_atoms, real_part, imag_part, other.real_part, other.imag_part, target.real_part, target.imag_part, offset); CHECK(hipGetLastError()); #else cpu_find_inner_product_1( grid_size, number_of_atoms, real_part, imag_part, other.real_part, other.imag_part, target.real_part, target.imag_part, offset); #endif } #ifndef CPU_ONLY __global__ void gpu_find_inner_product_2( const int number_of_atoms, const real* __restrict__ g_inner_product_1_real, const real* __restrict__ g_inner_product_1_imag, real* __restrict__ g_inner_product_2_real, real* __restrict__ g_inner_product_2_imag) { //<<<para.number_of_energy_points, BLOCK_SIZE)>>> int tid = threadIdx.x; int patch, n, m; __shared__ real s_data_real[BLOCK_SIZE]; __shared__ real s_data_imag[BLOCK_SIZE]; s_data_real[tid] = 0.0; s_data_imag[tid] = 0.0; int number_of_blocks = (number_of_atoms - 1) / BLOCK_SIZE + 1; int number_of_patches = (number_of_blocks - 1) / BLOCK_SIZE + 1; for (patch = 0; patch < number_of_patches; ++patch) { n = tid + patch * BLOCK_SIZE; if (n < number_of_blocks) { m = blockIdx.x * number_of_blocks + n; s_data_real[tid] += g_inner_product_1_real[m]; s_data_imag[tid] += g_inner_product_1_imag[m]; } } __syncthreads(); /* if (tid < 256) { m = tid + 256; s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); */ if (tid < 128) { m = tid + 128; s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); if (tid < 64) { m = tid + 64; s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); if (tid < 32) { warp_reduce(s_data_real, tid); warp_reduce(s_data_imag, tid); } if (tid == 0) { g_inner_product_2_real[blockIdx.x] = s_data_real[0]; g_inner_product_2_imag[blockIdx.x] = s_data_imag[0]; } } #else void cpu_find_inner_product_2( int number_of_moments, int grid_size, real* g_inner_product_1_real, real* g_inner_product_1_imag, real* g_inner_product_2_real, real* g_inner_product_2_imag) { for (int m = 0; m < 
number_of_moments; ++m) { real s_data_real = 0.0; real s_data_imag = 0.0; for (int k = 0; k < grid_size; ++k) { int n = m * grid_size + k; s_data_real += g_inner_product_1_real[n]; s_data_imag += g_inner_product_1_imag[n]; } g_inner_product_2_real[m] = s_data_real; g_inner_product_2_imag[m] = s_data_imag; } } #endif void Vector::inner_product_2(int number_of_atoms, int number_of_moments, Vector& target) { #ifndef CPU_ONLY hipLaunchKernelGGL(( gpu_find_inner_product_2), dim3(number_of_moments), dim3(BLOCK_SIZE), 0, 0, number_of_atoms, real_part, imag_part, target.real_part, target.imag_part); CHECK(hipGetLastError()); #else int grid_size = (number_of_atoms - 1) / BLOCK_SIZE + 1; cpu_find_inner_product_2( number_of_moments, grid_size, real_part, imag_part, target.real_part, target.imag_part); #endif }
9cee331ee8cb564c64a979e5343172f482743dad.cu
/* Copyright 2017 Zheyong Fan, Ville Vierimaa, and Ari Harju This file is part of GPUQT. GPUQT is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. GPUQT is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GPUQT. If not, see <http://www.gnu.org/licenses/>. */ #include "vector.h" #include <string.h> // memcpy #define BLOCK_SIZE 256 #ifndef CPU_ONLY __global__ void gpu_set_zero(int number_of_elements, real* __restrict__ g_state_real, real* __restrict__ g_state_imag) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < number_of_elements) { g_state_real[n] = 0; g_state_imag[n] = 0; } } #else void cpu_set_zero(int number_of_elements, real* g_state_real, real* g_state_imag) { for (int n = 0; n < number_of_elements; ++n) { g_state_real[n] = 0; g_state_imag[n] = 0; } } #endif #ifndef CPU_ONLY void Vector::initialize_gpu(int n) { this->n = n; array_size = n * sizeof(real); CHECK(cudaMalloc((void**)&real_part, array_size)); CHECK(cudaMalloc((void**)&imag_part, array_size)); } #else void Vector::initialize_cpu(int n) { this->n = n; array_size = n * sizeof(real); real_part = new real[n]; imag_part = new real[n]; } #endif Vector::Vector(int n) { #ifndef CPU_ONLY initialize_gpu(n); gpu_set_zero<<<(n - 1) / BLOCK_SIZE + 1, BLOCK_SIZE>>>(n, real_part, imag_part); CHECK(cudaGetLastError()); #else initialize_cpu(n); cpu_set_zero(n, real_part, imag_part); #endif } #ifndef CPU_ONLY __global__ void gpu_copy_state( const int N, const real* __restrict__ in_real, const real* __restrict__ in_imag, real* __restrict__ out_real, real* __restrict__ out_imag) { int n = blockIdx.x * blockDim.x + threadIdx.x; if (n < N) { out_real[n] = in_real[n]; out_imag[n] = in_imag[n]; } } #else void cpu_copy_state(int N, real* in_real, real* in_imag, real* out_real, real* out_imag) { for (int n = 0; n < N; ++n) { out_real[n] = in_real[n]; out_imag[n] = in_imag[n]; } } #endif Vector::Vector(Vector& original) { // Just teach myself: one can access private members of another instance // of the class from within the class #ifndef CPU_ONLY initialize_gpu(original.n); gpu_copy_state<<<(n - 1) / BLOCK_SIZE + 1, BLOCK_SIZE>>>( n, original.real_part, original.imag_part, real_part, imag_part); CHECK(cudaGetLastError()); #else initialize_cpu(original.n); cpu_copy_state(n, original.real_part, original.imag_part, real_part, imag_part); #endif } Vector::~Vector() { #ifndef CPU_ONLY CHECK(cudaFree(real_part)); CHECK(cudaFree(imag_part)); #else delete[] real_part; delete[] imag_part; #endif } #ifndef CPU_ONLY __global__ void gpu_add_state( const int n, const real*__restrict__ in_real, const real*__restrict__ in_imag, real*__restrict__ out_real, real*__restrict__ out_imag) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { out_real[i] += in_real[i]; out_imag[i] += in_imag[i]; } } #else void cpu_add_state(int n, real* in_real, real* in_imag, real* out_real, real* out_imag) { for (int i = 0; i < n; ++i) { out_real[i] += in_real[i]; out_imag[i] += in_imag[i]; } } #endif void Vector::add(Vector& other) { #ifndef CPU_ONLY gpu_add_state<<<(n - 1) / BLOCK_SIZE + 1, BLOCK_SIZE>>>( n, other.real_part, other.imag_part, real_part, 
imag_part); CHECK(cudaGetLastError()); #else cpu_add_state(n, other.real_part, other.imag_part, real_part, imag_part); #endif } void Vector::copy(Vector& other) { #ifndef CPU_ONLY gpu_copy_state<<<(n - 1) / BLOCK_SIZE + 1, BLOCK_SIZE>>>( n, other.real_part, other.imag_part, real_part, imag_part); CHECK(cudaGetLastError()); #else cpu_copy_state(n, other.real_part, other.imag_part, real_part, imag_part); #endif } #ifndef CPU_ONLY __global__ void gpu_apply_sz( const int n, const real* __restrict__ in_real, const real* __restrict__ in_imag, real* __restrict__ out_real, real* __restrict__ out_imag) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { if (i % 2 == 0) { out_real[i] = in_real[i]; out_imag[i] = in_imag[i]; } else { out_real[i] = -in_real[i]; out_imag[i] = -in_imag[i]; } } } #else void cpu_apply_sz(int n, real* in_real, real* in_imag, real* out_real, real* out_imag) { for (int i = 0; i < n; ++i) { if (i % 2 == 0) { out_real[i] = in_real[i]; out_imag[i] = in_imag[i]; } else { out_real[i] = -in_real[i]; out_imag[i] = -in_imag[i]; } } } #endif void Vector::apply_sz(Vector& other) { #ifndef CPU_ONLY gpu_apply_sz<<<(n - 1) / BLOCK_SIZE + 1, BLOCK_SIZE>>>( n, other.real_part, other.imag_part, real_part, imag_part); CHECK(cudaGetLastError()); #else cpu_apply_sz(n, other.real_part, other.imag_part, real_part, imag_part); #endif } void Vector::copy_from_host(real* other_real, real* other_imag) { #ifndef CPU_ONLY CHECK(cudaMemcpy(real_part, other_real, array_size, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(imag_part, other_imag, array_size, cudaMemcpyHostToDevice)); #else memcpy(real_part, other_real, array_size); memcpy(imag_part, other_imag, array_size); #endif } void Vector::copy_to_host(real* target_real, real* target_imag) { #ifndef CPU_ONLY CHECK(cudaMemcpy(target_real, real_part, array_size, cudaMemcpyDeviceToHost)); CHECK(cudaMemcpy(target_imag, imag_part, array_size, cudaMemcpyDeviceToHost)); #else memcpy(target_real, real_part, array_size); memcpy(target_imag, imag_part, array_size); #endif } void Vector::swap(Vector& other) { real* tmp_real = real_part; real* tmp_imag = imag_part; real_part = other.real_part, imag_part = other.imag_part; other.real_part = tmp_real; other.imag_part = tmp_imag; } #ifndef CPU_ONLY __device__ void warp_reduce(volatile real* s, int t) { s[t] += s[t + 32]; s[t] += s[t + 16]; s[t] += s[t + 8]; s[t] += s[t + 4]; s[t] += s[t + 2]; s[t] += s[t + 1]; } #endif #ifndef CPU_ONLY __global__ void gpu_find_inner_product_1( const int number_of_atoms, const real* __restrict__ g_final_state_real, const real* __restrict__ g_final_state_imag, const real* __restrict__ g_random_state_real, const real* __restrict__ g_random_state_imag, real* __restrict__ g_inner_product_real, real* __restrict__ g_inner_product_imag, const int g_offset) { int tid = threadIdx.x; int n = blockIdx.x * blockDim.x + tid; int m; real a, b, c, d; __shared__ real s_data_real[BLOCK_SIZE]; __shared__ real s_data_imag[BLOCK_SIZE]; s_data_real[tid] = 0.0; s_data_imag[tid] = 0.0; if (n < number_of_atoms) { a = g_final_state_real[n]; b = g_final_state_imag[n]; c = g_random_state_real[n]; d = g_random_state_imag[n]; s_data_real[tid] = (a * c + b * d); s_data_imag[tid] = (b * c - a * d); } __syncthreads(); /* if (tid < 256) { m = tid + 256; s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); */ if (tid < 128) { m = tid + 128; s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); if (tid < 64) { m = tid + 64; 
s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); if (tid < 32) { warp_reduce(s_data_real, tid); warp_reduce(s_data_imag, tid); } if (tid == 0) { g_inner_product_real[blockIdx.x + g_offset] = s_data_real[0]; g_inner_product_imag[blockIdx.x + g_offset] = s_data_imag[0]; } } #else void cpu_find_inner_product_1( int grid_size, int number_of_atoms, real* g_final_state_real, real* g_final_state_imag, real* g_random_state_real, real* g_random_state_imag, real* g_inner_product_real, real* g_inner_product_imag, int g_offset) { for (int m = 0; m < grid_size; ++m) { real s_data_real = 0.0; real s_data_imag = 0.0; for (int k = 0; k < BLOCK_SIZE; ++k) { int n = m * BLOCK_SIZE + k; if (n < number_of_atoms) { real a = g_final_state_real[n]; real b = g_final_state_imag[n]; real c = g_random_state_real[n]; real d = g_random_state_imag[n]; s_data_real += (a * c + b * d); s_data_imag += (b * c - a * d); } } g_inner_product_real[m + g_offset] = s_data_real; g_inner_product_imag[m + g_offset] = s_data_imag; } } #endif void Vector::inner_product_1(int number_of_atoms, Vector& other, Vector& target, int offset) { int grid_size = (number_of_atoms - 1) / BLOCK_SIZE + 1; #ifndef CPU_ONLY gpu_find_inner_product_1<<<grid_size, BLOCK_SIZE>>>( number_of_atoms, real_part, imag_part, other.real_part, other.imag_part, target.real_part, target.imag_part, offset); CHECK(cudaGetLastError()); #else cpu_find_inner_product_1( grid_size, number_of_atoms, real_part, imag_part, other.real_part, other.imag_part, target.real_part, target.imag_part, offset); #endif } #ifndef CPU_ONLY __global__ void gpu_find_inner_product_2( const int number_of_atoms, const real* __restrict__ g_inner_product_1_real, const real* __restrict__ g_inner_product_1_imag, real* __restrict__ g_inner_product_2_real, real* __restrict__ g_inner_product_2_imag) { //<<<para.number_of_energy_points, BLOCK_SIZE)>>> int tid = threadIdx.x; int patch, n, m; __shared__ real s_data_real[BLOCK_SIZE]; __shared__ real s_data_imag[BLOCK_SIZE]; s_data_real[tid] = 0.0; s_data_imag[tid] = 0.0; int number_of_blocks = (number_of_atoms - 1) / BLOCK_SIZE + 1; int number_of_patches = (number_of_blocks - 1) / BLOCK_SIZE + 1; for (patch = 0; patch < number_of_patches; ++patch) { n = tid + patch * BLOCK_SIZE; if (n < number_of_blocks) { m = blockIdx.x * number_of_blocks + n; s_data_real[tid] += g_inner_product_1_real[m]; s_data_imag[tid] += g_inner_product_1_imag[m]; } } __syncthreads(); /* if (tid < 256) { m = tid + 256; s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); */ if (tid < 128) { m = tid + 128; s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); if (tid < 64) { m = tid + 64; s_data_real[tid] += s_data_real[m]; s_data_imag[tid] += s_data_imag[m]; } __syncthreads(); if (tid < 32) { warp_reduce(s_data_real, tid); warp_reduce(s_data_imag, tid); } if (tid == 0) { g_inner_product_2_real[blockIdx.x] = s_data_real[0]; g_inner_product_2_imag[blockIdx.x] = s_data_imag[0]; } } #else void cpu_find_inner_product_2( int number_of_moments, int grid_size, real* g_inner_product_1_real, real* g_inner_product_1_imag, real* g_inner_product_2_real, real* g_inner_product_2_imag) { for (int m = 0; m < number_of_moments; ++m) { real s_data_real = 0.0; real s_data_imag = 0.0; for (int k = 0; k < grid_size; ++k) { int n = m * grid_size + k; s_data_real += g_inner_product_1_real[n]; s_data_imag += g_inner_product_1_imag[n]; } g_inner_product_2_real[m] = s_data_real; 
g_inner_product_2_imag[m] = s_data_imag; } } #endif void Vector::inner_product_2(int number_of_atoms, int number_of_moments, Vector& target) { #ifndef CPU_ONLY gpu_find_inner_product_2<<<number_of_moments, BLOCK_SIZE>>>( number_of_atoms, real_part, imag_part, target.real_part, target.imag_part); CHECK(cudaGetLastError()); #else int grid_size = (number_of_atoms - 1) / BLOCK_SIZE + 1; cpu_find_inner_product_2( number_of_moments, grid_size, real_part, imag_part, target.real_part, target.imag_part); #endif }
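The block reductions in this record finish with the classic volatile-shared-memory warp_reduce, which relies on implicit warp-synchronous execution; on Volta and later GPUs with independent thread scheduling that assumption is no longer guaranteed without explicit warp synchronization. A commonly used alternative is a shuffle-based warp sum; a minimal sketch (not part of GPUQT, shown here for float):

// Warp-level sum using synchronized shuffles (CUDA 9 and later).
__device__ inline float warp_reduce_sum(float val) {
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffff, val, offset);
    return val;  // after the loop, lane 0 of the warp holds the full sum
}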
b7618435e9fbeadb37301957f35c161dc5ffbae7.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (C) 2017 NEC Laboratories America, Inc. ("NECLA"). All rights reserved. * * This source code is licensed under the license found in the LICENSE file in * the root directory of this source tree. An additional grant of patent rights * can be found in the PATENTS file in the same directory. */ /** \file * identical to segnInfo.cpp, except compiled with different compiler * * New reorganization requires some explicit instantiaons into library for segVecGpu codes * * This MIGHT be because user forgot to include segImpl.hh ??? */ #if defined(__HIPCC__) // instantiate some constexpr SegInfo:: data tables #include "segInfo.cpp" #include "dstorm_msg.hpp" #include "segImpl.hh" #include "segVecGpu.cuh" namespace dStorm { namespace seg { template class VecGpu<double>; template class VecGpu<float>; }//seg:: namespace user { template class Seg_VecGpu<float>; template class Seg_VecGpu<double>; // some of above classes have internal template functions that should // have "common varieties" instantiated into the library. // // store: Seg_VecGpu<T>::store( S* iIter, ...) // for T,S combinations of float, double // // BUT... this did NOT add the functions to the library! why? // oh... need const iterator versions: // float/double const *iIter #define SEG_VECGPU__STORE(T,S) \ template cu_pair< uint32_t /*cnt*/, void* /*dataEnd*/ >* \ Seg_VecGpu<T>::store( S const* iIter, \ uint32_t cnt, uint32_t const offset, \ void* const buf, double const wgt/*=1.0*/ ) SEG_VECGPU__STORE(float,float); SEG_VECGPU__STORE(float,double); SEG_VECGPU__STORE(double,float); SEG_VECGPU__STORE(double,double); #undef SEG_VECGPU__STORE // You may need to add more if you use fancier iterators, for example. // OR you might include the template definitions of segVecGpu.cuh into // and instantiate in your own code (using same cuda nvcc version) }//user:: namespace detail { template class SegImpl<seg::VecGpu<double>>; template class SegImpl<seg::VecGpu<float>>; #define SEGIMPL__STORE(T,S) \ template void SegImpl<seg::VecGpu<T>>::store<S const*> \ (S const*,uint32_t const,uint32_t const,double const); SEGIMPL__STORE(float,float); SEGIMPL__STORE(float,double); SEGIMPL__STORE(double,float); SEGIMPL__STORE(double,double); #undef SEGIMPL__STORE }//detail:: }//dStorm:: #endif
b7618435e9fbeadb37301957f35c161dc5ffbae7.cu
/* * Copyright (C) 2017 NEC Laboratories America, Inc. ("NECLA"). All rights reserved. * * This source code is licensed under the license found in the LICENSE file in * the root directory of this source tree. An additional grant of patent rights * can be found in the PATENTS file in the same directory. */ /** \file * identical to segnInfo.cpp, except compiled with different compiler * * New reorganization requires some explicit instantiaons into library for segVecGpu codes * * This MIGHT be because user forgot to include segImpl.hh ??? */ #if defined(__CUDACC__) // instantiate some constexpr SegInfo:: data tables #include "segInfo.cpp" #include "dstorm_msg.hpp" #include "segImpl.hh" #include "segVecGpu.cuh" namespace dStorm { namespace seg { template class VecGpu<double>; template class VecGpu<float>; }//seg:: namespace user { template class Seg_VecGpu<float>; template class Seg_VecGpu<double>; // some of above classes have internal template functions that should // have "common varieties" instantiated into the library. // // store: Seg_VecGpu<T>::store( S* iIter, ...) // for T,S combinations of float, double // // BUT... this did NOT add the functions to the library! why? // oh... need const iterator versions: // float/double const *iIter #define SEG_VECGPU__STORE(T,S) \ template cu_pair< uint32_t /*cnt*/, void* /*dataEnd*/ >* \ Seg_VecGpu<T>::store( S const* iIter, \ uint32_t cnt, uint32_t const offset, \ void* const buf, double const wgt/*=1.0*/ ) SEG_VECGPU__STORE(float,float); SEG_VECGPU__STORE(float,double); SEG_VECGPU__STORE(double,float); SEG_VECGPU__STORE(double,double); #undef SEG_VECGPU__STORE // You may need to add more if you use fancier iterators, for example. // OR you might include the template definitions of segVecGpu.cuh into // and instantiate in your own code (using same cuda nvcc version) }//user:: namespace detail { template class SegImpl<seg::VecGpu<double>>; template class SegImpl<seg::VecGpu<float>>; #define SEGIMPL__STORE(T,S) \ template void SegImpl<seg::VecGpu<T>>::store<S const*> \ (S const*,uint32_t const,uint32_t const,double const); SEGIMPL__STORE(float,float); SEGIMPL__STORE(float,double); SEGIMPL__STORE(double,float); SEGIMPL__STORE(double,double); #undef SEGIMPL__STORE }//detail:: }//dStorm:: #endif
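The comments in this record circle around a C++ rule worth stating plainly: an explicit instantiation of a class template instantiates its ordinary member functions, but not its member function templates, which is why the store<S const*> overloads need the extra SEG_VECGPU__STORE / SEGIMPL__STORE instantiations. A generic sketch of that rule (the names below are placeholders, not dStorm's API):

template <typename T>
struct Seg {
    void plain() {}                              // covered by "template struct Seg<float>;"
    template <typename It>
    void store(It first, It last) { /* ... */ }  // member template: NOT covered by it
};

// Explicit instantiations, mirroring the macro pattern used in the record:
template struct Seg<float>;                      // emits Seg<float>::plain()
template void Seg<float>::store<const double*>( // the member template must be
    const double*, const double*);               //   instantiated separately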
7ff5d0cae38144d6c766272ae97b9741bc59e3d0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common/ConfigParser.h" #include "common/Constants.h" #include "common/device_intrinsics.h" #include "core/warp_solver/solver_encode.h" #include "core/warp_solver/solver_constants.h" #include "core/warp_solver/geometry_icp_jacobian.cuh" #include "core/warp_solver/JtJMaterializer.h" #include "core/warp_solver/jtj_block_jacobian.cuh" #include <device_launch_parameters.h> namespace surfelwarp { namespace device { enum { jtj_blk_size = 36, warp_size = 32, num_warps = 4, thread_blk_size = num_warps * warp_size, }; __device__ __forceinline__ void computeScalarJtJBlock( const float jacobian[6], float jtj_blk[jtj_blk_size], const float weight_square = 1.0f ) { #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] = weight_square * jacobian[0] * jacobian[jac_row]; jtj_blk[6 * jac_row + 1] = weight_square * jacobian[1] * jacobian[jac_row]; jtj_blk[6 * jac_row + 2] = weight_square * jacobian[2] * jacobian[jac_row]; jtj_blk[6 * jac_row + 3] = weight_square * jacobian[3] * jacobian[jac_row]; jtj_blk[6 * jac_row + 4] = weight_square * jacobian[4] * jacobian[jac_row]; jtj_blk[6 * jac_row + 5] = weight_square * jacobian[5] * jacobian[jac_row]; } } __device__ __forceinline__ void computeSmoothJtJBlock( const NodeGraphSmoothTerm2Jacobian& term2jacobian, unsigned typed_term, unsigned encoded_pair, float jtj_blk[jtj_blk_size], const float weight_square = 1.0f ) { //Check the validity of this term const auto validity = term2jacobian.validity_indicator[typed_term]; if(validity == 0) { #pragma unroll for (auto i = 0; i < jtj_blk_size; i++) { jtj_blk[i] = 0.0f; } return; } const ushort2 node_ij = term2jacobian.node_graph[typed_term]; unsigned node_i, node_j; decode_nodepair(encoded_pair, node_i, node_j); //Explicit compute jacobian const float3 r = term2jacobian.Ti_xj[typed_term]; const float3 s = term2jacobian.Tj_xj[typed_term]; TwistGradientOfScalarCost twist_gradient_i, twist_gradient_j; //The order of two terms const float* jacobian_encoded_i; const float* jacobian_encoded_j; if(node_i == node_ij.x) { jacobian_encoded_i = (const float*)(&twist_gradient_i); jacobian_encoded_j = (const float*)(&twist_gradient_j); } else { jacobian_encoded_i = (const float*)(&twist_gradient_j); jacobian_encoded_j = (const float*)(&twist_gradient_i); } //The first iteration assign { twist_gradient_i.rotation = make_float3(0.0f, r.z, -r.y); twist_gradient_i.translation = make_float3(1.0f, 0.0f, 0.0f); twist_gradient_j.rotation = make_float3(0.0f, -s.z, s.y); twist_gradient_j.translation = make_float3(-1.0f, 0.0f, 0.0f); #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] = weight_square * jacobian_encoded_i[0] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 1] = weight_square * jacobian_encoded_i[1] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 2] = weight_square * jacobian_encoded_i[2] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 3] = weight_square * jacobian_encoded_i[3] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 4] = weight_square * jacobian_encoded_i[4] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 5] = weight_square * jacobian_encoded_i[5] * jacobian_encoded_j[jac_row]; } } //The next two iterations, plus { twist_gradient_i.rotation = make_float3(-r.z, 0.0f, r.x); twist_gradient_i.translation = make_float3(0.0f, 1.0f, 0.0f); twist_gradient_j.rotation = make_float3(s.z, 0.0f, -s.x); twist_gradient_j.translation = make_float3( 
0.0f, -1.0f, 0.0f); #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] += weight_square * jacobian_encoded_i[0] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 1] += weight_square * jacobian_encoded_i[1] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 2] += weight_square * jacobian_encoded_i[2] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 3] += weight_square * jacobian_encoded_i[3] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 4] += weight_square * jacobian_encoded_i[4] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 5] += weight_square * jacobian_encoded_i[5] * jacobian_encoded_j[jac_row]; } } { twist_gradient_i.rotation = make_float3(r.y, -r.x, 0.0f); twist_gradient_i.translation = make_float3(0.0f, 0.0f, 1.0f); twist_gradient_j.rotation = make_float3(-s.y, s.x, 0.0f); twist_gradient_j.translation = make_float3(0.0f, 0.0f, -1.0f); #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] += weight_square * jacobian_encoded_i[0] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 1] += weight_square * jacobian_encoded_i[1] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 2] += weight_square * jacobian_encoded_i[2] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 3] += weight_square * jacobian_encoded_i[3] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 4] += weight_square * jacobian_encoded_i[4] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 5] += weight_square * jacobian_encoded_i[5] * jacobian_encoded_j[jac_row]; } } } __device__ __forceinline__ void computeChannelledJtJBlock( const float jacobian_channelled[18], float jtj_blk[jtj_blk_size], const float weight_square = 1.0f ) { //The first iteration: assign const float* jacobian = jacobian_channelled; #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] = weight_square * jacobian[0] * jacobian[jac_row]; jtj_blk[6 * jac_row + 1] = weight_square * jacobian[1] * jacobian[jac_row]; jtj_blk[6 * jac_row + 2] = weight_square * jacobian[2] * jacobian[jac_row]; jtj_blk[6 * jac_row + 3] = weight_square * jacobian[3] * jacobian[jac_row]; jtj_blk[6 * jac_row + 4] = weight_square * jacobian[4] * jacobian[jac_row]; jtj_blk[6 * jac_row + 5] = weight_square * jacobian[5] * jacobian[jac_row]; } //The next 2 iterations: plus for(auto channel = 1; channel < 3; channel++) { jacobian = &(jacobian_channelled[channel * 6]); #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] += weight_square * jacobian[0] * jacobian[jac_row]; jtj_blk[6 * jac_row + 1] += weight_square * jacobian[1] * jacobian[jac_row]; jtj_blk[6 * jac_row + 2] += weight_square * jacobian[2] * jacobian[jac_row]; jtj_blk[6 * jac_row + 3] += weight_square * jacobian[3] * jacobian[jac_row]; jtj_blk[6 * jac_row + 4] += weight_square * jacobian[4] * jacobian[jac_row]; jtj_blk[6 * jac_row + 5] += weight_square * jacobian[5] * jacobian[jac_row]; } } } //The deprecated method __device__ __forceinline__ void computeSmoothJtJBlockOnline( const NodeGraphSmoothTerm2Jacobian& term2jacobian, unsigned typed_term, unsigned encoded_pair, float jtj_blk[jtj_blk_size], const float weight_square = 1.0f ) { const ushort2 node_ij = term2jacobian.node_graph[typed_term]; const auto xj4 = term2jacobian.reference_node_coords[node_ij.y]; DualQuaternion dq_i = term2jacobian.node_se3[node_ij.x]; DualQuaternion dq_j = term2jacobian.node_se3[node_ij.y]; const mat34 Ti = dq_i.se3_matrix(); const mat34 Tj = dq_j.se3_matrix(); unsigned node_i, 
node_j; decode_nodepair(encoded_pair, node_i, node_j); //Explicit compute jacobian const float3 xj = make_float3(xj4.x, xj4.y, xj4.z); const float3 r = Ti.rot * xj + Ti.trans; const float3 s = Tj.rot * xj + Tj.trans; TwistGradientOfScalarCost twist_gradient_i, twist_gradient_j; //The order of two terms const float* jacobian_encoded_i; const float* jacobian_encoded_j; if(node_i == node_ij.x) { jacobian_encoded_i = (const float*)(&twist_gradient_i); jacobian_encoded_j = (const float*)(&twist_gradient_j); } else { jacobian_encoded_i = (const float*)(&twist_gradient_j); jacobian_encoded_j = (const float*)(&twist_gradient_i); } //The first iteration assign { twist_gradient_i.rotation = make_float3(0.0f, r.z, -r.y); twist_gradient_i.translation = make_float3(1.0f, 0.0f, 0.0f); twist_gradient_j.rotation = make_float3(0.0f, -s.z, s.y); twist_gradient_j.translation = make_float3(-1.0f, 0.0f, 0.0f); #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] = weight_square * jacobian_encoded_i[0] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 1] = weight_square * jacobian_encoded_i[1] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 2] = weight_square * jacobian_encoded_i[2] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 3] = weight_square * jacobian_encoded_i[3] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 4] = weight_square * jacobian_encoded_i[4] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 5] = weight_square * jacobian_encoded_i[5] * jacobian_encoded_j[jac_row]; } } //The next two iterations, plus { twist_gradient_i.rotation = make_float3(-r.z, 0.0f, r.x); twist_gradient_i.translation = make_float3(0.0f, 1.0f, 0.0f); twist_gradient_j.rotation = make_float3(s.z, 0.0f, -s.x); twist_gradient_j.translation = make_float3( 0.0f, -1.0f, 0.0f); #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] += weight_square * jacobian_encoded_i[0] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 1] += weight_square * jacobian_encoded_i[1] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 2] += weight_square * jacobian_encoded_i[2] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 3] += weight_square * jacobian_encoded_i[3] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 4] += weight_square * jacobian_encoded_i[4] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 5] += weight_square * jacobian_encoded_i[5] * jacobian_encoded_j[jac_row]; } } { twist_gradient_i.rotation = make_float3(r.y, -r.x, 0.0f); twist_gradient_i.translation = make_float3(0.0f, 0.0f, 1.0f); twist_gradient_j.rotation = make_float3(-s.y, s.x, 0.0f); twist_gradient_j.translation = make_float3(0.0f, 0.0f, -1.0f); #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] += weight_square * jacobian_encoded_i[0] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 1] += weight_square * jacobian_encoded_i[1] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 2] += weight_square * jacobian_encoded_i[2] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 3] += weight_square * jacobian_encoded_i[3] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 4] += weight_square * jacobian_encoded_i[4] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 5] += weight_square * jacobian_encoded_i[5] * jacobian_encoded_j[jac_row]; } } } __global__ void computeJtJNonDiagonalBlockNoSyncKernel( const NodePair2TermsIndex::NodePair2TermMap nodepair2term, const Term2JacobianMaps term2jacobian, float* jtj_blks, const 
PenaltyConstants constants = PenaltyConstants() ) { const auto nodepair_idx = blockIdx.x; const auto encoded_pair = nodepair2term.encoded_nodepair[nodepair_idx]; const auto term_begin = nodepair2term.nodepair_term_range[nodepair_idx].x; const auto term_end = nodepair2term.nodepair_term_range[nodepair_idx].y; const auto term_size = term_end - term_begin; const auto padded_term_size = thread_blk_size * ((term_size + thread_blk_size - 1) / thread_blk_size); const auto warp_id = threadIdx.x >> 5; const auto lane_id = threadIdx.x & 31; //The shared memory for reduction __shared__ float shared_blks[jtj_blk_size][num_warps]; //Zero out the elements for(auto iter = threadIdx.x; iter < jtj_blk_size * num_warps; iter += thread_blk_size) { shared_blks[iter % jtj_blk_size][iter / jtj_blk_size] = 0.0f; } // __syncthreads(); for (auto iter = threadIdx.x; iter < padded_term_size; iter += thread_blk_size) { //The global term index bool term_valid = true; //The memory for store the JtResidual result of each threads float local_blks[jtj_blk_size]; if(iter < term_size) { const auto term_idx = nodepair2term.nodepair_term_index[term_begin + iter]; unsigned typed_term_idx; TermType term_type; query_typed_index(term_idx, nodepair2term.term_offset, term_type, typed_term_idx); switch (term_type) { case TermType::DenseImage: { float term_jacobian[6] = {0}; float nodepair_weight = 0; computeScalarJtJBlockJacobian(term2jacobian.dense_depth_term, encoded_pair, typed_term_idx, term_jacobian, &nodepair_weight); computeScalarJtJBlock(term_jacobian, local_blks, constants.DenseDepthSquared() * nodepair_weight); } break; case TermType::Smooth: computeSmoothJtJBlock(term2jacobian.smooth_term, typed_term_idx, encoded_pair, local_blks, constants.SmoothSquared()); break; /*case TermType::DensityMap: { float term_jacobian[6] = {0}; float nodepair_weight = 0; computeScalarJtJBlockJacobian(term2jacobian.density_map_term, encoded_pair, typed_term_idx, term_jacobian, &nodepair_weight); computeScalarJtJBlock(term_jacobian, local_blks, constants.DensitySquared() * nodepair_weight); } break;*/ case TermType::Foreground: { float term_jacobian[6] = {0}; float nodepair_weight = 0; computeScalarJtJBlockJacobian(term2jacobian.foreground_mask_term, encoded_pair, typed_term_idx, term_jacobian, &nodepair_weight); computeScalarJtJBlock(term_jacobian, local_blks, constants.ForegroundSquared() * nodepair_weight); } break; case TermType::Feature: { float term_jacobian[18] = {0}; float nodepair_weight = 0; computeFeatureJtJBlockJacobian(term2jacobian.sparse_feature_term, encoded_pair, typed_term_idx, term_jacobian, &nodepair_weight); computeChannelledJtJBlock(term_jacobian, local_blks, constants.SparseFeatureSquared() * nodepair_weight); } break; default: term_valid = false; break; } } //__syncthreads(); //Do a reduction for (int i = 0; i < jtj_blk_size; i++) { float data = (iter < term_size && term_valid) ? 
local_blks[i] : 0.0f; data = warp_scan(data); if (lane_id == warpSize - 1) { shared_blks[i][warp_id] += data; } } } __syncthreads(); //Write to output for(auto iter = threadIdx.x; iter < jtj_blk_size; iter += thread_blk_size) jtj_blks[jtj_blk_size * nodepair_idx + iter] = (shared_blks[iter][0] + shared_blks[iter][1] + shared_blks[iter][2] + shared_blks[iter][3]); } } // namespace device } // namespace surfelwarp void surfelwarp::JtJMaterializer::computeNonDiagonalBlocksNoSync(hipStream_t stream) { //Correct the size of node pairs const auto num_nodepairs = m_nodepair2term_map.encoded_nodepair.Size(); SURFELWARP_CHECK_EQ(num_nodepairs, m_nodepair2term_map.nodepair_term_range.Size()); m_nondiag_blks.ResizeArrayOrException(num_nodepairs * device::jtj_blk_size); //Invoke the kernel dim3 blk(device::thread_blk_size); dim3 grid(num_nodepairs); hipLaunchKernelGGL(( device::computeJtJNonDiagonalBlockNoSyncKernel), dim3(grid), dim3(blk), 0, stream, m_nodepair2term_map, m_term2jacobian_map, m_nondiag_blks.Ptr(), m_penalty_constants ); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(hipStreamSynchronize(stream)); cudaSafeCall(hipGetLastError()); #endif //Do a sanity check //nonDiagonalBlocksSanityCheck(); }
7ff5d0cae38144d6c766272ae97b9741bc59e3d0.cu
#include "common/ConfigParser.h" #include "common/Constants.h" #include "common/device_intrinsics.h" #include "core/warp_solver/solver_encode.h" #include "core/warp_solver/solver_constants.h" #include "core/warp_solver/geometry_icp_jacobian.cuh" #include "core/warp_solver/JtJMaterializer.h" #include "core/warp_solver/jtj_block_jacobian.cuh" #include <device_launch_parameters.h> namespace surfelwarp { namespace device { enum { jtj_blk_size = 36, warp_size = 32, num_warps = 4, thread_blk_size = num_warps * warp_size, }; __device__ __forceinline__ void computeScalarJtJBlock( const float jacobian[6], float jtj_blk[jtj_blk_size], const float weight_square = 1.0f ) { #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] = weight_square * jacobian[0] * jacobian[jac_row]; jtj_blk[6 * jac_row + 1] = weight_square * jacobian[1] * jacobian[jac_row]; jtj_blk[6 * jac_row + 2] = weight_square * jacobian[2] * jacobian[jac_row]; jtj_blk[6 * jac_row + 3] = weight_square * jacobian[3] * jacobian[jac_row]; jtj_blk[6 * jac_row + 4] = weight_square * jacobian[4] * jacobian[jac_row]; jtj_blk[6 * jac_row + 5] = weight_square * jacobian[5] * jacobian[jac_row]; } } __device__ __forceinline__ void computeSmoothJtJBlock( const NodeGraphSmoothTerm2Jacobian& term2jacobian, unsigned typed_term, unsigned encoded_pair, float jtj_blk[jtj_blk_size], const float weight_square = 1.0f ) { //Check the validity of this term const auto validity = term2jacobian.validity_indicator[typed_term]; if(validity == 0) { #pragma unroll for (auto i = 0; i < jtj_blk_size; i++) { jtj_blk[i] = 0.0f; } return; } const ushort2 node_ij = term2jacobian.node_graph[typed_term]; unsigned node_i, node_j; decode_nodepair(encoded_pair, node_i, node_j); //Explicit compute jacobian const float3 r = term2jacobian.Ti_xj[typed_term]; const float3 s = term2jacobian.Tj_xj[typed_term]; TwistGradientOfScalarCost twist_gradient_i, twist_gradient_j; //The order of two terms const float* jacobian_encoded_i; const float* jacobian_encoded_j; if(node_i == node_ij.x) { jacobian_encoded_i = (const float*)(&twist_gradient_i); jacobian_encoded_j = (const float*)(&twist_gradient_j); } else { jacobian_encoded_i = (const float*)(&twist_gradient_j); jacobian_encoded_j = (const float*)(&twist_gradient_i); } //The first iteration assign { twist_gradient_i.rotation = make_float3(0.0f, r.z, -r.y); twist_gradient_i.translation = make_float3(1.0f, 0.0f, 0.0f); twist_gradient_j.rotation = make_float3(0.0f, -s.z, s.y); twist_gradient_j.translation = make_float3(-1.0f, 0.0f, 0.0f); #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] = weight_square * jacobian_encoded_i[0] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 1] = weight_square * jacobian_encoded_i[1] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 2] = weight_square * jacobian_encoded_i[2] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 3] = weight_square * jacobian_encoded_i[3] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 4] = weight_square * jacobian_encoded_i[4] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 5] = weight_square * jacobian_encoded_i[5] * jacobian_encoded_j[jac_row]; } } //The next two iterations, plus { twist_gradient_i.rotation = make_float3(-r.z, 0.0f, r.x); twist_gradient_i.translation = make_float3(0.0f, 1.0f, 0.0f); twist_gradient_j.rotation = make_float3(s.z, 0.0f, -s.x); twist_gradient_j.translation = make_float3( 0.0f, -1.0f, 0.0f); #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { 
jtj_blk[6 * jac_row + 0] += weight_square * jacobian_encoded_i[0] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 1] += weight_square * jacobian_encoded_i[1] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 2] += weight_square * jacobian_encoded_i[2] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 3] += weight_square * jacobian_encoded_i[3] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 4] += weight_square * jacobian_encoded_i[4] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 5] += weight_square * jacobian_encoded_i[5] * jacobian_encoded_j[jac_row]; } } { twist_gradient_i.rotation = make_float3(r.y, -r.x, 0.0f); twist_gradient_i.translation = make_float3(0.0f, 0.0f, 1.0f); twist_gradient_j.rotation = make_float3(-s.y, s.x, 0.0f); twist_gradient_j.translation = make_float3(0.0f, 0.0f, -1.0f); #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] += weight_square * jacobian_encoded_i[0] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 1] += weight_square * jacobian_encoded_i[1] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 2] += weight_square * jacobian_encoded_i[2] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 3] += weight_square * jacobian_encoded_i[3] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 4] += weight_square * jacobian_encoded_i[4] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 5] += weight_square * jacobian_encoded_i[5] * jacobian_encoded_j[jac_row]; } } } __device__ __forceinline__ void computeChannelledJtJBlock( const float jacobian_channelled[18], float jtj_blk[jtj_blk_size], const float weight_square = 1.0f ) { //The first iteration: assign const float* jacobian = jacobian_channelled; #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] = weight_square * jacobian[0] * jacobian[jac_row]; jtj_blk[6 * jac_row + 1] = weight_square * jacobian[1] * jacobian[jac_row]; jtj_blk[6 * jac_row + 2] = weight_square * jacobian[2] * jacobian[jac_row]; jtj_blk[6 * jac_row + 3] = weight_square * jacobian[3] * jacobian[jac_row]; jtj_blk[6 * jac_row + 4] = weight_square * jacobian[4] * jacobian[jac_row]; jtj_blk[6 * jac_row + 5] = weight_square * jacobian[5] * jacobian[jac_row]; } //The next 2 iterations: plus for(auto channel = 1; channel < 3; channel++) { jacobian = &(jacobian_channelled[channel * 6]); #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] += weight_square * jacobian[0] * jacobian[jac_row]; jtj_blk[6 * jac_row + 1] += weight_square * jacobian[1] * jacobian[jac_row]; jtj_blk[6 * jac_row + 2] += weight_square * jacobian[2] * jacobian[jac_row]; jtj_blk[6 * jac_row + 3] += weight_square * jacobian[3] * jacobian[jac_row]; jtj_blk[6 * jac_row + 4] += weight_square * jacobian[4] * jacobian[jac_row]; jtj_blk[6 * jac_row + 5] += weight_square * jacobian[5] * jacobian[jac_row]; } } } //The deprecated method __device__ __forceinline__ void computeSmoothJtJBlockOnline( const NodeGraphSmoothTerm2Jacobian& term2jacobian, unsigned typed_term, unsigned encoded_pair, float jtj_blk[jtj_blk_size], const float weight_square = 1.0f ) { const ushort2 node_ij = term2jacobian.node_graph[typed_term]; const auto xj4 = term2jacobian.reference_node_coords[node_ij.y]; DualQuaternion dq_i = term2jacobian.node_se3[node_ij.x]; DualQuaternion dq_j = term2jacobian.node_se3[node_ij.y]; const mat34 Ti = dq_i.se3_matrix(); const mat34 Tj = dq_j.se3_matrix(); unsigned node_i, node_j; decode_nodepair(encoded_pair, node_i, node_j); //Explicit compute jacobian 
const float3 xj = make_float3(xj4.x, xj4.y, xj4.z); const float3 r = Ti.rot * xj + Ti.trans; const float3 s = Tj.rot * xj + Tj.trans; TwistGradientOfScalarCost twist_gradient_i, twist_gradient_j; //The order of two terms const float* jacobian_encoded_i; const float* jacobian_encoded_j; if(node_i == node_ij.x) { jacobian_encoded_i = (const float*)(&twist_gradient_i); jacobian_encoded_j = (const float*)(&twist_gradient_j); } else { jacobian_encoded_i = (const float*)(&twist_gradient_j); jacobian_encoded_j = (const float*)(&twist_gradient_i); } //The first iteration assign { twist_gradient_i.rotation = make_float3(0.0f, r.z, -r.y); twist_gradient_i.translation = make_float3(1.0f, 0.0f, 0.0f); twist_gradient_j.rotation = make_float3(0.0f, -s.z, s.y); twist_gradient_j.translation = make_float3(-1.0f, 0.0f, 0.0f); #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] = weight_square * jacobian_encoded_i[0] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 1] = weight_square * jacobian_encoded_i[1] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 2] = weight_square * jacobian_encoded_i[2] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 3] = weight_square * jacobian_encoded_i[3] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 4] = weight_square * jacobian_encoded_i[4] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 5] = weight_square * jacobian_encoded_i[5] * jacobian_encoded_j[jac_row]; } } //The next two iterations, plus { twist_gradient_i.rotation = make_float3(-r.z, 0.0f, r.x); twist_gradient_i.translation = make_float3(0.0f, 1.0f, 0.0f); twist_gradient_j.rotation = make_float3(s.z, 0.0f, -s.x); twist_gradient_j.translation = make_float3( 0.0f, -1.0f, 0.0f); #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] += weight_square * jacobian_encoded_i[0] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 1] += weight_square * jacobian_encoded_i[1] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 2] += weight_square * jacobian_encoded_i[2] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 3] += weight_square * jacobian_encoded_i[3] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 4] += weight_square * jacobian_encoded_i[4] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 5] += weight_square * jacobian_encoded_i[5] * jacobian_encoded_j[jac_row]; } } { twist_gradient_i.rotation = make_float3(r.y, -r.x, 0.0f); twist_gradient_i.translation = make_float3(0.0f, 0.0f, 1.0f); twist_gradient_j.rotation = make_float3(-s.y, s.x, 0.0f); twist_gradient_j.translation = make_float3(0.0f, 0.0f, -1.0f); #pragma unroll for (int jac_row = 0; jac_row < 6; jac_row++) { jtj_blk[6 * jac_row + 0] += weight_square * jacobian_encoded_i[0] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 1] += weight_square * jacobian_encoded_i[1] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 2] += weight_square * jacobian_encoded_i[2] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 3] += weight_square * jacobian_encoded_i[3] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 4] += weight_square * jacobian_encoded_i[4] * jacobian_encoded_j[jac_row]; jtj_blk[6 * jac_row + 5] += weight_square * jacobian_encoded_i[5] * jacobian_encoded_j[jac_row]; } } } __global__ void computeJtJNonDiagonalBlockNoSyncKernel( const NodePair2TermsIndex::NodePair2TermMap nodepair2term, const Term2JacobianMaps term2jacobian, float* jtj_blks, const PenaltyConstants constants = PenaltyConstants() ) { const auto nodepair_idx = blockIdx.x; const 
auto encoded_pair = nodepair2term.encoded_nodepair[nodepair_idx]; const auto term_begin = nodepair2term.nodepair_term_range[nodepair_idx].x; const auto term_end = nodepair2term.nodepair_term_range[nodepair_idx].y; const auto term_size = term_end - term_begin; const auto padded_term_size = thread_blk_size * ((term_size + thread_blk_size - 1) / thread_blk_size); const auto warp_id = threadIdx.x >> 5; const auto lane_id = threadIdx.x & 31; //The shared memory for reduction __shared__ float shared_blks[jtj_blk_size][num_warps]; //Zero out the elements for(auto iter = threadIdx.x; iter < jtj_blk_size * num_warps; iter += thread_blk_size) { shared_blks[iter % jtj_blk_size][iter / jtj_blk_size] = 0.0f; } // __syncthreads(); for (auto iter = threadIdx.x; iter < padded_term_size; iter += thread_blk_size) { //The global term index bool term_valid = true; //The memory for store the JtResidual result of each threads float local_blks[jtj_blk_size]; if(iter < term_size) { const auto term_idx = nodepair2term.nodepair_term_index[term_begin + iter]; unsigned typed_term_idx; TermType term_type; query_typed_index(term_idx, nodepair2term.term_offset, term_type, typed_term_idx); switch (term_type) { case TermType::DenseImage: { float term_jacobian[6] = {0}; float nodepair_weight = 0; computeScalarJtJBlockJacobian(term2jacobian.dense_depth_term, encoded_pair, typed_term_idx, term_jacobian, &nodepair_weight); computeScalarJtJBlock(term_jacobian, local_blks, constants.DenseDepthSquared() * nodepair_weight); } break; case TermType::Smooth: computeSmoothJtJBlock(term2jacobian.smooth_term, typed_term_idx, encoded_pair, local_blks, constants.SmoothSquared()); break; /*case TermType::DensityMap: { float term_jacobian[6] = {0}; float nodepair_weight = 0; computeScalarJtJBlockJacobian(term2jacobian.density_map_term, encoded_pair, typed_term_idx, term_jacobian, &nodepair_weight); computeScalarJtJBlock(term_jacobian, local_blks, constants.DensitySquared() * nodepair_weight); } break;*/ case TermType::Foreground: { float term_jacobian[6] = {0}; float nodepair_weight = 0; computeScalarJtJBlockJacobian(term2jacobian.foreground_mask_term, encoded_pair, typed_term_idx, term_jacobian, &nodepair_weight); computeScalarJtJBlock(term_jacobian, local_blks, constants.ForegroundSquared() * nodepair_weight); } break; case TermType::Feature: { float term_jacobian[18] = {0}; float nodepair_weight = 0; computeFeatureJtJBlockJacobian(term2jacobian.sparse_feature_term, encoded_pair, typed_term_idx, term_jacobian, &nodepair_weight); computeChannelledJtJBlock(term_jacobian, local_blks, constants.SparseFeatureSquared() * nodepair_weight); } break; default: term_valid = false; break; } } //__syncthreads(); //Do a reduction for (int i = 0; i < jtj_blk_size; i++) { float data = (iter < term_size && term_valid) ? 
local_blks[i] : 0.0f; data = warp_scan(data); if (lane_id == warpSize - 1) { shared_blks[i][warp_id] += data; } } } __syncthreads(); //Write to output for(auto iter = threadIdx.x; iter < jtj_blk_size; iter += thread_blk_size) jtj_blks[jtj_blk_size * nodepair_idx + iter] = (shared_blks[iter][0] + shared_blks[iter][1] + shared_blks[iter][2] + shared_blks[iter][3]); } } // namespace device } // namespace surfelwarp void surfelwarp::JtJMaterializer::computeNonDiagonalBlocksNoSync(cudaStream_t stream) { //Correct the size of node pairs const auto num_nodepairs = m_nodepair2term_map.encoded_nodepair.Size(); SURFELWARP_CHECK_EQ(num_nodepairs, m_nodepair2term_map.nodepair_term_range.Size()); m_nondiag_blks.ResizeArrayOrException(num_nodepairs * device::jtj_blk_size); //Invoke the kernel dim3 blk(device::thread_blk_size); dim3 grid(num_nodepairs); device::computeJtJNonDiagonalBlockNoSyncKernel<<<grid, blk, 0, stream>>>( m_nodepair2term_map, m_term2jacobian_map, m_nondiag_blks.Ptr(), m_penalty_constants ); //Sync and check error #if defined(CUDA_DEBUG_SYNC_CHECK) cudaSafeCall(cudaStreamSynchronize(stream)); cudaSafeCall(cudaGetLastError()); #endif //Do a sanity check //nonDiagonalBlocksSanityCheck(); }
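Note on the reduction pattern above: each of the 36 JtJ block entries is accumulated per warp by running surfelwarp's warp_scan over the lane values and keeping the value held by the last lane, then summing the four per-warp partials in shared memory. As a point of reference, a minimal sketch of the equivalent shuffle-based warp sum is given below; warp_sum is our name for illustration, warp_scan's actual definition lives elsewhere in surfelwarp, and the sketch assumes all 32 lanes of the warp are active.

// Illustrative sketch only: a conventional shuffle-based warp-wide sum.
// The kernel above instead calls surfelwarp's warp_scan() and reads lane 31,
// which yields the same per-warp total under the assumption (made here, not
// verified against warp_scan's definition) that all 32 lanes are active.
__device__ __forceinline__ float warp_sum(float value) {
    for (int offset = 16; offset > 0; offset >>= 1)
        value += __shfl_down_sync(0xffffffffu, value, offset);
    return value; // after the loop, lane 0 holds the sum over the warp
}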
209a2c1afef49a66ee10948fc1771617fa8a84fb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/hip/detail/TensorInfo.cuh> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #include <THH/THHAtomics.cuh> #include <THH/THHNumerics.cuh> #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { using namespace at::cuda::detail; namespace { template <typename scalar_t, typename accscalar_t> __device__ inline int64_t get_intervals( accscalar_t sample, int64_t index, int64_t inputSize, int64_t outputSize, int64_t poolSize) { accscalar_t alpha = static_cast<accscalar_t>(inputSize - poolSize) / static_cast<accscalar_t>(outputSize - 1); if (index == outputSize - 1) { return inputSize - poolSize; } else { return static_cast<int64_t>((index + sample) * alpha) - \ static_cast<int64_t>(sample * alpha); } } template <typename scalar_t> __global__ void fractional_max_pool3d_out_frame( PackedTensorAccessor64<scalar_t, 5> input, PackedTensorAccessor64<scalar_t, 5> output, PackedTensorAccessor64<int64_t, 5> indices, PackedTensorAccessor64<scalar_t, 3> samples, int64_t poolSizeT, int64_t poolSizeH, int64_t poolSizeW) { using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>; // Output (t, h, w) point that this thread is responsible for int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; int64_t plane = blockIdx.y; int64_t batch = blockIdx.z; // Each thread generates a specific output point if (ourOutputPoint < output.size(2) * output.size(3) * output.size(4)){ int64_t outputT = ourOutputPoint / (output.size(3) * output.size(4)); int64_t outputH = (ourOutputPoint / output.size(4)) % output.size(3); int64_t outputW = ourOutputPoint % output.size(4); int64_t poolT = get_intervals<scalar_t,accscalar_t>( static_cast<accscalar_t>(samples[batch][plane][0]), outputT, input.size(2), output.size(2), poolSizeT); int64_t poolH = get_intervals<scalar_t, accscalar_t>( static_cast<accscalar_t>(samples[batch][plane][1]), outputH, input.size(3), output.size(3), poolSizeH); int64_t poolW = get_intervals<scalar_t, accscalar_t>( static_cast<accscalar_t>(samples[batch][plane][2]), outputW, input.size(4), output.size(4), poolSizeW); scalar_t maxVal = at::numeric_limits<scalar_t>::lower_bound(); int64_t maxIndex = poolT * input.size(3) * input.size(4) + poolH * input.size(4) + poolW; for(int64_t t = poolT; t < poolT + poolSizeT; ++ t) { for (int64_t h = poolH; h < poolH + poolSizeH; ++h) { if(poolSizeW < 2 || poolSizeW > 7) { for (int64_t w = poolW; w < poolW + poolSizeW; ++w) { scalar_t val = input[batch][plane][t][h][w]; // for consistency with THNN, favor the first max if (val > maxVal || THCNumerics<scalar_t>::isnan(val)) { maxIndex = t * input.size(3) * input.size(4) + h * input.size(4) + w; maxVal = val; } } } else { for (int64_t i = 0; i < poolSizeW; ++i) { int64_t w = i + poolW; scalar_t val = input[batch][plane][t][h][w]; // for consistency with THNN, favor the first max if (val > maxVal || THCNumerics<scalar_t>::isnan(val)) { maxIndex = t * input.size(3) * input.size(4) + h * input.size(4) + w; maxVal = val; } } } } } indices[batch][plane][outputT][outputH][outputW] = maxIndex; output[batch][plane][outputT][outputH][outputW] = maxVal; } } template <typename scalar_t> __global__ void 
fractional_max_pool3d_backward_out_frame( PackedTensorAccessor64<scalar_t, 5> gradInput, PackedTensorAccessor64<scalar_t, 5> gradOutput, PackedTensorAccessor64<int64_t, 5> indices) { // Output (h, w) point that this thread is responsible for int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; int64_t plane = blockIdx.y; int64_t batch = blockIdx.z; // Each thread generates a specific output point if (ourOutputPoint < gradOutput.size(2) * gradOutput.size(3) * gradOutput.size(4)) { int64_t outputW = ourOutputPoint % gradOutput.size(4); int64_t outputH = (ourOutputPoint / gradOutput.size(4)) % gradOutput.size(3); int64_t outputT = ourOutputPoint / (gradOutput.size(3) * gradOutput.size(4)); int64_t index = indices[batch][plane][outputT][outputH][outputW]; assert(index >= 0); int64_t inputW = index % gradInput.size(4); int64_t inputH = (index / gradInput.size(4)) % gradInput.size(3); int64_t inputT = index / (gradInput.size(3) * gradInput.size(4)); assert(inputT < gradInput.size(2)); gpuAtomicAdd( &gradInput[batch][plane][inputT][inputH][inputW], gradOutput[batch][plane][outputT][outputH][outputW] ); } } void fractional_max_pool3d_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const Tensor& randomSamples) { int64_t planeDim = 0; int64_t dimt = 1; int64_t dimh = 2; int64_t dimw = 3; int64_t numBatch = 1; int64_t outputT = output_size[0]; int64_t outputH = output_size[1]; int64_t outputW = output_size[2]; int64_t poolSizeT = pool_size[0]; int64_t poolSizeH = pool_size[1]; int64_t poolSizeW = pool_size[2]; int64_t ndims = input.ndimension(); TORCH_CHECK( input.numel() != 0 && (ndims == 4 || ndims == 5), "fractional_max_pool3d_out_cuda_template(): ", "non-empty 4D or 5D (batch mode) tensor expected for input, but got: ", ndims); if (ndims == 5) { numBatch = input.size(0); planeDim++; dimt++; dimh++; dimw++; } /* sizes */ int64_t numPlanes = input.size(planeDim); int64_t inputT = input.size(dimt); int64_t inputH = input.size(dimh); int64_t inputW = input.size(dimw); TORCH_CHECK( outputT + poolSizeT - 1 < inputT, "fractional_max_pool3d_out_cuda_template(): ", "pool time (", poolSizeT, ") too large relative to input time (", inputT, ")"); TORCH_CHECK( outputH + poolSizeH - 1 < inputH, "fractional_max_pool3d_out_cuda_template(): ", "pool height (", poolSizeH, ") too large relative to input height (", inputH, ")"); TORCH_CHECK( outputW + poolSizeW - 1 < inputW, "fractional_max_pool3d_out_cuda_template(): ", "pool width (", poolSizeW, ") too large relative to input width (", inputW, ")"); if (ndims == 4) { /* resize output */ output.resize_({numPlanes, outputT, outputH, outputW}); /* indices will contain the locations for each output point */ indices.resize_({numPlanes, outputT, outputH, outputW}); } else { /* resize output */ output.resize_({numBatch, numPlanes, outputT, outputH, outputW}); /* indices will contain the locations for each output point */ indices.resize_({numBatch, numPlanes, outputT, outputH, outputW}); } auto output_ = output; auto indices_ = indices; auto input_ = input; if(ndims == 4) { output_ = output_.reshape({1, numPlanes, outputT, outputH, outputW}); indices_ = indices_.reshape({1, numPlanes, outputT, outputH, outputW}); input_ = input_.reshape({1, numPlanes, inputT, inputH, inputW}); } // block is limited to 4 warps // grid handles overflow per each plane int64_t outputPlaneSize = output_.size(2) * output_.size(3) * output_.size(4); dim3 grid( (outputPlaneSize + 127) / 128, // 
ceil(outputPlaneSize / 128) input_.size(1), input_.size(0)); dim3 block(outputPlaneSize > 128 ? 128 : outputPlaneSize); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "fractional_max_pool3d_out_frame", [&]{ hipLaunchKernelGGL(( fractional_max_pool3d_out_frame<scalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), input_.packed_accessor64<scalar_t, 5>(), output_.packed_accessor64<scalar_t, 5>(), indices_.packed_accessor64<int64_t, 5>(), randomSamples.packed_accessor64<scalar_t, 3>(), poolSizeT, poolSizeH, poolSizeW ); C10_HIP_KERNEL_LAUNCH_CHECK(); } ); } void fractional_max_pool3d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef pool_size /* unused */, IntArrayRef output_size, const Tensor& indices) { int64_t dimt = 1; int64_t dimh = 2; int64_t dimw = 3; int64_t outputT = output_size[0]; int64_t outputH = output_size[1]; int64_t outputW = output_size[2]; int64_t ndims = input.ndimension(); if (ndims == 5) { dimt++; dimh++; dimw++; } /* sizes */ int64_t inputT = input.size(dimt); int64_t inputH = input.size(dimh); int64_t inputW = input.size(dimw); TORCH_CHECK( outputT == gradOutput.size(dimt), "fractional_max_pool3d_backward_out_cuda_template(): ", "gradOutput time unexpected" ); TORCH_CHECK( outputH == gradOutput.size(dimh), "fractional_max_pool3d_backward_out_cuda_template(): ", "gradOutput height unexpected" ); TORCH_CHECK( outputW == gradOutput.size(dimw), "fractional_max_pool3d_backward_out_cuda_template(): ", "gradOutput width unexpected" ); /* resize */ gradInput.resize_as_(input); gradInput.zero_(); auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; auto indices_ = indices; if(ndims == 4) { gradInput_ = gradInput_.reshape({1, gradInput.size(0), inputT, inputH, inputW}); gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputT, outputH, outputW}); indices_ = indices_.reshape({1, indices.size(0), outputT, outputH, outputW}); } /* backprop */ // block is limited to 4 warps // grid handles overflow per each plane int64_t outputPlaneSize = gradOutput_.size(2) * gradOutput_.size(3) * gradOutput_.size(4); dim3 grid( (outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128) gradInput_.size(1), gradInput_.size(0)); dim3 block(outputPlaneSize > 128 ? 
128 : outputPlaneSize); AT_DISPATCH_FLOATING_TYPES_AND_HALF( gradOutput.scalar_type(), "fractional_max_pool3d_backward_out_frame", [&] { hipLaunchKernelGGL(( fractional_max_pool3d_backward_out_frame<scalar_t>) , dim3(grid), dim3(block), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), gradInput_.packed_accessor64<scalar_t, 5>(), gradOutput_.packed_accessor64<scalar_t, 5>(), indices_.packed_accessor64<int64_t, 5>() ); C10_HIP_KERNEL_LAUNCH_CHECK(); } ); } }// namespace std::tuple<Tensor&, Tensor&> fractional_max_pool3d_out_cuda( at::Tensor& output, at::Tensor& indices, const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& randomSamples) { fractional_max_pool3d_out_cuda_template( output, indices, input, pool_size, output_size, randomSamples ); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> fractional_max_pool3d_cuda( const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& randomSamples) { Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); fractional_max_pool3d_out_cuda_template( output, indices, input, pool_size, output_size, randomSamples ); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& fractional_max_pool3d_backward_out_cuda( at::Tensor& gradInput, const at::Tensor& gradOutput_, const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& indices) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("fractional_max_pool3d_backward_out_cuda"); fractional_max_pool3d_backward_out_cuda_template( gradInput, gradOutput_, input, pool_size, output_size, indices ); return gradInput; } Tensor fractional_max_pool3d_backward_cuda( const at::Tensor& gradOutput, const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& indices) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("fractional_max_pool3d_backward_cuda"); Tensor gradInput = at::empty({0}, input.options()); fractional_max_pool3d_backward_out_cuda_template( gradInput, gradOutput, input, pool_size, output_size, indices ); return gradInput; } }// native }// at
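A small host-side walk-through of the get_intervals() mapping used by the pooling kernels above may help: alpha = (inputSize - poolSize) / (outputSize - 1), window i starts at floor((i + sample) * alpha) - floor(sample * alpha), and the last window is pinned to inputSize - poolSize. The sizes and the sample value below are made up purely for illustration; this program is not part of the ATen build.

// Standalone illustration of the fractional-pooling interval math above.
// All concrete numbers are invented for the example.
#include <cstdint>
#include <cstdio>

static int64_t interval_start(double sample, int64_t i, int64_t inputSize,
                              int64_t outputSize, int64_t poolSize) {
    double alpha = double(inputSize - poolSize) / double(outputSize - 1);
    if (i == outputSize - 1) return inputSize - poolSize;   // last window is pinned
    return (int64_t)((i + sample) * alpha) - (int64_t)(sample * alpha);
}

int main() {
    const int64_t inputSize = 10, outputSize = 4, poolSize = 2;
    const double sample = 0.37;  // would come from randomSamples in the kernel
    for (int64_t i = 0; i < outputSize; ++i) {
        int64_t start = interval_start(sample, i, inputSize, outputSize, poolSize);
        std::printf("output %lld -> input window [%lld, %lld)\n",
                    (long long)i, (long long)start, (long long)(start + poolSize));
    }
    // For these numbers the window starts are 0, 3, 6, 8.
    return 0;
}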
209a2c1afef49a66ee10948fc1771617fa8a84fb.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/cuda/detail/TensorInfo.cuh> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <c10/util/Exception.h> #include <THC/THCAtomics.cuh> #include <THC/THCNumerics.cuh> #include <algorithm> #include <cfloat> #include <cmath> namespace at { namespace native { using namespace at::cuda::detail; namespace { template <typename scalar_t, typename accscalar_t> __device__ inline int64_t get_intervals( accscalar_t sample, int64_t index, int64_t inputSize, int64_t outputSize, int64_t poolSize) { accscalar_t alpha = static_cast<accscalar_t>(inputSize - poolSize) / static_cast<accscalar_t>(outputSize - 1); if (index == outputSize - 1) { return inputSize - poolSize; } else { return static_cast<int64_t>((index + sample) * alpha) - \ static_cast<int64_t>(sample * alpha); } } template <typename scalar_t> __global__ void fractional_max_pool3d_out_frame( PackedTensorAccessor64<scalar_t, 5> input, PackedTensorAccessor64<scalar_t, 5> output, PackedTensorAccessor64<int64_t, 5> indices, PackedTensorAccessor64<scalar_t, 3> samples, int64_t poolSizeT, int64_t poolSizeH, int64_t poolSizeW) { using accscalar_t = at::acc_type<scalar_t, /*is_cuda=*/true>; // Output (t, h, w) point that this thread is responsible for int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; int64_t plane = blockIdx.y; int64_t batch = blockIdx.z; // Each thread generates a specific output point if (ourOutputPoint < output.size(2) * output.size(3) * output.size(4)){ int64_t outputT = ourOutputPoint / (output.size(3) * output.size(4)); int64_t outputH = (ourOutputPoint / output.size(4)) % output.size(3); int64_t outputW = ourOutputPoint % output.size(4); int64_t poolT = get_intervals<scalar_t,accscalar_t>( static_cast<accscalar_t>(samples[batch][plane][0]), outputT, input.size(2), output.size(2), poolSizeT); int64_t poolH = get_intervals<scalar_t, accscalar_t>( static_cast<accscalar_t>(samples[batch][plane][1]), outputH, input.size(3), output.size(3), poolSizeH); int64_t poolW = get_intervals<scalar_t, accscalar_t>( static_cast<accscalar_t>(samples[batch][plane][2]), outputW, input.size(4), output.size(4), poolSizeW); scalar_t maxVal = at::numeric_limits<scalar_t>::lower_bound(); int64_t maxIndex = poolT * input.size(3) * input.size(4) + poolH * input.size(4) + poolW; for(int64_t t = poolT; t < poolT + poolSizeT; ++ t) { for (int64_t h = poolH; h < poolH + poolSizeH; ++h) { if(poolSizeW < 2 || poolSizeW > 7) { for (int64_t w = poolW; w < poolW + poolSizeW; ++w) { scalar_t val = input[batch][plane][t][h][w]; // for consistency with THNN, favor the first max if (val > maxVal || THCNumerics<scalar_t>::isnan(val)) { maxIndex = t * input.size(3) * input.size(4) + h * input.size(4) + w; maxVal = val; } } } else { for (int64_t i = 0; i < poolSizeW; ++i) { int64_t w = i + poolW; scalar_t val = input[batch][plane][t][h][w]; // for consistency with THNN, favor the first max if (val > maxVal || THCNumerics<scalar_t>::isnan(val)) { maxIndex = t * input.size(3) * input.size(4) + h * input.size(4) + w; maxVal = val; } } } } } indices[batch][plane][outputT][outputH][outputW] = maxIndex; output[batch][plane][outputT][outputH][outputW] = maxVal; } } template <typename scalar_t> __global__ void fractional_max_pool3d_backward_out_frame( PackedTensorAccessor64<scalar_t, 5> gradInput, 
PackedTensorAccessor64<scalar_t, 5> gradOutput, PackedTensorAccessor64<int64_t, 5> indices) { // Output (h, w) point that this thread is responsible for int64_t ourOutputPoint = threadIdx.x + blockIdx.x * blockDim.x; int64_t plane = blockIdx.y; int64_t batch = blockIdx.z; // Each thread generates a specific output point if (ourOutputPoint < gradOutput.size(2) * gradOutput.size(3) * gradOutput.size(4)) { int64_t outputW = ourOutputPoint % gradOutput.size(4); int64_t outputH = (ourOutputPoint / gradOutput.size(4)) % gradOutput.size(3); int64_t outputT = ourOutputPoint / (gradOutput.size(3) * gradOutput.size(4)); int64_t index = indices[batch][plane][outputT][outputH][outputW]; assert(index >= 0); int64_t inputW = index % gradInput.size(4); int64_t inputH = (index / gradInput.size(4)) % gradInput.size(3); int64_t inputT = index / (gradInput.size(3) * gradInput.size(4)); assert(inputT < gradInput.size(2)); gpuAtomicAdd( &gradInput[batch][plane][inputT][inputH][inputW], gradOutput[batch][plane][outputT][outputH][outputW] ); } } void fractional_max_pool3d_out_cuda_template( Tensor& output, Tensor& indices, const Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const Tensor& randomSamples) { int64_t planeDim = 0; int64_t dimt = 1; int64_t dimh = 2; int64_t dimw = 3; int64_t numBatch = 1; int64_t outputT = output_size[0]; int64_t outputH = output_size[1]; int64_t outputW = output_size[2]; int64_t poolSizeT = pool_size[0]; int64_t poolSizeH = pool_size[1]; int64_t poolSizeW = pool_size[2]; int64_t ndims = input.ndimension(); TORCH_CHECK( input.numel() != 0 && (ndims == 4 || ndims == 5), "fractional_max_pool3d_out_cuda_template(): ", "non-empty 4D or 5D (batch mode) tensor expected for input, but got: ", ndims); if (ndims == 5) { numBatch = input.size(0); planeDim++; dimt++; dimh++; dimw++; } /* sizes */ int64_t numPlanes = input.size(planeDim); int64_t inputT = input.size(dimt); int64_t inputH = input.size(dimh); int64_t inputW = input.size(dimw); TORCH_CHECK( outputT + poolSizeT - 1 < inputT, "fractional_max_pool3d_out_cuda_template(): ", "pool time (", poolSizeT, ") too large relative to input time (", inputT, ")"); TORCH_CHECK( outputH + poolSizeH - 1 < inputH, "fractional_max_pool3d_out_cuda_template(): ", "pool height (", poolSizeH, ") too large relative to input height (", inputH, ")"); TORCH_CHECK( outputW + poolSizeW - 1 < inputW, "fractional_max_pool3d_out_cuda_template(): ", "pool width (", poolSizeW, ") too large relative to input width (", inputW, ")"); if (ndims == 4) { /* resize output */ output.resize_({numPlanes, outputT, outputH, outputW}); /* indices will contain the locations for each output point */ indices.resize_({numPlanes, outputT, outputH, outputW}); } else { /* resize output */ output.resize_({numBatch, numPlanes, outputT, outputH, outputW}); /* indices will contain the locations for each output point */ indices.resize_({numBatch, numPlanes, outputT, outputH, outputW}); } auto output_ = output; auto indices_ = indices; auto input_ = input; if(ndims == 4) { output_ = output_.reshape({1, numPlanes, outputT, outputH, outputW}); indices_ = indices_.reshape({1, numPlanes, outputT, outputH, outputW}); input_ = input_.reshape({1, numPlanes, inputT, inputH, inputW}); } // block is limited to 4 warps // grid handles overflow per each plane int64_t outputPlaneSize = output_.size(2) * output_.size(3) * output_.size(4); dim3 grid( (outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128) input_.size(1), input_.size(0)); dim3 block(outputPlaneSize > 128 ? 
128 : outputPlaneSize); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "fractional_max_pool3d_out_frame", [&]{ fractional_max_pool3d_out_frame<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( input_.packed_accessor64<scalar_t, 5>(), output_.packed_accessor64<scalar_t, 5>(), indices_.packed_accessor64<int64_t, 5>(), randomSamples.packed_accessor64<scalar_t, 3>(), poolSizeT, poolSizeH, poolSizeW ); C10_CUDA_KERNEL_LAUNCH_CHECK(); } ); } void fractional_max_pool3d_backward_out_cuda_template( Tensor& gradInput, const Tensor& gradOutput, const Tensor& input, IntArrayRef pool_size /* unused */, IntArrayRef output_size, const Tensor& indices) { int64_t dimt = 1; int64_t dimh = 2; int64_t dimw = 3; int64_t outputT = output_size[0]; int64_t outputH = output_size[1]; int64_t outputW = output_size[2]; int64_t ndims = input.ndimension(); if (ndims == 5) { dimt++; dimh++; dimw++; } /* sizes */ int64_t inputT = input.size(dimt); int64_t inputH = input.size(dimh); int64_t inputW = input.size(dimw); TORCH_CHECK( outputT == gradOutput.size(dimt), "fractional_max_pool3d_backward_out_cuda_template(): ", "gradOutput time unexpected" ); TORCH_CHECK( outputH == gradOutput.size(dimh), "fractional_max_pool3d_backward_out_cuda_template(): ", "gradOutput height unexpected" ); TORCH_CHECK( outputW == gradOutput.size(dimw), "fractional_max_pool3d_backward_out_cuda_template(): ", "gradOutput width unexpected" ); /* resize */ gradInput.resize_as_(input); gradInput.zero_(); auto gradInput_ = gradInput; auto gradOutput_ = gradOutput; auto indices_ = indices; if(ndims == 4) { gradInput_ = gradInput_.reshape({1, gradInput.size(0), inputT, inputH, inputW}); gradOutput_ = gradOutput_.reshape({1, gradOutput.size(0), outputT, outputH, outputW}); indices_ = indices_.reshape({1, indices.size(0), outputT, outputH, outputW}); } /* backprop */ // block is limited to 4 warps // grid handles overflow per each plane int64_t outputPlaneSize = gradOutput_.size(2) * gradOutput_.size(3) * gradOutput_.size(4); dim3 grid( (outputPlaneSize + 127) / 128, // ceil(outputPlaneSize / 128) gradInput_.size(1), gradInput_.size(0)); dim3 block(outputPlaneSize > 128 ? 
128 : outputPlaneSize); AT_DISPATCH_FLOATING_TYPES_AND_HALF( gradOutput.scalar_type(), "fractional_max_pool3d_backward_out_frame", [&] { fractional_max_pool3d_backward_out_frame<scalar_t> <<<grid, block, 0, at::cuda::getCurrentCUDAStream()>>>( gradInput_.packed_accessor64<scalar_t, 5>(), gradOutput_.packed_accessor64<scalar_t, 5>(), indices_.packed_accessor64<int64_t, 5>() ); C10_CUDA_KERNEL_LAUNCH_CHECK(); } ); } }// namespace std::tuple<Tensor&, Tensor&> fractional_max_pool3d_out_cuda( at::Tensor& output, at::Tensor& indices, const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& randomSamples) { fractional_max_pool3d_out_cuda_template( output, indices, input, pool_size, output_size, randomSamples ); return std::tuple<Tensor&, Tensor&>(output, indices); } std::tuple<Tensor, Tensor> fractional_max_pool3d_cuda( const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& randomSamples) { Tensor output = at::empty({0}, input.options()); Tensor indices = at::empty({0}, input.options().dtype(kLong)); fractional_max_pool3d_out_cuda_template( output, indices, input, pool_size, output_size, randomSamples ); return std::tuple<Tensor, Tensor>(output, indices); } Tensor& fractional_max_pool3d_backward_out_cuda( at::Tensor& gradInput, const at::Tensor& gradOutput_, const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& indices) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("fractional_max_pool3d_backward_out_cuda"); fractional_max_pool3d_backward_out_cuda_template( gradInput, gradOutput_, input, pool_size, output_size, indices ); return gradInput; } Tensor fractional_max_pool3d_backward_cuda( const at::Tensor& gradOutput, const at::Tensor& input, IntArrayRef pool_size, IntArrayRef output_size, const at::Tensor& indices) { // See Note [Writing Nondeterministic Operations] // Nondeterministic because of atomicAdd usage globalContext().alertNotDeterministic("fractional_max_pool3d_backward_cuda"); Tensor gradInput = at::empty({0}, input.options()); fractional_max_pool3d_backward_out_cuda_template( gradInput, gradOutput, input, pool_size, output_size, indices ); return gradInput; } }// native }// at
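Both the forward and backward launches above use the same shape: 128 threads (4 warps) per block, grid.x = ceil(outputPlaneSize / 128), and grid.y / grid.z indexing the plane and batch dimensions. A small helper spelling that arithmetic out is sketched below; plane_grid and plane_block are our names for illustration, ATen computes these values inline.

// Sketch of the launch-shape arithmetic used above; helper names are ours.
#include <cuda_runtime.h>
#include <algorithm>

static inline dim3 plane_grid(long long outputPlaneSize, long long planes, long long batches) {
    // grid.x covers the T*H*W plane with ceiling division by 128 threads/block
    return dim3((unsigned)((outputPlaneSize + 127) / 128),
                (unsigned)planes, (unsigned)batches);
}

static inline dim3 plane_block(long long outputPlaneSize) {
    // never launch more threads than there are output points, capped at 4 warps
    return dim3((unsigned)std::min<long long>(outputPlaneSize, 128));
}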
0a6783a756fc1e0c9cb824dcb6914c88273bf4b3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "common.h" #include "device_launch_parameters.h" void checkCUDAErrorFn(const char *msg, const char *file, int line) { hipError_t err = hipGetLastError(); if (hipSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err)); exit(EXIT_FAILURE); } namespace StreamCompaction { namespace Common { /** * Maps an array to an array of 0s and 1s for stream compaction. Elements * which map to 0 will be removed, and elements which map to 1 will be kept. */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= n) { return; } if (idata[index] < 0) { bools[index] = 1; } else { bools[index] = 0; } } /** * Performs scatter on an array. That is, for each element in idata, * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. */ __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= n) { return; } if (bools[index] == 1) { odata[indices[index]] = idata[index]; } } } }
0a6783a756fc1e0c9cb824dcb6914c88273bf4b3.cu
#include "common.h" #include "device_launch_parameters.h" void checkCUDAErrorFn(const char *msg, const char *file, int line) { cudaError_t err = cudaGetLastError(); if (cudaSuccess == err) { return; } fprintf(stderr, "CUDA error"); if (file) { fprintf(stderr, " (%s:%d)", file, line); } fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err)); exit(EXIT_FAILURE); } namespace StreamCompaction { namespace Common { /** * Maps an array to an array of 0s and 1s for stream compaction. Elements * which map to 0 will be removed, and elements which map to 1 will be kept. */ __global__ void kernMapToBoolean(int n, int *bools, const int *idata) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= n) { return; } if (idata[index] < 0) { bools[index] = 1; } else { bools[index] = 0; } } /** * Performs scatter on an array. That is, for each element in idata, * if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]]. */ __global__ void kernScatter(int n, int *odata, const int *idata, const int *bools, const int *indices) { int index = threadIdx.x + (blockIdx.x * blockDim.x); if (index >= n) { return; } if (bools[index] == 1) { odata[indices[index]] = idata[index]; } } } }
201bb5c571e7a72f8d9a98d010091a8ddb224759.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/native/TensorAdvancedIndexing.h> #include <ATen/ATen.h> #include <ATen/Dispatch.h> #include <ATen/native/ScatterGatherShapeChecks.h> #include <ATen/native/ReduceOpsUtils.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/hip/detail/OffsetCalculator.cuh> #include <ATen/hip/HIPContext.h> namespace at { namespace native { // The kernels are implemented on an opaque, // self-aligned type of the correct size, // to avoid redundant kernels for different types // of the same size. template <int N> struct alignas(N) OpaqueType { char data[N]; }; // essentialy rewritten related to legacy::launch_kernel parts template <int nt, int vt, typename func_t> C10_LAUNCH_BOUNDS_2(nt, vt) __global__ void _scatter_gather_elementwise_kernel(int N, func_t f) { constexpr int nv = nt * vt; int idx = nv * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < vt; ++i) { if (idx < N) { f(idx); idx += nt; } } } template <int nt, int vt, typename func_t> static void _launch_scatter_gather_kernel(int64_t N, const func_t& f) { TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max()); if (N == 0) { return; } dim3 block(nt); dim3 grid((N + block.x * vt - 1) / (block.x * vt)); auto stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); hipLaunchKernelGGL(( _scatter_gather_elementwise_kernel<nt, vt, func_t>), dim3(grid), dim3(block), 0, stream, N, f); AT_CUDA_CHECK(hipGetLastError()); } template <bool is_scatter_like, typename scalar_t> struct _cuda_scatter_gather_internal_kernel { template <typename func_t> void operator() ( TensorIterator& iter, int64_t index_size, int64_t index_stride, const func_t& f ) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { _cuda_scatter_gather_internal_kernel<is_scatter_like, scalar_t>()( sub_iter, index_size, index_stride, f ); } return; } char* self_ptr = (char*)iter.data_ptr(0); char* src_ptr = (char*)iter.data_ptr(1); char* index_ptr = (char*)iter.data_ptr(2); auto offset_calc = make_offset_calculator<3>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); int64_t idx_dim = *(int64_t*)(index_ptr + offsets[2]); CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size && "index out of bounds"); char* self_data = self_ptr + offsets[0]; char* src_data = src_ptr + offsets[1]; f( (scalar_t*)self_data + (is_scatter_like ? idx_dim * index_stride : 0), (scalar_t*)src_data + (is_scatter_like ? 0 : idx_dim * index_stride) ); }; _launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop); } }; // struct _cuda_scatter_fill_internal_kernel template <bool is_scatter_like = true, bool cast_to_opaque = true> struct cuda_scatter_gather_base_kernel { template <typename func_t> void operator()( Tensor& self, int64_t dim, const Tensor& index, const Tensor& src, const std::string& method_name, const func_t& f ) { // no-op if index is empty if (index.numel() == 0) { return; } dim = maybe_wrap_dim(dim, self.dim()); if (is_scatter_like) { scatter_shape_check(self, dim, index, src); } else { gather_shape_check(self, dim, index); } auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); // restride self and src such that // self.shape = src.shape = index.shape // // restride stride[dim] such that // if (is_scatter_like) self.stride[dim] = 0 // else src.stride[dim] = 0 auto self_restrided = is_scatter_like ? 
restride_dim(self, dim, index_sizes) : self.as_strided(index.sizes(), self.strides()); auto src_restrided = is_scatter_like ? src.as_strided(index.sizes(), src.strides()) : restride_dim(src, dim, index_sizes); auto iter = TensorIterator(); iter.dont_compute_common_dtype(); iter.dont_resize_outputs(); iter.add_output(self_restrided); iter.add_input(src_restrided, src.device(), src.scalar_type()); iter.add_input(index); iter.build(); auto self_dim_stride = ensure_nonempty_stride(self, dim); auto self_dim_size = ensure_nonempty_size(self, dim); auto src_dim_stride = ensure_nonempty_stride(src, dim); auto src_dim_size = ensure_nonempty_size(src, dim); auto index_size = is_scatter_like ? self_dim_size : src_dim_size; auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride; AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), method_name, [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; _cuda_scatter_gather_internal_kernel<is_scatter_like, dtype>()( iter, index_size, index_stride, f ); } ); } }; // struct cuda_scatter_gather_base_kernel template <typename scalar_t> struct _cuda_scatter_fill_internal_kernel { template <typename func_t> void operator()( TensorIterator& iter, scalar_t src_val, int64_t index_size, int64_t index_stride, const func_t& f ) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { _cuda_scatter_fill_internal_kernel<scalar_t>()( sub_iter, src_val, index_size, index_stride, f ); } return; } char* self_ptr = (char*)iter.data_ptr(0); char* index_ptr = (char*)iter.data_ptr(1); auto offset_calc = make_offset_calculator<2>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); int64_t idx_dim = *(int64_t*)(index_ptr + offsets[1]); CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size && "index out of bounds" ); char* self_data = self_ptr + offsets[0]; f( (scalar_t*)self_data + idx_dim * index_stride, &src_val ); }; _launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop); } }; // struct _cuda_scatter_fill_internal_kernel template <bool cast_to_opaque = true> struct cuda_scatter_fill_base_kernel { template <typename func_t> void operator()( Tensor& self, int64_t dim, const Tensor& index, Scalar src, const std::string& method_name, const func_t& f ) { // no-op if index is empty if (index.numel() == 0) { return; } dim = maybe_wrap_dim(dim, self.dim()); scatter_shape_check(self, dim, index); auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); // restride self such that // self.shape = index.shape and // self.stride[dim] = 0 auto self_restrided = restride_dim(self, dim, index_sizes); auto iter = TensorIterator(); iter.dont_compute_common_dtype(); iter.dont_resize_outputs(); iter.add_output(self_restrided, self.device(), self.scalar_type()); iter.add_input(index); iter.build(); auto index_size = ensure_nonempty_size(self, dim); auto index_stride = ensure_nonempty_stride(self, dim); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), method_name, [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; auto src_scalar_val = src.to<scalar_t>(); auto src_val = *(dtype*)&src_scalar_val; _cuda_scatter_fill_internal_kernel<dtype>()( iter, src_val, index_size, index_stride, f ); } ); } }; // struct 
cuda_scatter_fill_base_kernel void scatter_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) { cuda_scatter_gather_base_kernel<>()( self, dim, index, src, "scatter_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) { *lhs = *rhs; } ); } void scatter_fill_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, Scalar src) { cuda_scatter_fill_base_kernel<>()( self, dim, index, src, "scatter_fill_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) { *lhs = *rhs; } ); } REGISTER_DISPATCH(scatter_stub, &scatter_cuda_kernel); REGISTER_DISPATCH(scatter_fill_stub, &scatter_fill_cuda_kernel); }} // namespace at::native
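The grid size chosen by _launch_scatter_gather_kernel above follows from each block covering nt * vt elements (nt threads, each handling vt elements strided by nt). A worked example of that arithmetic is below; num_threads and thread_work_size are defined elsewhere in ATen, so the 128 and 4 used here are illustrative values only.

// Worked example of the nt/vt launch arithmetic used above; values are examples.
#include <cstdio>

int main() {
    const int nt = 128, vt = 4;        // example values, not ATen's constants
    const long long N = 1000000;       // example element count
    const long long perBlock = (long long)nt * vt;
    const long long grid = (N + perBlock - 1) / perBlock;  // ceiling division
    std::printf("%lld blocks of %d threads, %d elements per thread\n",
                grid, nt, vt);
    return 0;
}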
201bb5c571e7a72f8d9a98d010091a8ddb224759.cu
#include <ATen/native/TensorAdvancedIndexing.h> #include <ATen/ATen.h> #include <ATen/Dispatch.h> #include <ATen/native/ScatterGatherShapeChecks.h> #include <ATen/native/ReduceOpsUtils.h> #include <ATen/native/TensorIterator.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/cuda/detail/OffsetCalculator.cuh> #include <ATen/cuda/CUDAContext.h> namespace at { namespace native { // The kernels are implemented on an opaque, // self-aligned type of the correct size, // to avoid redundant kernels for different types // of the same size. template <int N> struct alignas(N) OpaqueType { char data[N]; }; // essentialy rewritten related to legacy::launch_kernel parts template <int nt, int vt, typename func_t> C10_LAUNCH_BOUNDS_2(nt, vt) __global__ void _scatter_gather_elementwise_kernel(int N, func_t f) { constexpr int nv = nt * vt; int idx = nv * blockIdx.x + threadIdx.x; #pragma unroll for (int i = 0; i < vt; ++i) { if (idx < N) { f(idx); idx += nt; } } } template <int nt, int vt, typename func_t> static void _launch_scatter_gather_kernel(int64_t N, const func_t& f) { TORCH_INTERNAL_ASSERT(N >= 0 && N <= std::numeric_limits<int32_t>::max()); if (N == 0) { return; } dim3 block(nt); dim3 grid((N + block.x * vt - 1) / (block.x * vt)); auto stream = at::cuda::getCurrentCUDAStream(); _scatter_gather_elementwise_kernel<nt, vt, func_t><<<grid, block, 0, stream>>>(N, f); AT_CUDA_CHECK(cudaGetLastError()); } template <bool is_scatter_like, typename scalar_t> struct _cuda_scatter_gather_internal_kernel { template <typename func_t> void operator() ( TensorIterator& iter, int64_t index_size, int64_t index_stride, const func_t& f ) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { _cuda_scatter_gather_internal_kernel<is_scatter_like, scalar_t>()( sub_iter, index_size, index_stride, f ); } return; } char* self_ptr = (char*)iter.data_ptr(0); char* src_ptr = (char*)iter.data_ptr(1); char* index_ptr = (char*)iter.data_ptr(2); auto offset_calc = make_offset_calculator<3>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); int64_t idx_dim = *(int64_t*)(index_ptr + offsets[2]); CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size && "index out of bounds"); char* self_data = self_ptr + offsets[0]; char* src_data = src_ptr + offsets[1]; f( (scalar_t*)self_data + (is_scatter_like ? idx_dim * index_stride : 0), (scalar_t*)src_data + (is_scatter_like ? 0 : idx_dim * index_stride) ); }; _launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop); } }; // struct _cuda_scatter_fill_internal_kernel template <bool is_scatter_like = true, bool cast_to_opaque = true> struct cuda_scatter_gather_base_kernel { template <typename func_t> void operator()( Tensor& self, int64_t dim, const Tensor& index, const Tensor& src, const std::string& method_name, const func_t& f ) { // no-op if index is empty if (index.numel() == 0) { return; } dim = maybe_wrap_dim(dim, self.dim()); if (is_scatter_like) { scatter_shape_check(self, dim, index, src); } else { gather_shape_check(self, dim, index); } auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); // restride self and src such that // self.shape = src.shape = index.shape // // restride stride[dim] such that // if (is_scatter_like) self.stride[dim] = 0 // else src.stride[dim] = 0 auto self_restrided = is_scatter_like ? restride_dim(self, dim, index_sizes) : self.as_strided(index.sizes(), self.strides()); auto src_restrided = is_scatter_like ? 
src.as_strided(index.sizes(), src.strides()) : restride_dim(src, dim, index_sizes); auto iter = TensorIterator(); iter.dont_compute_common_dtype(); iter.dont_resize_outputs(); iter.add_output(self_restrided); iter.add_input(src_restrided, src.device(), src.scalar_type()); iter.add_input(index); iter.build(); auto self_dim_stride = ensure_nonempty_stride(self, dim); auto self_dim_size = ensure_nonempty_size(self, dim); auto src_dim_stride = ensure_nonempty_stride(src, dim); auto src_dim_size = ensure_nonempty_size(src, dim); auto index_size = is_scatter_like ? self_dim_size : src_dim_size; auto index_stride = is_scatter_like ? self_dim_stride : src_dim_stride; AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), method_name, [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; _cuda_scatter_gather_internal_kernel<is_scatter_like, dtype>()( iter, index_size, index_stride, f ); } ); } }; // struct cuda_scatter_gather_base_kernel template <typename scalar_t> struct _cuda_scatter_fill_internal_kernel { template <typename func_t> void operator()( TensorIterator& iter, scalar_t src_val, int64_t index_size, int64_t index_stride, const func_t& f ) { if (iter.numel() == 0) { return; } if (!iter.can_use_32bit_indexing()) { for (auto& sub_iter : iter.with_32bit_indexing()) { _cuda_scatter_fill_internal_kernel<scalar_t>()( sub_iter, src_val, index_size, index_stride, f ); } return; } char* self_ptr = (char*)iter.data_ptr(0); char* index_ptr = (char*)iter.data_ptr(1); auto offset_calc = make_offset_calculator<2>(iter); auto loop = [=]C10_DEVICE(int i) { auto offsets = offset_calc.get(i); int64_t idx_dim = *(int64_t*)(index_ptr + offsets[1]); CUDA_KERNEL_ASSERT(idx_dim >= 0 && idx_dim < index_size && "index out of bounds" ); char* self_data = self_ptr + offsets[0]; f( (scalar_t*)self_data + idx_dim * index_stride, &src_val ); }; _launch_scatter_gather_kernel<num_threads, thread_work_size>(iter.numel(), loop); } }; // struct _cuda_scatter_fill_internal_kernel template <bool cast_to_opaque = true> struct cuda_scatter_fill_base_kernel { template <typename func_t> void operator()( Tensor& self, int64_t dim, const Tensor& index, Scalar src, const std::string& method_name, const func_t& f ) { // no-op if index is empty if (index.numel() == 0) { return; } dim = maybe_wrap_dim(dim, self.dim()); scatter_shape_check(self, dim, index); auto index_sizes = ensure_nonempty_vec(index.sizes().vec()); // restride self such that // self.shape = index.shape and // self.stride[dim] = 0 auto self_restrided = restride_dim(self, dim, index_sizes); auto iter = TensorIterator(); iter.dont_compute_common_dtype(); iter.dont_resize_outputs(); iter.add_output(self_restrided, self.device(), self.scalar_type()); iter.add_input(index); iter.build(); auto index_size = ensure_nonempty_size(self, dim); auto index_stride = ensure_nonempty_stride(self, dim); AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3( at::ScalarType::Half, at::ScalarType::Bool, at::ScalarType::BFloat16, iter.dtype(), method_name, [&] { using dtype = typename std::conditional<cast_to_opaque, OpaqueType<sizeof(scalar_t)>, scalar_t>::type; auto src_scalar_val = src.to<scalar_t>(); auto src_val = *(dtype*)&src_scalar_val; _cuda_scatter_fill_internal_kernel<dtype>()( iter, src_val, index_size, index_stride, f ); } ); } }; // struct cuda_scatter_fill_base_kernel void scatter_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, const Tensor& src) { 
cuda_scatter_gather_base_kernel<>()( self, dim, index, src, "scatter_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) { *lhs = *rhs; } ); } void scatter_fill_cuda_kernel(Tensor& self, int64_t dim, const Tensor& index, Scalar src) { cuda_scatter_fill_base_kernel<>()( self, dim, index, src, "scatter_fill_cuda_", []C10_DEVICE(auto* lhs, const auto* rhs) { *lhs = *rhs; } ); } REGISTER_DISPATCH(scatter_stub, &scatter_cuda_kernel); REGISTER_DISPATCH(scatter_fill_stub, &scatter_fill_cuda_kernel); }} // namespace at::native
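The OpaqueType trick noted in the comment at the top of this file works because pure data movement only needs a type with the same size and alignment as the real element type, so every dtype of a given width can share one kernel instantiation. A small self-contained check of that layout property follows; it is not part of the ATen sources.

// Standalone illustration of the OpaqueType<N> layout property relied on above.
#include <cstdio>
#include <cstring>

template <int N> struct alignas(N) OpaqueType { char data[N]; };

static_assert(sizeof(OpaqueType<4>) == sizeof(float), "same size as any 4-byte dtype");
static_assert(alignof(OpaqueType<4>) >= alignof(float), "at least as strictly aligned");

int main() {
    float src = 3.5f, dst = 0.0f;
    OpaqueType<4> tmp;
    std::memcpy(&tmp, &src, sizeof(tmp));  // move the bytes through the opaque type
    std::memcpy(&dst, &tmp, sizeof(dst));
    std::printf("%f\n", dst);              // prints 3.500000: the value survives
    return 0;
}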
fcc85333b2abdc587a427d63b36f9571207b436a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <iostream> #include <iostream> #include <fstream> #define WIDTH 8192 #define LENGHT 8192 #define N_PARTICLES 5000 #define INF 999999.999 #define RADIO 100 #define CELLS_FOR_THREAD 8 using namespace std; // __constant__ float x_part_dev[N_PARTICLES]; // __constant__ float y_part_dev[N_PARTICLES]; #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ if (error != hipSuccess) { \ cout << hipGetErrorString(error) << endl; \ } \ } while (0) __device__ float dist(float x1, float y1, float x2, float y2) { float dist; dist = (x2-x1)*(x2-x1) + (y2-y1)*(y2-y1); //dist = sqrtf(powf(x2-x1, 2) + powf(y2-y1, 2)); if(dist != 0) return 1/dist; else return -1; } __global__ void charge(float l, float *map,float *X,float *Y) { int idx = blockIdx.x*blockDim.x + threadIdx.x; float rowParticle,colParticle,rowCell,colCell; for (int i = idx*CELLS_FOR_THREAD; i<idx*CELLS_FOR_THREAD+CELLS_FOR_THREAD; i++) { if (i<l) { for (size_t j = 0; j < N_PARTICLES; j++) { rowParticle = Y[j]; colParticle = X[j]; rowCell = (i / WIDTH); colCell = (i % WIDTH); //float distancia = rowCell-colCell; float distancia = 1;//(dist(rowParticle,colParticle,rowCell,colCell); if (distancia != -1) { map[i] += distancia; } } //map[i] = 1; } } } __global__ void chargeWithRadio(int l, float *map,float *X,float *Y) { float d; int idx = blockIdx.x*blockDim.x + threadIdx.x; int rowPartcile,colParticle,rowCell,colCell; if (idx < l) { for (size_t i = 0; i < N_PARTICLES; i++) { rowPartcile = Y[i]; colParticle = X[i]; rowCell = (idx / WIDTH)+1; colCell = (idx % WIDTH)+1; d = dist(rowPartcile,colParticle,rowCell,colCell); map[idx] += (d<RADIO)?d:0.0; } } } __global__ void minReduction(float *in, float *out) { __shared__ float sharedData[256]; unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*(blockDim.x*2) + tid; // blockSize = 256 sharedData[tid] = in[i] + in[i+blockDim.x]; __syncthreads(); for (unsigned int s = blockDim.x/2; s>32; s>>=1) { if(tid<s) { sharedData[tid] = (sharedData[tid]<sharedData[tid+s])?sharedData[tid]:sharedData[tid+s]; } __syncthreads(); } if (tid < 32) { sharedData[tid] = (sharedData[tid]<sharedData[tid+32])?sharedData[tid]:sharedData[tid+32]; sharedData[tid] = (sharedData[tid]<sharedData[tid+16])?sharedData[tid]:sharedData[tid+16]; sharedData[tid] = (sharedData[tid]<sharedData[tid+8])?sharedData[tid]:sharedData[tid+8]; sharedData[tid] = (sharedData[tid]<sharedData[tid+4])?sharedData[tid]:sharedData[tid+4]; sharedData[tid] = (sharedData[tid]<sharedData[tid+2])?sharedData[tid]:sharedData[tid+2]; sharedData[tid] = (sharedData[tid]<sharedData[tid+1])?sharedData[tid]:sharedData[tid+1]; } if(tid==0) { out[blockIdx.x] = sharedData[0]; } } int main(int argc, char *argv[]){ // Load data string input_file_name; if (argc > 1) { input_file_name = argv[1]; } else { cout << "falt un argumento" << endl; exit(0); } ifstream infile; cout << "Reading: " << input_file_name.c_str() << endl; infile.open(input_file_name.c_str()); int nP; float *x_part, *y_part; infile >> nP; cout << "nP: "<<nP << endl; x_part = (float *)malloc(nP * sizeof(float)); y_part = (float *)malloc(nP * sizeof(float)); for (int i = 0; i<nP; i++) { infile >> x_part[i] >> y_part[i]; } // Get memory for structures float *cells, *d_cells,*outData,*out2,*out3,y[4]; float *x_part_dev, *y_part_dev; cells = (float*)malloc(WIDTH*LENGHT*sizeof(float)); // Initialization grid with 0 for 
(int i = 0; i < WIDTH*LENGHT; i++) { cells[i] = 0.0; } // Define sizes of GPU int blockSize = 256; // # threads int gridSize = ((WIDTH*LENGHT)/256)/CELLS_FOR_THREAD +1; // # blocks cout << "gridSize: " << gridSize << endl; // Get memory in GPU for structures // data for charge function //hipMalloc(&x_dev, nP * sizeof(float)); // X cord for particles //hipMalloc(&y_dev, nP * sizeof(float)); // Y cord for particles CUDA_CHECK(hipMalloc(&d_cells, WIDTH*LENGHT*sizeof(float))); // 1D array representation for grid 2D CUDA_CHECK(hipMalloc(&x_part_dev, N_PARTICLES*sizeof(float))); CUDA_CHECK(hipMalloc(&y_part_dev, N_PARTICLES*sizeof(float))); // data for reduction function CUDA_CHECK(hipMalloc(&outData, gridSize*sizeof(float))); CUDA_CHECK(hipMalloc(&out2, (gridSize/blockSize)*sizeof(float))); CUDA_CHECK(hipMalloc(&out3, ((gridSize/blockSize)/blockSize)*sizeof(float))); // Copy data from CPU to GPU CUDA_CHECK(hipMemcpy(d_cells, cells, WIDTH*LENGHT*sizeof(float), hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(x_part_dev, x_part, N_PARTICLES * sizeof(float), hipMemcpyHostToDevice)); CUDA_CHECK(hipMemcpy(y_part_dev, y_part, N_PARTICLES * sizeof(float), hipMemcpyHostToDevice)); //hipMemcpy(x_dev, &x_part, nP * sizeof(float), hipMemcpyHostToDevice); //hipMemcpy(y_dev, &y_part, nP * sizeof(float), hipMemcpyHostToDevice); hipEvent_t ct1, ct2; float dt, dt2; // time before kernel hipEventCreate(&ct1); hipEventCreate(&ct2); hipEventRecord(ct1); // Charge grid hipLaunchKernelGGL(( charge), dim3(gridSize),dim3(blockSize), 0, 0, WIDTH*LENGHT, d_cells, x_part_dev, y_part_dev); hipDeviceSynchronize(); //Time after charge kernel hipEventRecord(ct2); hipEventSynchronize(ct2); hipEventElapsedTime(&dt, ct1, ct2); float time1 = dt; std::cout << "Time GPU computing cells charges: " << time1 << "[ms]" << std::endl; CUDA_CHECK(hipMemcpy(cells, d_cells, WIDTH*LENGHT*sizeof(float), hipMemcpyDeviceToHost)); hipDeviceSynchronize(); // check for errors hipError_t error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr, "ERROR: %s \n", hipGetErrorString(error)); } for (size_t i = 0; i < 100; i++) { cout << cells[i] << ' '; } cout << endl; float suma = 0; for (int i = 0; i < WIDTH*LENGHT; i++) { if (cells[i] == 0) { cout << "i: " << i << " = 0"<< endl; break; } suma += cells[i]; } cout << "Suma: " << suma << endl; cout << "\n \n primera parte exitosa (?)" << endl; // time before kernel min hipEventCreate(&ct1); hipEventCreate(&ct2); hipEventRecord(ct1); // Search min load hipLaunchKernelGGL(( minReduction), dim3(gridSize),dim3(blockSize), 0, 0, d_cells,outData); // First reduction 8192*8192 -> (8192*8192+255)/ 256 = 262.144 hipDeviceSynchronize(); hipLaunchKernelGGL(( minReduction), dim3(gridSize/blockSize),dim3(blockSize), 0, 0, outData,out2); // Second reduction 262.144 -> 262.144/256 = 1024 hipDeviceSynchronize(); hipLaunchKernelGGL(( minReduction), dim3((gridSize/blockSize)/blockSize),dim3(blockSize), 0, 0, out2,out3); // Third reduction 262.144 -> 4 :) hipDeviceSynchronize(); //Time after min kernel hipEventRecord(ct2); hipEventSynchronize(ct2); hipEventElapsedTime(&dt2, ct1, ct2); float time2 = dt2; std::cout << "Time GPU computing minimum value: " << time2 << "[ms]" << std::endl; // check for errors error = hipGetLastError(); if (error != hipSuccess) { fprintf(stderr, "ERROR: %s \n", hipGetErrorString(error)); } // Escribiendo resultado en archivo ofstream times_file; times_file.open("results_tarea_4_2.txt", ios_base::app); times_file << input_file_name.c_str() << endl; times_file << "Tiempo en charge 
kernel: "<< dt << "[ms]" << endl; times_file << "Tiempo en min kernel: "<< dt2 << "[ms]" << endl; hipMemcpy(y, out3, 4*sizeof(float), hipMemcpyDeviceToHost); int min=INF; // min load for (size_t i = 0; i < 4; i++) { min = (y[i]<min)?y[i]:min; } cout << min << endl; //hipFree(x_dev); //hipFree(y_dev); hipFree(d_cells); hipFree(outData); hipFree(out2); hipFree(out3); free(cells); free(x_part); free(y_part); return 0; }
fcc85333b2abdc587a427d63b36f9571207b436a.cu
#include <stdio.h>
#include <iostream>
#include <fstream>

#define WIDTH 8192
#define LENGTH 8192
#define N_PARTICLES 5000
#define INF 999999.999
#define RADIO 100
#define CELLS_FOR_THREAD 8

using namespace std;

// __constant__ float x_part_dev[N_PARTICLES];
// __constant__ float y_part_dev[N_PARTICLES];

#define CUDA_CHECK(condition) \
    /* Code block avoids redefinition of cudaError_t error */ \
    do { \
        cudaError_t error = condition; \
        if (error != cudaSuccess) { \
            cout << cudaGetErrorString(error) << endl; \
        } \
    } while (0)

// Returns the inverse of the squared distance between (x1,y1) and (x2,y2),
// or -1 when the two points coincide.
__device__ float dist(float x1, float y1, float x2, float y2) {
    float dist;
    dist = (x2-x1)*(x2-x1) + (y2-y1)*(y2-y1);
    //dist = sqrtf(powf(x2-x1, 2) + powf(y2-y1, 2));
    if (dist != 0)
        return 1/dist;
    else
        return -1;
}

// Accumulates in map[i] the "charge" contributed by every particle,
// processing CELLS_FOR_THREAD consecutive cells per thread.
__global__ void charge(float l, float *map, float *X, float *Y) {
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    float rowParticle, colParticle, rowCell, colCell;
    for (int i = idx*CELLS_FOR_THREAD; i < idx*CELLS_FOR_THREAD + CELLS_FOR_THREAD; i++) {
        if (i < l) {
            for (size_t j = 0; j < N_PARTICLES; j++) {
                rowParticle = Y[j];
                colParticle = X[j];
                rowCell = (i / WIDTH);
                colCell = (i % WIDTH);
                float distancia = dist(rowParticle, colParticle, rowCell, colCell);
                if (distancia != -1) {
                    map[i] += distancia;
                }
            }
        }
    }
}

// Same accumulation, but only contributions below RADIO are added
// and each thread handles exactly one cell.
__global__ void chargeWithRadio(int l, float *map, float *X, float *Y) {
    float d;
    int idx = blockIdx.x*blockDim.x + threadIdx.x;
    int rowParticle, colParticle, rowCell, colCell;
    if (idx < l) {
        for (size_t i = 0; i < N_PARTICLES; i++) {
            rowParticle = Y[i];
            colParticle = X[i];
            rowCell = (idx / WIDTH) + 1;
            colCell = (idx % WIDTH) + 1;
            d = dist(rowParticle, colParticle, rowCell, colCell);
            map[idx] += (d < RADIO) ? d : 0.0;
        }
    }
}

// Block-level minimum reduction: each block reduces 2*blockDim.x inputs
// into out[blockIdx.x]. blockDim.x is assumed to be 256.
__global__ void minReduction(float *in, float *out) {
    __shared__ float sharedData[256];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x*(blockDim.x*2) + tid;  // blockSize = 256
    sharedData[tid] = (in[i] < in[i+blockDim.x]) ? in[i] : in[i+blockDim.x];
    __syncthreads();
    for (unsigned int s = blockDim.x/2; s > 32; s >>= 1) {
        if (tid < s) {
            sharedData[tid] = (sharedData[tid] < sharedData[tid+s]) ? sharedData[tid] : sharedData[tid+s];
        }
        __syncthreads();
    }
    if (tid < 32) {  // unrolled last warp
        sharedData[tid] = (sharedData[tid] < sharedData[tid+32]) ? sharedData[tid] : sharedData[tid+32];
        sharedData[tid] = (sharedData[tid] < sharedData[tid+16]) ? sharedData[tid] : sharedData[tid+16];
        sharedData[tid] = (sharedData[tid] < sharedData[tid+8])  ? sharedData[tid] : sharedData[tid+8];
        sharedData[tid] = (sharedData[tid] < sharedData[tid+4])  ? sharedData[tid] : sharedData[tid+4];
        sharedData[tid] = (sharedData[tid] < sharedData[tid+2])  ? sharedData[tid] : sharedData[tid+2];
        sharedData[tid] = (sharedData[tid] < sharedData[tid+1])  ? sharedData[tid] : sharedData[tid+1];
    }
    if (tid == 0) {
        out[blockIdx.x] = sharedData[0];
    }
}

int main(int argc, char *argv[]) {
    // Load data
    string input_file_name;
    if (argc > 1) {
        input_file_name = argv[1];
    } else {
        cout << "missing an argument" << endl;
        exit(0);
    }
    ifstream infile;
    cout << "Reading: " << input_file_name.c_str() << endl;
    infile.open(input_file_name.c_str());
    int nP;
    float *x_part, *y_part;
    infile >> nP;
    cout << "nP: " << nP << endl;
    x_part = (float *)malloc(nP * sizeof(float));
    y_part = (float *)malloc(nP * sizeof(float));
    for (int i = 0; i < nP; i++) {
        infile >> x_part[i] >> y_part[i];
    }

    // Get memory for structures
    float *cells, *d_cells, *outData, *out2, *out3, y[4];
    float *x_part_dev, *y_part_dev;
    cells = (float*)malloc(WIDTH*LENGTH*sizeof(float));

    // Initialize grid with 0
    for (int i = 0; i < WIDTH*LENGTH; i++) {
        cells[i] = 0.0;
    }

    // Define GPU launch sizes
    int blockSize = 256;                                        // # threads
    int gridSize = ((WIDTH*LENGTH)/256)/CELLS_FOR_THREAD + 1;   // # blocks
    cout << "gridSize: " << gridSize << endl;

    // Get memory in GPU for structures
    // data for charge function
    //cudaMalloc(&x_dev, nP * sizeof(float)); // X coord for particles
    //cudaMalloc(&y_dev, nP * sizeof(float)); // Y coord for particles
    CUDA_CHECK(cudaMalloc(&d_cells, WIDTH*LENGTH*sizeof(float))); // 1D array representation of the 2D grid
    CUDA_CHECK(cudaMalloc(&x_part_dev, N_PARTICLES*sizeof(float)));
    CUDA_CHECK(cudaMalloc(&y_part_dev, N_PARTICLES*sizeof(float)));
    // data for reduction function
    CUDA_CHECK(cudaMalloc(&outData, gridSize*sizeof(float)));
    CUDA_CHECK(cudaMalloc(&out2, (gridSize/blockSize)*sizeof(float)));
    CUDA_CHECK(cudaMalloc(&out3, ((gridSize/blockSize)/blockSize)*sizeof(float)));

    // Copy data from CPU to GPU
    CUDA_CHECK(cudaMemcpy(d_cells, cells, WIDTH*LENGTH*sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(x_part_dev, x_part, N_PARTICLES * sizeof(float), cudaMemcpyHostToDevice));
    CUDA_CHECK(cudaMemcpy(y_part_dev, y_part, N_PARTICLES * sizeof(float), cudaMemcpyHostToDevice));
    //cudaMemcpy(x_dev, &x_part, nP * sizeof(float), cudaMemcpyHostToDevice);
    //cudaMemcpy(y_dev, &y_part, nP * sizeof(float), cudaMemcpyHostToDevice);

    cudaEvent_t ct1, ct2;
    float dt, dt2;

    // time before kernel
    cudaEventCreate(&ct1);
    cudaEventCreate(&ct2);
    cudaEventRecord(ct1);

    // Charge grid
    charge<<<gridSize, blockSize>>>(WIDTH*LENGTH, d_cells, x_part_dev, y_part_dev);
    cudaDeviceSynchronize();

    // Time after charge kernel
    cudaEventRecord(ct2);
    cudaEventSynchronize(ct2);
    cudaEventElapsedTime(&dt, ct1, ct2);
    float time1 = dt;
    std::cout << "Time GPU computing cells charges: " << time1 << "[ms]" << std::endl;

    CUDA_CHECK(cudaMemcpy(cells, d_cells, WIDTH*LENGTH*sizeof(float), cudaMemcpyDeviceToHost));
    cudaDeviceSynchronize();

    // check for errors
    cudaError_t error = cudaGetLastError();
    if (error != cudaSuccess) {
        fprintf(stderr, "ERROR: %s \n", cudaGetErrorString(error));
    }

    for (size_t i = 0; i < 100; i++) {
        cout << cells[i] << ' ';
    }
    cout << endl;

    float suma = 0;
    for (int i = 0; i < WIDTH*LENGTH; i++) {
        if (cells[i] == 0) {
            cout << "i: " << i << " = 0" << endl;
            break;
        }
        suma += cells[i];
    }
    cout << "Sum: " << suma << endl;
    cout << "\n \n first part successful (?)" << endl;

    // time before min kernel
    cudaEventCreate(&ct1);
    cudaEventCreate(&ct2);
    cudaEventRecord(ct1);

    // Search min load
    minReduction<<<gridSize, blockSize>>>(d_cells, outData);                    // First reduction
    cudaDeviceSynchronize();
    minReduction<<<gridSize/blockSize, blockSize>>>(outData, out2);             // Second reduction
    cudaDeviceSynchronize();
    minReduction<<<(gridSize/blockSize)/blockSize, blockSize>>>(out2, out3);    // Third reduction
    cudaDeviceSynchronize();

    // Time after min kernel
    cudaEventRecord(ct2);
    cudaEventSynchronize(ct2);
    cudaEventElapsedTime(&dt2, ct1, ct2);
    float time2 = dt2;
    std::cout << "Time GPU computing minimum value: " << time2 << "[ms]" << std::endl;

    // check for errors
    error = cudaGetLastError();
    if (error != cudaSuccess) {
        fprintf(stderr, "ERROR: %s \n", cudaGetErrorString(error));
    }

    // Write results to file
    ofstream times_file;
    times_file.open("results_tarea_4_2.txt", ios_base::app);
    times_file << input_file_name.c_str() << endl;
    times_file << "Time in charge kernel: " << dt << "[ms]" << endl;
    times_file << "Time in min kernel: " << dt2 << "[ms]" << endl;

    cudaMemcpy(y, out3, 4*sizeof(float), cudaMemcpyDeviceToHost);
    float min = INF;  // min load
    for (size_t i = 0; i < 4; i++) {
        min = (y[i] < min) ? y[i] : min;
    }
    cout << min << endl;

    //cudaFree(x_dev);
    //cudaFree(y_dev);
    cudaFree(d_cells);
    cudaFree(outData);
    cudaFree(out2);
    cudaFree(out3);
    free(cells);
    free(x_part);
    free(y_part);
    return 0;
}
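A quick way to sanity-check the three-stage GPU minimum above is a sequential scan over the same charge grid on the host. The sketch below is not part of the original file; the cpu_min_check helper is hypothetical and assumes cells already holds the WIDTH*LENGTH charges copied back from the device.

#include <cstdio>
#include <cfloat>

// Hypothetical helper: sequential minimum over the charge grid, used only to
// validate the result of the GPU minReduction stages.
static float cpu_min_check(const float *cells, int n) {
    float m = FLT_MAX;
    for (int i = 0; i < n; i++) {
        if (cells[i] < m) m = cells[i];
    }
    return m;
}

// Usage (after the cudaMemcpy of cells back to the host):
//   float ref = cpu_min_check(cells, WIDTH*LENGTH);
//   printf("CPU min: %f  GPU min: %f\n", ref, min);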
75e6a9cce934f6d8cc5e2137f174b551330d3d7c.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/common_win.h" #include <hip/hip_runtime.h> #include <stdio.h> int cpu_reduce(int *data, const int n) { if (n==1) return data[0]; const int stride = n / 2; for (int i=0;i<stride;i++) data[i] += data[i+stride]; return cpu_reduce(data, stride); } __global__ void nested_neighbored(int *gi, int *go,unsigned int n) { unsigned int tidx = threadIdx.x; unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; int *data = gi + blockIdx.x * blockDim.x; if (idx > n) return ; for (int i=1; i<blockDim.x;i*=2) { if(tidx % (2*i) == 0) data[tidx] += data[tidx + i]; __syncthreads(); } if(tidx == 0) go[blockIdx.x] = data[0]; } __global__ void nested_neighbored_less(int *gi, int *go, unsigned int n) { unsigned int tidx = threadIdx.x; unsigned int bidx = blockIdx.x; unsigned int idx = blockDim.x * bidx + tidx; int *data = gi + bidx * blockDim.x; if(idx > n) return ; for (int i=1; i<blockDim.x; i*=2) { int j = 2 * i * tidx; if (j < blockDim.x) data[j] += data[i+j]; __syncthreads(); } if(tidx == 0) go[bidx] = data[0]; } __global__ void reduce_leaved(int *gi, int *go, unsigned int n) { unsigned int tidx = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + tidx; int *data = gi + blockIdx.x * blockDim.x; if (idx > n) return ; for (int i = blockDim.x / 2; i > 0; i >>= 1) { if(tidx < i) data[tidx] += data[tidx + i]; __syncthreads(); } if(tidx==0) go[blockIdx.x] = data[0]; } __global__ void reduce_unrolling2(int *gi, int *go, unsigned int n) { unsigned int tidx = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 2 + tidx; int *data = gi + blockIdx.x * blockDim.x *2; if(idx > n) return ; for (int i = blockDim.x /2 ; i > 0; i>>=1) { if(tidx < i) data[tidx] += data[tidx + i]; __syncthreads(); } if (tidx == 0) go[blockIdx.x] = data[0]; } __global__ void reduce_unrolling4(int *gi, int *go, unsigned int n) { unsigned int tidx = threadIdx.x; unsigned int idx = blockDim.x * blockIdx.x * 4 + tidx; int *data = gi + blockIdx.x * blockDim.x * 4; if (idx + 3*blockDim.x < n) { int a1 = gi[idx]; int a2 = gi[idx + blockDim.x]; int a3 = gi[idx + blockDim.x * 2]; int a4 = gi[idx + blockDim.x * 3]; gi[idx] = a1 + a2 + a3 + a4; } __syncthreads(); for (int i = blockDim.x / 2; i > 0; i>>=1) { if (tidx < i) data[tidx] += data[tidx + i]; __syncthreads(); } if (tidx == 0) go[blockIdx.x] = data[0]; } __global__ void reduce_unrolling8(int *gi, int *go, unsigned int n) { unsigned int tidx = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + tidx; int *data = gi + blockDim.x * blockIdx.x * 8; if(idx + 7 * blockDim.x < n) { int a1 = gi[idx]; int a2 = gi[idx + blockDim.x]; int a3 = gi[idx + blockDim.x * 2]; int a4 = gi[idx + blockDim.x * 3]; int b1 = gi[idx + blockDim.x * 4]; int b2 = gi[idx + blockDim.x * 5]; int b3 = gi[idx + blockDim.x * 6]; int b4 = gi[idx + blockDim.x * 7]; gi[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 +b4; } __syncthreads(); for (int i = blockDim.x / 2; i > 0; i>>=1) { if(tidx < i) data[tidx] += data[tidx + i]; __syncthreads(); } if (tidx == 0) go[blockIdx.x] = data[0]; } __global__ void reduce_unrolling_warps8(int *gi, int *go, unsigned int n) { unsigned int tidx = threadIdx.x; unsigned int idx = blockDim.x * blockIdx.x * 8 + tidx; int *data = gi + blockIdx.x * blockDim.x * 8; if (idx + blockDim.x * 7 < n) { int a1 = gi[idx]; int a2 = gi[idx + blockDim.x]; int a3 = gi[idx + 2 * blockDim.x]; int a4 = gi[idx + 3 * blockDim.x]; int b1 = gi[idx + 4 * blockDim.x]; int b2 = gi[idx + 5 * blockDim.x]; int b3 = gi[idx + 6 * 
blockDim.x]; int b4 = gi[idx + 7 * blockDim.x]; gi[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) { if (tidx < stride) { data[tidx] += data[tidx + stride]; } __syncthreads(); } if (tidx < 32) { volatile int *vmem = data; vmem[tidx] = vmem[tidx + 32]; vmem[tidx] = vmem[tidx + 16]; vmem[tidx] = vmem[tidx + 8 ]; vmem[tidx] = vmem[tidx + 4 ]; vmem[tidx] = vmem[tidx + 2 ]; vmem[tidx] = vmem[tidx + 1 ]; } if (tidx == 0) { go[blockIdx.x] = data[0]; } } __global__ void reduce_complete_unrolling_warps8(int *gi, int *go, unsigned int n) { unsigned int tidx = threadIdx.x; unsigned idx = blockIdx.x * blockDim.x * 8 + tidx; int *data = gi + blockDim.x * blockIdx.x * 8; if (idx + blockDim.x * 7 < n) { int a1 = gi[idx]; int a2 = gi[idx + blockDim.x]; int a3 = gi[idx + 2 * blockDim.x]; int a4 = gi[idx + 3 * blockDim.x]; int b1 = gi[idx + 4 * blockDim.x]; int b2 = gi[idx + 5 * blockDim.x]; int b3 = gi[idx + 6 * blockDim.x]; int b4 = gi[idx + 7 * blockDim.x]; gi[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); if (blockDim.x >= 1024 && tidx < 512) data[tidx] += data[tidx + 512]; __syncthreads(); if (blockDim.x >= 512 && tidx < 256) data[tidx] += data[tidx + 256]; __syncthreads(); if (blockDim.x >= 256 && tidx < 128) data[tidx] += data[tidx + 128]; __syncthreads(); if (blockDim.x > 128 && tidx < 64) data[tidx] += data[tidx + 64]; __syncthreads(); if (tidx < 32) { volatile int *vsmem = data; vsmem[tidx] += vsmem[tidx + 32]; vsmem[tidx] += vsmem[tidx + 16]; vsmem[tidx] += vsmem[tidx + 8]; vsmem[tidx] += vsmem[tidx + 4]; vsmem[tidx] += vsmem[tidx + 2]; vsmem[tidx] += vsmem[tidx + 1]; } if (tidx == 0) go[blockIdx.x] = data[0]; } template <unsigned int iBlockSize> __global__ void reudce_complete_unroll(int *gi, int *go, unsigned int n) { unsigned int tid = threadIdx.x; unsigned int idx = blockDim.x*blockIdx.x*8 + tid; int *data = gi + blockIdx.x*blockDim.x*8; if (idx+7*blockDim.x <n) { int a1 = gi[idx]; int a2 = gi[idx + blockDim.x]; int a3 = gi[idx + 2 * blockDim.x]; int a4 = gi[idx + 3 * blockDim.x]; int b1 = gi[idx + 4 * blockDim.x]; int b2 = gi[idx + 5 * blockDim.x]; int b3 = gi[idx + 6 * blockDim.x]; int b4 = gi[idx + 7 * blockDim.x]; gi[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); if (iBlockSize >= 1024 && tid < 512) data[tid] += data[tid + 512]; __syncthreads(); if (iBlockSize >= 512 && tid < 256) data[tid] += data[tid + 256]; __syncthreads(); if (iBlockSize >= 256 && tid < 128) data[tid] += data[tid + 128]; __syncthreads(); if (iBlockSize >= 128 && tid < 64) data[tid] += data[tid + 64]; __syncthreads(); if(tid< 32) { volatile int *vsmem = data; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } if (tid == 0) go[blockIdx.x] = data[0]; } __global__ void reduceUnrollWarps (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 2; // unrolling 2 if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x]; __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // unrolling 
last warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } if (tid == 0) g_odata[blockIdx.x] = idata[0]; } int main(int argc, char **argv) { // set up device int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("%s starting reduction at ", argv[0]); printf("device %d: %s ", dev, deviceProp.name); CHECK(hipSetDevice(dev)); bool bResult = false; // initialization int size = 1 << 24; // total number of elements to reduce printf(" with array size %d ", size); // execution configuration int blocksize = 512; // initial block size if(argc > 1) { blocksize = atoi(argv[1]); // block size from command line argument } dim3 block (blocksize, 1); dim3 grid ((size + block.x - 1) / block.x, 1); printf("grid %d block %d\n", grid.x, block.x); // allocate host memory size_t bytes = size * sizeof(int); int *h_idata = (int *) malloc(bytes); int *h_odata = (int *) malloc(grid.x * sizeof(int)); int *tmp = (int *) malloc(bytes); // initialize the array for (int i = 0; i < size; i++) { // mask off high 2 bytes to force max number to 255 h_idata[i] = (int)( rand() & 0xFF ); } memcpy (tmp, h_idata, bytes); double iStart, iElaps; int gpu_sum = 0; // allocate device memory int *d_idata = NULL; int *d_odata = NULL; CHECK(hipMalloc((void **) &d_idata, bytes)); CHECK(hipMalloc((void **) &d_odata, grid.x * sizeof(int))); // cpu reduction iStart = seconds(); int cpu_sum = cpu_reduce (tmp, size); iElaps = seconds() - iStart; printf("cpu reduce elapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum); //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// // kernel 1: reduceNeighbored CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( nested_neighbored), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Neighbored elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 2: reduceNeighbored with less divergence CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( nested_neighbored_less), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Neighbored2 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 3: reduceInterleaved CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduce_leaved), dim3(grid), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < 
grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Interleaved elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 4: reduceUnrolling2 CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduce_unrolling2), dim3(grid.x / 2), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 2 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 2; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling2 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 2, block.x); // kernel 5: reduceUnrolling4 CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduce_unrolling4), dim3(grid.x / 4), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling4 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 4, block.x); // kernel 6: reduceUnrolling8 CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduce_unrolling8), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); for (int i = 0; i < grid.x / 16; i++) gpu_sum += h_odata[i]; // kernel 8: reduceUnrollWarps8 CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduce_unrolling_warps8), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu UnrollWarp8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // kernel 9: reudce_complete_unrollWarsp8 CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); hipLaunchKernelGGL(( reduce_complete_unrolling_warps8), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Cmptnroll8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // kernel 9: reudce_complete_unroll CHECK(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); CHECK(hipDeviceSynchronize()); iStart = seconds(); switch (blocksize) { case 1024: hipLaunchKernelGGL(( reudce_complete_unroll<1024>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); break; case 512: hipLaunchKernelGGL(( reudce_complete_unroll<512>), 
dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); break; case 256: hipLaunchKernelGGL(( reudce_complete_unroll<256>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); break; case 128: hipLaunchKernelGGL(( reudce_complete_unroll<128>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); break; case 64: hipLaunchKernelGGL(( reudce_complete_unroll<64>), dim3(grid.x / 8), dim3(block), 0, 0, d_idata, d_odata, size); break; } CHECK(hipDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(hipMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), hipMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Cmptnroll elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // free host memory free(h_idata); free(h_odata); // free device memory CHECK(hipFree(d_idata)); CHECK(hipFree(d_odata)); // reset device CHECK(hipDeviceReset()); // check the results bResult = (gpu_sum == cpu_sum); if(!bResult) printf("Test failed!\n"); return EXIT_SUCCESS; }
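One detail worth flagging in the file above: the final warp stage of reduce_unrolling_warps8 writes vmem[tidx] = vmem[tidx + 32] and so on, overwriting the partial sums instead of accumulating them, whereas the sibling kernels reduce_complete_unrolling_warps8 and reduceUnrollWarps use +=. A corrected sketch of just that tail, keeping the kernel's own variable names, would be:

    // Unrolled in-warp tail of reduce_unrolling_warps8: accumulate, do not overwrite.
    if (tidx < 32) {
        volatile int *vmem = data;   // volatile stops the compiler from caching loads in registers
        vmem[tidx] += vmem[tidx + 32];
        vmem[tidx] += vmem[tidx + 16];
        vmem[tidx] += vmem[tidx +  8];
        vmem[tidx] += vmem[tidx +  4];
        vmem[tidx] += vmem[tidx +  2];
        vmem[tidx] += vmem[tidx +  1];
    }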
75e6a9cce934f6d8cc5e2137f174b551330d3d7c.cu
#include "../common/common_win.h" #include <cuda_runtime.h> #include <stdio.h> int cpu_reduce(int *data, const int n) { if (n==1) return data[0]; const int stride = n / 2; for (int i=0;i<stride;i++) data[i] += data[i+stride]; return cpu_reduce(data, stride); } __global__ void nested_neighbored(int *gi, int *go,unsigned int n) { unsigned int tidx = threadIdx.x; unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; int *data = gi + blockIdx.x * blockDim.x; if (idx > n) return ; for (int i=1; i<blockDim.x;i*=2) { if(tidx % (2*i) == 0) data[tidx] += data[tidx + i]; __syncthreads(); } if(tidx == 0) go[blockIdx.x] = data[0]; } __global__ void nested_neighbored_less(int *gi, int *go, unsigned int n) { unsigned int tidx = threadIdx.x; unsigned int bidx = blockIdx.x; unsigned int idx = blockDim.x * bidx + tidx; int *data = gi + bidx * blockDim.x; if(idx > n) return ; for (int i=1; i<blockDim.x; i*=2) { int j = 2 * i * tidx; if (j < blockDim.x) data[j] += data[i+j]; __syncthreads(); } if(tidx == 0) go[bidx] = data[0]; } __global__ void reduce_leaved(int *gi, int *go, unsigned int n) { unsigned int tidx = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x + tidx; int *data = gi + blockIdx.x * blockDim.x; if (idx > n) return ; for (int i = blockDim.x / 2; i > 0; i >>= 1) { if(tidx < i) data[tidx] += data[tidx + i]; __syncthreads(); } if(tidx==0) go[blockIdx.x] = data[0]; } __global__ void reduce_unrolling2(int *gi, int *go, unsigned int n) { unsigned int tidx = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 2 + tidx; int *data = gi + blockIdx.x * blockDim.x *2; if(idx > n) return ; for (int i = blockDim.x /2 ; i > 0; i>>=1) { if(tidx < i) data[tidx] += data[tidx + i]; __syncthreads(); } if (tidx == 0) go[blockIdx.x] = data[0]; } __global__ void reduce_unrolling4(int *gi, int *go, unsigned int n) { unsigned int tidx = threadIdx.x; unsigned int idx = blockDim.x * blockIdx.x * 4 + tidx; int *data = gi + blockIdx.x * blockDim.x * 4; if (idx + 3*blockDim.x < n) { int a1 = gi[idx]; int a2 = gi[idx + blockDim.x]; int a3 = gi[idx + blockDim.x * 2]; int a4 = gi[idx + blockDim.x * 3]; gi[idx] = a1 + a2 + a3 + a4; } __syncthreads(); for (int i = blockDim.x / 2; i > 0; i>>=1) { if (tidx < i) data[tidx] += data[tidx + i]; __syncthreads(); } if (tidx == 0) go[blockIdx.x] = data[0]; } __global__ void reduce_unrolling8(int *gi, int *go, unsigned int n) { unsigned int tidx = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 8 + tidx; int *data = gi + blockDim.x * blockIdx.x * 8; if(idx + 7 * blockDim.x < n) { int a1 = gi[idx]; int a2 = gi[idx + blockDim.x]; int a3 = gi[idx + blockDim.x * 2]; int a4 = gi[idx + blockDim.x * 3]; int b1 = gi[idx + blockDim.x * 4]; int b2 = gi[idx + blockDim.x * 5]; int b3 = gi[idx + blockDim.x * 6]; int b4 = gi[idx + blockDim.x * 7]; gi[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 +b4; } __syncthreads(); for (int i = blockDim.x / 2; i > 0; i>>=1) { if(tidx < i) data[tidx] += data[tidx + i]; __syncthreads(); } if (tidx == 0) go[blockIdx.x] = data[0]; } __global__ void reduce_unrolling_warps8(int *gi, int *go, unsigned int n) { unsigned int tidx = threadIdx.x; unsigned int idx = blockDim.x * blockIdx.x * 8 + tidx; int *data = gi + blockIdx.x * blockDim.x * 8; if (idx + blockDim.x * 7 < n) { int a1 = gi[idx]; int a2 = gi[idx + blockDim.x]; int a3 = gi[idx + 2 * blockDim.x]; int a4 = gi[idx + 3 * blockDim.x]; int b1 = gi[idx + 4 * blockDim.x]; int b2 = gi[idx + 5 * blockDim.x]; int b3 = gi[idx + 6 * blockDim.x]; int b4 = gi[idx + 7 * blockDim.x]; gi[idx] = a1 + a2 + a3 
+ a4 + b1 + b2 + b3 + b4; } __syncthreads(); for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) { if (tidx < stride) { data[tidx] += data[tidx + stride]; } __syncthreads(); } if (tidx < 32) { volatile int *vmem = data; vmem[tidx] = vmem[tidx + 32]; vmem[tidx] = vmem[tidx + 16]; vmem[tidx] = vmem[tidx + 8 ]; vmem[tidx] = vmem[tidx + 4 ]; vmem[tidx] = vmem[tidx + 2 ]; vmem[tidx] = vmem[tidx + 1 ]; } if (tidx == 0) { go[blockIdx.x] = data[0]; } } __global__ void reduce_complete_unrolling_warps8(int *gi, int *go, unsigned int n) { unsigned int tidx = threadIdx.x; unsigned idx = blockIdx.x * blockDim.x * 8 + tidx; int *data = gi + blockDim.x * blockIdx.x * 8; if (idx + blockDim.x * 7 < n) { int a1 = gi[idx]; int a2 = gi[idx + blockDim.x]; int a3 = gi[idx + 2 * blockDim.x]; int a4 = gi[idx + 3 * blockDim.x]; int b1 = gi[idx + 4 * blockDim.x]; int b2 = gi[idx + 5 * blockDim.x]; int b3 = gi[idx + 6 * blockDim.x]; int b4 = gi[idx + 7 * blockDim.x]; gi[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); if (blockDim.x >= 1024 && tidx < 512) data[tidx] += data[tidx + 512]; __syncthreads(); if (blockDim.x >= 512 && tidx < 256) data[tidx] += data[tidx + 256]; __syncthreads(); if (blockDim.x >= 256 && tidx < 128) data[tidx] += data[tidx + 128]; __syncthreads(); if (blockDim.x > 128 && tidx < 64) data[tidx] += data[tidx + 64]; __syncthreads(); if (tidx < 32) { volatile int *vsmem = data; vsmem[tidx] += vsmem[tidx + 32]; vsmem[tidx] += vsmem[tidx + 16]; vsmem[tidx] += vsmem[tidx + 8]; vsmem[tidx] += vsmem[tidx + 4]; vsmem[tidx] += vsmem[tidx + 2]; vsmem[tidx] += vsmem[tidx + 1]; } if (tidx == 0) go[blockIdx.x] = data[0]; } template <unsigned int iBlockSize> __global__ void reudce_complete_unroll(int *gi, int *go, unsigned int n) { unsigned int tid = threadIdx.x; unsigned int idx = blockDim.x*blockIdx.x*8 + tid; int *data = gi + blockIdx.x*blockDim.x*8; if (idx+7*blockDim.x <n) { int a1 = gi[idx]; int a2 = gi[idx + blockDim.x]; int a3 = gi[idx + 2 * blockDim.x]; int a4 = gi[idx + 3 * blockDim.x]; int b1 = gi[idx + 4 * blockDim.x]; int b2 = gi[idx + 5 * blockDim.x]; int b3 = gi[idx + 6 * blockDim.x]; int b4 = gi[idx + 7 * blockDim.x]; gi[idx] = a1 + a2 + a3 + a4 + b1 + b2 + b3 + b4; } __syncthreads(); if (iBlockSize >= 1024 && tid < 512) data[tid] += data[tid + 512]; __syncthreads(); if (iBlockSize >= 512 && tid < 256) data[tid] += data[tid + 256]; __syncthreads(); if (iBlockSize >= 256 && tid < 128) data[tid] += data[tid + 128]; __syncthreads(); if (iBlockSize >= 128 && tid < 64) data[tid] += data[tid + 64]; __syncthreads(); if(tid< 32) { volatile int *vsmem = data; vsmem[tid] += vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } if (tid == 0) go[blockIdx.x] = data[0]; } __global__ void reduceUnrollWarps (int *g_idata, int *g_odata, unsigned int n) { // set thread ID unsigned int tid = threadIdx.x; unsigned int idx = blockIdx.x * blockDim.x * 2 + threadIdx.x; // convert global data pointer to the local pointer of this block int *idata = g_idata + blockIdx.x * blockDim.x * 2; // unrolling 2 if (idx + blockDim.x < n) g_idata[idx] += g_idata[idx + blockDim.x]; __syncthreads(); // in-place reduction in global memory for (int stride = blockDim.x / 2; stride > 32; stride >>= 1) { if (tid < stride) { idata[tid] += idata[tid + stride]; } // synchronize within threadblock __syncthreads(); } // unrolling last warp if (tid < 32) { volatile int *vsmem = idata; vsmem[tid] += 
vsmem[tid + 32]; vsmem[tid] += vsmem[tid + 16]; vsmem[tid] += vsmem[tid + 8]; vsmem[tid] += vsmem[tid + 4]; vsmem[tid] += vsmem[tid + 2]; vsmem[tid] += vsmem[tid + 1]; } if (tid == 0) g_odata[blockIdx.x] = idata[0]; } int main(int argc, char **argv) { // set up device int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("%s starting reduction at ", argv[0]); printf("device %d: %s ", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); bool bResult = false; // initialization int size = 1 << 24; // total number of elements to reduce printf(" with array size %d ", size); // execution configuration int blocksize = 512; // initial block size if(argc > 1) { blocksize = atoi(argv[1]); // block size from command line argument } dim3 block (blocksize, 1); dim3 grid ((size + block.x - 1) / block.x, 1); printf("grid %d block %d\n", grid.x, block.x); // allocate host memory size_t bytes = size * sizeof(int); int *h_idata = (int *) malloc(bytes); int *h_odata = (int *) malloc(grid.x * sizeof(int)); int *tmp = (int *) malloc(bytes); // initialize the array for (int i = 0; i < size; i++) { // mask off high 2 bytes to force max number to 255 h_idata[i] = (int)( rand() & 0xFF ); } memcpy (tmp, h_idata, bytes); double iStart, iElaps; int gpu_sum = 0; // allocate device memory int *d_idata = NULL; int *d_odata = NULL; CHECK(cudaMalloc((void **) &d_idata, bytes)); CHECK(cudaMalloc((void **) &d_odata, grid.x * sizeof(int))); // cpu reduction iStart = seconds(); int cpu_sum = cpu_reduce (tmp, size); iElaps = seconds() - iStart; printf("cpu reduce elapsed %f sec cpu_sum: %d\n", iElaps, cpu_sum); //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// // kernel 1: reduceNeighbored CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); nested_neighbored<<<grid, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Neighbored elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 2: reduceNeighbored with less divergence CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); nested_neighbored_less<<<grid, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Neighbored2 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // kernel 3: reduceInterleaved CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduce_leaved<<<grid, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x; i++) gpu_sum += h_odata[i]; printf("gpu Interleaved elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x, block.x); // 
kernel 4: reduceUnrolling2 CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduce_unrolling2<<<grid.x / 2, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 2 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 2; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling2 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 2, block.x); // kernel 5: reduceUnrolling4 CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduce_unrolling4<<<grid.x / 4, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 4 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 4; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling4 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 4, block.x); // kernel 6: reduceUnrolling8 CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduce_unrolling8<<<grid.x / 8, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Unrolling8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); for (int i = 0; i < grid.x / 16; i++) gpu_sum += h_odata[i]; // kernel 8: reduceUnrollWarps8 CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduce_unrolling_warps8<<<grid.x / 8, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu UnrollWarp8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // kernel 9: reudce_complete_unrollWarsp8 CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); reduce_complete_unrolling_warps8<<<grid.x / 8, block>>>(d_idata, d_odata, size); CHECK(cudaDeviceSynchronize()); iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Cmptnroll8 elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // kernel 9: reudce_complete_unroll CHECK(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); CHECK(cudaDeviceSynchronize()); iStart = seconds(); switch (blocksize) { case 1024: reudce_complete_unroll<1024><<<grid.x / 8, block>>>(d_idata, d_odata, size); break; case 512: reudce_complete_unroll<512><<<grid.x / 8, block>>>(d_idata, d_odata, size); break; case 256: reudce_complete_unroll<256><<<grid.x / 8, block>>>(d_idata, d_odata, size); break; case 128: reudce_complete_unroll<128><<<grid.x / 8, block>>>(d_idata, d_odata, size); break; case 64: reudce_complete_unroll<64><<<grid.x / 8, block>>>(d_idata, d_odata, size); break; } CHECK(cudaDeviceSynchronize()); 
iElaps = seconds() - iStart; CHECK(cudaMemcpy(h_odata, d_odata, grid.x / 8 * sizeof(int), cudaMemcpyDeviceToHost)); gpu_sum = 0; for (int i = 0; i < grid.x / 8; i++) gpu_sum += h_odata[i]; printf("gpu Cmptnroll elapsed %f sec gpu_sum: %d <<<grid %d block " "%d>>>\n", iElaps, gpu_sum, grid.x / 8, block.x); // free host memory free(h_idata); free(h_odata); // free device memory CHECK(cudaFree(d_idata)); CHECK(cudaFree(d_odata)); // reset device CHECK(cudaDeviceReset()); // check the results bResult = (gpu_sum == cpu_sum); if(!bResult) printf("Test failed!\n"); return EXIT_SUCCESS; }
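The .cu file above mirrors the hipified version exactly. As a point of comparison only (this is not part of the original sources), the final-warp step used by these kernels can also be expressed with warp shuffle intrinsics instead of volatile loads; a minimal sketch assuming CUDA 9+ and a full 32-thread warp:

__inline__ __device__ int warp_reduce_sum(int val) {
    // Each step halves the number of lanes still contributing a partial sum.
    for (int offset = 16; offset > 0; offset >>= 1)
        val += __shfl_down_sync(0xffffffff, val, offset);
    return val;   // lane 0 ends up holding the warp-wide sum
}

// Inside one of the reduction kernels, once the strided loop has left 64 values:
//   if (tidx < 32) {
//       int val = data[tidx] + data[tidx + 32];
//       val = warp_reduce_sum(val);
//       if (tidx == 0) go[blockIdx.x] = val;
//   }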
7aaab49c0503ab61af3d60cb74b3a61545e39866.hip
// !!! This is a file automatically generated by hipify!!! #include <cmath> #include <cstdlib> #include <cstdio> #include <iostream> #include <fstream> #include <string> #include <iomanip> #include <ctime> #include <sstream> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "LatticeBoltzmann.cuh" #include "ImmersedBoundary.cuh" #include "seconds.h" using namespace std; //------------------------------------------PHYSICAL CONSTANTS---------------------------- #define C_S 0.577 //SPEED OF SOUND ON LATTICE #define RHO_0 1. //FLUID DENSITY #define PI 3.14159 //PI //-------------------------------------------PARAMETER SCALING---------------------------- double l_0 = 0.000006; //6 MICRON CILIUM LENGTH double t_0 = 0.067; //67ms BEAT PERIOD AT 15Hz __constant__ double A_mn[7 * 2 * 3] = { -0.449, 0.130, -0.169, 0.063, -0.050, -0.040, -0.068, 2.076, -0.003, 0.054, 0.007, 0.026, 0.022, 0.010, -0.072, -1.502, 0.260, -0.123, 0.011, -0.009, 0.196, -1.074, -0.230, -0.305, -0.180, -0.069, 0.001, -0.080, 0.658, 0.793, -0.251, 0.049, 0.009, 0.023, -0.111, 0.381, 0.331, 0.193, 0.082, 0.029, 0.002, 0.048 }; __constant__ double B_mn[7 * 2 * 3] = { 0.0, -0.030, -0.093, 0.037, 0.062, 0.016, -0.065, 0.0, 0.080, -0.044, -0.017, 0.052, 0.007, 0.051, 0.0, 1.285, -0.036, -0.244, -0.093, -0.137, 0.095, 0.0, -0.298, 0.513, 0.004, -0.222, 0.035, -0.128, 0.0, -1.034, 0.050, 0.143, 0.043, 0.098, -0.054, 0.0, 0.210, -0.367, 0.009, 0.120, -0.024, 0.102 }; __global__ void define_filament(const int T, const int it, const double c_space, const int p_step, const double c_num, double * s, double * lasts, double * b_points) { int n(0), j(0); double arcl(0.); int phase(0.); double b_length(0.); double a_n[2 * 7]; double b_n[2 * 7]; int threadnum = blockDim.x*blockIdx.x + threadIdx.x; int k = threadnum % 10000; int m = (threadnum - k) / 10000; { arcl = 1.*k / 10000; if (it + m*p_step == T) phase = T; else phase = (it + m*p_step) % T; double offset = 1.*(m - (c_num - 1) / 2.)*c_space; for (n = 0; n < 7; n++) { a_n[2 * n + 0] = 0.; b_n[2 * n + 0] = 0.; a_n[2 * n + 0] += A_mn[n + 14 * 0 + 7 * 0] * pow(arcl, 0 + 1); b_n[2 * n + 0] += B_mn[n + 14 * 0 + 7 * 0] * pow(arcl, 0 + 1); a_n[2 * n + 0] += A_mn[n + 14 * 1 + 7 * 0] * pow(arcl, 1 + 1); b_n[2 * n + 0] += B_mn[n + 14 * 1 + 7 * 0] * pow(arcl, 1 + 1); a_n[2 * n + 0] += A_mn[n + 14 * 2 + 7 * 0] * pow(arcl, 2 + 1); b_n[2 * n + 0] += B_mn[n + 14 * 2 + 7 * 0] * pow(arcl, 2 + 1); a_n[2 * n + 1] = 0.; b_n[2 * n + 1] = 0.; a_n[2 * n + 1] += A_mn[n + 14 * 0 + 7 * 1] * pow(arcl, 0 + 1); b_n[2 * n + 1] += B_mn[n + 14 * 0 + 7 * 1] * pow(arcl, 0 + 1); a_n[2 * n + 1] += A_mn[n + 14 * 1 + 7 * 1] * pow(arcl, 1 + 1); b_n[2 * n + 1] += B_mn[n + 14 * 1 + 7 * 1] * pow(arcl, 1 + 1); a_n[2 * n + 1] += A_mn[n + 14 * 2 + 7 * 1] * pow(arcl, 2 + 1); b_n[2 * n + 1] += B_mn[n + 14 * 2 + 7 * 1] * pow(arcl, 2 + 1); } s[5 * (k + m * 10000) + 0] = 1. * 115 * a_n[2 * 0 + 0] * 0.5 + offset; s[5 * (k + m * 10000) + 1] = 1. * 115 * a_n[2 * 0 + 1] * 0.5; s[5 * (k + m * 10000) + 2] = 115 * arcl; for (n = 1; n < 7; n++) { s[5 * (k + m * 10000) + 0] += 1. * 115 * (a_n[2 * n + 0] * cos(n*2.*PI*phase / T) + b_n[2 * n + 0] * sin(n*2.*PI*phase / T)); s[5 * (k + m * 10000) + 1] += 1. 
* 115 * (a_n[2 * n + 1] * cos(n*2.*PI*phase / T) + b_n[2 * n + 1] * sin(n*2.*PI*phase / T)); } if (it > 0) { s[5 * (k + m * 10000) + 3] = s[5 * (k + m * 10000) + 0] - lasts[2 * (k + m * 10000) + 0]; s[5 * (k + m * 10000) + 4] = s[5 * (k + m * 10000) + 1] - lasts[2 * (k + m * 10000) + 1]; } lasts[2 * (k + m * 10000) + 0] = s[5 * (k + m * 10000) + 0]; lasts[2 * (k + m * 10000) + 1] = s[5 * (k + m * 10000) + 1]; } for (j = m*100 ; j < (m + 1)*100; j++) { b_length = j%100; if (abs(s[5 * (k + m * 10000) + 2] - b_length) < 0.01) { b_points[5 * j + 0] = s[5 * (k + m * 10000) + 0]; b_points[5 * j + 1] = s[5 * (k + m * 10000) + 1]; b_points[5 * j + 2] = s[5 * (k + m * 10000) + 3]; b_points[5 * j + 3] = s[5 * (k + m * 10000) + 4]; } } } void boundary_check(const int m, const double c_space, const int c_num, const int L, const double * s, int * epsilon) { int r(0), k(0), l(0); int b_cross = 0; int lowest = 0; bool xclose = 0; bool yclose = 0; int r_max = 2 * L / c_space; double x_m(0.), y_m(0.), x_l(0.), y_l(0.); for (r = 1; r <= r_max; r++) { b_cross = 2 * L - r*c_space; if (b_cross > L) lowest = 0; else lowest = L - b_cross; for (k = lowest; k < L; k++) { x_m = s[2 * (k + m * 100) + 0]; y_m = s[2 * (k + m * 100) + 1]; for (l = lowest; l < L; l++) { xclose = 0; yclose = 0; if (m-r < 0) { x_l = s[2 * (l + (m - r + c_num) * 100) + 0]; y_l = s[2 * (l + (m - r + c_num) * 100) + 1]; } else { x_l = s[2 * (l + (m - r) * 100) + 0]; y_l = s[2 * (l + (m - r) * 100) + 1]; } if (abs(x_l - x_m) < 1) xclose = 1; if (abs(y_l - y_m) < 1) yclose = 1; if (xclose && yclose) epsilon[(k + m * 100)] = 0; } } } } double free_space(const int XDIM, const int c_num, const int L, const double * b_points, const int level) { int cilium_p = 0; int cilium_m = 0; double space(0.); cilium_p = (0 + level) * L; cilium_m = (c_num - level) * L; for (int i = 0; i < L; i++) { space += 1.*(b_points[5*(cilium_p + i) + 0] - (b_points[5*(cilium_m + i) + 0] - XDIM))*(i*1./ L) / L / (b_points[5 * (cilium_p) + 0] - (b_points[5 * (cilium_m) + 0] - XDIM)); } return space; } int main(int argc, char * argv[]) { //----------------------------INITIALISING---------------------------- unsigned int c_fraction = 1; unsigned int c_num = 6; double Re = 1.0; unsigned int XDIM = 300; unsigned int YDIM = 200; unsigned int T = 100000; unsigned int ITERATIONS = T; unsigned int INTERVAL = 100; unsigned int LENGTH = 100; unsigned int c_space = 50; bool ShARC = 0; bool BigData = 0; if (argc < 10) { cout << "Too few arguments! " << argc - 1 << " entered of 9 required. " << endl; return 1; } stringstream arg; arg << argv[1] << ' ' << argv[2] << ' ' << argv[3] << ' ' << argv[4] << ' ' << argv[5] << ' ' << argv[6] << ' ' << argv[7] << ' ' << argv[8] << ' ' << argv[9]; arg >> c_fraction >> c_num >> c_space >> Re >> T >> ITERATIONS >> INTERVAL >> ShARC >> BigData ; XDIM = c_num*c_space; if (XDIM <= 2 * LENGTH) { cout << "not enough cilia in simulation! cilia spacing of " << c_space << "requires atleast " << 2 * LENGTH / c_space << " cilia" << endl; return 1; } const double centre[2] = { XDIM / 2., 0. }; double dx = 1. / LENGTH; double dt = 1. / (T); double SPEED = 0.8*1000/T; double t_scale = 1000.*dt*t_0; //milliseconds double x_scale = 1000000. * dx*l_0; //microns double s_scale = 1000.*x_scale / t_scale; //millimetres per second const double TAU = (SPEED*LENGTH) / (Re*C_S*C_S) + 1. / 2.; const double TAU2 = 1. / (12.*(TAU - (1. / 2.))) + (1. 
/ 2.); time_t rawtime; struct tm * timeinfo; time(&rawtime); timeinfo = localtime(&rawtime); cout << asctime(timeinfo) << endl; cout << "Initialising...\n"; unsigned int i(0), j(0), k(0), m(0); unsigned int it(0); //int phase(0); int p_step = T * c_fraction / c_num; //double offset = 0.; double * lasts; lasts = new double[2 * c_num * 10000]; double * boundary; boundary = new double[5 * c_num * 10000]; int Np = 100 * c_num; double * b_points; b_points = new double[5 * Np]; const int size = XDIM*YDIM; for (k = 0; k < c_num*10000; k++) { boundary[5 * k + 0] = 0.; boundary[5 * k + 1] = 0.; boundary[5 * k + 2] = 0.; boundary[5 * k + 3] = 0.; boundary[5 * k + 4] = 0.; lasts[2 * k + 0] = 0.; lasts[2 * k + 1] = 0.; } //-------------------------------CUDA PARAMETERS DEFINITION----------------------- int blocksize = 500; int gridsize = size / blocksize; int blocksize2 = c_num*LENGTH; int gridsize2 = 1; if (blocksize2 > 1000) { for (blocksize2 = 1000; blocksize2 > 0; blocksize2 -= LENGTH) { if ((c_num*LENGTH) % blocksize2 == 0) { gridsize2 = (c_num*LENGTH) / blocksize2; break; } } } int blocksize3 = 500; int gridsize3 = 20 * c_num; hipError_t cudaStatus; double Q = 0.; double W = 0.; double f_space_1 = 0.; double f_space_2 = 0.; bool done = 0; if(ShARC) cudaStatus = hipSetDevice(3); else cudaStatus = hipSetDevice(0); if (cudaStatus != hipSuccess) { fprintf(stderr, "Failed to set CUDA device.\n"); } //------------------------------------------ERROR------------------------------------------------ //double l_error = (l_0*dx)*(l_0*dx); //double t_error = (t_0*dt)*(t_0*dt); //double c_error = (t_0*dt)*(t_0*dt) / ((l_0*dx)*(l_0*dx)); double Ma = 1.*SPEED / C_S; time_t p_runtime; //-------------------------------------------ASSIGN CELL VALUES ON HEAP----------------------------- double * u; //VELOCITY VECTOR u = new double[2 * size]; double * rho; //DENSITY rho = new double[size]; double * f0; //EQUILIBRIUM DISTRIBUTION FUNCTION f0 = new double[9 * size]; double * f; //DISTRIBUTION FUNCTION f = new double[9 * size]; double * f1; //POST COLLISION DISTRIBUTION FUNCTION f1 = new double[9 * size]; double * force; //MACROSCOPIC BODY FORCE VECTOR force = new double[2 * size]; double * F; //LATTICE BOLTZMANN FORCE F = new double[9 * size]; unsigned int Ns = LENGTH * c_num; //NUMBER OF BOUNDARY POINTS double * s; //BOUNDARY POINTS double * u_s; //BOUNDARY POINT VELOCITY double * F_s; //BOUNDARY FORCE int * epsilon; s = new double[2 * Ns]; u_s = new double[2 * Ns]; F_s = new double[2 * Ns]; epsilon = new int[Ns]; for (k = 0; k < Ns; k++) { epsilon[k] = 1; } //----------------------------------------CREATE DEVICE VARIABLES----------------------------- double * d_u; //VELOCITY VECTOR double * d_rho; //DENSITY double * d_f0; //EQUILIBRIUM DISTRIBUTION FUNCTION double * d_f; //DISTRIBUTION FUNCTION double * d_f1; //POST COLLISION DISTRIBUTION FUNCTION double * d_centre; double * d_force; double * d_F; double * d_F_s; double * d_s; double * d_u_s; int * d_epsilon; double * d_Q; double * d_lasts; double * d_boundary; double * d_b_points; //---------------------------CUDA MALLOC------------------------------------------------------------- { cudaStatus = hipMalloc((void**)&d_u, 2 * size * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); } cudaStatus = hipMalloc((void**)&d_rho, size * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); } cudaStatus = hipMalloc((void**)&d_f0, 9 * size * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, 
"hipMalloc failed!"); } cudaStatus = hipMalloc((void**)&d_f, 9 * size * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); } cudaStatus = hipMalloc((void**)&d_f1, 9 * size * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed,"); } cudaStatus = hipMalloc((void**)&d_centre, 2 * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); } cudaStatus = hipMalloc((void**)&d_force, 2 * size * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); } cudaStatus = hipMalloc((void**)&d_F, 9 * size * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); } cudaStatus = hipMalloc((void**)&d_Q, sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc failed!"); } } { cudaStatus = hipMalloc((void**)&d_F_s, 2 * Ns * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc of F_s failed!\n"); } cudaStatus = hipMalloc((void**)&d_s, 2 * Ns * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc of s failed!\n"); } cudaStatus = hipMalloc((void**)&d_u_s, 2 * Ns * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc of u_s failed!\n"); } cudaStatus = hipMalloc((void**)&d_epsilon, Ns * sizeof(int)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc of epsilon failed!\n"); } cudaStatus = hipMalloc((void**)&d_lasts, 2 * c_num * 10000 * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc of u_s failed!\n"); } cudaStatus = hipMalloc((void**)&d_boundary, 5 * c_num * 10000 * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc of u_s failed!\n"); } cudaStatus = hipMalloc((void**)&d_b_points, 5 * Np * sizeof(double)); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMalloc of u_s failed!\n"); } } //----------------------------------------DEFINE DIRECTORIES---------------------------------- string output_data = "Data/Test/"; if(ShARC) output_data = "/shared/soft_matter_physics2/User/Phq16ja/ShARC_Data/"; else output_data = "C:/Users/phq16ja/Documents/Data/"; //output_data = "//uosfstore.shef.ac.uk/shared/soft_matter_physics2/User/Phq16ja/Local_Data/"; string raw_data = output_data + "Raw/"; raw_data += to_string(c_num); raw_data += "/"; string cilia_data = output_data + "Cilia/"; cilia_data += to_string(c_num); cilia_data += "/"; string img_data = output_data + "Img/"; img_data += to_string(c_num); img_data += "/"; string outfile = cilia_data; //----------------------------------------BOUNDARY INITIALISATION------------------------------------------------ string flux = output_data + "/Flux/" + to_string(c_fraction) + "_" + to_string(c_num) + "_" + to_string(c_space) + "-flux.dat"; string fspace = output_data + "/Flux/free_space.dat"; string parameters = raw_data + "/SimLog.txt"; string input = "Data/cilium/"; input += to_string(c_num); input += "/"; ofstream fsA(input.c_str()); ofstream fsB(flux.c_str()); ofstream fsC(parameters.c_str()); ofstream fsD; fsB.open(flux.c_str(), ofstream::trunc); fsB.close(); fsC.open(parameters.c_str(), ofstream::trunc); fsC.close(); //----------------------------------------INITIALISE ALL CELL VALUES--------------------------------------- for (j = 0; j < XDIM*YDIM; j++) { rho[j] = RHO_0; u[2 * j + 0] = 0.0; u[2 * j + 1] = 0.0; force[2 * j + 0] = 0.; force[2 * j + 1] = 0.; for (i = 0; i < 9; i++) { f0[9 * j + i] = 0.; f[9 * j + i] = 0.; f1[9 * j + i] = 0.; F[9 * j + i] = 0.; } } 
//------------------------------------------------------COPY INITIAL VALUES TO DEVICE----------------------------------------------------------- //CUDA MEMORY COPIES { cudaStatus = hipMemcpy(d_u, u, 2 * size * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } cudaStatus = hipMemcpy(d_rho, rho, size * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } cudaStatus = hipMemcpy(d_f0, f0, 9 * size * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } cudaStatus = hipMemcpy(d_f, f, 9 * size * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } cudaStatus = hipMemcpy(d_f1, f1, 9 * size * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } cudaStatus = hipMemcpy(d_centre, centre, 2 * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } cudaStatus = hipMemcpy(d_force, force, 2 * size * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } cudaStatus = hipMemcpy(d_F, F, 9 * size * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } cudaStatus = hipMemcpy(d_lasts, lasts, 2 * c_num * 10000 * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy of lasts failed!"); } cudaStatus = hipMemcpy(d_boundary, boundary, 5 * c_num * 10000 * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy of boundary failed!"); } cudaStatus = hipMemcpy(d_Q, &Q, sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } } //------------------------------------------------------SET INITIAL DISTRIBUTION TO EQUILIBRIUM------------------------------------------------- equilibrium << <gridsize, blocksize >> > (d_u, d_rho, d_f0, d_force, d_F, XDIM, YDIM, TAU); //INITIAL EQUILIBRIUM SET { // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "first equilibrium launch failed: %s\n", hipGetErrorString(cudaStatus)); } cudaStatus = hipMemcpy(f0, d_f0, 9 * size * sizeof(double), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } cudaStatus = hipMemcpy(F, d_F, 9 * size * sizeof(double), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy failed!"); } } for (j = 0; j < XDIM*YDIM; j++) { for (i = 0; i < 9; i++) { f[9 * j + i] = f0[9 * j + i]; } } cudaStatus = hipMemcpy(d_f, f, 9 * size * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy of f failed!\n"); } //-----------------------------------------------------OUTPUT PARAMETERS------------------------------------------------------------------------ fsC.open(parameters.c_str(), ofstream::trunc); fsC.close(); fsC.open(parameters.c_str(), ofstream::app); fsC << asctime(timeinfo) << endl; fsC << "Size: " << XDIM << "x" << YDIM << endl; fsC << "Iterations: " << ITERATIONS << endl; fsC << "Reynolds Number: " << Re << endl; fsC << "Relaxation times: " << TAU << ", " << TAU2 << endl; //if (TAU <= 0.6) fsC << "POSSIBLE INSTABILITY! 
Relaxation time: " << TAU << endl; //if (TAU >= 2.01) fsC << "POSSIBLE INACCURACY! Relaxation time: " << TAU << endl; fsC << "Spatial step: " << dx*l_0 << "m" << endl; fsC << "Time step: " << dt*t_0 << "s" << endl; fsC << "Mach number: " << Ma << endl; //fsC << "Spatial discretisation error: " << l_error << endl; //fsC << "Time discretisation error: " << t_error << endl; //fsC << "Compressibility error: " << c_error << endl; fsC << "Phase Step: " << c_fraction << "/" << c_num << endl; //fsC << "\nThreads per block: " << blocksize << endl; //fsC << "Blocks: " << gridsize << endl; if (BigData) fsC << "\nBig Data is ON" << endl; else fsC << "\nBig Data is OFF" << endl; if (ShARC) fsC << "Running on ShARC" << endl; else fsC << "Running on local GPU" << endl; //--------------------------ITERATION LOOP----------------------------- cout << "Running Simulation...\n"; time_t start = seconds(); for (it = 0; it < ITERATIONS; it++) { //--------------------------CILIA BEAT DEFINITION------------------------- define_filament << <gridsize3, blocksize3 >> > (T, it, c_space, p_step, c_num, d_boundary, d_lasts, d_b_points); cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "define_filament failed: %s\n", hipGetErrorString(cudaStatus)); } cudaStatus = hipMemcpy(b_points, d_b_points, 5 * Np * sizeof(double), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy of b_points failed!\n"); } f_space_1 = free_space(XDIM, c_num, LENGTH, b_points, 1); f_space_2 = free_space(XDIM, c_num, LENGTH, b_points, 2); if (1.*it / ITERATIONS > 0.166 && !done) { fsD.open(fspace.c_str(), ofstream::app); fsD << c_fraction *1./ c_num << "\t" << f_space_1 << "\t" << f_space_2 << endl; fsD.close(); done = 1; } for (j = 0; j < c_num*LENGTH; j++) { k = j; s[2 * k + 0] = (c_space*c_num) / 2. 
+ b_points[5 * j + 0]; if (s[2 * k + 0] < 0) s[2 * k + 0] += XDIM; else if (s[2 * k + 0] > XDIM) s[2 * k + 0] -= XDIM; s[2 * k + 1] = b_points[5 * j + 1] + 1; if (it == 0) { u_s[2 * k + 0] = 0.; u_s[2 * k + 1] = 0.; } else { u_s[2 * k + 0] = b_points[5 * j + 2]; u_s[2 * k + 1] = b_points[5 * j + 3]; } epsilon[k] = 1; } for (m = 0; m < c_num; m++) { boundary_check(m, c_space, c_num, LENGTH, s, epsilon); } //---------------------------CILIUM COPY---------------------------------------- { cudaStatus = hipMemcpy(d_epsilon, epsilon, Ns * sizeof(int), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy of epsilon failed!\n"); } cudaStatus = hipMemcpy(d_s, s, 2 * Ns * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy of s failed!\n"); } cudaStatus = hipMemcpy(d_u_s, u_s, 2 * Ns * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy of u_s failed!\n"); } cudaStatus = hipMemcpy(d_F_s, F_s, 2 * Ns * sizeof(double), hipMemcpyHostToDevice); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy of F_s failed!\n"); } } //---------------------------IMMERSED BOUNDARY LATTICE BOLTZMANN STEPS------------------- equilibrium << <gridsize, blocksize >> > (d_u, d_rho, d_f0, d_force, d_F, XDIM, YDIM, TAU); //EQUILIBRIUM STEP { // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "equilibrium launch failed: %s\n", hipGetErrorString(cudaStatus)); } } collision << <gridsize, blocksize >> > (d_f0, d_f, d_f1, d_F, TAU, TAU2, XDIM, YDIM, it); //COLLISION STEP { // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "collision launch failed: %s\n", hipGetErrorString(cudaStatus)); } } streaming << <gridsize, blocksize >> > (d_f1, d_f, XDIM, YDIM); //STREAMING STEP { // Check for any errors launching the kernel cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "collision launch failed: %s\n", hipGetErrorString(cudaStatus)); } } macro << <gridsize, blocksize >> > (d_f, d_u, d_rho, XDIM, YDIM); //MACRO STEP { cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "collision launch failed: %s\n", hipGetErrorString(cudaStatus)); } } interpolate << <gridsize2, blocksize2 >> > (d_rho, d_u, Ns, d_u_s, d_F_s, d_s, XDIM); //IB INTERPOLATION STEP { cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "interpolate launch failed: %s\n", hipGetErrorString(cudaStatus)); } } spread << <gridsize, blocksize >> > (d_rho, d_u, d_f, Ns, d_u_s, d_F_s, d_force, d_s, XDIM, d_Q, d_epsilon); //IB SPREADING STEP { cudaStatus = hipGetLastError(); if (cudaStatus != hipSuccess) { fprintf(stderr, "spread launch failed: %s\n", hipGetErrorString(cudaStatus)); //cout << it << endl; //system("pause"); return 1; } cudaStatus = hipMemcpy(rho, d_rho, size * sizeof(double), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy of rho failed!\n"); } cudaStatus = hipMemcpy(u, d_u, 2 * size * sizeof(double), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy of u failed!\n"); } cudaStatus = hipMemcpy(&Q, d_Q, sizeof(double), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { fprintf(stderr, "hipMemcpy of u failed!\n"); } cudaStatus = hipMemcpy(F_s, d_F_s, 2 * Ns * sizeof(double), hipMemcpyDeviceToHost); if (cudaStatus != hipSuccess) { 
fprintf(stderr, "hipMemcpy of rho failed!\n"); } } //----------------------------DATA OUTPUT------------------------------ for (j = 0; j < c_num*LENGTH; j++) { W += abs(F_s[2 * j + 0]) * u_s[2 * j + 0]/c_num/LENGTH; //W += u_s[2 * j + 0]* u_s[2 * j + 0]*(u_s[2 * j + 0]/abs(u_s[2 * j + 0])); } if (it % INTERVAL == 0) { if (BigData) { outfile = raw_data + to_string(it) + "-fluid.dat"; fsA.open(outfile.c_str()); for (j = 0; j < XDIM*YDIM; j++) { int x = j%XDIM; int y = (j - j%XDIM) / XDIM; double ab = sqrt(u[2 * j + 0] * u[2 * j + 0] + u[2 * j + 1] * u[2 * j + 1]); fsA << x*x_scale << "\t" << y*x_scale << "\t" << u[2 * j + 0]*s_scale << "\t" << u[2 * j + 1]*s_scale << "\t" << ab*s_scale << "\t" << rho[j] << endl; if (x == XDIM - 1) fsA << endl; } fsA.close(); outfile = cilia_data + to_string(it) + "-cilia.dat"; fsA.open(outfile.c_str()); for (k = 0; k < Ns; k++) { fsA << s[2 * k + 0]*x_scale << "\t" << s[2 * k + 1]*x_scale << "\t" << u_s[2 * k + 0]*s_scale << "\t" << u_s[2 * k + 1]*s_scale << "\t" << epsilon[k] << "\n"; //LOOP FOR Np if (k % 100 == 99 || s[2 * k + 0] > XDIM - 1 || s[2 * k + 0] < 1) fsA << "\n"; } fsA.close(); } fsB.open(flux.c_str(), ofstream::app); fsB << it*t_scale << "\t" << Q * x_scale << "\t" << f_space_1 << "\t" << f_space_2 << "\t" << endl; fsB.close(); } if (it == INTERVAL) { time_t cycle = seconds(); p_runtime = (cycle - start)*(ITERATIONS / INTERVAL); time_t p_end = rawtime + p_runtime; timeinfo = localtime(&p_end); cout << "\nCompletion time: " << asctime(timeinfo) << endl; fsC << "\nCompletion time: " << asctime(timeinfo) << endl; fsC.close(); } } fsB.open(flux.c_str(), ofstream::app); fsB << it*t_scale << "\t" << Q * x_scale << "\t" << f_space_1 << "\t" << f_space_2 << "\t" << endl; fsB.close(); double end = seconds(); double runtime = end - start; int hours(0), mins(0); double secs(0.); if (runtime > 3600) hours = nearbyint(runtime / 3600 - 0.5); if (runtime > 60) mins = nearbyint((runtime - hours * 3600) / 60 - 0.5); secs = runtime - hours * 3600 - mins * 60; fsC.open(parameters.c_str(), ofstream::app); fsC << "Total runtime: "; if (hours < 10) fsC << 0; fsC << hours << ":"; if (mins < 10) fsC << 0; fsC << mins << ":"; if (secs < 10) fsC << 0; fsC << secs << endl; fsC.close(); hipDeviceReset(); return 0; }
7aaab49c0503ab61af3d60cb74b3a61545e39866.cu
#include <cmath> #include <cstdlib> #include <cstdio> #include <iostream> #include <fstream> #include <string> #include <iomanip> #include <ctime> #include <sstream> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "LatticeBoltzmann.cuh" #include "ImmersedBoundary.cuh" #include "seconds.h" using namespace std; //------------------------------------------PHYSICAL CONSTANTS---------------------------- #define C_S 0.577 //SPEED OF SOUND ON LATTICE #define RHO_0 1. //FLUID DENSITY #define PI 3.14159 //PI //-------------------------------------------PARAMETER SCALING---------------------------- double l_0 = 0.000006; //6 MICRON CILIUM LENGTH double t_0 = 0.067; //67ms BEAT PERIOD AT 15Hz __constant__ double A_mn[7 * 2 * 3] = { -0.449, 0.130, -0.169, 0.063, -0.050, -0.040, -0.068, 2.076, -0.003, 0.054, 0.007, 0.026, 0.022, 0.010, -0.072, -1.502, 0.260, -0.123, 0.011, -0.009, 0.196, -1.074, -0.230, -0.305, -0.180, -0.069, 0.001, -0.080, 0.658, 0.793, -0.251, 0.049, 0.009, 0.023, -0.111, 0.381, 0.331, 0.193, 0.082, 0.029, 0.002, 0.048 }; __constant__ double B_mn[7 * 2 * 3] = { 0.0, -0.030, -0.093, 0.037, 0.062, 0.016, -0.065, 0.0, 0.080, -0.044, -0.017, 0.052, 0.007, 0.051, 0.0, 1.285, -0.036, -0.244, -0.093, -0.137, 0.095, 0.0, -0.298, 0.513, 0.004, -0.222, 0.035, -0.128, 0.0, -1.034, 0.050, 0.143, 0.043, 0.098, -0.054, 0.0, 0.210, -0.367, 0.009, 0.120, -0.024, 0.102 }; __global__ void define_filament(const int T, const int it, const double c_space, const int p_step, const double c_num, double * s, double * lasts, double * b_points) { int n(0), j(0); double arcl(0.); int phase(0.); double b_length(0.); double a_n[2 * 7]; double b_n[2 * 7]; int threadnum = blockDim.x*blockIdx.x + threadIdx.x; int k = threadnum % 10000; int m = (threadnum - k) / 10000; { arcl = 1.*k / 10000; if (it + m*p_step == T) phase = T; else phase = (it + m*p_step) % T; double offset = 1.*(m - (c_num - 1) / 2.)*c_space; for (n = 0; n < 7; n++) { a_n[2 * n + 0] = 0.; b_n[2 * n + 0] = 0.; a_n[2 * n + 0] += A_mn[n + 14 * 0 + 7 * 0] * pow(arcl, 0 + 1); b_n[2 * n + 0] += B_mn[n + 14 * 0 + 7 * 0] * pow(arcl, 0 + 1); a_n[2 * n + 0] += A_mn[n + 14 * 1 + 7 * 0] * pow(arcl, 1 + 1); b_n[2 * n + 0] += B_mn[n + 14 * 1 + 7 * 0] * pow(arcl, 1 + 1); a_n[2 * n + 0] += A_mn[n + 14 * 2 + 7 * 0] * pow(arcl, 2 + 1); b_n[2 * n + 0] += B_mn[n + 14 * 2 + 7 * 0] * pow(arcl, 2 + 1); a_n[2 * n + 1] = 0.; b_n[2 * n + 1] = 0.; a_n[2 * n + 1] += A_mn[n + 14 * 0 + 7 * 1] * pow(arcl, 0 + 1); b_n[2 * n + 1] += B_mn[n + 14 * 0 + 7 * 1] * pow(arcl, 0 + 1); a_n[2 * n + 1] += A_mn[n + 14 * 1 + 7 * 1] * pow(arcl, 1 + 1); b_n[2 * n + 1] += B_mn[n + 14 * 1 + 7 * 1] * pow(arcl, 1 + 1); a_n[2 * n + 1] += A_mn[n + 14 * 2 + 7 * 1] * pow(arcl, 2 + 1); b_n[2 * n + 1] += B_mn[n + 14 * 2 + 7 * 1] * pow(arcl, 2 + 1); } s[5 * (k + m * 10000) + 0] = 1. * 115 * a_n[2 * 0 + 0] * 0.5 + offset; s[5 * (k + m * 10000) + 1] = 1. * 115 * a_n[2 * 0 + 1] * 0.5; s[5 * (k + m * 10000) + 2] = 115 * arcl; for (n = 1; n < 7; n++) { s[5 * (k + m * 10000) + 0] += 1. * 115 * (a_n[2 * n + 0] * cos(n*2.*PI*phase / T) + b_n[2 * n + 0] * sin(n*2.*PI*phase / T)); s[5 * (k + m * 10000) + 1] += 1. 
* 115 * (a_n[2 * n + 1] * cos(n*2.*PI*phase / T) + b_n[2 * n + 1] * sin(n*2.*PI*phase / T)); } if (it > 0) { s[5 * (k + m * 10000) + 3] = s[5 * (k + m * 10000) + 0] - lasts[2 * (k + m * 10000) + 0]; s[5 * (k + m * 10000) + 4] = s[5 * (k + m * 10000) + 1] - lasts[2 * (k + m * 10000) + 1]; } lasts[2 * (k + m * 10000) + 0] = s[5 * (k + m * 10000) + 0]; lasts[2 * (k + m * 10000) + 1] = s[5 * (k + m * 10000) + 1]; } for (j = m*100 ; j < (m + 1)*100; j++) { b_length = j%100; if (abs(s[5 * (k + m * 10000) + 2] - b_length) < 0.01) { b_points[5 * j + 0] = s[5 * (k + m * 10000) + 0]; b_points[5 * j + 1] = s[5 * (k + m * 10000) + 1]; b_points[5 * j + 2] = s[5 * (k + m * 10000) + 3]; b_points[5 * j + 3] = s[5 * (k + m * 10000) + 4]; } } } void boundary_check(const int m, const double c_space, const int c_num, const int L, const double * s, int * epsilon) { int r(0), k(0), l(0); int b_cross = 0; int lowest = 0; bool xclose = 0; bool yclose = 0; int r_max = 2 * L / c_space; double x_m(0.), y_m(0.), x_l(0.), y_l(0.); for (r = 1; r <= r_max; r++) { b_cross = 2 * L - r*c_space; if (b_cross > L) lowest = 0; else lowest = L - b_cross; for (k = lowest; k < L; k++) { x_m = s[2 * (k + m * 100) + 0]; y_m = s[2 * (k + m * 100) + 1]; for (l = lowest; l < L; l++) { xclose = 0; yclose = 0; if (m-r < 0) { x_l = s[2 * (l + (m - r + c_num) * 100) + 0]; y_l = s[2 * (l + (m - r + c_num) * 100) + 1]; } else { x_l = s[2 * (l + (m - r) * 100) + 0]; y_l = s[2 * (l + (m - r) * 100) + 1]; } if (abs(x_l - x_m) < 1) xclose = 1; if (abs(y_l - y_m) < 1) yclose = 1; if (xclose && yclose) epsilon[(k + m * 100)] = 0; } } } } double free_space(const int XDIM, const int c_num, const int L, const double * b_points, const int level) { int cilium_p = 0; int cilium_m = 0; double space(0.); cilium_p = (0 + level) * L; cilium_m = (c_num - level) * L; for (int i = 0; i < L; i++) { space += 1.*(b_points[5*(cilium_p + i) + 0] - (b_points[5*(cilium_m + i) + 0] - XDIM))*(i*1./ L) / L / (b_points[5 * (cilium_p) + 0] - (b_points[5 * (cilium_m) + 0] - XDIM)); } return space; } int main(int argc, char * argv[]) { //----------------------------INITIALISING---------------------------- unsigned int c_fraction = 1; unsigned int c_num = 6; double Re = 1.0; unsigned int XDIM = 300; unsigned int YDIM = 200; unsigned int T = 100000; unsigned int ITERATIONS = T; unsigned int INTERVAL = 100; unsigned int LENGTH = 100; unsigned int c_space = 50; bool ShARC = 0; bool BigData = 0; if (argc < 10) { cout << "Too few arguments! " << argc - 1 << " entered of 9 required. " << endl; return 1; } stringstream arg; arg << argv[1] << ' ' << argv[2] << ' ' << argv[3] << ' ' << argv[4] << ' ' << argv[5] << ' ' << argv[6] << ' ' << argv[7] << ' ' << argv[8] << ' ' << argv[9]; arg >> c_fraction >> c_num >> c_space >> Re >> T >> ITERATIONS >> INTERVAL >> ShARC >> BigData ; XDIM = c_num*c_space; if (XDIM <= 2 * LENGTH) { cout << "not enough cilia in simulation! cilia spacing of " << c_space << "requires atleast " << 2 * LENGTH / c_space << " cilia" << endl; return 1; } const double centre[2] = { XDIM / 2., 0. }; double dx = 1. / LENGTH; double dt = 1. / (T); double SPEED = 0.8*1000/T; double t_scale = 1000.*dt*t_0; //milliseconds double x_scale = 1000000. * dx*l_0; //microns double s_scale = 1000.*x_scale / t_scale; //millimetres per second const double TAU = (SPEED*LENGTH) / (Re*C_S*C_S) + 1. / 2.; const double TAU2 = 1. / (12.*(TAU - (1. / 2.))) + (1. 
/ 2.); time_t rawtime; struct tm * timeinfo; time(&rawtime); timeinfo = localtime(&rawtime); cout << asctime(timeinfo) << endl; cout << "Initialising...\n"; unsigned int i(0), j(0), k(0), m(0); unsigned int it(0); //int phase(0); int p_step = T * c_fraction / c_num; //double offset = 0.; double * lasts; lasts = new double[2 * c_num * 10000]; double * boundary; boundary = new double[5 * c_num * 10000]; int Np = 100 * c_num; double * b_points; b_points = new double[5 * Np]; const int size = XDIM*YDIM; for (k = 0; k < c_num*10000; k++) { boundary[5 * k + 0] = 0.; boundary[5 * k + 1] = 0.; boundary[5 * k + 2] = 0.; boundary[5 * k + 3] = 0.; boundary[5 * k + 4] = 0.; lasts[2 * k + 0] = 0.; lasts[2 * k + 1] = 0.; } //-------------------------------CUDA PARAMETERS DEFINITION----------------------- int blocksize = 500; int gridsize = size / blocksize; int blocksize2 = c_num*LENGTH; int gridsize2 = 1; if (blocksize2 > 1000) { for (blocksize2 = 1000; blocksize2 > 0; blocksize2 -= LENGTH) { if ((c_num*LENGTH) % blocksize2 == 0) { gridsize2 = (c_num*LENGTH) / blocksize2; break; } } } int blocksize3 = 500; int gridsize3 = 20 * c_num; cudaError_t cudaStatus; double Q = 0.; double W = 0.; double f_space_1 = 0.; double f_space_2 = 0.; bool done = 0; if(ShARC) cudaStatus = cudaSetDevice(3); else cudaStatus = cudaSetDevice(0); if (cudaStatus != cudaSuccess) { fprintf(stderr, "Failed to set CUDA device.\n"); } //------------------------------------------ERROR------------------------------------------------ //double l_error = (l_0*dx)*(l_0*dx); //double t_error = (t_0*dt)*(t_0*dt); //double c_error = (t_0*dt)*(t_0*dt) / ((l_0*dx)*(l_0*dx)); double Ma = 1.*SPEED / C_S; time_t p_runtime; //-------------------------------------------ASSIGN CELL VALUES ON HEAP----------------------------- double * u; //VELOCITY VECTOR u = new double[2 * size]; double * rho; //DENSITY rho = new double[size]; double * f0; //EQUILIBRIUM DISTRIBUTION FUNCTION f0 = new double[9 * size]; double * f; //DISTRIBUTION FUNCTION f = new double[9 * size]; double * f1; //POST COLLISION DISTRIBUTION FUNCTION f1 = new double[9 * size]; double * force; //MACROSCOPIC BODY FORCE VECTOR force = new double[2 * size]; double * F; //LATTICE BOLTZMANN FORCE F = new double[9 * size]; unsigned int Ns = LENGTH * c_num; //NUMBER OF BOUNDARY POINTS double * s; //BOUNDARY POINTS double * u_s; //BOUNDARY POINT VELOCITY double * F_s; //BOUNDARY FORCE int * epsilon; s = new double[2 * Ns]; u_s = new double[2 * Ns]; F_s = new double[2 * Ns]; epsilon = new int[Ns]; for (k = 0; k < Ns; k++) { epsilon[k] = 1; } //----------------------------------------CREATE DEVICE VARIABLES----------------------------- double * d_u; //VELOCITY VECTOR double * d_rho; //DENSITY double * d_f0; //EQUILIBRIUM DISTRIBUTION FUNCTION double * d_f; //DISTRIBUTION FUNCTION double * d_f1; //POST COLLISION DISTRIBUTION FUNCTION double * d_centre; double * d_force; double * d_F; double * d_F_s; double * d_s; double * d_u_s; int * d_epsilon; double * d_Q; double * d_lasts; double * d_boundary; double * d_b_points; //---------------------------CUDA MALLOC------------------------------------------------------------- { cudaStatus = cudaMalloc((void**)&d_u, 2 * size * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } cudaStatus = cudaMalloc((void**)&d_rho, size * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } cudaStatus = cudaMalloc((void**)&d_f0, 9 * size * sizeof(double)); if (cudaStatus != cudaSuccess) { 
fprintf(stderr, "cudaMalloc failed!"); } cudaStatus = cudaMalloc((void**)&d_f, 9 * size * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } cudaStatus = cudaMalloc((void**)&d_f1, 9 * size * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed,"); } cudaStatus = cudaMalloc((void**)&d_centre, 2 * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } cudaStatus = cudaMalloc((void**)&d_force, 2 * size * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } cudaStatus = cudaMalloc((void**)&d_F, 9 * size * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } cudaStatus = cudaMalloc((void**)&d_Q, sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc failed!"); } } { cudaStatus = cudaMalloc((void**)&d_F_s, 2 * Ns * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc of F_s failed!\n"); } cudaStatus = cudaMalloc((void**)&d_s, 2 * Ns * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc of s failed!\n"); } cudaStatus = cudaMalloc((void**)&d_u_s, 2 * Ns * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc of u_s failed!\n"); } cudaStatus = cudaMalloc((void**)&d_epsilon, Ns * sizeof(int)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc of epsilon failed!\n"); } cudaStatus = cudaMalloc((void**)&d_lasts, 2 * c_num * 10000 * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc of u_s failed!\n"); } cudaStatus = cudaMalloc((void**)&d_boundary, 5 * c_num * 10000 * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc of u_s failed!\n"); } cudaStatus = cudaMalloc((void**)&d_b_points, 5 * Np * sizeof(double)); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMalloc of u_s failed!\n"); } } //----------------------------------------DEFINE DIRECTORIES---------------------------------- string output_data = "Data/Test/"; if(ShARC) output_data = "/shared/soft_matter_physics2/User/Phq16ja/ShARC_Data/"; else output_data = "C:/Users/phq16ja/Documents/Data/"; //output_data = "//uosfstore.shef.ac.uk/shared/soft_matter_physics2/User/Phq16ja/Local_Data/"; string raw_data = output_data + "Raw/"; raw_data += to_string(c_num); raw_data += "/"; string cilia_data = output_data + "Cilia/"; cilia_data += to_string(c_num); cilia_data += "/"; string img_data = output_data + "Img/"; img_data += to_string(c_num); img_data += "/"; string outfile = cilia_data; //----------------------------------------BOUNDARY INITIALISATION------------------------------------------------ string flux = output_data + "/Flux/" + to_string(c_fraction) + "_" + to_string(c_num) + "_" + to_string(c_space) + "-flux.dat"; string fspace = output_data + "/Flux/free_space.dat"; string parameters = raw_data + "/SimLog.txt"; string input = "Data/cilium/"; input += to_string(c_num); input += "/"; ofstream fsA(input.c_str()); ofstream fsB(flux.c_str()); ofstream fsC(parameters.c_str()); ofstream fsD; fsB.open(flux.c_str(), ofstream::trunc); fsB.close(); fsC.open(parameters.c_str(), ofstream::trunc); fsC.close(); //----------------------------------------INITIALISE ALL CELL VALUES--------------------------------------- for (j = 0; j < XDIM*YDIM; j++) { rho[j] = RHO_0; u[2 * j + 0] = 0.0; u[2 * j + 1] = 0.0; force[2 * j + 0] = 0.; force[2 * j + 1] = 0.; for (i = 0; i < 9; i++) { f0[9 * j + i] = 0.; f[9 * j + i] = 0.; 
f1[9 * j + i] = 0.; F[9 * j + i] = 0.; } } //------------------------------------------------------COPY INITIAL VALUES TO DEVICE----------------------------------------------------------- //CUDA MEMORY COPIES { cudaStatus = cudaMemcpy(d_u, u, 2 * size * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } cudaStatus = cudaMemcpy(d_rho, rho, size * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } cudaStatus = cudaMemcpy(d_f0, f0, 9 * size * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } cudaStatus = cudaMemcpy(d_f, f, 9 * size * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } cudaStatus = cudaMemcpy(d_f1, f1, 9 * size * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } cudaStatus = cudaMemcpy(d_centre, centre, 2 * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } cudaStatus = cudaMemcpy(d_force, force, 2 * size * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } cudaStatus = cudaMemcpy(d_F, F, 9 * size * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } cudaStatus = cudaMemcpy(d_lasts, lasts, 2 * c_num * 10000 * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy of lasts failed!"); } cudaStatus = cudaMemcpy(d_boundary, boundary, 5 * c_num * 10000 * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy of boundary failed!"); } cudaStatus = cudaMemcpy(d_Q, &Q, sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } } //------------------------------------------------------SET INITIAL DISTRIBUTION TO EQUILIBRIUM------------------------------------------------- equilibrium << <gridsize, blocksize >> > (d_u, d_rho, d_f0, d_force, d_F, XDIM, YDIM, TAU); //INITIAL EQUILIBRIUM SET { // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "first equilibrium launch failed: %s\n", cudaGetErrorString(cudaStatus)); } cudaStatus = cudaMemcpy(f0, d_f0, 9 * size * sizeof(double), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } cudaStatus = cudaMemcpy(F, d_F, 9 * size * sizeof(double), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy failed!"); } } for (j = 0; j < XDIM*YDIM; j++) { for (i = 0; i < 9; i++) { f[9 * j + i] = f0[9 * j + i]; } } cudaStatus = cudaMemcpy(d_f, f, 9 * size * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy of f failed!\n"); } //-----------------------------------------------------OUTPUT PARAMETERS------------------------------------------------------------------------ fsC.open(parameters.c_str(), ofstream::trunc); fsC.close(); fsC.open(parameters.c_str(), ofstream::app); fsC << asctime(timeinfo) << endl; fsC << "Size: " << XDIM << "x" << YDIM << endl; fsC << "Iterations: " << ITERATIONS << endl; fsC << "Reynolds Number: " << Re << endl; fsC << "Relaxation times: " << TAU << ", " << TAU2 << endl; //if (TAU 
<= 0.6) fsC << "POSSIBLE INSTABILITY! Relaxation time: " << TAU << endl; //if (TAU >= 2.01) fsC << "POSSIBLE INACCURACY! Relaxation time: " << TAU << endl; fsC << "Spatial step: " << dx*l_0 << "m" << endl; fsC << "Time step: " << dt*t_0 << "s" << endl; fsC << "Mach number: " << Ma << endl; //fsC << "Spatial discretisation error: " << l_error << endl; //fsC << "Time discretisation error: " << t_error << endl; //fsC << "Compressibility error: " << c_error << endl; fsC << "Phase Step: " << c_fraction << "/" << c_num << endl; //fsC << "\nThreads per block: " << blocksize << endl; //fsC << "Blocks: " << gridsize << endl; if (BigData) fsC << "\nBig Data is ON" << endl; else fsC << "\nBig Data is OFF" << endl; if (ShARC) fsC << "Running on ShARC" << endl; else fsC << "Running on local GPU" << endl; //--------------------------ITERATION LOOP----------------------------- cout << "Running Simulation...\n"; time_t start = seconds(); for (it = 0; it < ITERATIONS; it++) { //--------------------------CILIA BEAT DEFINITION------------------------- define_filament << <gridsize3, blocksize3 >> > (T, it, c_space, p_step, c_num, d_boundary, d_lasts, d_b_points); cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "define_filament failed: %s\n", cudaGetErrorString(cudaStatus)); } cudaStatus = cudaMemcpy(b_points, d_b_points, 5 * Np * sizeof(double), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy of b_points failed!\n"); } f_space_1 = free_space(XDIM, c_num, LENGTH, b_points, 1); f_space_2 = free_space(XDIM, c_num, LENGTH, b_points, 2); if (1.*it / ITERATIONS > 0.166 && !done) { fsD.open(fspace.c_str(), ofstream::app); fsD << c_fraction *1./ c_num << "\t" << f_space_1 << "\t" << f_space_2 << endl; fsD.close(); done = 1; } for (j = 0; j < c_num*LENGTH; j++) { k = j; s[2 * k + 0] = (c_space*c_num) / 2. 
+ b_points[5 * j + 0]; if (s[2 * k + 0] < 0) s[2 * k + 0] += XDIM; else if (s[2 * k + 0] > XDIM) s[2 * k + 0] -= XDIM; s[2 * k + 1] = b_points[5 * j + 1] + 1; if (it == 0) { u_s[2 * k + 0] = 0.; u_s[2 * k + 1] = 0.; } else { u_s[2 * k + 0] = b_points[5 * j + 2]; u_s[2 * k + 1] = b_points[5 * j + 3]; } epsilon[k] = 1; } for (m = 0; m < c_num; m++) { boundary_check(m, c_space, c_num, LENGTH, s, epsilon); } //---------------------------CILIUM COPY---------------------------------------- { cudaStatus = cudaMemcpy(d_epsilon, epsilon, Ns * sizeof(int), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy of epsilon failed!\n"); } cudaStatus = cudaMemcpy(d_s, s, 2 * Ns * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy of s failed!\n"); } cudaStatus = cudaMemcpy(d_u_s, u_s, 2 * Ns * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy of u_s failed!\n"); } cudaStatus = cudaMemcpy(d_F_s, F_s, 2 * Ns * sizeof(double), cudaMemcpyHostToDevice); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy of F_s failed!\n"); } } //---------------------------IMMERSED BOUNDARY LATTICE BOLTZMANN STEPS------------------- equilibrium << <gridsize, blocksize >> > (d_u, d_rho, d_f0, d_force, d_F, XDIM, YDIM, TAU); //EQUILIBRIUM STEP { // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "equilibrium launch failed: %s\n", cudaGetErrorString(cudaStatus)); } } collision << <gridsize, blocksize >> > (d_f0, d_f, d_f1, d_F, TAU, TAU2, XDIM, YDIM, it); //COLLISION STEP { // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "collision launch failed: %s\n", cudaGetErrorString(cudaStatus)); } } streaming << <gridsize, blocksize >> > (d_f1, d_f, XDIM, YDIM); //STREAMING STEP { // Check for any errors launching the kernel cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "collision launch failed: %s\n", cudaGetErrorString(cudaStatus)); } } macro << <gridsize, blocksize >> > (d_f, d_u, d_rho, XDIM, YDIM); //MACRO STEP { cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "collision launch failed: %s\n", cudaGetErrorString(cudaStatus)); } } interpolate << <gridsize2, blocksize2 >> > (d_rho, d_u, Ns, d_u_s, d_F_s, d_s, XDIM); //IB INTERPOLATION STEP { cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "interpolate launch failed: %s\n", cudaGetErrorString(cudaStatus)); } } spread << <gridsize, blocksize >> > (d_rho, d_u, d_f, Ns, d_u_s, d_F_s, d_force, d_s, XDIM, d_Q, d_epsilon); //IB SPREADING STEP { cudaStatus = cudaGetLastError(); if (cudaStatus != cudaSuccess) { fprintf(stderr, "spread launch failed: %s\n", cudaGetErrorString(cudaStatus)); //cout << it << endl; //system("pause"); return 1; } cudaStatus = cudaMemcpy(rho, d_rho, size * sizeof(double), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy of rho failed!\n"); } cudaStatus = cudaMemcpy(u, d_u, 2 * size * sizeof(double), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy of u failed!\n"); } cudaStatus = cudaMemcpy(&Q, d_Q, sizeof(double), cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy of u failed!\n"); } cudaStatus = cudaMemcpy(F_s, d_F_s, 2 * Ns * sizeof(double), 
cudaMemcpyDeviceToHost); if (cudaStatus != cudaSuccess) { fprintf(stderr, "cudaMemcpy of rho failed!\n"); } } //----------------------------DATA OUTPUT------------------------------ for (j = 0; j < c_num*LENGTH; j++) { W += abs(F_s[2 * j + 0]) * u_s[2 * j + 0]/c_num/LENGTH; //W += u_s[2 * j + 0]* u_s[2 * j + 0]*(u_s[2 * j + 0]/abs(u_s[2 * j + 0])); } if (it % INTERVAL == 0) { if (BigData) { outfile = raw_data + to_string(it) + "-fluid.dat"; fsA.open(outfile.c_str()); for (j = 0; j < XDIM*YDIM; j++) { int x = j%XDIM; int y = (j - j%XDIM) / XDIM; double ab = sqrt(u[2 * j + 0] * u[2 * j + 0] + u[2 * j + 1] * u[2 * j + 1]); fsA << x*x_scale << "\t" << y*x_scale << "\t" << u[2 * j + 0]*s_scale << "\t" << u[2 * j + 1]*s_scale << "\t" << ab*s_scale << "\t" << rho[j] << endl; if (x == XDIM - 1) fsA << endl; } fsA.close(); outfile = cilia_data + to_string(it) + "-cilia.dat"; fsA.open(outfile.c_str()); for (k = 0; k < Ns; k++) { fsA << s[2 * k + 0]*x_scale << "\t" << s[2 * k + 1]*x_scale << "\t" << u_s[2 * k + 0]*s_scale << "\t" << u_s[2 * k + 1]*s_scale << "\t" << epsilon[k] << "\n"; //LOOP FOR Np if (k % 100 == 99 || s[2 * k + 0] > XDIM - 1 || s[2 * k + 0] < 1) fsA << "\n"; } fsA.close(); } fsB.open(flux.c_str(), ofstream::app); fsB << it*t_scale << "\t" << Q * x_scale << "\t" << f_space_1 << "\t" << f_space_2 << "\t" << endl; fsB.close(); } if (it == INTERVAL) { time_t cycle = seconds(); p_runtime = (cycle - start)*(ITERATIONS / INTERVAL); time_t p_end = rawtime + p_runtime; timeinfo = localtime(&p_end); cout << "\nCompletion time: " << asctime(timeinfo) << endl; fsC << "\nCompletion time: " << asctime(timeinfo) << endl; fsC.close(); } } fsB.open(flux.c_str(), ofstream::app); fsB << it*t_scale << "\t" << Q * x_scale << "\t" << f_space_1 << "\t" << f_space_2 << "\t" << endl; fsB.close(); double end = seconds(); double runtime = end - start; int hours(0), mins(0); double secs(0.); if (runtime > 3600) hours = nearbyint(runtime / 3600 - 0.5); if (runtime > 60) mins = nearbyint((runtime - hours * 3600) / 60 - 0.5); secs = runtime - hours * 3600 - mins * 60; fsC.open(parameters.c_str(), ofstream::app); fsC << "Total runtime: "; if (hours < 10) fsC << 0; fsC << hours << ":"; if (mins < 10) fsC << 0; fsC << mins << ":"; if (secs < 10) fsC << 0; fsC << secs << endl; fsC.close(); cudaDeviceReset(); return 0; }
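// Editor's sketch (not part of the original pair): both the HIP and CUDA
// versions above repeat "cudaStatus = ...; if (cudaStatus != cudaSuccess) fprintf(...)"
// after every allocation and copy, and several messages were copied from
// neighbouring calls (the d_lasts, d_boundary and d_b_points allocations all
// report "cudaMalloc of u_s failed!", and the final F_s copy reports
// "cudaMemcpy of rho failed!"). A minimal checking macro that prints the
// failing expression and location automatically could look like this:
#include <cstdio>
#include <cuda_runtime.h>

#define CHECK_CUDA(call)                                                      \
  do {                                                                        \
    cudaError_t err_ = (call);                                                \
    if (err_ != cudaSuccess) {                                                \
      fprintf(stderr, "%s:%d: %s failed: %s\n",                               \
              __FILE__, __LINE__, #call, cudaGetErrorString(err_));           \
    }                                                                         \
  } while (0)

// Usage sketch: CHECK_CUDA(cudaMalloc((void**)&d_lasts, 2 * c_num * 10000 * sizeof(double)));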
1d098c421bd33fba966f401a55745ce32da69248.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" #include <hipcub/hipcub.hpp> namespace oneflow { namespace { template<typename T> class TmpBufferManager final { public: OF_DISALLOW_COPY_AND_MOVE(TmpBufferManager); TmpBufferManager(int32_t capacity, void* ptr, int32_t instance_num) : capacity_{capacity}, key_value_out_elem_cnt_{instance_num} { const int32_t key_value_out_aligned_bytes = GetCudaAlignedSize(key_value_out_elem_cnt_ * sizeof(hipcub::KeyValuePair<int32_t, T>)); key_value_out_ptr_ = reinterpret_cast<hipcub::KeyValuePair<int32_t, T>*>(ptr); temp_storage_ptr_ = reinterpret_cast<void*>(reinterpret_cast<char*>(key_value_out_ptr_) + key_value_out_aligned_bytes); temp_storage_bytes_ = capacity_ - key_value_out_aligned_bytes; CHECK_GE(temp_storage_bytes_, 0); } ~TmpBufferManager() = default; hipcub::KeyValuePair<int32_t, T>* KeyValueOutPtr() const { return key_value_out_ptr_; } void* TempStoragePtr() const { return temp_storage_ptr_; } int32_t TempStorageBytes() const { return temp_storage_bytes_; } private: int32_t capacity_; hipcub::KeyValuePair<int32_t, T>* key_value_out_ptr_; void* temp_storage_ptr_; int32_t key_value_out_elem_cnt_; int32_t temp_storage_bytes_; }; class MultiplyFunctor final { public: MultiplyFunctor(int32_t num_col) : num_col_(num_col) {} __host__ __device__ __forceinline__ int32_t operator()(int32_t idx) const { return idx * num_col_; } private: int32_t num_col_; }; template<typename T> size_t InferTempStorageForArgMax(int32_t num_row, int32_t num_col) { using SegmentOffsetIter = hipcub::TransformInputIterator<int32_t, MultiplyFunctor, hipcub::CountingInputIterator<int32_t>>; hipcub::CountingInputIterator<int32_t> counting_iter(0); MultiplyFunctor multiply_functor(num_col); SegmentOffsetIter segment_offset_iter(counting_iter, multiply_functor); size_t temp_storage_bytes = 0; auto err = hipcub::DeviceSegmentedReduce::ArgMax<T*, hipcub::KeyValuePair<int32_t, T>*, SegmentOffsetIter>( /* d_temp_storage */ nullptr, /* temp_storage_bytes */ temp_storage_bytes, /* d_in */ nullptr, /* d_out */ nullptr, /* num_segments */ num_row, /* d_begin_offsets */ segment_offset_iter, /* d_end_offsets */ segment_offset_iter + 1, /* stream */ 0); OF_CUDA_CHECK(err); return temp_storage_bytes; } template<typename T> void ArgMax(const T* in_ptr, int32_t num_row, int32_t num_col, void* temp_storage_ptr, int32_t temp_storage_bytes, hipcub::KeyValuePair<int32_t, T>* out_ptr, hipStream_t stream) { size_t rt_inferred_temp_storage_bytes = InferTempStorageForArgMax<T>(num_row, num_col); CHECK_LE(rt_inferred_temp_storage_bytes, temp_storage_bytes); using SegmentOffsetIter = hipcub::TransformInputIterator<int32_t, MultiplyFunctor, hipcub::CountingInputIterator<int32_t>>; hipcub::CountingInputIterator<int32_t> counting_iter(0); MultiplyFunctor multiply_functor(num_col); SegmentOffsetIter segment_offset_iter(counting_iter, multiply_functor); 
auto err = hipcub::DeviceSegmentedReduce::ArgMax( /* d_temp_storage */ temp_storage_ptr, /* temp_storage_bytes */ rt_inferred_temp_storage_bytes, /* d_in */ in_ptr, /* d_out */ out_ptr, /* num_segments */ num_row, /* d_begin_offsets */ segment_offset_iter, /* d_end_offsets */ segment_offset_iter + 1, /* stream */ stream); OF_CUDA_CHECK(err); } template<typename T> __global__ void WriteKeysToOutput(const int32_t instance_num, const hipcub::KeyValuePair<int32_t, T>* key_value_out_ptr, int64_t* out_ptr) { CUDA_1D_KERNEL_LOOP(i, instance_num) { out_ptr[i] = key_value_out_ptr[i].key; } } } // namespace template<typename T> class GpuArgMaxKernel final : public user_op::OpKernel { public: GpuArgMaxKernel() = default; ~GpuArgMaxKernel() = default; private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0); user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0); user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); const int32_t elem_cnt = in->shape().elem_cnt(); const int32_t instance_size = in->shape().At(in->shape().NumAxes() - 1); const int32_t instance_num = elem_cnt / instance_size; TmpBufferManager<T> buffer_manager(tmp_buffer->shape().elem_cnt(), tmp_buffer->mut_dptr<void>(), instance_num); ArgMax(in->dptr<T>(), instance_num, instance_size, buffer_manager.TempStoragePtr(), buffer_manager.TempStorageBytes(), buffer_manager.KeyValueOutPtr(), ctx->device_ctx()->cuda_stream()); hipLaunchKernelGGL(( WriteKeysToOutput<T>), dim3(BlocksNum4ThreadsNum(instance_num)), dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(), instance_num, buffer_manager.KeyValueOutPtr(), out->mut_dptr<int64_t>()); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_GPU_ARGMAX_KERNEL(dtype) \ REGISTER_USER_KERNEL("argmax") \ .SetCreateFn<GpuArgMaxKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("in", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn([](user_op::InferContext* ctx) { \ const Shape& in_shape = ctx->InputShape("in", 0); \ const int32_t instance_size = in_shape.dim_vec().back(); \ const int32_t instance_num = in_shape.elem_cnt() / instance_size; \ \ /* Key-Value Out */ \ int32_t key_value_out_bytes = \ GetCudaAlignedSize(instance_num * sizeof(hipcub::KeyValuePair<int32_t, dtype>)); \ \ /* CUB Temp Storage */ \ size_t temp_storage_bytes = InferTempStorageForArgMax<dtype>(instance_num, instance_size); \ \ return key_value_out_bytes + temp_storage_bytes; \ }); REGISTER_GPU_ARGMAX_KERNEL(float) REGISTER_GPU_ARGMAX_KERNEL(double) REGISTER_GPU_ARGMAX_KERNEL(uint8_t) REGISTER_GPU_ARGMAX_KERNEL(int8_t) REGISTER_GPU_ARGMAX_KERNEL(int32_t) REGISTER_GPU_ARGMAX_KERNEL(int64_t) } // namespace oneflow
1d098c421bd33fba966f401a55745ce32da69248.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" #include <cub/cub.cuh> namespace oneflow { namespace { template<typename T> class TmpBufferManager final { public: OF_DISALLOW_COPY_AND_MOVE(TmpBufferManager); TmpBufferManager(int32_t capacity, void* ptr, int32_t instance_num) : capacity_{capacity}, key_value_out_elem_cnt_{instance_num} { const int32_t key_value_out_aligned_bytes = GetCudaAlignedSize(key_value_out_elem_cnt_ * sizeof(cub::KeyValuePair<int32_t, T>)); key_value_out_ptr_ = reinterpret_cast<cub::KeyValuePair<int32_t, T>*>(ptr); temp_storage_ptr_ = reinterpret_cast<void*>(reinterpret_cast<char*>(key_value_out_ptr_) + key_value_out_aligned_bytes); temp_storage_bytes_ = capacity_ - key_value_out_aligned_bytes; CHECK_GE(temp_storage_bytes_, 0); } ~TmpBufferManager() = default; cub::KeyValuePair<int32_t, T>* KeyValueOutPtr() const { return key_value_out_ptr_; } void* TempStoragePtr() const { return temp_storage_ptr_; } int32_t TempStorageBytes() const { return temp_storage_bytes_; } private: int32_t capacity_; cub::KeyValuePair<int32_t, T>* key_value_out_ptr_; void* temp_storage_ptr_; int32_t key_value_out_elem_cnt_; int32_t temp_storage_bytes_; }; class MultiplyFunctor final { public: MultiplyFunctor(int32_t num_col) : num_col_(num_col) {} __host__ __device__ __forceinline__ int32_t operator()(int32_t idx) const { return idx * num_col_; } private: int32_t num_col_; }; template<typename T> size_t InferTempStorageForArgMax(int32_t num_row, int32_t num_col) { using SegmentOffsetIter = cub::TransformInputIterator<int32_t, MultiplyFunctor, cub::CountingInputIterator<int32_t>>; cub::CountingInputIterator<int32_t> counting_iter(0); MultiplyFunctor multiply_functor(num_col); SegmentOffsetIter segment_offset_iter(counting_iter, multiply_functor); size_t temp_storage_bytes = 0; auto err = cub::DeviceSegmentedReduce::ArgMax<T*, cub::KeyValuePair<int32_t, T>*, SegmentOffsetIter>( /* d_temp_storage */ nullptr, /* temp_storage_bytes */ temp_storage_bytes, /* d_in */ nullptr, /* d_out */ nullptr, /* num_segments */ num_row, /* d_begin_offsets */ segment_offset_iter, /* d_end_offsets */ segment_offset_iter + 1, /* stream */ 0); OF_CUDA_CHECK(err); return temp_storage_bytes; } template<typename T> void ArgMax(const T* in_ptr, int32_t num_row, int32_t num_col, void* temp_storage_ptr, int32_t temp_storage_bytes, cub::KeyValuePair<int32_t, T>* out_ptr, cudaStream_t stream) { size_t rt_inferred_temp_storage_bytes = InferTempStorageForArgMax<T>(num_row, num_col); CHECK_LE(rt_inferred_temp_storage_bytes, temp_storage_bytes); using SegmentOffsetIter = cub::TransformInputIterator<int32_t, MultiplyFunctor, cub::CountingInputIterator<int32_t>>; cub::CountingInputIterator<int32_t> counting_iter(0); MultiplyFunctor multiply_functor(num_col); SegmentOffsetIter segment_offset_iter(counting_iter, multiply_functor); auto err = cub::DeviceSegmentedReduce::ArgMax( /* d_temp_storage */ temp_storage_ptr, /* temp_storage_bytes */ 
rt_inferred_temp_storage_bytes, /* d_in */ in_ptr, /* d_out */ out_ptr, /* num_segments */ num_row, /* d_begin_offsets */ segment_offset_iter, /* d_end_offsets */ segment_offset_iter + 1, /* stream */ stream); OF_CUDA_CHECK(err); } template<typename T> __global__ void WriteKeysToOutput(const int32_t instance_num, const cub::KeyValuePair<int32_t, T>* key_value_out_ptr, int64_t* out_ptr) { CUDA_1D_KERNEL_LOOP(i, instance_num) { out_ptr[i] = key_value_out_ptr[i].key; } } } // namespace template<typename T> class GpuArgMaxKernel final : public user_op::OpKernel { public: GpuArgMaxKernel() = default; ~GpuArgMaxKernel() = default; private: using user_op::OpKernel::Compute; void Compute(user_op::KernelComputeContext* ctx) const override { const user_op::Tensor* in = ctx->Tensor4ArgNameAndIndex("in", 0); user_op::Tensor* out = ctx->Tensor4ArgNameAndIndex("out", 0); user_op::Tensor* tmp_buffer = ctx->Tensor4ArgNameAndIndex("tmp_buffer", 0); const int32_t elem_cnt = in->shape().elem_cnt(); const int32_t instance_size = in->shape().At(in->shape().NumAxes() - 1); const int32_t instance_num = elem_cnt / instance_size; TmpBufferManager<T> buffer_manager(tmp_buffer->shape().elem_cnt(), tmp_buffer->mut_dptr<void>(), instance_num); ArgMax(in->dptr<T>(), instance_num, instance_size, buffer_manager.TempStoragePtr(), buffer_manager.TempStorageBytes(), buffer_manager.KeyValueOutPtr(), ctx->device_ctx()->cuda_stream()); WriteKeysToOutput<T><<<BlocksNum4ThreadsNum(instance_num), kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>( instance_num, buffer_manager.KeyValueOutPtr(), out->mut_dptr<int64_t>()); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_GPU_ARGMAX_KERNEL(dtype) \ REGISTER_USER_KERNEL("argmax") \ .SetCreateFn<GpuArgMaxKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceTag() == "gpu") \ & (user_op::HobDataType("in", 0) == GetDataType<dtype>::value)) \ .SetInferTmpSizeFn([](user_op::InferContext* ctx) { \ const Shape& in_shape = ctx->InputShape("in", 0); \ const int32_t instance_size = in_shape.dim_vec().back(); \ const int32_t instance_num = in_shape.elem_cnt() / instance_size; \ \ /* Key-Value Out */ \ int32_t key_value_out_bytes = \ GetCudaAlignedSize(instance_num * sizeof(cub::KeyValuePair<int32_t, dtype>)); \ \ /* CUB Temp Storage */ \ size_t temp_storage_bytes = InferTempStorageForArgMax<dtype>(instance_num, instance_size); \ \ return key_value_out_bytes + temp_storage_bytes; \ }); REGISTER_GPU_ARGMAX_KERNEL(float) REGISTER_GPU_ARGMAX_KERNEL(double) REGISTER_GPU_ARGMAX_KERNEL(uint8_t) REGISTER_GPU_ARGMAX_KERNEL(int8_t) REGISTER_GPU_ARGMAX_KERNEL(int32_t) REGISTER_GPU_ARGMAX_KERNEL(int64_t) } // namespace oneflow
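// Note on the pair above: it shows the two launch spellings side by side --
//   CUDA: WriteKeysToOutput<T><<<BlocksNum4ThreadsNum(instance_num), kCudaThreadsNumPerBlock, 0, stream>>>(...)
//   HIP : hipLaunchKernelGGL((WriteKeysToOutput<T>), dim3(BlocksNum4ThreadsNum(instance_num)),
//                            dim3(kCudaThreadsNumPerBlock), 0, stream, ...)
// together with the <cub/cub.cuh> -> <hipcub/hipcub.hpp> include and the
// cub:: -> hipcub:: namespace rename produced by hipify.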
89d3f88fdd90f0225684fb86315403d830bb9c89.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "getRestricted.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int countx = 1; int county = 1; int rows = XSIZE; int cols = YSIZE; float *mX = NULL; hipMalloc(&mX, XSIZE*YSIZE); int mXdim = 1; float *vY = NULL; hipMalloc(&vY, XSIZE*YSIZE); int vYdim = 1; float *mQ = NULL; hipMalloc(&mQ, XSIZE*YSIZE); int mQdim = 1; float *mR = NULL; hipMalloc(&mR, XSIZE*YSIZE); int mRdim = 1; float *vectB = NULL; hipMalloc(&vectB, XSIZE*YSIZE); int vectBdim = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( getRestricted), dim3(gridBlock),dim3(threadBlock), 0, 0, countx,county,rows,cols,mX,mXdim,vY,vYdim,mQ,mQdim,mR,mRdim,vectB,vectBdim); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( getRestricted), dim3(gridBlock),dim3(threadBlock), 0, 0, countx,county,rows,cols,mX,mXdim,vY,vYdim,mQ,mQdim,mR,mRdim,vectB,vectBdim); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( getRestricted), dim3(gridBlock),dim3(threadBlock), 0, 0, countx,county,rows,cols,mX,mXdim,vY,vYdim,mQ,mQdim,mR,mRdim,vectB,vectBdim); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
89d3f88fdd90f0225684fb86315403d830bb9c89.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "getRestricted.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int countx = 1; int county = 1; int rows = XSIZE; int cols = YSIZE; float *mX = NULL; cudaMalloc(&mX, XSIZE*YSIZE); int mXdim = 1; float *vY = NULL; cudaMalloc(&vY, XSIZE*YSIZE); int vYdim = 1; float *mQ = NULL; cudaMalloc(&mQ, XSIZE*YSIZE); int mQdim = 1; float *mR = NULL; cudaMalloc(&mR, XSIZE*YSIZE); int mRdim = 1; float *vectB = NULL; cudaMalloc(&vectB, XSIZE*YSIZE); int vectBdim = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); getRestricted<<<gridBlock,threadBlock>>>(countx,county,rows,cols,mX,mXdim,vY,vYdim,mQ,mQdim,mR,mRdim,vectB,vectBdim); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { getRestricted<<<gridBlock,threadBlock>>>(countx,county,rows,cols,mX,mXdim,vY,vYdim,mQ,mQdim,mR,mRdim,vectB,vectBdim); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { getRestricted<<<gridBlock,threadBlock>>>(countx,county,rows,cols,mX,mXdim,vY,vYdim,mQ,mQdim,mR,mRdim,vectB,vectBdim); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
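// Editor's sketch (assumption, not in the original files): kernel launches are
// asynchronous, so the steady_clock interval above mostly measures enqueueing
// rather than execution. Synchronizing before each timestamp (reusing the
// surrounding declarations) would time the kernels themselves:
cudaDeviceSynchronize();                 // drain the warm-up launches
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
  getRestricted<<<gridBlock, threadBlock>>>(countx, county, rows, cols, mX, mXdim,
                                            vY, vYdim, mQ, mQdim, mR, mRdim,
                                            vectB, vectBdim);
}
cudaDeviceSynchronize();                 // wait for the timed launches to finish
auto end = steady_clock::now();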
49fb3bf9ee140a2c74a783c667b6293cc0c003b6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "rocblas.h" #include <thrust/sort.h> #include <thrust/extrema.h> #include <thrust/execution_policy.h> #include <stdio.h> #include <iostream> #include <iomanip> #include <fstream> #include <string> #include <cmath> #include <Windows.h> using namespace std; //Set grid and block size for the kernels that run and sets the number of neighbors desired int k = 7; int gridSize = 100; int blockSize = 1024; //The distKernel computes the difference squared between two points. Requires size number of threads __global__ void distKernel(float *inX, float *dataSet, int row, int col, float *distance) { int gid = threadIdx.x + blockIdx.x * blockDim.x; if (gid < row) { float dist = 0.0; for (int i = 0; i<col; i++) { float f = (inX[i] - dataSet[gid * col + i]); dist += f * f; } distance[gid] = dist; } } int main() { //Input file to read the labeled data ifstream inputData; //The arrays that will hold the labeled data float* coors; //Arrays to hold the distances computed via the GPU and CPU float* distances; float* distances2; //An array to hold the unsorted labels int* unsortedlabels; //Variable used to label new data; defaults to 0 int *outputLabel; //Variables used to hold one data point float* dataPoint; //Variables to hold the number of labeled data points and the number or points being entered int numInput; int numPoints; int numDim; int numLabel; //Opens a file whose first line is the number of elements and every subsequent line is an x coordinate, y coordinate, //and label that are seperated by spaces inputData.open("Customized_Data_Updated.txt"); //Make sure the file opens correctly if (!inputData.is_open()){ cout << "Something went wrong while reading in the data. Check where it is located again." << endl; exit(0); } //Prompt the user for the number of points being classified cout << "How many points do you want to read? 
"; cin >> numPoints; //Store the number of labeled data points, the number of dimensions and the total number of labels inputData >> numInput >> numDim >> numLabel; //Set up the arrays to have a max capacity equal to the sum of the number of labeled and unlabeled points coors = new float[numDim*(numInput + numPoints)]; unsortedlabels = new int[numInput + numPoints]; distances = new float[numInput + numPoints]; distances2 = new float[numInput + numPoints]; dataPoint = new float[numDim]; //Set up pointers for the arrays used for the GPU implementation float *devX, *devD, *devP; //Set up the proper grid size gridSize = (numInput + numPoints) / blockSize + 1; for (int i = 0; i < numInput; i++){ //Begin modifying data to find the distance more easily as no scalar - vector CUBLAS sum function for (int j = 0; j < numDim; j++){ inputData >> coors[i*numDim + j]; } inputData >> unsortedlabels[i]; } //Close the input file inputData.close(); //Collect the data points that the user wants classified for (int i = 0; i < numPoints; i++){ cout << i << " data point: " << endl; for (int j = 0; j < numDim; j++){ cout << j << " dim: "; cin >> coors[(i + numInput)*numDim + j]; } cout << endl; } //Run the KNN distance finding and sorting code for however many points the user entered for (int z = 0; z < numPoints; z++){ //Get the coordinates of the point to be classified for (int i = 0; i < numDim; i++){ dataPoint[i] = coors[numDim*(numInput + z) + i]; } cout << z << " data point: " << endl; //Create an array to hold labels that will be sorted int* labels; labels = new int[numInput + z]; //Copy all of the labels to the new array for (int i = 0; i < numInput + z; i++){ labels[i] = unsortedlabels[i]; } //Time the sequential version using Windows's QueryPerfomanceCounter() //Number of ticks per second LARGE_INTEGER frequency; //Measure times LARGE_INTEGER t1, t2; //Store time double elapsedTime; //Fill the frequency variable QueryPerformanceFrequency(&frequency); //Get the first time QueryPerformanceCounter(&t1); for (int i = 0; i < numInput + z; i++){ //Compute the distances using the CPU distances2[i] = 0.0; for (int j = 0; j < numDim; j++){ distances2[i] += (coors[i*numDim + j] - dataPoint[j])*(coors[i*numDim + j] - dataPoint[j]); } } //Get the second time QueryPerformanceCounter(&t2); //Get the elapsed time in milliseconds elapsedTime = (t2.QuadPart - t1.QuadPart) * 1000.0 / frequency.QuadPart; cout << elapsedTime << " milliseconds for sequential run." << endl; //Allocate and fill the arrays for the GPU version hipMalloc((void**)&devX, (numInput + z)*numDim*sizeof(float)); hipMalloc((void**)&devD, (numInput + z)*sizeof(float)); hipMalloc((void**)&devP, (numDim)*sizeof(float)); //Create CUDA Events to time the GPU version hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); //Start timer hipEventRecord(start); //Copy in the data hipMemcpy(devX, coors, (numInput + z)*numDim*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(devD, coors, (numInput + z)*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(devP, dataPoint, (numDim)*sizeof(float), hipMemcpyHostToDevice); //Compute the distances for the next dimension distKernel << <blockSize, gridSize >> >(devP, devX, numInput + z, numDim, devD); //Finish timing hipEventRecord(stop); //Find the time for the GPU version and print it hipEventSynchronize(stop); float milliseconds = 0; hipEventElapsedTime(&milliseconds, start, stop); cout << milliseconds << " milliseconds for parallel run." 
<< endl; //Copy the GPU computed distances over to the host hipMemcpy(distances, devD, (numInput + z)*sizeof(float), hipMemcpyDeviceToHost); //Calculate the number of distances that were computed differently by the CPU and GPU int numWrong = 0; for (int i = 0; i < numInput + z; i++){ if (distances2[i] != distances[i]) numWrong++; } //Print a message if any distances were incorrectly computed if (numWrong > 0) cout << numWrong << " distances miscomputed\n"; //Free the CUDA Arrays hipFree(devX); hipFree(devD); //Use the thrust library to sort the distances and theit corresponding labels thrust::sort_by_key(distances, distances + numInput + z, labels); //Set up an array to hold the number of k-nearest-neighbors with that label int* labelCounts = new int[numLabel]; for (int i = 0; i < numLabel; i++){ labelCounts[i] = 0; } //Count the number of points labeled 0 and 1, telling the user the label of all k-nearest neighbors for (int i = 0; i < k; i++){ labelCounts[labels[i]] += 1; cout << "" << i + 1 << " closest point has a label of: " << labels[i] << " with a squared distance of " << distances[i] << endl; } //Find the correct output label outputLabel = thrust::max_element(thrust::host, labelCounts, labelCounts + k); int output = outputLabel - labelCounts; //Output the classification for the data point cout << "Point " << z << " should be classified as: " << output << endl; cout << endl; //Add the points label to the unsorted labels unsortedlabels[z + numInput] = output; //Free the sorted array of labels free(labels); free(labelCounts); } // hipDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. hipDeviceReset(); //Create an ofstream to print the data to so that it can be used as a labeled data set for another run ofstream outputData; //Open a file and make sure it is empty before writing to it outputData.open("Customizedb_Data_Updated.txt", ios::out | ios::trunc); //Make sure the file opens correctly if (!outputData.is_open()){ cout << "Something went wrong with opening the output file. Check where it is located again." << endl; exit(0); } //Put the total number of data points at the top of the file outputData << (numInput + numPoints) << " " << numDim << " " << numLabel << endl; //Print each point and its correspoding label for (int i = 0; i < numInput + numPoints; i++){ for (int j = 0; j < numDim; j++){ outputData << coors[i*numDim + j] << " " << endl; } outputData << unsortedlabels[i] << endl; } //Close the file once it is written outputData.close(); free(coors); free(dataPoint); free(unsortedlabels); free(distances); free(distances2); //Pause on Windows machines to view output system("pause"); return 0; }
49fb3bf9ee140a2c74a783c667b6293cc0c003b6.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "cublas_v2.h" #include <thrust/sort.h> #include <thrust/extrema.h> #include <thrust/execution_policy.h> #include <stdio.h> #include <iostream> #include <iomanip> #include <fstream> #include <string> #include <cmath> #include <Windows.h> using namespace std; //Set grid and block size for the kernels that run and sets the number of neighbors desired int k = 7; int gridSize = 100; int blockSize = 1024; //The distKernel computes the difference squared between two points. Requires size number of threads __global__ void distKernel(float *inX, float *dataSet, int row, int col, float *distance) { int gid = threadIdx.x + blockIdx.x * blockDim.x; if (gid < row) { float dist = 0.0; for (int i = 0; i<col; i++) { float f = (inX[i] - dataSet[gid * col + i]); dist += f * f; } distance[gid] = dist; } } int main() { //Input file to read the labeled data ifstream inputData; //The arrays that will hold the labeled data float* coors; //Arrays to hold the distances computed via the GPU and CPU float* distances; float* distances2; //An array to hold the unsorted labels int* unsortedlabels; //Variable used to label new data; defaults to 0 int *outputLabel; //Variables used to hold one data point float* dataPoint; //Variables to hold the number of labeled data points and the number or points being entered int numInput; int numPoints; int numDim; int numLabel; //Opens a file whose first line is the number of elements and every subsequent line is an x coordinate, y coordinate, //and label that are seperated by spaces inputData.open("Customized_Data_Updated.txt"); //Make sure the file opens correctly if (!inputData.is_open()){ cout << "Something went wrong while reading in the data. Check where it is located again." << endl; exit(0); } //Prompt the user for the number of points being classified cout << "How many points do you want to read? 
"; cin >> numPoints; //Store the number of labeled data points, the number of dimensions and the total number of labels inputData >> numInput >> numDim >> numLabel; //Set up the arrays to have a max capacity equal to the sum of the number of labeled and unlabeled points coors = new float[numDim*(numInput + numPoints)]; unsortedlabels = new int[numInput + numPoints]; distances = new float[numInput + numPoints]; distances2 = new float[numInput + numPoints]; dataPoint = new float[numDim]; //Set up pointers for the arrays used for the GPU implementation float *devX, *devD, *devP; //Set up the proper grid size gridSize = (numInput + numPoints) / blockSize + 1; for (int i = 0; i < numInput; i++){ //Begin modifying data to find the distance more easily as no scalar - vector CUBLAS sum function for (int j = 0; j < numDim; j++){ inputData >> coors[i*numDim + j]; } inputData >> unsortedlabels[i]; } //Close the input file inputData.close(); //Collect the data points that the user wants classified for (int i = 0; i < numPoints; i++){ cout << i << " data point: " << endl; for (int j = 0; j < numDim; j++){ cout << j << " dim: "; cin >> coors[(i + numInput)*numDim + j]; } cout << endl; } //Run the KNN distance finding and sorting code for however many points the user entered for (int z = 0; z < numPoints; z++){ //Get the coordinates of the point to be classified for (int i = 0; i < numDim; i++){ dataPoint[i] = coors[numDim*(numInput + z) + i]; } cout << z << " data point: " << endl; //Create an array to hold labels that will be sorted int* labels; labels = new int[numInput + z]; //Copy all of the labels to the new array for (int i = 0; i < numInput + z; i++){ labels[i] = unsortedlabels[i]; } //Time the sequential version using Windows's QueryPerfomanceCounter() //Number of ticks per second LARGE_INTEGER frequency; //Measure times LARGE_INTEGER t1, t2; //Store time double elapsedTime; //Fill the frequency variable QueryPerformanceFrequency(&frequency); //Get the first time QueryPerformanceCounter(&t1); for (int i = 0; i < numInput + z; i++){ //Compute the distances using the CPU distances2[i] = 0.0; for (int j = 0; j < numDim; j++){ distances2[i] += (coors[i*numDim + j] - dataPoint[j])*(coors[i*numDim + j] - dataPoint[j]); } } //Get the second time QueryPerformanceCounter(&t2); //Get the elapsed time in milliseconds elapsedTime = (t2.QuadPart - t1.QuadPart) * 1000.0 / frequency.QuadPart; cout << elapsedTime << " milliseconds for sequential run." << endl; //Allocate and fill the arrays for the GPU version cudaMalloc((void**)&devX, (numInput + z)*numDim*sizeof(float)); cudaMalloc((void**)&devD, (numInput + z)*sizeof(float)); cudaMalloc((void**)&devP, (numDim)*sizeof(float)); //Create CUDA Events to time the GPU version cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); //Start timer cudaEventRecord(start); //Copy in the data cudaMemcpy(devX, coors, (numInput + z)*numDim*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(devD, coors, (numInput + z)*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(devP, dataPoint, (numDim)*sizeof(float), cudaMemcpyHostToDevice); //Compute the distances for the next dimension distKernel << <blockSize, gridSize >> >(devP, devX, numInput + z, numDim, devD); //Finish timing cudaEventRecord(stop); //Find the time for the GPU version and print it cudaEventSynchronize(stop); float milliseconds = 0; cudaEventElapsedTime(&milliseconds, start, stop); cout << milliseconds << " milliseconds for parallel run." 
<< endl; //Copy the GPU computed distances over to the host cudaMemcpy(distances, devD, (numInput + z)*sizeof(float), cudaMemcpyDeviceToHost); //Calculate the number of distances that were computed differently by the CPU and GPU int numWrong = 0; for (int i = 0; i < numInput + z; i++){ if (distances2[i] != distances[i]) numWrong++; } //Print a message if any distances were incorrectly computed if (numWrong > 0) cout << numWrong << " distances miscomputed\n"; //Free the CUDA Arrays cudaFree(devX); cudaFree(devD); //Use the thrust library to sort the distances and theit corresponding labels thrust::sort_by_key(distances, distances + numInput + z, labels); //Set up an array to hold the number of k-nearest-neighbors with that label int* labelCounts = new int[numLabel]; for (int i = 0; i < numLabel; i++){ labelCounts[i] = 0; } //Count the number of points labeled 0 and 1, telling the user the label of all k-nearest neighbors for (int i = 0; i < k; i++){ labelCounts[labels[i]] += 1; cout << "" << i + 1 << " closest point has a label of: " << labels[i] << " with a squared distance of " << distances[i] << endl; } //Find the correct output label outputLabel = thrust::max_element(thrust::host, labelCounts, labelCounts + k); int output = outputLabel - labelCounts; //Output the classification for the data point cout << "Point " << z << " should be classified as: " << output << endl; cout << endl; //Add the points label to the unsorted labels unsortedlabels[z + numInput] = output; //Free the sorted array of labels free(labels); free(labelCounts); } // cudaDeviceReset must be called before exiting in order for profiling and // tracing tools such as Nsight and Visual Profiler to show complete traces. cudaDeviceReset(); //Create an ofstream to print the data to so that it can be used as a labeled data set for another run ofstream outputData; //Open a file and make sure it is empty before writing to it outputData.open("Customizedb_Data_Updated.txt", ios::out | ios::trunc); //Make sure the file opens correctly if (!outputData.is_open()){ cout << "Something went wrong with opening the output file. Check where it is located again." << endl; exit(0); } //Put the total number of data points at the top of the file outputData << (numInput + numPoints) << " " << numDim << " " << numLabel << endl; //Print each point and its correspoding label for (int i = 0; i < numInput + numPoints; i++){ for (int j = 0; j < numDim; j++){ outputData << coors[i*numDim + j] << " " << endl; } outputData << unsortedlabels[i] << endl; } //Close the file once it is written outputData.close(); free(coors); free(dataPoint); free(unsortedlabels); free(distances); free(distances2); //Pause on Windows machines to view output system("pause"); return 0; }
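// Note on the launch in both versions above: CUDA/HIP take the grid dimension
// first and the block dimension second, but distKernel is launched as
// <<<blockSize, gridSize>>>, i.e. 1024 blocks of (numInput+numPoints)/1024 + 1
// threads each. That only covers every point while the computed gridSize stays
// at or below the 1024-threads-per-block limit. A sketch of the launch with the
// arguments in the conventional order (threadsPerBlock is an assumed name):
int threadsPerBlock = 256;
int numBlocks = (numInput + z + threadsPerBlock - 1) / threadsPerBlock;
distKernel<<<numBlocks, threadsPerBlock>>>(devP, devX, numInput + z, numDim, devD);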
542828545c8c4112be3bf05fa92fd7ab64a9c0f9.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <math.h>

// function to add the elements of two arrays
__global__ void add(int n, float *x, float *y)
{
  for (int i = 0; i < n; i++)
    y[i] = x[i] + y[i];
}

int main(void)
{
  int N = 1<<30; // 2^30 (~1 billion) elements

  float *x;
  float *y;
  hipMallocManaged(&x, N*sizeof(float));
  hipMallocManaged(&y, N*sizeof(float));

  // initialize x and y arrays on the host
  for (int i = 0; i < N; i++) {
    x[i] = 1.0f;
    y[i] = 2.0f;
  }

  // Run kernel on all N elements on the GPU (single block, single thread)
  hipLaunchKernelGGL(( add), dim3(1), dim3(1), 0, 0, N, x, y);
  hipDeviceSynchronize();

  // Check for errors (all values should be 3.0f)
  float maxError = 0.0f;
  for (int i = 0; i < N; i++)
    maxError = fmax(maxError, fabs(y[i]-3.0f));
  std::cout << "Max error: " << maxError << std::endl;

  // Free memory
  hipFree(x);
  hipFree(y);

  return 0;
}
542828545c8c4112be3bf05fa92fd7ab64a9c0f9.cu
#include <iostream> #include <math.h> // function to add the elements of two arrays __global__ void add(int n, float *x, float *y) { for (int i = 0; i < n; i++) y[i] = x[i] + y[i]; } int main(void) { int N = 1<<30; // 2^30 (~1 billion) elements float *x; float *y; cudaMallocManaged(&x, N*sizeof(float)); cudaMallocManaged(&y, N*sizeof(float)); // initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on all N elements on the GPU, using a single block with a single thread add<<<1, 1>>>(N, x, y); cudaDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error: " << maxError << std::endl; // Free memory cudaFree(x); cudaFree(y); return 0; }
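Both versions of this pair launch add with a single thread, so the loop inside the kernel runs serially on the GPU. A hedged sketch of the usual follow-up, a grid-stride variant launched over many blocks, is below; add_strided, blockSize, and the smaller N are illustrative names and values introduced here, not part of the original file.

// Hedged sketch: the same element-wise add, parallelized with a grid-stride loop.
#include <iostream>
#include <math.h>

__global__ void add_strided(int n, float *x, float *y) {
    int idx    = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = gridDim.x * blockDim.x;
    for (int i = idx; i < n; i += stride)   // each thread covers every stride-th element
        y[i] = x[i] + y[i];
}

int main(void) {
    int N = 1 << 20;                        // kept small for the sketch
    float *x, *y;
    cudaMallocManaged(&x, N * sizeof(float));
    cudaMallocManaged(&y, N * sizeof(float));
    for (int i = 0; i < N; ++i) { x[i] = 1.0f; y[i] = 2.0f; }

    int blockSize = 256;
    int numBlocks = (N + blockSize - 1) / blockSize;
    add_strided<<<numBlocks, blockSize>>>(N, x, y);
    cudaDeviceSynchronize();

    float maxError = 0.0f;
    for (int i = 0; i < N; ++i) maxError = fmax(maxError, fabs(y[i] - 3.0f));
    std::cout << "Max error: " << maxError << std::endl;

    cudaFree(x); cudaFree(y);
    return 0;
}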
e533ab9c2e8cbe3d70e90c54d6ddbadb1f85bc89.hip
// !!! This is a file automatically generated by hipify!!! #include "../common/common.h" #include <hip/hip_runtime.h> #include <stdio.h> /* * This example demonstrates an element-wise vector computation on the GPU and * a plain vector sum on the host. sumArraysOnGPU splits the work across GPU * threads using a 1D grid of 512-thread blocks. sumArraysOnHost sequentially * iterates through vector elements on the host. Note that the GPU kernel * evaluates a modified arithmetic expression rather than a plain sum, so * checkResult is expected to flag differences against the host reference. */ void checkResult(double *hostRef, double *gpuRef, const int N) { double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = 0; printf("Arrays do not match!\n"); printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i); break; } } if (match) printf("Arrays match.\n\n"); return; } void initialData(double *ip, int size) { // generate different seed for random number time_t t; srand((unsigned) time(&t)); for (int i = 0; i < size; i++) { ip[i] = (double)( rand() & 0xFF ) / 10.0f; } return; } void sumArraysOnHost(double *A, double *B, double *C, const int N) { for (int idx = 0; idx < N; idx++) { C[idx] = A[idx] + B[idx]; } } //hipLaunchKernelGGL(( sumArraysOnGPU), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem); __global__ void sumArraysOnGPU(double *A, double *B, double *C, const int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) C[i] = A[i] + B[i] + 7*A[i] + 4*B[i]/123.1 - B[i]*A[i] + B[i]*B[i] - 9*B[i]*B[i]*B[i]/0.4 + A[i]/0.2 + B[i]*B[i]; } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); // set up device int dev = 0; hipDeviceProp_t deviceProp; CHECK(hipGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK(hipSetDevice(dev)); // set up data size of vectors int nElem = 1 << 24; printf("Vector Size %d\n", nElem); // malloc host memory size_t nBytes = nElem * sizeof(double); double *h_A, *h_B, *hostRef, *gpuRef; h_A = (double *)malloc(nBytes); h_B = (double *)malloc(nBytes); hostRef = (double *)malloc(nBytes); gpuRef = (double *)malloc(nBytes); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // initialize input data on the host and compute the plain-sum reference initialData(h_A, nElem); initialData(h_B, nElem); sumArraysOnHost(h_A, h_B, hostRef, nElem); // malloc device global memory double *d_A, *d_B, *d_C; CHECK(hipMalloc((double**)&d_A, nBytes)); CHECK(hipMalloc((double**)&d_B, nBytes)); CHECK(hipMalloc((double**)&d_C, nBytes)); // transfer data from host to device CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice)); CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice)); // invoke kernel at host side int iLen = 512; dim3 block (iLen); dim3 grid (((nElem + block.x - 1) / block.x)); hipLaunchKernelGGL(( sumArraysOnGPU), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem); CHECK(hipDeviceSynchronize()); printf("sumArraysOnGPU <<< %d, %d >>> \n", grid.x, block.x); // check kernel error CHECK(hipGetLastError()) ; // copy kernel result back to host side CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost)); // check device results checkResult(hostRef, gpuRef, nElem); // free device global memory CHECK(hipFree(d_A)); CHECK(hipFree(d_B)); CHECK(hipFree(d_C)); // free host memory free(h_A); free(h_B); free(hostRef); free(gpuRef); return(0); }
e533ab9c2e8cbe3d70e90c54d6ddbadb1f85bc89.cu
#include "../common/common.h" #include <cuda_runtime.h> #include <stdio.h> /* * This example demonstrates a simple vector sum on the GPU and on the host. * sumArraysOnGPU splits the work of the vector sum across CUDA threads on the * GPU. Only a single thread block is used in this small case, for simplicity. * sumArraysOnHost sequentially iterates through vector elements on the host. * This version of sumArrays adds host timers to measure GPU and CPU * performance. */ void checkResult(double *hostRef, double *gpuRef, const int N) { double epsilon = 1.0E-8; bool match = 1; for (int i = 0; i < N; i++) { if (abs(hostRef[i] - gpuRef[i]) > epsilon) { match = 0; printf("Arrays do not match!\n"); printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i], gpuRef[i], i); break; } } if (match) printf("Arrays match.\n\n"); return; } void initialData(double *ip, int size) { // generate different seed for random number time_t t; srand((unsigned) time(&t)); for (int i = 0; i < size; i++) { ip[i] = (double)( rand() & 0xFF ) / 10.0f; } return; } void sumArraysOnHost(double *A, double *B, double *C, const int N) { for (int idx = 0; idx < N; idx++) { C[idx] = A[idx] + B[idx]; } } // sumArraysOnGPU<<<grid, block>>>(d_A, d_B, d_C, nElem); __global__ void sumArraysOnGPU(double *A, double *B, double *C, const int N) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < N) C[i] = A[i] + B[i] + 7*A[i] + 4*B[i]/123.1 - B[i]*A[i] + B[i]*B[i] - 9*B[i]*B[i]*B[i]/0.4 + A[i]/0.2 + B[i]*B[i]; } int main(int argc, char **argv) { printf("%s Starting...\n", argv[0]); // set up device int dev = 0; cudaDeviceProp deviceProp; CHECK(cudaGetDeviceProperties(&deviceProp, dev)); printf("Using Device %d: %s\n", dev, deviceProp.name); CHECK(cudaSetDevice(dev)); // set up data size of vectors int nElem = 1 << 24; printf("Vector Size %d\n", nElem); // malloc host memory size_t nBytes = nElem * sizeof(double); double *h_A, *h_B, *hostRef, *gpuRef; h_A = (double *)malloc(nBytes); h_B = (double *)malloc(nBytes); hostRef = (double *)malloc(nBytes); gpuRef = (double *)malloc(nBytes); memset(hostRef, 0, nBytes); memset(gpuRef, 0, nBytes); // malloc device global memory double *d_A, *d_B, *d_C; CHECK(cudaMalloc((double**)&d_A, nBytes)); CHECK(cudaMalloc((double**)&d_B, nBytes)); CHECK(cudaMalloc((double**)&d_C, nBytes)); // transfer data from host to device CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice)); CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice)); // invoke kernel at host side int iLen = 512; dim3 block (iLen); dim3 grid (((nElem + block.x - 1) / block.x)); sumArraysOnGPU<<<grid, block>>>(d_A, d_B, d_C, nElem); CHECK(cudaDeviceSynchronize()); printf("sumArraysOnGPU <<< %d, %d >>> \n", grid.x, block.x); // check kernel error CHECK(cudaGetLastError()) ; // copy kernel result back to host side CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost)); // check device results checkResult(hostRef, gpuRef, nElem); // free device global memory CHECK(cudaFree(d_A)); CHECK(cudaFree(d_B)); CHECK(cudaFree(d_C)); // free host memory free(h_A); free(h_B); free(hostRef); free(gpuRef); return(0); }
9d9d95c8ea060e5108d79568bbb19a702e91edc3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "matmul.h" #include <unistd.h> #include <iostream> #include <sys/time.h> using namespace std; void display_mata(float *mat, int heg, int wid, int woid) { int total; cout << endl << "\t\tThe matrix display:" << endl; switch (woid) { case 0: total = heg * heg; for (int i=0; i<total; i++) { if (i%heg == 0) { cout << "\t\t" << endl; } cout << mat[i] << " "; } cout << endl; break; case 1: total = heg * wid; for (int i=0; i<total; i++) { if (i%heg == 0) { cout << "\t\t" << endl; } cout << mat[i] << " "; } cout << endl; break; case 2: total = heg * wid; for (int i=0; i<total; i++) { if (i%wid == 0) { cout << "\t\t" << endl; } cout << mat[i] << " "; } cout << endl; break; } } __global__ void ComputeMatrix_Kernel(int h, int w, int offset, int subnum, float *matA, float *matB, float *matC) { int row, col, idx; int tid = (blockIdx.x * blockDim.x) + threadIdx.x; int gridsize = gridDim.x * blockDim.x; float sum = 0.0; #if 0 int stride = (subnum / gridsize) + 1; for (int i = 0; i < stride; i++) { idx = (i * gridsize) + tid; #else for (int i = 0; i < subnum; i += gridsize) { idx = i + tid; #endif row = idx / w; col = idx % w; if (idx < subnum) { for (int j = 0; j < h; j++) { sum += matA[row*h + j] * matB[h*col + j]; } matC[idx] = sum; sum = 0.0; } } } void CHK_ERR(int line, hipError_t ce) { if (ce != hipSuccess){ cout << "Error: line " << line << " "<< hipGetErrorString(ce) << endl; } } void cudaMatMul(int heg, int wid, int wh, int off, int subnum, float *matA, float *matB, float *matC, int peid_, int pid) { float *d_A, *d_B, *d_C; int size_A = wh * heg * sizeof(float); int size_B = wid * heg * sizeof(float); int s_size = subnum * sizeof(float); cout << "\t\tTotal size: " << size_A + size_B + s_size << " wh: " << wh << " wid: " << wid<< " subnum: " << subnum <<endl; int peid = pid; char hostname[128]; gethostname(hostname, 128); int deviceCnt = 0; CHK_ERR( __LINE__, hipGetDeviceCount(&deviceCnt)); CHK_ERR( __LINE__, hipSetDevice(peid % deviceCnt)); cout << "\t\tPE:" << peid_ << "[" << hostname << "]: RUN: Device[" << peid%deviceCnt << "]" << endl; CHK_ERR( __LINE__, hipMalloc(&d_A, size_A)); CHK_ERR( __LINE__, hipMalloc(&d_B, size_B)); CHK_ERR( __LINE__, hipMalloc(&d_C, s_size)); CHK_ERR( __LINE__, hipMemcpy(d_A, matA, size_A, hipMemcpyHostToDevice)); CHK_ERR( __LINE__, hipMemcpy(d_B, matB, size_B, hipMemcpyHostToDevice)); CHK_ERR( __LINE__, hipMemset(d_C, 0, s_size)); cout << " Launching" << endl; struct timeval time_b, time_e; gettimeofday(&time_b, NULL); hipLaunchKernelGGL(( ComputeMatrix_Kernel) , dim3(16), dim3(128), 0, 0, heg, wid, off, subnum, d_A, d_B, d_C); CHK_ERR( __LINE__, hipDeviceSynchronize()); gettimeofday(&time_e, NULL); cout << "Kernel time: " << (time_e.tv_usec - time_b.tv_usec)*1e-6 + ((double)time_e.tv_sec - (double)time_b.tv_sec) << endl; // CHK_ERR( __LINE__, hipMemcpy(matC, d_C, s_size, // hipMemcpyDeviceToHost)); CHK_ERR( __LINE__, hipFree(d_A)); CHK_ERR( __LINE__, hipFree(d_B)); CHK_ERR( __LINE__, hipFree(d_C)); // display_mata(matC, wh, wid, 2); }
9d9d95c8ea060e5108d79568bbb19a702e91edc3.cu
#include "matmul.h" #include <unistd.h> #include <iostream> #include <sys/time.h> using namespace std; void display_mata(float *mat, int heg, int wid, int woid) { int total; cout << endl << "\t\tThe matrix display:" << endl; switch (woid) { case 0: total = heg * heg; for (int i=0; i<total; i++) { if (i%heg == 0) { cout << "\t\t" << endl; } cout << mat[i] << " "; } cout << endl; break; case 1: total = heg * wid; for (int i=0; i<total; i++) { if (i%heg == 0) { cout << "\t\t" << endl; } cout << mat[i] << " "; } cout << endl; break; case 2: total = heg * wid; for (int i=0; i<total; i++) { if (i%wid == 0) { cout << "\t\t" << endl; } cout << mat[i] << " "; } cout << endl; break; } } __global__ void ComputeMatrix_Kernel(int h, int w, int offset, int subnum, float *matA, float *matB, float *matC) { int row, col, idx; int tid = (blockIdx.x * blockDim.x) + threadIdx.x; int gridsize = gridDim.x * blockDim.x; float sum = 0.0; #if 0 int stride = (subnum / gridsize) + 1; for (int i = 0; i < stride; i++) { idx = (i * gridsize) + tid; #else for (int i = 0; i < subnum; i += gridsize) { idx = i + tid; #endif row = idx / w; col = idx % w; if (idx < subnum) { for (int j = 0; j < h; j++) { sum += matA[row*h + j] * matB[h*col + j]; } matC[idx] = sum; sum = 0.0; } } } void CHK_ERR(int line, cudaError_t ce) { if (ce != cudaSuccess){ cout << "Error: line " << line << " "<< cudaGetErrorString(ce) << endl; } } void cudaMatMul(int heg, int wid, int wh, int off, int subnum, float *matA, float *matB, float *matC, int peid_, int pid) { float *d_A, *d_B, *d_C; int size_A = wh * heg * sizeof(float); int size_B = wid * heg * sizeof(float); int s_size = subnum * sizeof(float); cout << "\t\tTotal size: " << size_A + size_B + s_size << " wh: " << wh << " wid: " << wid<< " subnum: " << subnum <<endl; int peid = pid; char hostname[128]; gethostname(hostname, 128); int deviceCnt = 0; CHK_ERR( __LINE__, cudaGetDeviceCount(&deviceCnt)); CHK_ERR( __LINE__, cudaSetDevice(peid % deviceCnt)); cout << "\t\tPE:" << peid_ << "[" << hostname << "]: RUN: Device[" << peid%deviceCnt << "]" << endl; CHK_ERR( __LINE__, cudaMalloc(&d_A, size_A)); CHK_ERR( __LINE__, cudaMalloc(&d_B, size_B)); CHK_ERR( __LINE__, cudaMalloc(&d_C, s_size)); CHK_ERR( __LINE__, cudaMemcpy(d_A, matA, size_A, cudaMemcpyHostToDevice)); CHK_ERR( __LINE__, cudaMemcpy(d_B, matB, size_B, cudaMemcpyHostToDevice)); CHK_ERR( __LINE__, cudaMemset(d_C, 0, s_size)); cout << " Launching" << endl; struct timeval time_b, time_e; gettimeofday(&time_b, NULL); ComputeMatrix_Kernel <<<16, 128>>>(heg, wid, off, subnum, d_A, d_B, d_C); CHK_ERR( __LINE__, cudaDeviceSynchronize()); gettimeofday(&time_e, NULL); cout << "Kernel time: " << (time_e.tv_usec - time_b.tv_usec)*1e-6 + ((double)time_e.tv_sec - (double)time_b.tv_sec) << endl; // CHK_ERR( __LINE__, cudaMemcpy(matC, d_C, s_size, // cudaMemcpyDeviceToHost)); CHK_ERR( __LINE__, cudaFree(d_A)); CHK_ERR( __LINE__, cudaFree(d_B)); CHK_ERR( __LINE__, cudaFree(d_C)); // display_mata(matC, wh, wid, 2); }
326d470882e1e5c169959ef52536de954f0db04d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "my_graph_net_sub.cuh" #include "my_graph_net.cuh" #include "my_device_func.cuh" #include <string.h> #include <stdarg.h> //--------------------------------------- MY_MATRIX_DEVICE :: MY_MATRIX_DEVICE() { x = NULL; grad_x = NULL; row=0; column =0; rate = 0.0; } MY_MATRIX_DEVICE :: ~MY_MATRIX_DEVICE() { if(x != NULL) CUDA_CALL(hipFree(x)); if(grad_x != NULL) CUDA_CALL(hipFree(grad_x)); } MY_MATRIX_DEQUE :: MY_MATRIX_DEQUE() { head = NULL; tail = NULL; } MY_MATRIX_DEQUE :: ~MY_MATRIX_DEQUE() { if(head != NULL) { while(IsEmpty() == false){ RemoveLast(); } } } bool MY_MATRIX_DEQUE :: IsEmpty() { if(head == NULL) return true; else return false; } void MY_MATRIX_DEQUE :: AddFirst(MY_MATRIX_DEVICE *pdata) { _node_matrix *newNode = (_node_matrix*)malloc(sizeof(_node_matrix)); newNode->data = pdata; newNode->next = head; if(IsEmpty()) tail = newNode; else head->prev = newNode; newNode->prev = NULL; head = newNode; } void MY_MATRIX_DEQUE :: AddLast(MY_MATRIX_DEVICE *pdata) { _node_matrix *newNode = (_node_matrix*)malloc(sizeof(_node_matrix)); newNode->data = pdata; newNode->prev = tail; if(IsEmpty()) head = newNode; else tail->next = newNode; newNode->next = NULL; tail = newNode; } void MY_MATRIX_DEQUE :: RemoveFirst() { _node_matrix *rnode = head; delete rnode->data;// head = head->next; free(rnode); if(head == NULL) tail = NULL; else head->prev = NULL; } void MY_MATRIX_DEQUE :: RemoveLast() { _node_matrix *rnode = tail; delete rnode->data;// tail = tail->prev; free(rnode); if(tail == NULL) head = NULL; else tail->next = NULL; } //--------------------------------------- MY_GRAPH_NET_DEQUE :: MY_GRAPH_NET_DEQUE() { head = NULL; tail = NULL; } MY_GRAPH_NET_DEQUE :: ~MY_GRAPH_NET_DEQUE() { if(head != NULL) { while(IsEmpty() == false) RemoveLast(); } } bool MY_GRAPH_NET_DEQUE :: IsEmpty() { if(head == NULL) return true; else return false; } void MY_GRAPH_NET_DEQUE :: AddFirst(MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb, MY_MATRIX_DEVICE *pc, void (*func)(void*, MY_MATRIX_DEVICE*, MY_MATRIX_DEVICE*, MY_MATRIX_DEVICE*, GATE_STAT)) { _node_graph_net *newNode = (_node_graph_net*)malloc(sizeof(_node_graph_net)); newNode->in1 = pa; newNode->in2 = pb; newNode->out = pc; newNode->operate = func; newNode->next = head; if(IsEmpty()) tail = newNode; else head->prev = newNode; newNode->prev = NULL; head = newNode; } void MY_GRAPH_NET_DEQUE :: AddLast(MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb, MY_MATRIX_DEVICE *pc, void (*func)(void*, MY_MATRIX_DEVICE*, MY_MATRIX_DEVICE*, MY_MATRIX_DEVICE*, GATE_STAT)) { _node_graph_net *newNode = (_node_graph_net*)malloc(sizeof(_node_graph_net)); newNode->in1 = pa; newNode->in2 = pb; newNode->out = pc; newNode->operate = func; newNode->prev = tail; if(IsEmpty()) head = newNode; else tail->next = newNode; newNode->next = NULL; tail = newNode; } void MY_GRAPH_NET_DEQUE :: RemoveFirst() { _node_graph_net *rnode = head; // head = head->next; free(rnode); if(head == NULL) tail = NULL; else head->prev = NULL; } void MY_GRAPH_NET_DEQUE :: RemoveLast() { _node_graph_net *rnode = tail; // tail = tail->prev; free(rnode); if(tail == NULL) head = NULL; else tail->next = NULL; } MY_PARA_MANAGER :: MY_PARA_MANAGER() { } MY_PARA_MANAGER :: ~MY_PARA_MANAGER() { } MY_MATRIX_DEVICE* MY_PARA_MANAGER :: set(const char *name, int row, int column) { MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE; deque_matrix.AddLast(pc); pc->row = row; pc->column = column; strcpy(pc->name,name); int const threadsPerBolck = 
1024; int blocksPerGride = 0; CUDA_CALL(hipMalloc(&(pc->grad_x),sizeof(float)*(pc->row)*(pc->column))); blocksPerGride = ((pc->row)*(pc->column) + threadsPerBolck -1)/threadsPerBolck; hipLaunchKernelGGL(( make_zeros), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pc->grad_x,(pc->row)*(pc->column)); CUDA_CALL(hipMalloc(&(pc->x),sizeof(float)*(pc->row)*(pc->column))); blocksPerGride = ((pc->row)*(pc->column) + threadsPerBolck -1)/threadsPerBolck; hipLaunchKernelGGL(( make_zeros), dim3(blocksPerGride), dim3(threadsPerBolck), 0, 0, pc->x,(pc->row)*(pc->column)); return pc; } void my_set_gaussian(float mean, float std, ...) { va_list ap; MY_MATRIX_DEVICE *arg; hiprandGenerator_t rand_gen; CURAND_CALL(hiprandCreateGenerator(&rand_gen,HIPRAND_RNG_PSEUDO_DEFAULT)); CURAND_CALL(hiprandSetPseudoRandomGeneratorSeed(rand_gen,rand())); va_start(ap,std); while(1){ arg=va_arg(ap,MY_MATRIX_DEVICE*); if (arg == NULL) break; CURAND_CALL(hiprandGenerateNormal(rand_gen,arg->x,(arg->row)*(arg->column),mean,std)); } va_end(ap); CURAND_CALL(hiprandDestroyGenerator(rand_gen)); } void my_para_write(const char *filename, ...) { va_list ap; MY_MATRIX_DEVICE *arg; va_start(ap,filename); char filename_para[64]; strcpy(filename_para,filename); *(strstr(filename_para,".txt")) = (char)NULL; FILE *fd_table,*fd_para; fd_table = fopen(filename,"w"); fd_para = fopen(filename_para,"wb"); MY_FUNC_ERROR(fd_table != NULL); MY_FUNC_ERROR(fd_para != NULL); float *temp; while(1){ arg=va_arg(ap,MY_MATRIX_DEVICE*); if (arg == NULL) break; fprintf(fd_table,"%s %d %d\n",arg->name,arg->row,arg->column); temp = (float*)malloc(sizeof(float)*(arg->row)*(arg->column)); CUDA_CALL(hipMemcpy(temp,arg->x,sizeof(float)*(arg->row)*(arg->column),hipMemcpyDeviceToHost)); fwrite(temp,sizeof(float),(arg->row)*(arg->column),fd_para); free(temp); } va_end(ap); fclose(fd_table); fclose(fd_para); } void my_para_read(const char *filename, ... 
) { int cnt = 0; va_list ap; MY_MATRIX_DEVICE *arg,**arr_arg; va_start(ap,filename); while(1){ arg=va_arg(ap,MY_MATRIX_DEVICE*); if (arg == NULL) break; cnt++; } va_end(ap); arr_arg = new MY_MATRIX_DEVICE* [cnt]; int i = 0; va_start(ap,filename); while(1){ arg=va_arg(ap,MY_MATRIX_DEVICE*); if (arg == NULL) break; arr_arg[i] = arg; i++; } va_end(ap); char filename_para[64]; strcpy(filename_para,filename); *(strstr(filename_para,".txt")) = (char)NULL; FILE *fd_table,*fd_para; fd_table = fopen(filename,"r"); fd_para = fopen(filename_para,"rb"); MY_FUNC_ERROR(fd_table != NULL); MY_FUNC_ERROR(fd_para != NULL); char para_name[64]; int row,column; float *temp; while( !feof(fd_table)) // { arg = NULL; fscanf(fd_table,"%s %d %d\n",para_name,&row,&column); for(i = 0 ; i < cnt ; i++) { if((strcmp(para_name,arr_arg[i]->name) == 0)&&(row == arr_arg[i]->row)&&(column == arr_arg[i]->column)) { arg = arr_arg[i]; break; } } MY_FUNC_ERROR(arg != NULL); temp = (float*)malloc(sizeof(float)*(arg->row)*(arg->column)); fread(temp,sizeof(float),(arg->row)*(arg->column),fd_para); CUDA_CALL(hipMemcpy(arg->x,temp,sizeof(float)*(arg->row)*(arg->column),hipMemcpyHostToDevice)); free(temp); } fclose(fd_table); fclose(fd_para); delete [] arr_arg; } void my_host2device(float *host, float* device, int n) { CUDA_CALL(hipMemcpy(device,host,sizeof(float)*n,hipMemcpyHostToDevice)); } void my_print(MY_MATRIX_DEVICE *pa) { cout<<endl; float aaa[1000000]; int yy,xx; CUDA_CALL(hipMemcpy(aaa,pa->x,sizeof(float)*(pa->row)*(pa->column),hipMemcpyDeviceToHost)); printf("%d %d\n",pa->row,pa->column); yy = pa->row; xx = pa->column; if(yy > 20) { yy = 20; } if(xx > 20) { xx = 20; } for(int y = 0 ; y < yy ;y++){ for(int x = 0 ; x < xx ;x++) { printf("%1.7f ",aaa[IDX2C(y,x,pa->row)]); } cout<<endl; } cout<<"---grad ---"<<endl; CUDA_CALL(hipMemcpy(aaa,pa->grad_x,sizeof(float)*(pa->row)*(pa->column),hipMemcpyDeviceToHost)); for(int y = 0 ; y < yy ;y++){ for(int x = 0 ; x < xx ;x++) { printf("%1.7f ",aaa[IDX2C(y,x,pa->row)]); } cout<<endl; } } void get_mnist_image(const char *name, MY_MATRIX_DEVICE *pa) { int width = 28; int height = 28; int depth = 8; int channels = 1; char temp[100]; float *float_data = new float[pa->row*pa->column]; unsigned char *uch_data = new unsigned char[pa->row*pa->column]; CUDA_CALL(hipMemcpy(float_data,pa->x,sizeof(float)*(pa->row)*(pa->column),hipMemcpyDeviceToHost)); for(int i = 0 ; i < pa->row*pa->column ; i++) { uch_data[i] = (unsigned char)(float_data[i]*255.0); } IplImage *img = cvCreateImage(cvSize(width,height),depth,channels); for(int i = 0 ; i < pa->column ; i++) { memcpy(img->imageData,uch_data+i*784,784); sprintf(temp,"%s_%d.jpg",name,i); cvSaveImage(temp,img); } cvReleaseImage(&img); }
326d470882e1e5c169959ef52536de954f0db04d.cu
#include "my_graph_net_sub.cuh" #include "my_graph_net.cuh" #include "my_device_func.cuh" #include <string.h> #include <stdarg.h> //--------------------------------------- MY_MATRIX_DEVICE :: MY_MATRIX_DEVICE() { x = NULL; grad_x = NULL; row=0; column =0; rate = 0.0; } MY_MATRIX_DEVICE :: ~MY_MATRIX_DEVICE() { if(x != NULL) CUDA_CALL(cudaFree(x)); if(grad_x != NULL) CUDA_CALL(cudaFree(grad_x)); } MY_MATRIX_DEQUE :: MY_MATRIX_DEQUE() { head = NULL; tail = NULL; } MY_MATRIX_DEQUE :: ~MY_MATRIX_DEQUE() { if(head != NULL) { while(IsEmpty() == false){ RemoveLast(); } } } bool MY_MATRIX_DEQUE :: IsEmpty() { if(head == NULL) return true; else return false; } void MY_MATRIX_DEQUE :: AddFirst(MY_MATRIX_DEVICE *pdata) { _node_matrix *newNode = (_node_matrix*)malloc(sizeof(_node_matrix)); newNode->data = pdata; newNode->next = head; if(IsEmpty()) tail = newNode; else head->prev = newNode; newNode->prev = NULL; head = newNode; } void MY_MATRIX_DEQUE :: AddLast(MY_MATRIX_DEVICE *pdata) { _node_matrix *newNode = (_node_matrix*)malloc(sizeof(_node_matrix)); newNode->data = pdata; newNode->prev = tail; if(IsEmpty()) head = newNode; else tail->next = newNode; newNode->next = NULL; tail = newNode; } void MY_MATRIX_DEQUE :: RemoveFirst() { _node_matrix *rnode = head; delete rnode->data;// head = head->next; free(rnode); if(head == NULL) tail = NULL; else head->prev = NULL; } void MY_MATRIX_DEQUE :: RemoveLast() { _node_matrix *rnode = tail; delete rnode->data;// tail = tail->prev; free(rnode); if(tail == NULL) head = NULL; else tail->next = NULL; } //--------------------------------------- MY_GRAPH_NET_DEQUE :: MY_GRAPH_NET_DEQUE() { head = NULL; tail = NULL; } MY_GRAPH_NET_DEQUE :: ~MY_GRAPH_NET_DEQUE() { if(head != NULL) { while(IsEmpty() == false) RemoveLast(); } } bool MY_GRAPH_NET_DEQUE :: IsEmpty() { if(head == NULL) return true; else return false; } void MY_GRAPH_NET_DEQUE :: AddFirst(MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb, MY_MATRIX_DEVICE *pc, void (*func)(void*, MY_MATRIX_DEVICE*, MY_MATRIX_DEVICE*, MY_MATRIX_DEVICE*, GATE_STAT)) { _node_graph_net *newNode = (_node_graph_net*)malloc(sizeof(_node_graph_net)); newNode->in1 = pa; newNode->in2 = pb; newNode->out = pc; newNode->operate = func; newNode->next = head; if(IsEmpty()) tail = newNode; else head->prev = newNode; newNode->prev = NULL; head = newNode; } void MY_GRAPH_NET_DEQUE :: AddLast(MY_MATRIX_DEVICE *pa, MY_MATRIX_DEVICE *pb, MY_MATRIX_DEVICE *pc, void (*func)(void*, MY_MATRIX_DEVICE*, MY_MATRIX_DEVICE*, MY_MATRIX_DEVICE*, GATE_STAT)) { _node_graph_net *newNode = (_node_graph_net*)malloc(sizeof(_node_graph_net)); newNode->in1 = pa; newNode->in2 = pb; newNode->out = pc; newNode->operate = func; newNode->prev = tail; if(IsEmpty()) head = newNode; else tail->next = newNode; newNode->next = NULL; tail = newNode; } void MY_GRAPH_NET_DEQUE :: RemoveFirst() { _node_graph_net *rnode = head; // head = head->next; free(rnode); if(head == NULL) tail = NULL; else head->prev = NULL; } void MY_GRAPH_NET_DEQUE :: RemoveLast() { _node_graph_net *rnode = tail; // tail = tail->prev; free(rnode); if(tail == NULL) head = NULL; else tail->next = NULL; } MY_PARA_MANAGER :: MY_PARA_MANAGER() { } MY_PARA_MANAGER :: ~MY_PARA_MANAGER() { } MY_MATRIX_DEVICE* MY_PARA_MANAGER :: set(const char *name, int row, int column) { MY_MATRIX_DEVICE *pc = new MY_MATRIX_DEVICE; deque_matrix.AddLast(pc); pc->row = row; pc->column = column; strcpy(pc->name,name); int const threadsPerBolck = 1024; int blocksPerGride = 0; 
CUDA_CALL(cudaMalloc(&(pc->grad_x),sizeof(float)*(pc->row)*(pc->column))); blocksPerGride = ((pc->row)*(pc->column) + threadsPerBolck -1)/threadsPerBolck; make_zeros<<<blocksPerGride, threadsPerBolck>>>(pc->grad_x,(pc->row)*(pc->column)); CUDA_CALL(cudaMalloc(&(pc->x),sizeof(float)*(pc->row)*(pc->column))); blocksPerGride = ((pc->row)*(pc->column) + threadsPerBolck -1)/threadsPerBolck; make_zeros<<<blocksPerGride, threadsPerBolck>>>(pc->x,(pc->row)*(pc->column)); return pc; } void my_set_gaussian(float mean, float std, ...) { va_list ap; MY_MATRIX_DEVICE *arg; curandGenerator_t rand_gen; CURAND_CALL(curandCreateGenerator(&rand_gen,CURAND_RNG_PSEUDO_DEFAULT)); CURAND_CALL(curandSetPseudoRandomGeneratorSeed(rand_gen,rand())); va_start(ap,std); while(1){ arg=va_arg(ap,MY_MATRIX_DEVICE*); if (arg == NULL) break; CURAND_CALL(curandGenerateNormal(rand_gen,arg->x,(arg->row)*(arg->column),mean,std)); } va_end(ap); CURAND_CALL(curandDestroyGenerator(rand_gen)); } void my_para_write(const char *filename, ...) { va_list ap; MY_MATRIX_DEVICE *arg; va_start(ap,filename); char filename_para[64]; strcpy(filename_para,filename); *(strstr(filename_para,".txt")) = (char)NULL; FILE *fd_table,*fd_para; fd_table = fopen(filename,"w"); fd_para = fopen(filename_para,"wb"); MY_FUNC_ERROR(fd_table != NULL); MY_FUNC_ERROR(fd_para != NULL); float *temp; while(1){ arg=va_arg(ap,MY_MATRIX_DEVICE*); if (arg == NULL) break; fprintf(fd_table,"%s %d %d\n",arg->name,arg->row,arg->column); temp = (float*)malloc(sizeof(float)*(arg->row)*(arg->column)); CUDA_CALL(cudaMemcpy(temp,arg->x,sizeof(float)*(arg->row)*(arg->column),cudaMemcpyDeviceToHost)); fwrite(temp,sizeof(float),(arg->row)*(arg->column),fd_para); free(temp); } va_end(ap); fclose(fd_table); fclose(fd_para); } void my_para_read(const char *filename, ... 
) { int cnt = 0; va_list ap; MY_MATRIX_DEVICE *arg,**arr_arg; va_start(ap,filename); while(1){ arg=va_arg(ap,MY_MATRIX_DEVICE*); if (arg == NULL) break; cnt++; } va_end(ap); arr_arg = new MY_MATRIX_DEVICE* [cnt]; int i = 0; va_start(ap,filename); while(1){ arg=va_arg(ap,MY_MATRIX_DEVICE*); if (arg == NULL) break; arr_arg[i] = arg; i++; } va_end(ap); char filename_para[64]; strcpy(filename_para,filename); *(strstr(filename_para,".txt")) = (char)NULL; FILE *fd_table,*fd_para; fd_table = fopen(filename,"r"); fd_para = fopen(filename_para,"rb"); MY_FUNC_ERROR(fd_table != NULL); MY_FUNC_ERROR(fd_para != NULL); char para_name[64]; int row,column; float *temp; while( !feof(fd_table)) // 파일의 끝을 만난 때 까지 루프 { arg = NULL; fscanf(fd_table,"%s %d %d\n",para_name,&row,&column); for(i = 0 ; i < cnt ; i++) { if((strcmp(para_name,arr_arg[i]->name) == 0)&&(row == arr_arg[i]->row)&&(column == arr_arg[i]->column)) { arg = arr_arg[i]; break; } } MY_FUNC_ERROR(arg != NULL); temp = (float*)malloc(sizeof(float)*(arg->row)*(arg->column)); fread(temp,sizeof(float),(arg->row)*(arg->column),fd_para); CUDA_CALL(cudaMemcpy(arg->x,temp,sizeof(float)*(arg->row)*(arg->column),cudaMemcpyHostToDevice)); free(temp); } fclose(fd_table); fclose(fd_para); delete [] arr_arg; } void my_host2device(float *host, float* device, int n) { CUDA_CALL(cudaMemcpy(device,host,sizeof(float)*n,cudaMemcpyHostToDevice)); } void my_print(MY_MATRIX_DEVICE *pa) { cout<<endl; float aaa[1000000]; int yy,xx; CUDA_CALL(cudaMemcpy(aaa,pa->x,sizeof(float)*(pa->row)*(pa->column),cudaMemcpyDeviceToHost)); printf("%d %d\n",pa->row,pa->column); yy = pa->row; xx = pa->column; if(yy > 20) { yy = 20; } if(xx > 20) { xx = 20; } for(int y = 0 ; y < yy ;y++){ for(int x = 0 ; x < xx ;x++) { printf("%1.7f ",aaa[IDX2C(y,x,pa->row)]); } cout<<endl; } cout<<"---grad ---"<<endl; CUDA_CALL(cudaMemcpy(aaa,pa->grad_x,sizeof(float)*(pa->row)*(pa->column),cudaMemcpyDeviceToHost)); for(int y = 0 ; y < yy ;y++){ for(int x = 0 ; x < xx ;x++) { printf("%1.7f ",aaa[IDX2C(y,x,pa->row)]); } cout<<endl; } } void get_mnist_image(const char *name, MY_MATRIX_DEVICE *pa) { int width = 28; int height = 28; int depth = 8; int channels = 1; char temp[100]; float *float_data = new float[pa->row*pa->column]; unsigned char *uch_data = new unsigned char[pa->row*pa->column]; CUDA_CALL(cudaMemcpy(float_data,pa->x,sizeof(float)*(pa->row)*(pa->column),cudaMemcpyDeviceToHost)); for(int i = 0 ; i < pa->row*pa->column ; i++) { uch_data[i] = (unsigned char)(float_data[i]*255.0); } IplImage *img = cvCreateImage(cvSize(width,height),depth,channels); for(int i = 0 ; i < pa->column ; i++) { memcpy(img->imageData,uch_data+i*784,784); sprintf(temp,"%s_%d.jpg",name,i); cvSaveImage(temp,img); } cvReleaseImage(&img); }
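The helpers in the pair above (my_set_gaussian, my_para_write, my_para_read) walk a NULL-terminated vararg list of MY_MATRIX_DEVICE pointers, and MY_PARA_MANAGER::set allocates zero-filled device buffers. A hedged usage sketch of that sentinel convention follows, assuming the declarations from my_graph_net_sub.cuh; the parameter names, shapes, and the "params.txt" path are illustrative, not taken from the files.

// Hedged sketch: calling the NULL-terminated variadic helpers defined above.
#include "my_graph_net_sub.cuh"   // assumed to declare MY_PARA_MANAGER, MY_MATRIX_DEVICE, helpers

void example_usage() {
    MY_PARA_MANAGER para;

    // set() allocates zero-filled x / grad_x buffers on the device.
    MY_MATRIX_DEVICE *W1 = para.set("W1", 784, 100);
    MY_MATRIX_DEVICE *W2 = para.set("W2", 100, 10);

    // Initialize with N(0, 0.01); the trailing null pointer ends the argument list.
    my_set_gaussian(0.0f, 0.01f, W1, W2, (MY_MATRIX_DEVICE *)NULL);

    // Persist and restore: the .txt file holds name/row/column records,
    // the sibling binary file holds the raw floats.
    my_para_write("params.txt", W1, W2, (MY_MATRIX_DEVICE *)NULL);
    my_para_read("params.txt", W1, W2, (MY_MATRIX_DEVICE *)NULL);
}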
147292ddfd5f23bd7d314ec3a569ef0e1d7f9415.hip
// !!! This is a file automatically generated by hipify!!! using namespace std; // permite usar el "cout" #include <iostream> #include <algorithm> #include <stdlib.h>/* srand, rand */ #include "time.h" /* time */ #include <stdio.h> /* printf */ #include <math.h> #include <fstream> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include<Cuda.h> #include<hiprand/hiprand.h> #include<hiprand/hiprand_kernel.h> #include "stdlib.h" #include<windows.h> // Variables globales **************************************************************************************** const int nvdec = 16 ; const int nvbin = 4 ; // MULTIPLO nvbin*nvdec DE 2^n !!! const int nvars = nvbin*nvdec ; const int psize = 640 ; // MULTIPLO DE 32!!! 320, 640, 960, 1280, 1600, 1920, 2240, 2560 const int ngen = 5000; //%10; 25; 10 const int nelit = 1 ; //cantidad de individuos del ELIT float mutp = 0.01 ; // Probabiludad de mutacin float tol = 1e-6 ; // *************** ESTRUCTURAS ******************************************************************************* struct indiv { float Sol; int Ind; } ; bool lessthan(const indiv &a, const indiv &b) { return (b.Sol < a.Sol); } // *************** DEVICE FUNCTIONS ************************************************************************** __global__ void setup_rand ( hiprandState_t * state, unsigned long seed ) { int id = blockDim.x * blockIdx.x + threadIdx.x; hiprand_init ( seed, id, 0, &state[id] ); } __device__ float generate( hiprandState_t* globalState, int ind ) { hiprandState_t localState = globalState[ind]; float RANDOM = hiprand_uniform( &localState ); globalState[ind] = localState; return RANDOM; } __device__ float BinDec(int bin[nvbin], int n){ // convierte el vector binario ind a entero int sum = 0, two = 2; for(int i = 0; i<n; i++){ sum = sum + bin[i]*powf(two,i) ; } return sum; } __global__ void InitPop(int *d_Pop, int nvars, hiprandState_t* globalState){ // Genera la poblacion inicial int it = blockDim.x * blockIdx.x + threadIdx.x; // PRUEBA //for(int i=0; i<nvars; i++){ d_Pop[it*nvars + i] = it ; } //for(int i=0; i<nvars; i++){ d_Pop[it*nvars + i] = 1 ; } //for(int i=it; i<nvars+it; i++){ d_Pop[i] = 1 ; } //if(it < 1 ){d_Pop[ 3 ] = 0 ; d_Pop[ 6+it] = 0 ; d_Pop[8+it] = 0 ;d_Pop[ 9+it] = 0 ; d_Pop[22+it] = 0 ; d_Pop[29+it] = 0 ; } //if(it > 1 ){d_Pop[ 2+it*64] = 0 ; d_Pop[5+it*64] = 0 ; d_Pop[7+it*64] = 0 ;d_Pop[ 8+it*64] = 0 ; d_Pop[21+it*64] = 0 ; d_Pop[30+it*64] = 0 ; } //if(it = 3 ){for(int i=0; i<nvars; i++){ d_Pop[it*nvars + i] = 0 ; } } // FIN PRUEBA for(int i=0; i<nvars ; i++){ float k = generate(globalState, i+it)*1 ; //i+it d_Pop[it*nvars + i] = lroundf(k) ; } } __global__ void InitPop_s(int *d_Pop, int ipop, int nvars, hiprandState_t* globalState){ // Genera la poblacion inicial int it = blockDim.x * blockIdx.x + threadIdx.x; // PRUEBA //for(int i=0; i<nvars; i++){ d_Pop[it*nvars + i] = it ; } //for(int i=0; i<nvars; i++){ d_Pop[it*nvars + i] = 1 ; } //for(int i=it; i<nvars+it; i++){ d_Pop[i] = 1 ; } //if(it < 1 ){d_Pop[ 3 ] = 0 ; d_Pop[ 6+it] = 0 ; d_Pop[8+it] = 0 ;d_Pop[ 9+it] = 0 ; d_Pop[22+it] = 0 ; d_Pop[29+it] = 0 ; } //if(it = 1 ){d_Pop[ 2+it*64] = 0 ; d_Pop[5+it*64] = 0 ; d_Pop[7+it*64] = 0 ;d_Pop[ 8+it*64] = 0 ; d_Pop[21+it*64] = 0 ; d_Pop[30+it*64] = 0 ; } // FIN PRUEBA for(int i=0; i<nvars ; i++){ float k = generate(globalState, i+ipop)*1 ; //i+it d_Pop[ipop*nvars + i] = lroundf(k) ; } } __global__ void Func(float *d_Sol, int *d_Pop, int nvbin, int nvdec){ // Genera la poblacion inicial int it = blockDim.x * blockIdx.x + threadIdx.x; int 
ipop=0, two=2, ten=10, bin[4]; float x=0, y=0, X=0, Y=0, sum = 0, PI=3.141592653; float lim = 9/(powf(2,nvbin)-1) ; for (int inum=1; inum<nvdec/two; inum++){ for(int i = 0; i<nvbin; i++){ bin[i] = d_Pop[it*nvbin*nvdec + nvbin*inum + i] ; } X = lroundf(BinDec(bin, nvbin)*lim) ; for(int i = 0; i<nvbin; i++){ bin[i] = d_Pop[it*nvbin*nvdec + nvbin*nvdec/two + nvbin*inum + i] ; } Y = lroundf(BinDec(bin, nvbin)*lim) ; x = x + powf(ten,(0-inum))*X ; // x = x + powf(ten,(two-inum))*X ; y = y + powf(ten,(0-inum))*Y ; // y = y + powf(ten,(two-inum))*Y ; } for(int i = 0; i<nvbin; i++){ bin[i] = d_Pop[it*nvbin*nvdec + i] ; } if(BinDec(bin,nvbin) < (powf(two,nvbin)-1)*0.5 ){ x = (-1)*x ; } for(int i = 0; i<nvbin; i++){ bin[i] = d_Pop[it*nvbin*nvdec + nvbin*nvdec/two + i] ; } if(BinDec(bin,nvbin) < (powf(two,nvbin)-1)*0.5 ){ y = (-1)*y ; } d_Sol[it] = x*x + y*y ; // De Jong //float a=20, b=0.2, c=2*PI; int n=2; //d_Sol[it] = -a*expf(-b*sqrt((x*x + y*y)/n)) - expf((cos(c*x) + cos(c*y))/n) + a + expf(1) ; // Ackley //int c=10 ; d_Sol[it] = c*two + (x*x - c*cos(two*PI*x)) + (y*y - c*cos(two*PI*y)) ; // Rastrigin's function } __global__ void Func_s(float *d_Sol, int *d_Pop, int ipop, int nvbin, int nvdec){ // Genera la poblacion inicial int it = ipop; int two=2, ten=10, bin[4]; float x=0, y=0, X=0, Y=0, sum = 0, PI=3.141592653; float lim = 9/(powf(2,nvbin)-1) ; for (int inum=1; inum<nvdec/two; inum++){ for(int i = 0; i<nvbin; i++){ bin[i] = d_Pop[it*nvbin*nvdec + nvbin*inum + i] ; } X = lroundf(BinDec(bin, nvbin)*lim) ; for(int i = 0; i<nvbin; i++){ bin[i] = d_Pop[it*nvbin*nvdec + nvbin*nvdec/two + nvbin*inum + i] ; } Y = lroundf(BinDec(bin, nvbin)*lim) ; x = x + powf(ten,(two-inum))*X ; y = y + powf(ten,(two-inum))*Y ; } for(int i = 0; i<nvbin; i++){ bin[i] = d_Pop[it*nvbin*nvdec + i] ; } if(BinDec(bin,nvbin) < (powf(two,nvbin)-1)*0.5 ){ x = (-1)*x ; } for(int i = 0; i<nvbin; i++){ bin[i] = d_Pop[it*nvbin*nvdec + nvbin*nvdec/two + i] ; } if(BinDec(bin,nvbin) < (powf(two,nvbin)-1)*0.5 ){ y = (-1)*y ; } d_Sol[it] = x*x + y*y ; //d_Sol[it] = X ; // d_Pop[it*nvbin*nvdec]; // Paraboloide eliptico ;//d_Pop[it+3];//[it*nvbin*nvdec];//X;//BinDec(bin, nvbin) ; //sum ; // //return // return 0.01*(x*x + y*y) + pow(sin( x*x + y*y), 2) ; // Rastrigin's function ***/ // return 10*two + (x*x-10*cos(two*PI*x)) + (y*y-10*cos(two*PI*y)) ; } __global__ void Rearrange(int *vec2, int *vec1, int *ind, int nvars, int nvec) { int it = blockDim.x * blockIdx.x + threadIdx.x; //blockDim = cantidad de hilos por bloque, es una constante [nvbin*nvdec] 64 //blockIdx = nombre del bloque [identifica a la pareja] de 1 a psize //threadIdx = enumeracion del hilo dentro del bloque [cada variable de un ind.] de 1 a 64 if (it < nvec) { vec2[it] = vec1[ind[blockIdx.x]*nvars + threadIdx.x] ; } //NPop[it*2] = Pop[ Male[blockIdx.x]*nvars + threadIdx.x*2 ] ; //vec2[it] = it ; } __global__ void Rearrange_s(int *vec2, int *vec1, int *ind, int nvars, int nvec, int it, int iblo, int ithr) { //int it = id; blockDim.x * blockIdx.x + threadIdx.x; //blockDim = cantidad de hilos por bloque, es una constante [nvbin*nvdec] 64 //blockIdx = nombre del bloque [identifica a la pareja] de 1 a psize //threadIdx = enumeracion del hilo dentro del bloque [cada variable de un ind.] 
de 1 a 64 if (it < nvec) { vec2[it] = vec1[ind[iblo]*nvars + ithr] ; } } __global__ void Probability(float *vec2, float *vec1, int nvec, float alpha) { int it = blockDim.x * blockIdx.x + threadIdx.x; if (it < nvec) { vec2[it]=0 ; for(int ivec=0; ivec<=it; ivec++){ vec2[it]=vec2[it]+vec1[ivec]/alpha ; } } } __global__ void CumSumVec(float *Sum, float *vec, int nvec) { int it = blockDim.x * blockIdx.x + threadIdx.x; if (it < nvec) { __syncthreads(); atomicAdd(Sum, vec[it]) ; } } __global__ void EqualityINT(int *vec2, int *vec1, int ivec, int nvec) { int it = blockDim.x * blockIdx.x + threadIdx.x; if (it < nvec) { vec2[it] = vec1[it+ivec] ; } //vec2[it] = it ; } __global__ void EqualityINT_s(int *vec2, int *vec1, int ivec, int ipop, int nvec) { int it = ipop; if (it < nvec) { vec2[it] = vec1[it+ivec] ; } //vec2[it] = it ; } __global__ void RandomINT(int *vec, int nvec, int psize, hiprandState_t* globalState){ // Genera un vector con "n" valores aleatorios enteros entre 0 y psize int it = blockDim.x * blockIdx.x + threadIdx.x; if (it < nvec) { float k = generate(globalState, it)*(psize-0.5) ; vec[it] = lroundf(k) ; } } __global__ void RandomINT_s(int *vec, int nvec, int psize, int ivar, hiprandState_t* globalState){ // Genera un vector con "n" valores aleatorios enteros entre 0 y psize int it = ivar; if (it < nvec) { float k = generate(globalState, it)*(psize-0.5) ; vec[it] = lroundf(k) ; } } __global__ void GroupSelection(int *sel, int *group, int gsize, int ngroup){ // Genera un vector con "n" valores aleatorios enteros entre 0 y psize int it = blockDim.x * blockIdx.x + threadIdx.x; if(it<ngroup){ sel[it] = group[it*gsize] ; for(int i=0; i<gsize; i++){ if (sel[it] < group[it*gsize + i] ) { sel[it] = group[it*gsize + i] ; } } } } __global__ void GroupSelection_s(int *sel, int *group, int gsize, int ngroup, int ivar){ // Genera un vector con "n" valores aleatorios enteros entre 0 y psize int it = ivar; if(it<ngroup){ sel[it] = group[it*gsize] ; for(int i=0; i<gsize; i++){ if (sel[it] < group[it*gsize + i] ) { sel[it] = group[it*gsize + i] ; } } } } __global__ void CrossoverSingle(int *NPop, int *Male, int *Female, int *Pop, int psize, int nvars){ // se llama una vez por cada 2 individuos y genera el cruzamiento entre el padre y la madre int it = blockDim.x * blockIdx.x + threadIdx.x ; //blockDim = cantidad de hilos por bloque, es una constante [nvbin*nvdec] 64 //blockIdx = nombre del bloque [identifica a la pareja] de 1 a psize/2 //threadIdx = enumeracion del hilo dentro del bloque [cada variable de un ind.] de 1 a 64 if(threadIdx.x < nvars/2){ NPop[it*2] = Pop[ Male[blockIdx.x]*nvars + threadIdx.x*2 ] ; NPop[it*2+1] = Pop[Female[blockIdx.x]*nvars + threadIdx.x*2 + 1] ; }else{ NPop[it*2] = Pop[Female[blockIdx.x]*nvars + (threadIdx.x - nvars/2 )*2 ] ; NPop[it*2+1] = Pop[ Male[blockIdx.x]*nvars + (threadIdx.x - nvars/2 )*2 + 1] ; } } __global__ void CrossoverSingle_s(int *NPop, int *Male, int *Female, int *Pop, int psize, int nvars, int it, int iblo, int ithr){ // se llama una vez por cada 2 individuos y genera el cruzamiento entre el padre y la madre //int it = blockDim.x * blockIdx.x + threadIdx.x ; //blockDim = cantidad de hilos por bloque, es una constante [nvbin*nvdec] 64 //blockIdx = nombre del bloque [identifica a la pareja] de 1 a psize/2 //threadIdx = enumeracion del hilo dentro del bloque [cada variable de un ind.] 
de 1 a 64 if(ithr < nvars/2){ NPop[it*2] = Pop[ Male[iblo]*nvars + ithr*2 ] ; NPop[it*2+1] = Pop[Female[iblo]*nvars + ithr*2 + 1] ; }else{ NPop[it*2] = Pop[Female[iblo]*nvars + (ithr - nvars/2 )*2 ] ; NPop[it*2+1] = Pop[ Male[iblo]*nvars + (ithr - nvars/2 )*2 + 1] ; } } __global__ void Mutation(int *Pop, float mutp, int nvec, int nvars, hiprandState_t* globalState){ // Genera un vector con "n" valores aleatorios enteros entre 0 y psize int it = blockDim.x * blockIdx.x + threadIdx.x; if (it < nvec) { float ran = generate(globalState, it)*(1.0) ; if(ran<mutp){ int ivar = lroundf(generate(globalState, it)*(nvars-0.5)) ; if(Pop[it*nvars+ivar]==0){ Pop[it*nvars+ivar] = 1 ; }else{ Pop[it*nvars+ivar] = 0 ; } } } } __global__ void Mutation_s(int *Pop, float mutp, int nvec, int nvars, hiprandState_t* globalState, int ivar){ // Genera un vector con "n" valores aleatorios enteros entre 0 y psize int it = ivar; if (it < nvec) { float ran = generate(globalState, it)*(1.0) ; if(ran<mutp){ int ivar = lroundf(generate(globalState, it)*(nvars-0.5)) ; if(Pop[it*nvars+ivar]==0){ Pop[it*nvars+ivar] = 1 ; }else{ Pop[it*nvars+ivar] = 0 ; } } } } //__global__ void EqualityINTprueba(int *vec2, int *vec1, int ivec, int nvec) { // int it = blockDim.x * blockIdx.x + threadIdx.x; // //vec2[it] = it; // if (it < nvec) { vec2[it] = it+ivec ; } //} // //__global__ void EqualityElite(int *vec, int nvec) { // int it = blockDim.x * blockIdx.x + threadIdx.x; // vec[it] = it+nvec ; //} // *************** HOST FUNCTIONS **************************************************************************** //SortArray2(Sol, Pop, nvars2, psize); //void SortArray2(vector<float> &Sol, int Pop[psize][nvars2], int nvars2, int psize){ // indiv POP1[psize]; // for (int j=0; j<psize; j++) // { // POP1[j].Solu = Sol[j]; // // printf("POP1[%d].Solu = %g \n", j, POP1[j].Solu); // POP1[j].vars.resize(nvars2); // for (int k=0; k<nvars2; k++) // { // POP1[j].vars[k] = Pop[j][k]; // } // POP1[j].index = j; // // printf("POP1[%i].index = %d \n", j, POP1[j].index); // } // sort(POP1,POP1+psize,lessthan); // for (int l=0; l<psize; l++) // { // Sol[l]= POP1[l].Solu; // // printf("Sol[%d] = %g \n", l, Sol[l]); // // POP1[j].vars.resize(nvars); // for (int m=0; m<nvars2; m++) // { // Pop[l][m]=POP1[l].vars[m]; // } // // POP1[j].index = j; // } // /* for (int j=0; j<psize; j++) // { // printf("POP1[%d].Solu = %g \n", j, POP1[j].Solu); // printf("POP1[%i].index = %d \n", j, POP1[j].index); // }*/ //} //void SortArray(float Sol[psize], int Pop[psize][nvdec][nvbin], int nvdec, int nvbin, int psize){ // int i,j; //Variables contadoras del ciclo. // //int lista[Nelementos]={6,9,3,1}; //Declaracion e inicializacion de un arreglo de 4 elementos. // float tempS = 0.0 ; //Variable temporal. // float tempP = 0.0 ; //Variable temporal. 
// // for (i=1; i<psize; i++){ // for (j=0; j <= psize-2; j++){ // if (Sol[j] < Sol[j+1]){ //de Mayor a Menor: < ; de Menor a Mayor: > // tempS = Sol[j] ; // Sol[j] = Sol[j+1]; // Sol[j+1] = tempS; // for(int idec=0; idec<nvdec; idec++){ // for(int ibin=0; ibin<nvbin; ibin++){ // tempP = Pop[j][idec][ibin] ; // Pop[j][idec][ibin] = Pop[j+1][idec][ibin] ; // Pop[j+1][idec][ibin] = tempP ; // } // } // } // } // } //} //********************** MAIN ********************************************************************************** int main() { clock_t start = clock(); hipError_t err = hipSuccess; // Error code to check return values for CUDA calls int igen=0,ipop; float BestSol=1.0, x=0.0, y=0.0 ; for(int iiter=0; iiter<50; iiter++){ start = clock(); // random number on device hiprandState_t* devStates; hipMalloc ( &devStates, psize*nvars*sizeof( hiprandState_t ) ); hipLaunchKernelGGL(( setup_rand) , dim3(psize*nvars/32), dim3(32) , 0, 0, devStates,unsigned(time(NULL)) ); // setup seeds size_t size = psize*nvars*sizeof(int); int *d_Pop = NULL; hipMalloc((void **)&d_Pop, size); hipLaunchKernelGGL(( InitPop), dim3(psize/32), dim3(32) , 0, 0, d_Pop, nvars, devStates); //for(ipop=0; ipop<psize; ipop++){ InitPop_s<<< 1, 1 >>>(d_Pop, ipop, nvars, devStates); } // SECUENCIAL !!! //int *h_Pop = (int *)malloc(size); //prueba //hipMemcpy(h_Pop, d_Pop, size, hipMemcpyDeviceToHost); //prueba //printf("h_Pop \n"); for(int i=0;i<psize*nvars;i++) { cout<<h_Pop[i]<<endl; } //prueba err = hipFree(devStates); if (err != hipSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } igen = 0 ; BestSol=1.0 ; x=0.0 ; y=0.0 ; // Bucle de las generaciones ************************************************************ while(BestSol > tol){ //while(igen < ngen){ //if(igen > ngen+1){ BestSol=-1 ; //exit(EXIT_FAILURE); } igen++ ; size = psize*sizeof(float); float *d_Sol = NULL; float *h_Sol = (float *)malloc(size); hipMalloc((void **)&d_Sol, size); // Evaluacion funcion objetivo hipLaunchKernelGGL(( Func), dim3(psize/32), dim3(32), 0, 0, d_Sol, d_Pop, nvbin, nvdec) ; //for(ipop=0; ipop<psize; ipop++){ Func_s<<< 1, 1 >>>(d_Sol, d_Pop, ipop, nvbin, nvdec); } // SECUENCIAL !!! hipMemcpy(h_Sol, d_Sol, size, hipMemcpyDeviceToHost); //printf("h_Sol \n"); for(int i=0;i<psize;i++) { cout<<h_Sol[i]<<endl; } //prueba // Ordena los individuos segun el valor de la solucion indiv Pop[psize]; for (int j=0; j<psize; j++) { Pop[j].Sol = h_Sol[j] ; Pop[j].Ind = j ; } sort(Pop, Pop+psize, lessthan); BestSol = Pop[psize-1].Sol ; if(igen > ngen+1){ BestSol=-1 ; //exit(EXIT_FAILURE); } err = hipFree(d_Sol); if (err != hipSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } int h_Ind[psize] ; for(int i=0;i<psize;i++) { h_Ind[i] = Pop[i].Ind ; } int *d_NPop = NULL; int *d_Pind = NULL; hipMalloc((void **)&d_Pind, psize*sizeof(int)) ; hipMalloc((void **)&d_NPop, psize*nvars*sizeof(int)) ; hipLaunchKernelGGL(( EqualityINT), dim3(psize), dim3(nvars), 0, 0, d_NPop, d_Pop, 0, psize*nvars) ; // hace una copia de d_Pop //for(ipop=0; ipop<psize; ipop++){ EqualityINT_s<<< 1, 1 >>>(d_NPop, d_Pop, 0, ipop, psize*nvars); } // SECUENCIAL !!! hipMemcpy(d_Pind, h_Ind, psize*sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( Rearrange), dim3(psize), dim3(nvars), 0, 0, d_Pop, d_NPop, d_Pind, nvars, psize*nvars) ; // ordena d_Pop // INICIO SECUENCIAL !!! 
//int it=0; //for(int iblo=0; iblo<psize; iblo++){ // for(int ithr=0; ithr<nvars; ithr++){ // it++ ; // Rearrange_s<<< 1, 1 >>>(d_Pop, d_NPop, d_Pind, nvars, psize*nvars, it, iblo, ithr); // } //} // FIN SECUENCIAL !!! err = hipFree(d_Pind); if (err != hipSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_NPop); if (err != hipSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } //x=0.0 ; y=0.0 ; //XY(x, y, Best, nvdec, nvbin) ; //printf("igen %i ,Minimo %f ,X: %f ,Y: %f \n",igen,BestSol,x,y); printf("igen %i ,Minimo %f \n",igen,BestSol); // Elige la poblacin ELITE size = nelit*nvars*sizeof(int); int *d_Eli = NULL ; hipMalloc((void **)&d_Eli, size) ; for(ipop=psize-nelit; ipop<psize; ipop++){ hipLaunchKernelGGL(( EqualityINT), dim3(1), dim3(nvars), 0, 0, d_Eli, d_Pop, ipop*nvars, nvars) ; //for(int ivar=0; ivar<nvars; ivar++){ EqualityINT_s<<< 1, 1 >>>(d_Eli, d_Pop, ipop*nvars, ivar, nvars); } // SECUENCIAL !!! } // SELECCION (Simple Roulet) int ngroup = psize ; // cantidad de grupos (de cada grupo sale un individuo) int gsize = 3 ; // 5 tamao de cada grupo (nro. de ind. en cada grupo) hiprandState_t* devStates; hipMalloc ( &devStates, gsize*ngroup*sizeof( hiprandState_t ) ); hipLaunchKernelGGL(( setup_rand) , dim3(ngroup), dim3(gsize) , 0, 0, devStates,unsigned(time(NULL)) ); size = gsize*ngroup*sizeof(int); int *d_Tiro = NULL ; hipMalloc((void **)&d_Tiro, size) ; hipLaunchKernelGGL(( RandomINT), dim3(ngroup), dim3(gsize) , 0, 0, d_Tiro, gsize*ngroup, psize, devStates) ; //for(int ivar=0; ivar<ngroup*gsize; ivar++){ RandomINT_s<<< 1, 1 >>>(d_Tiro, gsize*ngroup, psize, ivar, devStates); } // SECUENCIAL !!! err = hipFree(devStates); if (err != hipSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } size = psize*sizeof(int); int *d_Sel = NULL ; hipMalloc((void **)&d_Sel, size) ; hipLaunchKernelGGL(( GroupSelection), dim3(psize/32), dim3(32) , 0, 0, d_Sel, d_Tiro, gsize, ngroup) ; //for(int ivar=0; ivar<psize; ivar++){ GroupSelection_s<<< 1, 1 >>>(d_Sel, d_Tiro, gsize, ngroup, ivar); } // SECUENCIAL !!! err = hipFree(d_Tiro); if (err != hipSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // CRUZAMIENTO (Crossover Simple) size = (psize/2)*sizeof(int) ; int *d_Male = NULL ; int *d_Female = NULL ; hipMalloc((void **)&d_Male, size) ; hipMalloc((void **)&d_Female, size) ; hipLaunchKernelGGL(( EqualityINT), dim3(psize/2/32), dim3(32), 0, 0, d_Male, d_Sel, 0, psize/2) ; hipLaunchKernelGGL(( EqualityINT), dim3(psize/2/32), dim3(32), 0, 0, d_Female, d_Sel, psize/2, psize/2) ; // INICIO SECUENCIAL !!! //for(int ivar=0; ivar<psize/2; ivar++){ // EqualityINT_s<<< 1, 1 >>>(d_Male, d_Sel, 0, ivar, psize/2); // EqualityINT_s<<< 1, 1 >>>(d_Female, d_Sel, psize/2, ivar, psize/2); //} // FIN SECUENCIAL !!! err = hipFree(d_Sel); if (err != hipSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } size = psize*nvars*sizeof(int); hipMalloc((void **)&d_NPop, size) ; hipLaunchKernelGGL(( CrossoverSingle), dim3(psize/2), dim3(nvars), 0, 0, d_NPop, d_Male, d_Female, d_Pop, psize, nvars) ; // INICIO SECUENCIAL !!! 
//int itt=0; //for(int iblo=0; iblo<psize/2; iblo++){ // for(int ithr=0; ithr<nvars; ithr++){ // itt++ ; // CrossoverSingle_s<<< 1, 1 >>>(d_NPop, d_Male, d_Female, d_Pop, psize, nvars, itt, iblo, ithr); // } //} // FIN SECUENCIAL !!! err = hipFree(d_Male); if (err != hipSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } err = hipFree(d_Female); if (err != hipSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } hipLaunchKernelGGL(( EqualityINT), dim3(psize), dim3(nvbin*nvdec), 0, 0, d_Pop, d_NPop, 0, psize*nvars) ; //for(int ivar=0; ivar<psize*nvbin*nvdec; ivar++){ EqualityINT_s<<< 1, 1 >>>(d_Pop, d_NPop, 0, ivar, psize*nvars); } // SECUENCIAL !!! err = hipFree(d_NPop); if (err != hipSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // MUTACION (Step mutation) hipMalloc ( &devStates, psize*sizeof( hiprandState_t ) ); hipLaunchKernelGGL(( setup_rand) , dim3(psize/32), dim3(32) , 0, 0, devStates,unsigned(time(NULL)) ); hipLaunchKernelGGL(( Mutation), dim3(psize/32), dim3(32) , 0, 0, d_Pop, mutp, psize, nvars, devStates) ; //for(int ivar=0; ivar<psize; ivar++){ Mutation_s<<< 1, 1 >>>(d_Pop, mutp, psize, nvars, devStates, ivar); } // SECUENCIAL !!! err = hipFree(devStates); if (err != hipSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } // Copia la poblacin ELITE for(ipop=psize-nelit; ipop<psize; ipop++){ hipLaunchKernelGGL(( EqualityINT), dim3(1), dim3(nvars), 0, 0, d_Pop, d_Eli, ipop*nvars, nvars) ; //for(int ivar=0; ivar<nvars; ivar++){ EqualityINT_s<<< 1, 1 >>>(d_Pop, d_Eli, ipop*nvars, ivar, nvars); } // SECUENCIAL !!! } err = hipFree(d_Eli); if (err != hipSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } } // Free device global memory err = hipFree(d_Pop); if (err != hipSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", hipGetErrorString(err)); exit(EXIT_FAILURE); } printf("Poblacion: %i, Generaciones: %i, Tiempo transcurrido: %f \n",psize , igen, ((double)clock() - start) / CLOCKS_PER_SEC) ; } // iiter printf("Done\n"); }
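Func in the file above decodes each individual as a sign group followed by 4-bit digit groups scaled by lim = 9/(2^nvbin - 1). A host-side sketch mirroring that decode for one coordinate is given below, following the .hip variant, which weights digits by 10^(0-inum); the bit pattern chosen here is illustrative only.

// Hedged sketch: host-side mirror of the BinDec/Func decoding used above.
#include <stdio.h>
#include <math.h>

static int bin_dec(const int *bin, int n) {           // little-endian bits -> integer
    int sum = 0;
    for (int i = 0; i < n; ++i) sum += bin[i] * (1 << i);
    return sum;
}

int main() {
    const int nvbin = 4, nvdec = 16;
    const float lim = 9.0f / ((1 << nvbin) - 1);       // maps a 4-bit value onto 0..9
    int chrom[nvbin * nvdec / 2] = {0};                // one coordinate's half of an individual
    for (int b = 4; b < 8; ++b) chrom[b] = 1;          // group 1 = 1111 -> digit 9

    // group 0 encodes the sign, groups 1..nvdec/2-1 encode successive decimal places
    float x = 0.0f;
    for (int inum = 1; inum < nvdec / 2; ++inum) {
        int digit = lroundf(bin_dec(&chrom[nvbin * inum], nvbin) * lim);
        x += powf(10.0f, (float)(0 - inum)) * digit;
    }
    if (bin_dec(&chrom[0], nvbin) < ((1 << nvbin) - 1) * 0.5f) x = -x;

    printf("decoded x = %f\n", x);                     // prints -0.900000 for this pattern
    return 0;
}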
147292ddfd5f23bd7d314ec3a569ef0e1d7f9415.cu
using namespace std; // permite usar el "cout" #include <iostream> #include <algorithm> #include <stdlib.h>/* srand, rand */ #include "time.h" /* time */ #include <stdio.h> /* printf */ #include <math.h> #include <fstream> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include<Cuda.h> #include<curand.h> #include<curand_kernel.h> #include "stdlib.h" #include<windows.h> // Variables globales **************************************************************************************** const int nvdec = 16 ; const int nvbin = 4 ; // MULTIPLO nvbin*nvdec DE 2^n !!! const int nvars = nvbin*nvdec ; const int psize = 640 ; // MULTIPLO DE 32!!! 320, 640, 960, 1280, 1600, 1920, 2240, 2560 const int ngen = 5000; //%10; 25; 10 const int nelit = 1 ; //cantidad de individuos del ELIT float mutp = 0.01 ; // Probabiludad de mutación float tol = 1e-6 ; // *************** ESTRUCTURAS ******************************************************************************* struct indiv { float Sol; int Ind; } ; bool lessthan(const indiv &a, const indiv &b) { return (b.Sol < a.Sol); } // *************** DEVICE FUNCTIONS ************************************************************************** __global__ void setup_rand ( curandState * state, unsigned long seed ) { int id = blockDim.x * blockIdx.x + threadIdx.x; curand_init ( seed, id, 0, &state[id] ); } __device__ float generate( curandState* globalState, int ind ) { curandState localState = globalState[ind]; float RANDOM = curand_uniform( &localState ); globalState[ind] = localState; return RANDOM; } __device__ float BinDec(int bin[nvbin], int n){ // convierte el vector binario ind a entero int sum = 0, two = 2; for(int i = 0; i<n; i++){ sum = sum + bin[i]*powf(two,i) ; } return sum; } __global__ void InitPop(int *d_Pop, int nvars, curandState* globalState){ // Genera la poblacion inicial int it = blockDim.x * blockIdx.x + threadIdx.x; // PRUEBA //for(int i=0; i<nvars; i++){ d_Pop[it*nvars + i] = it ; } //for(int i=0; i<nvars; i++){ d_Pop[it*nvars + i] = 1 ; } //for(int i=it; i<nvars+it; i++){ d_Pop[i] = 1 ; } //if(it < 1 ){d_Pop[ 3 ] = 0 ; d_Pop[ 6+it] = 0 ; d_Pop[8+it] = 0 ;d_Pop[ 9+it] = 0 ; d_Pop[22+it] = 0 ; d_Pop[29+it] = 0 ; } //if(it > 1 ){d_Pop[ 2+it*64] = 0 ; d_Pop[5+it*64] = 0 ; d_Pop[7+it*64] = 0 ;d_Pop[ 8+it*64] = 0 ; d_Pop[21+it*64] = 0 ; d_Pop[30+it*64] = 0 ; } //if(it = 3 ){for(int i=0; i<nvars; i++){ d_Pop[it*nvars + i] = 0 ; } } // FIN PRUEBA for(int i=0; i<nvars ; i++){ float k = generate(globalState, i+it)*1 ; //i+it d_Pop[it*nvars + i] = lroundf(k) ; } } __global__ void InitPop_s(int *d_Pop, int ipop, int nvars, curandState* globalState){ // Genera la poblacion inicial int it = blockDim.x * blockIdx.x + threadIdx.x; // PRUEBA //for(int i=0; i<nvars; i++){ d_Pop[it*nvars + i] = it ; } //for(int i=0; i<nvars; i++){ d_Pop[it*nvars + i] = 1 ; } //for(int i=it; i<nvars+it; i++){ d_Pop[i] = 1 ; } //if(it < 1 ){d_Pop[ 3 ] = 0 ; d_Pop[ 6+it] = 0 ; d_Pop[8+it] = 0 ;d_Pop[ 9+it] = 0 ; d_Pop[22+it] = 0 ; d_Pop[29+it] = 0 ; } //if(it = 1 ){d_Pop[ 2+it*64] = 0 ; d_Pop[5+it*64] = 0 ; d_Pop[7+it*64] = 0 ;d_Pop[ 8+it*64] = 0 ; d_Pop[21+it*64] = 0 ; d_Pop[30+it*64] = 0 ; } // FIN PRUEBA for(int i=0; i<nvars ; i++){ float k = generate(globalState, i+ipop)*1 ; //i+it d_Pop[ipop*nvars + i] = lroundf(k) ; } } __global__ void Func(float *d_Sol, int *d_Pop, int nvbin, int nvdec){ // Genera la poblacion inicial int it = blockDim.x * blockIdx.x + threadIdx.x; int ipop=0, two=2, ten=10, bin[4]; float x=0, y=0, X=0, Y=0, sum = 0, PI=3.141592653; float lim = 
9/(powf(2,nvbin)-1) ; for (int inum=1; inum<nvdec/two; inum++){ for(int i = 0; i<nvbin; i++){ bin[i] = d_Pop[it*nvbin*nvdec + nvbin*inum + i] ; } X = lroundf(BinDec(bin, nvbin)*lim) ; for(int i = 0; i<nvbin; i++){ bin[i] = d_Pop[it*nvbin*nvdec + nvbin*nvdec/two + nvbin*inum + i] ; } Y = lroundf(BinDec(bin, nvbin)*lim) ; x = x + powf(ten,(0-inum))*X ; // x = x + powf(ten,(two-inum))*X ; y = y + powf(ten,(0-inum))*Y ; // y = y + powf(ten,(two-inum))*Y ; } for(int i = 0; i<nvbin; i++){ bin[i] = d_Pop[it*nvbin*nvdec + i] ; } if(BinDec(bin,nvbin) < (powf(two,nvbin)-1)*0.5 ){ x = (-1)*x ; } for(int i = 0; i<nvbin; i++){ bin[i] = d_Pop[it*nvbin*nvdec + nvbin*nvdec/two + i] ; } if(BinDec(bin,nvbin) < (powf(two,nvbin)-1)*0.5 ){ y = (-1)*y ; } d_Sol[it] = x*x + y*y ; // De Jong //float a=20, b=0.2, c=2*PI; int n=2; //d_Sol[it] = -a*expf(-b*sqrt((x*x + y*y)/n)) - expf((cos(c*x) + cos(c*y))/n) + a + expf(1) ; // Ackley //int c=10 ; d_Sol[it] = c*two + (x*x - c*cos(two*PI*x)) + (y*y - c*cos(two*PI*y)) ; // Rastrigin's function } __global__ void Func_s(float *d_Sol, int *d_Pop, int ipop, int nvbin, int nvdec){ // Genera la poblacion inicial int it = ipop; int two=2, ten=10, bin[4]; float x=0, y=0, X=0, Y=0, sum = 0, PI=3.141592653; float lim = 9/(powf(2,nvbin)-1) ; for (int inum=1; inum<nvdec/two; inum++){ for(int i = 0; i<nvbin; i++){ bin[i] = d_Pop[it*nvbin*nvdec + nvbin*inum + i] ; } X = lroundf(BinDec(bin, nvbin)*lim) ; for(int i = 0; i<nvbin; i++){ bin[i] = d_Pop[it*nvbin*nvdec + nvbin*nvdec/two + nvbin*inum + i] ; } Y = lroundf(BinDec(bin, nvbin)*lim) ; x = x + powf(ten,(two-inum))*X ; y = y + powf(ten,(two-inum))*Y ; } for(int i = 0; i<nvbin; i++){ bin[i] = d_Pop[it*nvbin*nvdec + i] ; } if(BinDec(bin,nvbin) < (powf(two,nvbin)-1)*0.5 ){ x = (-1)*x ; } for(int i = 0; i<nvbin; i++){ bin[i] = d_Pop[it*nvbin*nvdec + nvbin*nvdec/two + i] ; } if(BinDec(bin,nvbin) < (powf(two,nvbin)-1)*0.5 ){ y = (-1)*y ; } d_Sol[it] = x*x + y*y ; //d_Sol[it] = X ; // d_Pop[it*nvbin*nvdec]; // Paraboloide eliptico ;//d_Pop[it+3];//[it*nvbin*nvdec];//X;//BinDec(bin, nvbin) ; //sum ; // //return // return 0.01*(x*x + y*y) + pow(sin( x*x + y*y), 2) ; // Rastrigin's function ***/ // return 10*two + (x*x-10*cos(two*PI*x)) + (y*y-10*cos(two*PI*y)) ; } __global__ void Rearrange(int *vec2, int *vec1, int *ind, int nvars, int nvec) { int it = blockDim.x * blockIdx.x + threadIdx.x; //blockDim = cantidad de hilos por bloque, es una constante [nvbin*nvdec] 64 //blockIdx = nombre del bloque [identifica a la pareja] de 1 a psize //threadIdx = enumeracion del hilo dentro del bloque [cada variable de un ind.] de 1 a 64 if (it < nvec) { vec2[it] = vec1[ind[blockIdx.x]*nvars + threadIdx.x] ; } //NPop[it*2] = Pop[ Male[blockIdx.x]*nvars + threadIdx.x*2 ] ; //vec2[it] = it ; } __global__ void Rearrange_s(int *vec2, int *vec1, int *ind, int nvars, int nvec, int it, int iblo, int ithr) { //int it = id; blockDim.x * blockIdx.x + threadIdx.x; //blockDim = cantidad de hilos por bloque, es una constante [nvbin*nvdec] 64 //blockIdx = nombre del bloque [identifica a la pareja] de 1 a psize //threadIdx = enumeracion del hilo dentro del bloque [cada variable de un ind.] 
de 1 a 64 if (it < nvec) { vec2[it] = vec1[ind[iblo]*nvars + ithr] ; } } __global__ void Probability(float *vec2, float *vec1, int nvec, float alpha) { int it = blockDim.x * blockIdx.x + threadIdx.x; if (it < nvec) { vec2[it]=0 ; for(int ivec=0; ivec<=it; ivec++){ vec2[it]=vec2[it]+vec1[ivec]/alpha ; } } } __global__ void CumSumVec(float *Sum, float *vec, int nvec) { int it = blockDim.x * blockIdx.x + threadIdx.x; if (it < nvec) { __syncthreads(); atomicAdd(Sum, vec[it]) ; } } __global__ void EqualityINT(int *vec2, int *vec1, int ivec, int nvec) { int it = blockDim.x * blockIdx.x + threadIdx.x; if (it < nvec) { vec2[it] = vec1[it+ivec] ; } //vec2[it] = it ; } __global__ void EqualityINT_s(int *vec2, int *vec1, int ivec, int ipop, int nvec) { int it = ipop; if (it < nvec) { vec2[it] = vec1[it+ivec] ; } //vec2[it] = it ; } __global__ void RandomINT(int *vec, int nvec, int psize, curandState* globalState){ // Genera un vector con "n" valores aleatorios enteros entre 0 y psize int it = blockDim.x * blockIdx.x + threadIdx.x; if (it < nvec) { float k = generate(globalState, it)*(psize-0.5) ; vec[it] = lroundf(k) ; } } __global__ void RandomINT_s(int *vec, int nvec, int psize, int ivar, curandState* globalState){ // Genera un vector con "n" valores aleatorios enteros entre 0 y psize int it = ivar; if (it < nvec) { float k = generate(globalState, it)*(psize-0.5) ; vec[it] = lroundf(k) ; } } __global__ void GroupSelection(int *sel, int *group, int gsize, int ngroup){ // Genera un vector con "n" valores aleatorios enteros entre 0 y psize int it = blockDim.x * blockIdx.x + threadIdx.x; if(it<ngroup){ sel[it] = group[it*gsize] ; for(int i=0; i<gsize; i++){ if (sel[it] < group[it*gsize + i] ) { sel[it] = group[it*gsize + i] ; } } } } __global__ void GroupSelection_s(int *sel, int *group, int gsize, int ngroup, int ivar){ // Genera un vector con "n" valores aleatorios enteros entre 0 y psize int it = ivar; if(it<ngroup){ sel[it] = group[it*gsize] ; for(int i=0; i<gsize; i++){ if (sel[it] < group[it*gsize + i] ) { sel[it] = group[it*gsize + i] ; } } } } __global__ void CrossoverSingle(int *NPop, int *Male, int *Female, int *Pop, int psize, int nvars){ // se llama una vez por cada 2 individuos y genera el cruzamiento entre el padre y la madre int it = blockDim.x * blockIdx.x + threadIdx.x ; //blockDim = cantidad de hilos por bloque, es una constante [nvbin*nvdec] 64 //blockIdx = nombre del bloque [identifica a la pareja] de 1 a psize/2 //threadIdx = enumeracion del hilo dentro del bloque [cada variable de un ind.] de 1 a 64 if(threadIdx.x < nvars/2){ NPop[it*2] = Pop[ Male[blockIdx.x]*nvars + threadIdx.x*2 ] ; NPop[it*2+1] = Pop[Female[blockIdx.x]*nvars + threadIdx.x*2 + 1] ; }else{ NPop[it*2] = Pop[Female[blockIdx.x]*nvars + (threadIdx.x - nvars/2 )*2 ] ; NPop[it*2+1] = Pop[ Male[blockIdx.x]*nvars + (threadIdx.x - nvars/2 )*2 + 1] ; } } __global__ void CrossoverSingle_s(int *NPop, int *Male, int *Female, int *Pop, int psize, int nvars, int it, int iblo, int ithr){ // se llama una vez por cada 2 individuos y genera el cruzamiento entre el padre y la madre //int it = blockDim.x * blockIdx.x + threadIdx.x ; //blockDim = cantidad de hilos por bloque, es una constante [nvbin*nvdec] 64 //blockIdx = nombre del bloque [identifica a la pareja] de 1 a psize/2 //threadIdx = enumeracion del hilo dentro del bloque [cada variable de un ind.] 
de 1 a 64 if(ithr < nvars/2){ NPop[it*2] = Pop[ Male[iblo]*nvars + ithr*2 ] ; NPop[it*2+1] = Pop[Female[iblo]*nvars + ithr*2 + 1] ; }else{ NPop[it*2] = Pop[Female[iblo]*nvars + (ithr - nvars/2 )*2 ] ; NPop[it*2+1] = Pop[ Male[iblo]*nvars + (ithr - nvars/2 )*2 + 1] ; } } __global__ void Mutation(int *Pop, float mutp, int nvec, int nvars, curandState* globalState){ // Genera un vector con "n" valores aleatorios enteros entre 0 y psize int it = blockDim.x * blockIdx.x + threadIdx.x; if (it < nvec) { float ran = generate(globalState, it)*(1.0) ; if(ran<mutp){ int ivar = lroundf(generate(globalState, it)*(nvars-0.5)) ; if(Pop[it*nvars+ivar]==0){ Pop[it*nvars+ivar] = 1 ; }else{ Pop[it*nvars+ivar] = 0 ; } } } } __global__ void Mutation_s(int *Pop, float mutp, int nvec, int nvars, curandState* globalState, int ivar){ // Genera un vector con "n" valores aleatorios enteros entre 0 y psize int it = ivar; if (it < nvec) { float ran = generate(globalState, it)*(1.0) ; if(ran<mutp){ int ivar = lroundf(generate(globalState, it)*(nvars-0.5)) ; if(Pop[it*nvars+ivar]==0){ Pop[it*nvars+ivar] = 1 ; }else{ Pop[it*nvars+ivar] = 0 ; } } } } //__global__ void EqualityINTprueba(int *vec2, int *vec1, int ivec, int nvec) { // int it = blockDim.x * blockIdx.x + threadIdx.x; // //vec2[it] = it; // if (it < nvec) { vec2[it] = it+ivec ; } //} // //__global__ void EqualityElite(int *vec, int nvec) { // int it = blockDim.x * blockIdx.x + threadIdx.x; // vec[it] = it+nvec ; //} // *************** HOST FUNCTIONS **************************************************************************** //SortArray2(Sol, Pop, nvars2, psize); //void SortArray2(vector<float> &Sol, int Pop[psize][nvars2], int nvars2, int psize){ // indiv POP1[psize]; // for (int j=0; j<psize; j++) // { // POP1[j].Solu = Sol[j]; // // printf("POP1[%d].Solu = %g \n", j, POP1[j].Solu); // POP1[j].vars.resize(nvars2); // for (int k=0; k<nvars2; k++) // { // POP1[j].vars[k] = Pop[j][k]; // } // POP1[j].index = j; // // printf("POP1[%i].index = %d \n", j, POP1[j].index); // } // sort(POP1,POP1+psize,lessthan); // for (int l=0; l<psize; l++) // { // Sol[l]= POP1[l].Solu; // // printf("Sol[%d] = %g \n", l, Sol[l]); // // POP1[j].vars.resize(nvars); // for (int m=0; m<nvars2; m++) // { // Pop[l][m]=POP1[l].vars[m]; // } // // POP1[j].index = j; // } // /* for (int j=0; j<psize; j++) // { // printf("POP1[%d].Solu = %g \n", j, POP1[j].Solu); // printf("POP1[%i].index = %d \n", j, POP1[j].index); // }*/ //} //void SortArray(float Sol[psize], int Pop[psize][nvdec][nvbin], int nvdec, int nvbin, int psize){ // int i,j; //Variables contadoras del ciclo. // //int lista[Nelementos]={6,9,3,1}; //Declaracion e inicializacion de un arreglo de 4 elementos. // float tempS = 0.0 ; //Variable temporal. // float tempP = 0.0 ; //Variable temporal. 
// // for (i=1; i<psize; i++){ // for (j=0; j <= psize-2; j++){ // if (Sol[j] < Sol[j+1]){ //de Mayor a Menor: < ; de Menor a Mayor: > // tempS = Sol[j] ; // Sol[j] = Sol[j+1]; // Sol[j+1] = tempS; // for(int idec=0; idec<nvdec; idec++){ // for(int ibin=0; ibin<nvbin; ibin++){ // tempP = Pop[j][idec][ibin] ; // Pop[j][idec][ibin] = Pop[j+1][idec][ibin] ; // Pop[j+1][idec][ibin] = tempP ; // } // } // } // } // } //} //********************** MAIN ********************************************************************************** int main() { clock_t start = clock(); cudaError_t err = cudaSuccess; // Error code to check return values for CUDA calls int igen=0,ipop; float BestSol=1.0, x=0.0, y=0.0 ; for(int iiter=0; iiter<50; iiter++){ start = clock(); // random number on device curandState* devStates; cudaMalloc ( &devStates, psize*nvars*sizeof( curandState ) ); setup_rand <<< psize*nvars/32, 32 >>> ( devStates,unsigned(time(NULL)) ); // setup seeds size_t size = psize*nvars*sizeof(int); int *d_Pop = NULL; cudaMalloc((void **)&d_Pop, size); InitPop<<< psize/32, 32 >>>(d_Pop, nvars, devStates); //for(ipop=0; ipop<psize; ipop++){ InitPop_s<<< 1, 1 >>>(d_Pop, ipop, nvars, devStates); } // SECUENCIAL !!! //int *h_Pop = (int *)malloc(size); //prueba //cudaMemcpy(h_Pop, d_Pop, size, cudaMemcpyDeviceToHost); //prueba //printf("h_Pop \n"); for(int i=0;i<psize*nvars;i++) { cout<<h_Pop[i]<<endl; } //prueba err = cudaFree(devStates); if (err != cudaSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } igen = 0 ; BestSol=1.0 ; x=0.0 ; y=0.0 ; // Bucle de las generaciones ************************************************************ while(BestSol > tol){ //while(igen < ngen){ //if(igen > ngen+1){ BestSol=-1 ; //exit(EXIT_FAILURE); } igen++ ; size = psize*sizeof(float); float *d_Sol = NULL; float *h_Sol = (float *)malloc(size); cudaMalloc((void **)&d_Sol, size); // Evaluacion funcion objetivo Func<<<psize/32, 32>>>(d_Sol, d_Pop, nvbin, nvdec) ; //for(ipop=0; ipop<psize; ipop++){ Func_s<<< 1, 1 >>>(d_Sol, d_Pop, ipop, nvbin, nvdec); } // SECUENCIAL !!! cudaMemcpy(h_Sol, d_Sol, size, cudaMemcpyDeviceToHost); //printf("h_Sol \n"); for(int i=0;i<psize;i++) { cout<<h_Sol[i]<<endl; } //prueba // Ordena los individuos segun el valor de la solucion indiv Pop[psize]; for (int j=0; j<psize; j++) { Pop[j].Sol = h_Sol[j] ; Pop[j].Ind = j ; } sort(Pop, Pop+psize, lessthan); BestSol = Pop[psize-1].Sol ; if(igen > ngen+1){ BestSol=-1 ; //exit(EXIT_FAILURE); } err = cudaFree(d_Sol); if (err != cudaSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } int h_Ind[psize] ; for(int i=0;i<psize;i++) { h_Ind[i] = Pop[i].Ind ; } int *d_NPop = NULL; int *d_Pind = NULL; cudaMalloc((void **)&d_Pind, psize*sizeof(int)) ; cudaMalloc((void **)&d_NPop, psize*nvars*sizeof(int)) ; EqualityINT<<<psize, nvars>>>(d_NPop, d_Pop, 0, psize*nvars) ; // hace una copia de d_Pop //for(ipop=0; ipop<psize; ipop++){ EqualityINT_s<<< 1, 1 >>>(d_NPop, d_Pop, 0, ipop, psize*nvars); } // SECUENCIAL !!! cudaMemcpy(d_Pind, h_Ind, psize*sizeof(int), cudaMemcpyHostToDevice); Rearrange<<<psize, nvars>>>(d_Pop, d_NPop, d_Pind, nvars, psize*nvars) ; // ordena d_Pop // INICIO SECUENCIAL !!! //int it=0; //for(int iblo=0; iblo<psize; iblo++){ // for(int ithr=0; ithr<nvars; ithr++){ // it++ ; // Rearrange_s<<< 1, 1 >>>(d_Pop, d_NPop, d_Pind, nvars, psize*nvars, it, iblo, ithr); // } //} // FIN SECUENCIAL !!! 
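// Ordering note: lessthan() compares with (b.Sol < a.Sol), so the host sort leaves
// the individuals in descending order of the objective; the best (smallest) value
// ends up in Pop[psize-1], which is where BestSol is read.  The Rearrange launch
// above applies that same ordering to the rows of d_Pop through the index vector
// d_Pind, which is what allows the last nelit rows to be copied out as the elite below.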
err = cudaFree(d_Pind); if (err != cudaSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_NPop); if (err != cudaSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } //x=0.0 ; y=0.0 ; //XY(x, y, Best, nvdec, nvbin) ; //printf("igen %i ,Minimo %f ,X: %f ,Y: %f \n",igen,BestSol,x,y); printf("igen %i ,Minimo %f \n",igen,BestSol); // Elige la población ELITE size = nelit*nvars*sizeof(int); int *d_Eli = NULL ; cudaMalloc((void **)&d_Eli, size) ; for(ipop=psize-nelit; ipop<psize; ipop++){ EqualityINT<<<1, nvars>>>(d_Eli, d_Pop, ipop*nvars, nvars) ; //for(int ivar=0; ivar<nvars; ivar++){ EqualityINT_s<<< 1, 1 >>>(d_Eli, d_Pop, ipop*nvars, ivar, nvars); } // SECUENCIAL !!! } // SELECCION (Simple Roulet) int ngroup = psize ; // cantidad de grupos (de cada grupo sale un individuo) int gsize = 3 ; // 5 tamaño de cada grupo (nro. de ind. en cada grupo) curandState* devStates; cudaMalloc ( &devStates, gsize*ngroup*sizeof( curandState ) ); setup_rand <<< ngroup, gsize >>> ( devStates,unsigned(time(NULL)) ); size = gsize*ngroup*sizeof(int); int *d_Tiro = NULL ; cudaMalloc((void **)&d_Tiro, size) ; RandomINT<<< ngroup, gsize >>>(d_Tiro, gsize*ngroup, psize, devStates) ; //for(int ivar=0; ivar<ngroup*gsize; ivar++){ RandomINT_s<<< 1, 1 >>>(d_Tiro, gsize*ngroup, psize, ivar, devStates); } // SECUENCIAL !!! err = cudaFree(devStates); if (err != cudaSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } size = psize*sizeof(int); int *d_Sel = NULL ; cudaMalloc((void **)&d_Sel, size) ; GroupSelection<<< psize/32, 32 >>>(d_Sel, d_Tiro, gsize, ngroup) ; //for(int ivar=0; ivar<psize; ivar++){ GroupSelection_s<<< 1, 1 >>>(d_Sel, d_Tiro, gsize, ngroup, ivar); } // SECUENCIAL !!! err = cudaFree(d_Tiro); if (err != cudaSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // CRUZAMIENTO (Crossover Simple) size = (psize/2)*sizeof(int) ; int *d_Male = NULL ; int *d_Female = NULL ; cudaMalloc((void **)&d_Male, size) ; cudaMalloc((void **)&d_Female, size) ; EqualityINT<<< psize/2/32, 32>>>(d_Male, d_Sel, 0, psize/2) ; EqualityINT<<< psize/2/32, 32>>>(d_Female, d_Sel, psize/2, psize/2) ; // INICIO SECUENCIAL !!! //for(int ivar=0; ivar<psize/2; ivar++){ // EqualityINT_s<<< 1, 1 >>>(d_Male, d_Sel, 0, ivar, psize/2); // EqualityINT_s<<< 1, 1 >>>(d_Female, d_Sel, psize/2, ivar, psize/2); //} // FIN SECUENCIAL !!! err = cudaFree(d_Sel); if (err != cudaSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } size = psize*nvars*sizeof(int); cudaMalloc((void **)&d_NPop, size) ; CrossoverSingle<<<psize/2, nvars>>>(d_NPop, d_Male, d_Female, d_Pop, psize, nvars) ; // INICIO SECUENCIAL !!! //int itt=0; //for(int iblo=0; iblo<psize/2; iblo++){ // for(int ithr=0; ithr<nvars; ithr++){ // itt++ ; // CrossoverSingle_s<<< 1, 1 >>>(d_NPop, d_Male, d_Female, d_Pop, psize, nvars, itt, iblo, ithr); // } //} // FIN SECUENCIAL !!! 
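// Layout note: CrossoverSingle runs with psize/2 blocks of nvars threads; block b
// pairs d_Male[b] with d_Female[b] and writes two children (rows 2b and 2b+1 of
// d_NPop).  The first child takes its even gene positions from the male parent and
// its odd positions from the female parent; the second child swaps those roles.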
err = cudaFree(d_Male); if (err != cudaSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } err = cudaFree(d_Female); if (err != cudaSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } EqualityINT<<<psize, nvbin*nvdec>>>(d_Pop, d_NPop, 0, psize*nvars) ; //for(int ivar=0; ivar<psize*nvbin*nvdec; ivar++){ EqualityINT_s<<< 1, 1 >>>(d_Pop, d_NPop, 0, ivar, psize*nvars); } // SECUENCIAL !!! err = cudaFree(d_NPop); if (err != cudaSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // MUTACION (Step mutation) cudaMalloc ( &devStates, psize*sizeof( curandState ) ); setup_rand <<< psize/32, 32 >>> ( devStates,unsigned(time(NULL)) ); Mutation<<< psize/32, 32 >>>(d_Pop, mutp, psize, nvars, devStates) ; //for(int ivar=0; ivar<psize; ivar++){ Mutation_s<<< 1, 1 >>>(d_Pop, mutp, psize, nvars, devStates, ivar); } // SECUENCIAL !!! err = cudaFree(devStates); if (err != cudaSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } // Copia la población ELITE for(ipop=psize-nelit; ipop<psize; ipop++){ EqualityINT<<<1, nvars>>>(d_Pop, d_Eli, ipop*nvars, nvars) ; //for(int ivar=0; ivar<nvars; ivar++){ EqualityINT_s<<< 1, 1 >>>(d_Pop, d_Eli, ipop*nvars, ivar, nvars); } // SECUENCIAL !!! } err = cudaFree(d_Eli); if (err != cudaSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } } // Free device global memory err = cudaFree(d_Pop); if (err != cudaSuccess) {fprintf(stderr, "Failed to free device vector A (error code %s)!\n", cudaGetErrorString(err)); exit(EXIT_FAILURE); } printf("Poblacion: %i, Generaciones: %i, Tiempo transcurrido: %f \n",psize , igen, ((double)clock() - start) / CLOCKS_PER_SEC) ; } // iiter printf("Done\n"); }
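/*
 * Illustrative host-side sketch (not part of the original program): it mirrors the
 * decoding that the Func kernel applies to one chromosome, to make the encoding
 * easier to follow.  With nvbin = 4 and nvdec = 16 each individual holds 64 bits:
 * groups 0..7 (4 bits each, least-significant bit first) encode x and groups 8..15
 * encode y.  Group 0 (resp. 8) only carries the sign; groups 1..7 (resp. 9..15) map
 * to a decimal digit 0..9 via lroundf(BinDec(group) * 9/15) and are weighted by
 * 10^-inum, exactly as in Func.  The helper names bin_dec and decode_xy are invented
 * for this sketch and do not appear in the original code.
 */
#include <math.h>

static int bin_dec(const int *bits, int n)      /* same rule as BinDec: LSB first */
{
    int sum = 0;
    for (int i = 0; i < n; i++)
        sum += bits[i] << i;
    return sum;
}

static void decode_xy(const int chrom[64], float *x, float *y)
{
    const int   nvbin = 4, nvdec = 16;
    const float lim   = 9.0f / (powf(2.0f, (float)nvbin) - 1.0f);   /* = 0.6 */
    float vals[2] = { 0.0f, 0.0f };

    for (int coord = 0; coord < 2; coord++) {
        const int base = coord * nvbin * nvdec / 2;          /* 0 for x, 32 for y */
        for (int inum = 1; inum < nvdec / 2; inum++) {       /* digit groups 1..7 */
            int digit = (int)lroundf(bin_dec(chrom + base + nvbin * inum, nvbin) * lim);
            vals[coord] += digit * powf(10.0f, (float)(0 - inum));
        }
        /* the first group of each half is the sign: below the mid-code means negative */
        if (bin_dec(chrom + base, nvbin) < (powf(2.0f, (float)nvbin) - 1.0f) * 0.5f)
            vals[coord] = -vals[coord];
    }
    *x = vals[0];
    *y = vals[1];
}
/* With this decoding, the De Jong objective evaluated above is simply x*x + y*y. */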
111891cb1d89ab47e819b0ebd409aaa93285344a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" /*This program calculates pi using a Simpson's Rule estimation of the integral of arctangent from 0 to 1. When inputting the number of iterations to perform, more iterations = more precision. The number of iterations is given as a command line argument. If no argument is provided, a default value of 20,000 is used. At 20,000 iterations, the value of pi is guaranteed to be accurate up to 8 decimal places. This version uses NVIDIA CUDA to perform parallel computation of the partial sums on a GPU. The amount of work each core does is given by the two #defines below. These values will need to be tuned for each device this code runs on in order to get maximum performance. For example, on my own personal machine, which has a GeForce GT 650M discrete graphics card, there are now only 12 streaming multiprocessors (SM's), with 32 cores each, for a total of 384 cores. Thus, 384 threads will be created, with each thread performing multiple iterations (total_iterations / (NUM_BLOCKS * THREADS_PER_BLOCK) to be precise). Thus, the more iterations given, the more work each thread does. The number of threads is kept constant in order to make clean-up easier and to not exceed the capabilities (max number of threads or blocks) of any particular GPU device. The last thread might have a few extra iterations if that number doesn't divide evenly. The number of decimal digits to use as the precision of the calculations is also given as a command-line argument. Obviously, the higher the number, the more digits you can successfully calculate. Accuracy still relies on the number of iterations, though: a high number of digits but low number of iterations will still result in a low number of digits of precision. Thus, you should only increase the number of digits when your iterations get too high and you find that your calculations are no longer precise due to internal rounding error. You'll probably find that increasing the digits will decrease performance severely. It is recommended, though, that since error accumulates, the more digits you want to find, the more padding you'll need to add to the end of the word to absorb that error. As a general rule of thumb, if you want to calculate x digits, make your words 2x long. Of course, this also increases the runtime by 2x. Compilation on my own machine actually makes OSC's steps look like child's play. The best way to do this is to download Visual Studio. Then, go to http://developer.nvidia.com/nsight-visual-studio-edition-downloads and follow the steps to install the proper graphics driver, then the CUDA toolkit, then the Nsight Visual Studio Plugin. So, the project you create will be a new CUDA project, which will link to the proper headers. Compile it, then go to NSIGHT in the menu > Start CUDA Debugging. Eventually, the proper result will pop up in the console window that opens. Note that if you want to change the number of iterations/precision, you'll need to edit the VS project settings under PROJECT > "Name" Properties... and then under Configuration Properties > Debugging, add the values you want to the Command Arguments field, since the default is to build the project for Debug, not Release. Also, the sleep function is added just because the debug console window disappears in Visual Studio. Grr. If you want to run this separately... you can't. I tried to compile this from the command line. 
It worked, even though I had to add extra system variables for NVCC to Window's Environment Variables, and then pass the path to VS's C compiler to NVCC when I ran it. Running it, however, did not. It crashed the graphics driver and returned that an error occurred upon trying to copy the results back from the GPU. Even debugging using DEBUG > Start Debugging in VS crashed it that way. Only debugging via NSIGHT actually worked. But, whatever, we got results. Remember that this solution uses dynamic memory allocation on the device, so only CUDA 2.0+ devices will run this code. */ // Includes. Optimum values for my computer are: // NUM_BLOCKS 12 // THREADS_PER_BLOCK 32 #include <stdio.h> #include <stdlib.h> #include <time.h> #include <Windows.h> #define NUM_BLOCKS 12 #define THREADS_PER_BLOCK 32 // A bignum is stored as all its decimal digits, separated into an array. // Really, it's quite terrible for performance, but it allows infinite digits. // Or at least as many as we can store in memory. The power tells us where to // put the decimal point, and the number of significant digits tells us how // many of the digits in the number are actually used. The precision tells us // the maximum number of digits possible for this particular instance. typedef struct { signed long int power; unsigned long int sig_digs; char * digits; unsigned long int precision; } bignum; // Function pointers, mostly for bignum operations. Note that in our use // below, we assume most of the arithmetic functions don't fail and thus // don't check their return values. Hope they're tested well... Notice // now that we have mirrored versions for the GPU, most of which just // have to call the GPU memory allocation functions. __global__ void calculate(long *, long *, char *, long *, long *, char *, long, long); __host__ bignum * bignum_init(long int); __host__ void bignum_reset(bignum *); __host__ void bignum_clear(bignum *); __host__ int bignum_set_int(bignum *, long int); __host__ void bignum_set(bignum *, bignum *); __host__ void bignum_print(bignum *, long int); __host__ int bignum_add(bignum *, bignum *, bignum *); __host__ int bignum_add_int(bignum *, bignum *, long int); __host__ int bignum_mult(bignum *, bignum *, bignum *); __host__ int bignum_mult_int(bignum *, bignum *, long int); __host__ int bignum_divide(bignum *, bignum *, bignum *); __host__ int bignum_int_divide(bignum *, long int, bignum *); __host__ int bignum_divide_int(bignum *, bignum *, long int); __device__ bignum * bignum_init_gpu(long int); __device__ void bignum_reset_gpu(bignum *); __device__ void bignum_clear_gpu(bignum *); __device__ int bignum_set_int_gpu(bignum *, long int); __device__ void bignum_set_gpu(bignum *, bignum *); __device__ int bignum_add_gpu(bignum *, bignum *, bignum *); __device__ int bignum_add_int_gpu(bignum *, bignum *, long int); __device__ int bignum_mult_gpu(bignum *, bignum *, bignum *); __device__ int bignum_mult_int_gpu(bignum *, bignum *, long int); __device__ int bignum_divide_gpu(bignum *, bignum *, bignum *); __device__ int bignum_int_divide_gpu(bignum *, long int, bignum *); __device__ int bignum_divide_int_gpu(bignum *, bignum *, long int); // Main function int main(int argc, char * argv[]) { // Obtain command line arguments long iterations = 20000L; if (argc > 1) { iterations = atol(argv[1]); if (iterations < 1L) { iterations = 20000L; } } long max_digits = 25L; if (argc > 2) { max_digits = atoi(argv[2]); if (max_digits < 1L) { max_digits = 25L; } } // Initialize global storage. 
Notice that we now need extra arrays for data // transfer between the GPU and regular RAM. These will hold the partial // sums that each of the threads calculate. Unfortunately, due to the way // bignums are structured, each of their arguments has to be transferred // separately. Luckily, this only happens once. long clock_start = (long)clock(); long int i, j; if (hipDeviceSetLimit(hipLimitMallocHeapSize, (NUM_BLOCKS * THREADS_PER_BLOCK * 16384)) != hipSuccess) { printf("\nError setting GPU heap size.\n"); return 1; } hipDeviceSynchronize(); long * hosttrappower = (long *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK), sizeof(long)); long * hosttrapsig_digs = (long *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK), sizeof(long)); char * hosttrapdigits = (char *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits), sizeof(char)); long * hostmidpower = (long *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK), sizeof(long)); long * hostmidsig_digs = (long *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK), sizeof(long)); char * hostmiddigits = (char *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits), sizeof(char)); if ((hosttrappower == 0) || (hosttrapsig_digs == 0) || (hosttrapdigits == 0) || (hostmidpower == 0) || (hostmidsig_digs == 0) || (hostmiddigits == 0)) { printf("\nError allocating memory on the CPU.\n"); return 1; } long * devicetrappower; long * devicetrapsig_digs; char * devicetrapdigits; long * devicemidpower; long * devicemidsig_digs; char * devicemiddigits; if (hipMalloc((void**)&devicetrappower, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long))) != hipSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; } if (hipMalloc((void**)&devicetrapsig_digs, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long))) != hipSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; } if (hipMalloc((void**)&devicetrapdigits, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits * sizeof(char))) != hipSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; } if (hipMalloc((void**)&devicemidpower, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long))) != hipSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; } if (hipMalloc((void**)&devicemidsig_digs, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long))) != hipSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; } if (hipMalloc((void**)&devicemiddigits, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits * sizeof(char))) != hipSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; } hipDeviceSynchronize(); char * accepted_pi = "3.14159265358979323846264338327950288419716939937510" "58209749445923078164062862089986280348253421170679\0"; char pi_printer[2]; pi_printer[0] = '0'; pi_printer[1] = '\0'; // Split off worker threads. When dividing the work, if the number of // threads does not evenly divide into the desired number of iterations, // give any extra iterations to the final thread. This gives the final // thread at most (num_threads - 1) extra iterations. Notice that this // is a 1D-grid of work, and we use function arguments this time. Also, // remember the number of threads is held constant, thanks to #defines, // at NUM_BLOCKS * THREADS_PER_BLOCK. 
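// Work-split example: with the default 20,000 iterations and NUM_BLOCKS *
// THREADS_PER_BLOCK = 12 * 32 = 384 threads, every thread handles
// 20000 / 384 = 52 sub-intervals, and the last thread also absorbs the remainder
// (20000 - 383 * 52 = 84 sub-intervals for that thread).  Each thread accumulates
// partial trapezoid and midpoint sums of 1/(1 + x^2) over its sub-range; the host
// later combines them as 4 * (trap + 2 * mid) / 3, the usual Simpson combination
// of the two estimates of pi = integral from 0 to 1 of 4/(1 + x^2) dx.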
dim3 numBlocks(NUM_BLOCKS); dim3 threadsPerBlock(THREADS_PER_BLOCK); hipLaunchKernelGGL(( calculate) , dim3(numBlocks), dim3(threadsPerBlock) , 0, 0, devicetrappower, devicetrapsig_digs, devicetrapdigits, devicemidpower, devicemidsig_digs, devicemiddigits, iterations, max_digits); hipDeviceSynchronize(); // Copy results back from GPU if (hipMemcpy(hosttrappower, devicetrappower, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)), hipMemcpyDeviceToHost) != hipSuccess) { printf("\nError copying memory from GPU.\n"); return 3; } if (hipMemcpy(hosttrapsig_digs, devicetrapsig_digs, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)), hipMemcpyDeviceToHost) != hipSuccess) { printf("\nError copying memory from GPU.\n"); return 3; } if (hipMemcpy(hosttrapdigits, devicetrapdigits, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits * sizeof(char)), hipMemcpyDeviceToHost) != hipSuccess) { printf("\nError copying memory from GPU.\n"); return 3; } if (hipMemcpy(hostmidpower, devicemidpower, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)), hipMemcpyDeviceToHost) != hipSuccess) { printf("\nError copying memory from GPU.\n"); return 3; } if (hipMemcpy(hostmidsig_digs, devicemidsig_digs, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)), hipMemcpyDeviceToHost) != hipSuccess) { printf("\nError copying memory from GPU.\n"); return 3; } if (hipMemcpy(hostmiddigits, devicemiddigits, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits * sizeof(char)), hipMemcpyDeviceToHost) != hipSuccess) { printf("\nError copying memory from GPU.\n"); return 3; } hipDeviceSynchronize(); if (hipFree(devicetrappower) != hipSuccess) { printf("\nError freeing GPU memory.\n"); return 3; } if (hipFree(devicetrapsig_digs) != hipSuccess) { printf("\nError freeing GPU memory.\n"); return 3; } if (hipFree(devicetrapdigits) != hipSuccess) { printf("\nError freeing GPU memory.\n"); return 3; } if (hipFree(devicemidpower) != hipSuccess) { printf("\nError freeing GPU memory.\n"); return 3; } if (hipFree(devicemidsig_digs) != hipSuccess) { printf("\nError freeing GPU memory.\n"); return 3; } if (hipFree(devicemiddigits) != hipSuccess) { printf("\nError freeing GPU memory.\n"); return 3; } // After worker threads end, clean up each of the partial sums bignum * trap = bignum_init(max_digits); bignum * mid = bignum_init(max_digits); bignum * temp = bignum_init(max_digits); bignum * simp = bignum_init(max_digits); if (trap == 0 || mid == 0 || temp == 0 || simp == 0) { printf("Error allocating memory. 
Now exiting.\n"); return -1; } for (i = 0L; i < (NUM_BLOCKS * THREADS_PER_BLOCK); i++) { simp->power = hosttrappower[i]; simp->sig_digs = hosttrapsig_digs[i]; for (j = 0L; j < max_digits; j++) { simp->digits[(int)j] = hosttrapdigits[(int)((i * max_digits) + j)]; } bignum_add(temp, trap, simp); bignum_reset(trap); bignum_reset(simp); bignum_set(trap, temp); bignum_reset(temp); simp->power = hostmidpower[i]; simp->sig_digs = hostmidsig_digs[i]; for (j = 0L; j < max_digits; j++) { simp->digits[(int)j] = hostmiddigits[(int)((i * max_digits) + j)]; } bignum_add(temp, mid, simp); bignum_reset(mid); bignum_reset(simp); bignum_set(mid, temp); bignum_reset(temp); } // Finally, Simpson's Rule is applied bignum_mult_int(temp, mid, 2L); bignum_reset(mid); bignum_set(mid, temp); bignum_reset(temp); bignum_add(temp, trap, mid); bignum_reset(trap); bignum_set(trap, temp); bignum_reset(temp); bignum_divide_int(temp, trap, 3L); bignum_reset(trap); bignum_set(trap, temp); bignum_reset(temp); bignum_mult_int(simp, trap, 4L); long clock_end = (long)clock(); printf("The calculated value of pi is "); bignum_print(simp, 0L); printf("\nThe actual value of pi is 3."); for (i = 0L; i < (max_digits - 1L); i++) { // This may print an extra digit or two because, somewhere down in the // code, we're losing our last sig dig during normal math, but it's // bubbling back up, and causing the final result to lose a place or // two. It's not a big deal, and I don't want to do anything about it, // so we'll just have the ends of the numbers not line up. Whatever. pi_printer[0] = accepted_pi[(int)(i + 2L)]; printf("%s", pi_printer); } printf("\nThe time taken to calculate this was %.2f seconds\n", ((float)(clock_end - clock_start)) / (float)CLOCKS_PER_SEC); printf("The number of iterations performed was %ld\n", iterations); Sleep(5000); // Free global storage free(hosttrappower); free(hosttrapsig_digs); free(hosttrapdigits); free(hostmidpower); free(hostmidsig_digs); free(hostmiddigits); bignum_clear(trap); bignum_clear(mid); bignum_clear(simp); bignum_clear(temp); return 0; } // Function executed by each thread to incrementally calculate the overall value __global__ void calculate(long * devicetrappower, long * devicetrapsig_digs, char * devicetrapdigits, long * devicemidpower, long * devicemidsig_digs, char * devicemiddigits, long iterations, long max_digits) { // Initialize needed variables and check for errors long threadid = threadIdx.x + (blockIdx.x * THREADS_PER_BLOCK); long lowlimit = threadid * (iterations / (NUM_BLOCKS * THREADS_PER_BLOCK)); long highlimit = (((threadid + 1L) == (NUM_BLOCKS * THREADS_PER_BLOCK)) ? 
iterations : ((threadid + 1L) * (iterations / (NUM_BLOCKS * THREADS_PER_BLOCK)))); bignum * trap = bignum_init_gpu(max_digits); bignum * mid = bignum_init_gpu(max_digits); bignum * inverseiterations = bignum_init_gpu(max_digits); bignum * temp_holder = bignum_init_gpu(max_digits); bignum * temp_holder2 = bignum_init_gpu(max_digits); bignum * inc = bignum_init_gpu(max_digits); bignum * leftrect = bignum_init_gpu(max_digits); bignum * rightrect = bignum_init_gpu(max_digits); if (trap == 0 || mid == 0 || inverseiterations == 0 || temp_holder == 0 || temp_holder2 == 0 || inc == 0 || leftrect == 0 || rightrect == 0) { return; } // Initialize values of needed variables bignum_set_int_gpu(temp_holder, iterations); bignum_int_divide_gpu(inverseiterations, 1L, temp_holder); bignum_reset_gpu(temp_holder); long i; long k = lowlimit; bignum_divide_int_gpu(temp_holder, inverseiterations, 2L); bignum_set_int_gpu(inc, k); bignum_mult_gpu(temp_holder2, inc, inverseiterations); bignum_reset_gpu(inc); bignum_set_gpu(inc, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_add_gpu(temp_holder2, inc, temp_holder); bignum_reset_gpu(inc); bignum_set_gpu(inc, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_reset_gpu(temp_holder); // Main iteration loop. Note that the values of inverseiterations, inc, // mid, and trap are preserved across loop iterations, as is counter k. // inverseiterations is a constant that is stored for simplicity. Man, // this is looking more and more like assembly... for (i = lowlimit; i < highlimit; i++) { // First, the trapezoid rule is used to estimate pi bignum_reset_gpu(leftrect); bignum_set_int_gpu(leftrect, k); bignum_mult_gpu(temp_holder2, leftrect, inverseiterations); bignum_reset_gpu(leftrect); bignum_set_gpu(leftrect, temp_holder2); bignum_reset_gpu(temp_holder2); k++; bignum_reset_gpu(rightrect); bignum_set_int_gpu(rightrect, k); bignum_mult_gpu(temp_holder2, rightrect, inverseiterations); bignum_reset_gpu(rightrect); bignum_set_gpu(rightrect, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_add_gpu(temp_holder, leftrect, rightrect); bignum_divide_int_gpu(temp_holder2, temp_holder, 2L); bignum_reset_gpu(temp_holder); bignum_set_gpu(temp_holder, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_mult_gpu(temp_holder2, temp_holder, temp_holder); bignum_reset_gpu(temp_holder); bignum_set_gpu(temp_holder, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_add_int_gpu(temp_holder2, temp_holder, 1L); bignum_reset_gpu(temp_holder); bignum_set_gpu(temp_holder, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_int_divide_gpu(temp_holder2, 1L, temp_holder); bignum_reset_gpu(temp_holder); bignum_set_gpu(temp_holder, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_mult_gpu(temp_holder2, temp_holder, inverseiterations); bignum_reset_gpu(temp_holder); bignum_set_gpu(temp_holder, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_add_gpu(temp_holder2, trap, temp_holder); bignum_reset_gpu(trap); bignum_set_gpu(trap, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_reset_gpu(temp_holder); // Next, the midpoint rule is also used to estimate pi bignum_set_gpu(temp_holder, inc); bignum_add_gpu(temp_holder2, inc, inverseiterations); bignum_reset_gpu(inc); bignum_set_gpu(inc, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_mult_gpu(temp_holder2, temp_holder, temp_holder); bignum_reset_gpu(temp_holder); bignum_set_gpu(temp_holder, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_add_int_gpu(temp_holder2, temp_holder, 1L); bignum_reset_gpu(temp_holder); 
bignum_set_gpu(temp_holder, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_int_divide_gpu(temp_holder2, 1L, temp_holder); bignum_reset_gpu(temp_holder); bignum_set_gpu(temp_holder, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_mult_gpu(temp_holder2, temp_holder, inverseiterations); bignum_reset_gpu(temp_holder); bignum_set_gpu(temp_holder, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_add_gpu(temp_holder2, mid, temp_holder); bignum_reset_gpu(mid); bignum_set_gpu(mid, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_reset_gpu(temp_holder); } // Save partial result, clear memory, and exit devicetrappower[threadid] = trap->power; devicetrapsig_digs[threadid] = trap->sig_digs; for (i = 0; i < max_digits; i++) { devicetrapdigits[(threadid * max_digits) + i] = trap->digits[i]; } devicemidpower[threadid] = mid->power; devicemidsig_digs[threadid] = mid->sig_digs; for (i = 0; i < max_digits; i++) { devicemiddigits[(threadid * max_digits) + i] = mid->digits[i]; } bignum_clear_gpu(trap); bignum_clear_gpu(mid); bignum_clear_gpu(inverseiterations); bignum_clear_gpu(temp_holder); bignum_clear_gpu(temp_holder2); bignum_clear_gpu(inc); bignum_clear_gpu(leftrect); bignum_clear_gpu(rightrect); } // Create space for a bignum with the specified precision. // Technically, it's also initialized if we interpret having zero // significant digits as the number having a value of zero. __host__ bignum * bignum_init(long int precision) { bignum * temp_ptr = (bignum *)calloc(1, sizeof(bignum)); temp_ptr->digits = (char *)calloc((int)precision, sizeof(char)); if ((temp_ptr->digits) == 0) { temp_ptr = 0; } temp_ptr->precision = precision; return temp_ptr; } // Resets a bignum's value to zero. memcpy isn't used because // why bring the string library into this just for this use? __host__ void bignum_reset(bignum * numval) { if ((numval->sig_digs) > 0L) { long int i; for (i = 0L; i < numval->precision; i++) { numval->digits[(int)i] = '\0'; } numval->power = 0L; numval->sig_digs = 0L; } return; } // Free memory used by a bignum when we're done with it __host__ void bignum_clear(bignum * oldnum) { free(oldnum->digits); free(oldnum); return; } // Set an instance of a bignum to an integer value. Note that if we can't // initialize the temp word we need for copying, we return false (value = 0). // We also assume that the number is non-negative since we only store // unsigned numbers. We assume the result is initialized/reset. Finally, // we handle zero specially by just resetting (again?) the result. Note that // we explicitly assume the number to convert fits within the max number of // digits. If we try to convert a number bigger than we can store, it won't work. 
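// Worked example: bignum_set_int(n, 7290L) first stores the value backwards in the
// temp word as {0,9,2,7}, records power = 3 (the leading digit is worth 10^3),
// strips the single trailing zero of 7290, and ends with digits = {7,2,9} and
// sig_digs = 3; bignum_print() then restores the dropped zero when printing.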
__host__ int bignum_set_int(bignum * numval, long int intval) { if (intval > 0L) { // Separate out the individual digits (stored backwards) char * temp_word = (char *)calloc((int)(numval->precision), sizeof(char)); if (temp_word == 0) { return 0; } long int temp_int = intval; long int counter = 0L; while (temp_int > 0L) { temp_word[(int)counter] = (char)(temp_int % 10L); temp_int = temp_int / 10L; counter++; } // Detect any trailing zeros that we don't need to store numval->power = counter - 1L; long int leadingzeros = 0L; int hasleading = 1; while (hasleading == 1) { if (temp_word[(int)leadingzeros] != 0) { hasleading = 0; } else { leadingzeros++; } } // Store final result into actual bignum variable for (temp_int = 0L; temp_int < (counter - leadingzeros); temp_int++) { numval->digits[(int)temp_int] = temp_word[(int)(counter - temp_int - 1L)]; } numval->sig_digs = counter - leadingzeros; free(temp_word); return 1; } else { bignum_reset(numval); return 1; } } // Set an instance of a bignum to the value of another bignum. We don't assume // they're both the same precision; just use the precision of the new number. // We do assume that the new number has already been initialized, though. // strncpy is not used since it quits after seeing the first zero. __host__ void bignum_set(bignum * newnum, bignum * oldnum) { if ((oldnum->sig_digs) > 0L) { newnum->power = oldnum->power; newnum->sig_digs = ((oldnum->sig_digs > newnum->precision) ? (newnum->precision) : (oldnum->sig_digs)); long int i; for (i = 0L; i < newnum->sig_digs; i++) { newnum->digits[(int)i] = oldnum->digits[(int)i]; } } else { bignum_reset(newnum); } return; } // Use printf to print the number one digit at a time. There are a few cases: // power > significant digits: pad end with zeros // significant digits > power: fractional digit (non-integer) // power is negative: total value less than 1 // The second argument is the maximum number of significant digits to print. // If it's zero, then all available digits will be printed, maxing out at // the precision of the number (the total amount is could possibly store). // Note that this is different from total digits printed: zeroes after a // decimal point but before the first significant digit don't count, and we // make sure we print at least the integral part of the number (we only // chop off fractional portions). __host__ void bignum_print(bignum * numval, long int maxdigits) { long int i; long int limit = numval->sig_digs; if (numval->sig_digs == 0L) { printf("0"); } else { if ((maxdigits > 0L) && (maxdigits < numval->sig_digs)) { limit = maxdigits; } if (numval->power < 0L) { printf("0."); for (i = 1L; i < (-1L * (numval->power)); i++) { printf("0"); } for (i = 0L; i < limit; i++) { printf("%d", (int)(numval->digits[(int)i])); } } else if (numval->sig_digs >(numval->power + 1L)) { for (i = 0L; i <= numval->power; i++) { printf("%d", (int)(numval->digits[(int)i])); } if (limit >(numval->power + 1L)) { printf("."); } for (i = (numval->power + 1L); i < limit; i++) { printf("%d", (int)(numval->digits[(int)i])); } } else { for (i = 0L; i < numval->sig_digs; i++) { printf("%d", (int)(numval->digits[(int)i])); } } if ((numval->power > 0L) && ((numval->power + 1L) > numval->sig_digs)) { for (i = 0L; i < ((numval->power + 1L) - numval->sig_digs); i++) { printf("0"); } } } fflush(stdout); return; } // Adds two bignums together and stores the result. Uses the functions to // reset and set the location of the result internally, so current contents of // result operand will be overwritten. 
Like bignum_set_int, returns 1 if // addition was successful or 0 if an error occurred. A special shortcut is // taken if either (or both) of the operands are zero. Note that it is possible // for large additions to cause underflow to zero. In that case, special care is // taken to make sure the proper input operand is used. Note that we assume the // precision of all three operands is the same. If it's not, something terrible // like a seg fault or incorrect answer will probably occur. Most importantly, // the result operand CANNOT be the same as one of the input operands, since // the result is clobbered immediately and used as a scratchpad. Note that this // is also unsigned addition: not only does it not accept negative numbers, it // also doesn't do subtraction (which, for that matter, isn't commutative). __host__ int bignum_add(bignum * resultnum, bignum * leftnum, bignum * rightnum) { bignum_reset(resultnum); if ((leftnum->sig_digs == 0L) && (rightnum->sig_digs > 0L)) { bignum_set(resultnum, rightnum); return 1; } else if ((rightnum->sig_digs == 0L) && (leftnum->sig_digs > 0L)) { bignum_set(resultnum, leftnum); return 1; } else if ((leftnum->sig_digs == 0L) && (rightnum->sig_digs == 0L)) { return 1; } else { // First check for overshift: if the larger number's power is too much // bigger than the smaller number's, the smaller will be completely lost, // and we'll just end up with the large number as the result. if ((((leftnum->power - rightnum->power) > 0) && ((leftnum->power - rightnum->power) > resultnum->precision))) { bignum_set(resultnum, leftnum); return 1; } if ((((rightnum->power - leftnum->power) > 0) && ((rightnum->power - leftnum->power) > resultnum->precision))) { bignum_set(resultnum, rightnum); return 1; } // Next, shift the smaller operand to match the larger one by copying // it into the result operand as a partial sum. Also copy over the // power and total significant digits into the result. bignum * bigger; bignum * smaller; if ((leftnum->power - rightnum->power) >= 0L) { bigger = leftnum; smaller = rightnum; } else { bigger = rightnum; smaller = leftnum; } long int difference = bigger->power - smaller->power; long int startdigit = smaller->sig_digs + difference; long int transfertotal = smaller->sig_digs; if (startdigit > resultnum->precision) { startdigit = resultnum->precision - difference; transfertotal = startdigit; } long int startdigitcopy = startdigit; startdigit--; long int i; for (i = 0L; i < transfertotal; i++) { if ((startdigit - difference) >= 0L) { resultnum->digits[(int)startdigit] = smaller->digits[(int)(startdigit - difference)]; } startdigit--; } // Now the main addition loop: loop through each digit and add it. // The carry from the previous digit will add to the current one. // Note that we detect any trailing zeros to take from the sig_digs. 
// Also, copy over the power and significant digits resultnum->power = bigger->power; resultnum->sig_digs = startdigitcopy; if (bigger->sig_digs > resultnum->sig_digs) { resultnum->sig_digs = bigger->sig_digs; startdigitcopy = resultnum->sig_digs; } int trailingzeros = 1; long int zerocount = 0L; char carry = 0; for (i = 0L; i < resultnum->sig_digs; i++) { resultnum->digits[(int)(startdigitcopy - i - 1L)] += (bigger->digits[(int)(startdigitcopy - i - 1L)] + carry); if (resultnum->digits[(int)(startdigitcopy - i - 1L)] >= 10) { resultnum->digits[(int)(startdigitcopy - i - 1L)] -= 10; carry = 1; } else { carry = 0; } if (trailingzeros == 1) { if (resultnum->digits[(int)(startdigitcopy - i - 1L)] == '\0') { zerocount++; } else { trailingzeros = 0; } } } // If we've got trailing zeros, subtract them from the final count of // sig_digs. Also, if we have a carry, we need to shift everything... resultnum->sig_digs -= zerocount; if (carry > 0) { transfertotal = resultnum->sig_digs; if (transfertotal == resultnum->precision) { transfertotal--; } startdigitcopy = transfertotal - 1L; for (i = 0L; i < transfertotal; i++) { if (startdigitcopy >= 0L) { resultnum->digits[(int)(startdigitcopy + 1L)] = resultnum->digits[(int)startdigitcopy]; } else if ((startdigitcopy + 1L) >= 0L) { resultnum->digits[(int)(startdigitcopy + 1L)] = 0; } startdigitcopy--; } resultnum->digits[0] = carry; resultnum->power++; resultnum->sig_digs++; } if (resultnum->sig_digs > resultnum->precision) { resultnum->sig_digs = resultnum->precision; } return 1; } } // A convenience wrapper that temporarily creates a new bignum out of the // given integer, calls bignum_add with it and the other operand, and deletes // the temporary bignum before exiting. Any problems that bignum_add encounters // are passed back up through this function and returned to the caller. __host__ int bignum_add_int(bignum * resultnum, bignum * leftnum, long int rightint) { bignum_reset(resultnum); if ((rightint == 0L) && (leftnum->sig_digs > 0L)) { bignum_set(resultnum, leftnum); return 1; } else if ((leftnum->sig_digs == 0L) && (rightint > 0L)) { return bignum_set_int(resultnum, rightint); } else if ((leftnum->sig_digs == 0L) && (rightint == 0L)) { return 1; } else { bignum * tempnum = bignum_init(resultnum->precision); if (tempnum == 0) { return 0; } if (bignum_set_int(tempnum, rightint) == 0) { bignum_clear(tempnum); return 0; } int retval = bignum_add(resultnum, leftnum, tempnum); bignum_clear(tempnum); return retval; } } // Multiplies two bignums together and stores the result. Like add, uses // functions to reset and set the location of the result, and returns 1 upon // success or 0 if an error occurred. A special shortcut is taken if either // operand is zero, since the result will thus also be zero. Note that we assume // the precision of all three operands is the same. If it's not, something // terrible like a seg fault or incorrect answer will probably occur. Most // importantly, the result operand CANNOT be the same as one of the input // operands, since the result is clobbered immediately and used as a scratchpad. // Also, note that this is unsigned: it assumes both operands are positive. 
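// Worked example: multiplying 1.5 (digits {1,5}, power 0, sig_digs 2) by 2.5
// (digits {2,5}, power 0, sig_digs 2) leaves 0,3,7,5 at the tail of the scratchpad;
// since there is no carry into the top scratch digit, the leading zero is dropped by
// the carry adjustment and the result is digits {3,7,5}, power 0, sig_digs 3,
// which prints as 3.75.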
__host__ int bignum_mult(bignum * resultnum, bignum * leftnum, bignum * rightnum) { bignum_reset(resultnum); if ((leftnum->sig_digs == 0L) || (rightnum->sig_digs == 0L)) { return 1; } else { // Initialize the scratchpad and find the digit limits char * temp_word = (char *)calloc((int)(2L * (resultnum->precision)), sizeof(char)); if (temp_word == 0) { return 0; } bignum * bigger; bignum * smaller; if (((signed long int)leftnum->sig_digs - (signed long int)rightnum->sig_digs) >= 0L) { bigger = leftnum; smaller = rightnum; } else if ((rightnum->sig_digs - leftnum->sig_digs) > 0L) { bigger = rightnum; smaller = leftnum; } long int bigstart = (bigger->sig_digs) - 1L; long int smallstart = (smaller->sig_digs) - 1L; long int bigcounter, smallcounter; char carry = 0; // Perform the shift-addition loop. We choose to loop over each // digit of the smaller number for fewer overall iterations. If // the current bigloop has a zero, we can just skip that iteration. // Also, record the final carry, power, and sig_digs values. for (bigcounter = 0L; bigcounter < (smaller->sig_digs); bigcounter++) { if (smaller->digits[(int)(smallstart - bigcounter)] != '\0') { carry = 0; for (smallcounter = 0L; smallcounter < (bigger->sig_digs); smallcounter++) { temp_word[(int)((2L * (resultnum->precision)) - smallcounter - bigcounter - 1L)] += (carry + (smaller->digits[(int)(smallstart - bigcounter)] * bigger->digits[(int)(bigstart - smallcounter)])); carry = temp_word[(int)((2L * (resultnum->precision)) - smallcounter - bigcounter - 1L)] / 10; temp_word[(int)((2L * (resultnum->precision)) - smallcounter - bigcounter - 1L)] %= 10; } temp_word[(int)((2L * (resultnum->precision)) - bigcounter - (bigger->sig_digs) - 1L)] = carry; } } resultnum->power = ((bigger->power) + (smaller->power)); resultnum->sig_digs = ((bigger->sig_digs) + (smaller->sig_digs)); // Adjust for lack of a final carry or trailing zeros. if (carry < 1) { (resultnum->sig_digs)--; (resultnum->power)--; } (resultnum->power)++; int trailingzeros = 1; long int zerocount = 0L; long int i = (2L * (resultnum->precision) - 1L); while (trailingzeros == 1) { if (temp_word[(int)i] == '\0') { zerocount++; } else { trailingzeros = 0; } i--; } resultnum->sig_digs -= zerocount; if ((resultnum->sig_digs) > (resultnum->precision)) { resultnum->sig_digs = (resultnum->precision); } // Finally, copy from the temp word into the result, taking into // account any digits we may lose due to precision. long int tempstart = (2L * (resultnum->precision)) - ((bigger->sig_digs) + (smaller->sig_digs)); if (carry < 1) { tempstart++; } for (i = 0L; i < (resultnum->sig_digs); i++) { resultnum->digits[(int)i] = temp_word[(int)(tempstart + i)]; } free(temp_word); return 1; } } // Like bignum_add_int, a convenience wrapper that creates a temporary bignum // out of the integer and passes it to bignum_mult. Any problems encountered // in client functions are passed back up to the original caller. __host__ int bignum_mult_int(bignum * resultnum, bignum * leftnum, long int rightint) { bignum_reset(resultnum); if ((leftnum->sig_digs == 0L) || (rightint == 0L)) { return 1; } else { bignum * tempnum = bignum_init(resultnum->precision); if (tempnum == 0) { return 0; } if (bignum_set_int(tempnum, rightint) == 0) { bignum_clear(tempnum); return 0; } int retval = bignum_mult(resultnum, leftnum, tempnum); bignum_clear(tempnum); return retval; } } // Divides two bignums. Taken in terms of a fraction, leftnum is the numerator // and rightnum is the denominator. 
Performs an explicit check to make sure // the denominator is not zero, and returns 0 (an error) if it is. Returns 1 upon // success or 0 if an error occurs. A special shortcut is taken if the numerator is // zero. Note that we assume the precision of all three operands is the same. If it's // not, something terrible like a seg fault or incorrect answer will probably occur. // Most importantly, the result operand CANNOT be the same as one of the input // operands, since the result is clobbered immediately and used as a scratchpad. // Also, note that this is unsigned: it assumes both operands are positive. __host__ int bignum_divide(bignum * resultnum, bignum * numerator, bignum * denominator) { bignum_reset(resultnum); if (denominator->sig_digs == 0L) { return 0; } else if (numerator->sig_digs == 0L) { return 1; } else { // Initialize the scratchpad and initially copy the numerator into it. // Also initialize the result's power. char * temp_word = (char *)calloc((int)(2L * (resultnum->precision) + 2L), sizeof(char)); // May only need to be + 1L if (temp_word == 0) { return 0; } long int i; for (i = 0L; i < numerator->sig_digs; i++) { temp_word[(int)(i + 1L)] = numerator->digits[(int)i]; } resultnum->power = (numerator->power - denominator->power); long int sigdigctr = 0L; long int numeratorindex = 0L; // First see if we need to "shift" the numerator by comparing it. i = ((denominator->sig_digs) - 1L); int denom_bigger = 1; while ((i >= 0L) && (denom_bigger == 1)) { if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) > (temp_word[(int)((denominator->sig_digs) - i)])) { i = 0L; } else if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) < (temp_word[(int)((denominator->sig_digs) - i)])) { denom_bigger = 0; } else if (((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) == (temp_word[(int)((denominator->sig_digs) - i)])) && (i == 0L)) { denom_bigger = 0; } i--; } if (denom_bigger == 1) { numeratorindex++; (resultnum->power)--; } // Now the main division loop. Note that there's two ways to terminate: // either we've filled the entire precision of the result word and are // forced to truncate our result, or our answer divides exactly. In the // second case, once we've exhausted the numerator's significant digits // and our temp word contains nothing but zeros, we can end early since // all subsequent iterations would contribute only zeros as well. Note // that special care will be taken to detect extra zeros at the end of // the result so that the sig_digs is recorded correctly. Also, we don't // round, we truncate, which doesn't minimize error. int nonzero = 1; while ((sigdigctr < (resultnum->precision)) && (nonzero == 1)) { // First run the subtraction loop. char current_digit = 0; int numer_bigger = 1; while (numer_bigger == 1) { // To subtract, first run a comparison to see if the numerator // is bigger. If it is, increment the counter and subtract. 
i = ((denominator->sig_digs) - 1L); denom_bigger = 1; if (temp_word[(int)numeratorindex] > 0) { denom_bigger = 0; } while ((i >= 0L) && (denom_bigger == 1)) { if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) > (temp_word[(int)((denominator->sig_digs) + numeratorindex - i)])) { i = 0L; } else if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) < (temp_word[(int)((denominator->sig_digs) + numeratorindex - i)])) { denom_bigger = 0; } else if (((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) == (temp_word[(int)((denominator->sig_digs) + numeratorindex - i)])) && (i == 0L)) { denom_bigger = 0; } i--; } if (denom_bigger == 1) { numer_bigger = 0; } // Increment counter and perform subtraction loop. if (numer_bigger == 1) { current_digit++; for (i = 0L; i < (denominator->sig_digs); i++) { temp_word[(int)((denominator->sig_digs) + numeratorindex - i)] -= (denominator->digits[ (int)((denominator->sig_digs) - i - 1L)]); if ((temp_word[(int)((denominator->sig_digs) + numeratorindex - i)]) < 0) { temp_word[(int)((denominator->sig_digs) + numeratorindex - i)] += 10L; (temp_word[(int)((denominator->sig_digs) + numeratorindex - i - 1L)]) -= 1L; } } } } // If we're past all of the numerator's significant digits, run // zero detection on it to see if we can end early. if (sigdigctr > (numerator->sig_digs)) { // May only need to be >= long int zerocounter = 0L; i = 0L; while ((i == zerocounter) && (i <= (denominator->sig_digs))) { if ((temp_word[(int)(numeratorindex + i)]) < 1) { zerocounter++; } i++; } if (zerocounter == ((denominator->sig_digs) + 1L)) { nonzero = 0; } } // Once we have obtained the proper digit in the result, save it. if (sigdigctr < resultnum->precision) { resultnum->digits[(int)sigdigctr] = current_digit; } sigdigctr++; numeratorindex++; } // Record the result's sig digs, taking care to detect trailing zeros. resultnum->sig_digs = sigdigctr; int trailingzeros = 1; long int zerocount = 0L; i = sigdigctr - 1L; while (trailingzeros == 1) { if (resultnum->digits[(int)i] == '\0') { zerocount++; } else { trailingzeros = 0; } i--; } (resultnum->sig_digs) -= zerocount; free(temp_word); return 1; } } // A convenience wrapper that creates a temporary bignum out of the integer. // Since division is not commutative, two wrappers are given. Any problems // encountered in client functions are passed back up to the original caller. __host__ int bignum_int_divide(bignum * resultnum, long int leftint, bignum * rightnum) { bignum_reset(resultnum); if (rightnum->sig_digs == 0L) { return 0; } else if (leftint == 0L) { return 1; } else { bignum * tempnum = bignum_init(resultnum->precision); if (tempnum == 0) { return 0; } if (bignum_set_int(tempnum, leftint) == 0) { bignum_clear(tempnum); return 0; } int retval = bignum_divide(resultnum, tempnum, rightnum); bignum_clear(tempnum); return retval; } } // A convenience wrapper that creates a temporary bignum out of the integer. // Since division is not commutative, two wrappers are given. Any problems // encountered in client functions are passed back up to the original caller. 
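// Usage note: the two wrappers differ only in which operand is the bignum.
// bignum_int_divide(r, 1L, x) computes 1/x (its _gpu twin is how the kernel builds
// inverseiterations), while bignum_divide_int(r, x, 3L) computes x/3 (used when the
// host applies the Simpson combination above).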
__host__ int bignum_divide_int(bignum * resultnum, bignum * leftnum, long int rightint) { bignum_reset(resultnum); if (rightint == 0L) { return 0; } else if (leftnum->sig_digs == 0L) { return 1; } else { bignum * tempnum = bignum_init(resultnum->precision); if (tempnum == 0) { return 0; } if (bignum_set_int(tempnum, rightint) == 0) { bignum_clear(tempnum); return 0; } int retval = bignum_divide(resultnum, leftnum, tempnum); bignum_clear(tempnum); return retval; } } // Create space for a bignum with the specified precision. // Technically, it's also initialized if we interpret having zero // significant digits as the number having a value of zero. __device__ bignum * bignum_init_gpu(long int precision) { bignum * temp_ptr = (bignum *)malloc(sizeof(bignum)); if (temp_ptr == 0) { return temp_ptr; } temp_ptr->digits = (char *)malloc((int)(precision * sizeof(char))); if ((temp_ptr->digits) == 0) { temp_ptr = 0; return temp_ptr; } int i; for (i = 0; i < precision; i++) { temp_ptr->digits[i] = '\0'; } temp_ptr->power = 0L; temp_ptr->sig_digs = 0L; temp_ptr->precision = precision; return temp_ptr; } // Resets a bignum's value to zero. memcpy isn't used because // why bring the string library into this just for this use? __device__ void bignum_reset_gpu(bignum * numval) { if ((numval->sig_digs) > 0L) { long int i; for (i = 0L; i < numval->precision; i++) { numval->digits[(int)i] = '\0'; } numval->power = 0L; numval->sig_digs = 0L; } return; } // Free memory used by a bignum when we're done with it __device__ void bignum_clear_gpu(bignum * oldnum) { free(oldnum->digits); free(oldnum); return; } // Set an instance of a bignum to an integer value. Note that if we can't // initialize the temp word we need for copying, we return false (value = 0). // We also assume that the number is non-negative since we only store // unsigned numbers. We assume the result is initialized/reset. Finally, // we handle zero specially by just resetting (again?) the result. Note that // we explicitly assume the number to convert fits within the max number of // digits. If we try to convert a number bigger than we can store, it won't work. __device__ int bignum_set_int_gpu(bignum * numval, long int intval) { if (intval > 0L) { // Separate out the individual digits (stored backwards) char * temp_word = (char *)malloc((int)(numval->precision * sizeof(char))); if (temp_word == 0) { return 0; } long int i; for (i = 0; i < numval->precision; i++) { temp_word[(int)i] = '\0'; } long int temp_int = intval; long int counter = 0L; while (temp_int > 0L) { temp_word[(int)counter] = (char)(temp_int % 10L); temp_int = temp_int / 10L; counter++; } // Detect any trailing zeros that we don't need to store numval->power = counter - 1L; long int leadingzeros = 0L; int hasleading = 1; while (hasleading == 1) { if (temp_word[(int)leadingzeros] != 0) { hasleading = 0; } else { leadingzeros++; } } // Store final result into actual bignum variable for (temp_int = 0L; temp_int < (counter - leadingzeros); temp_int++) { numval->digits[(int)temp_int] = temp_word[(int)(counter - temp_int - 1L)]; } numval->sig_digs = counter - leadingzeros; free(temp_word); return 1; } else { bignum_reset_gpu(numval); return 1; } } // Set an instance of a bignum to the value of another bignum. We don't assume // they're both the same precision; just use the precision of the new number. // We do assume that the new number has already been initialized, though. // strncpy is not used since it quits after seeing the first zero. 
__device__ void bignum_set_gpu(bignum * newnum, bignum * oldnum) { if ((oldnum->sig_digs) > 0L) { newnum->power = oldnum->power; newnum->sig_digs = ((oldnum->sig_digs > newnum->precision) ? (newnum->precision) : (oldnum->sig_digs)); long int i; for (i = 0L; i < newnum->sig_digs; i++) { newnum->digits[(int)i] = oldnum->digits[(int)i]; } } else { bignum_reset_gpu(newnum); } return; } // Adds two bignums together and stores the result. Uses the functions to // reset and set the location of the result internally, so current contents of // result operand will be overwritten. Like bignum_set_int, returns 1 if // addition was successful or 0 if an error occurred. A special shortcut is // taken if either (or both) of the operands are zero. Note that it is possible // for large additions to cause underflow to zero. In that case, special care is // taken to make sure the proper input operand is used. Note that we assume the // precision of all three operands is the same. If it's not, something terrible // like a seg fault or incorrect answer will probably occur. Most importantly, // the result operand CANNOT be the same as one of the input operands, since // the result is clobbered immediately and used as a scratchpad. Note that this // is also unsigned addition: not only does it not accept negative numbers, it // also doesn't do subtraction (which, for that matter, isn't commutative). __device__ int bignum_add_gpu(bignum * resultnum, bignum * leftnum, bignum * rightnum) { bignum_reset_gpu(resultnum); if ((leftnum->sig_digs == 0L) && (rightnum->sig_digs > 0L)) { bignum_set_gpu(resultnum, rightnum); return 1; } else if ((rightnum->sig_digs == 0L) && (leftnum->sig_digs > 0L)) { bignum_set_gpu(resultnum, leftnum); return 1; } else if ((leftnum->sig_digs == 0L) && (rightnum->sig_digs == 0L)) { return 1; } else { // First check for overshift: if the larger number's power is too much // bigger than the smaller number's, the smaller will be completely lost, // and we'll just end up with the large number as the result. if ((((leftnum->power - rightnum->power) > 0) && ((leftnum->power - rightnum->power) > resultnum->precision))) { bignum_set_gpu(resultnum, leftnum); return 1; } if ((((rightnum->power - leftnum->power) > 0) && ((rightnum->power - leftnum->power) > resultnum->precision))) { bignum_set_gpu(resultnum, rightnum); return 1; } // Next, shift the smaller operand to match the larger one by copying // it into the result operand as a partial sum. Also copy over the // power and total significant digits into the result. bignum * bigger; bignum * smaller; if ((leftnum->power - rightnum->power) >= 0L) { bigger = leftnum; smaller = rightnum; } else { bigger = rightnum; smaller = leftnum; } long int difference = bigger->power - smaller->power; long int startdigit = smaller->sig_digs + difference; long int transfertotal = smaller->sig_digs; if (startdigit > resultnum->precision) { startdigit = resultnum->precision - difference; transfertotal = startdigit; } long int startdigitcopy = startdigit; startdigit--; long int i; for (i = 0L; i < transfertotal; i++) { if ((startdigit - difference) >= 0L) { resultnum->digits[(int)startdigit] = smaller->digits[(int)(startdigit - difference)]; } startdigit--; } // Now the main addition loop: loop through each digit and add it. // The carry from the previous digit will add to the current one. // Note that we detect any trailing zeros to take from the sig_digs. 
// Also, copy over the power and significant digits resultnum->power = bigger->power; resultnum->sig_digs = startdigitcopy; if (bigger->sig_digs > resultnum->sig_digs) { resultnum->sig_digs = bigger->sig_digs; startdigitcopy = resultnum->sig_digs; } int trailingzeros = 1; long int zerocount = 0L; char carry = 0; for (i = 0L; i < resultnum->sig_digs; i++) { resultnum->digits[(int)(startdigitcopy - i - 1L)] += (bigger->digits[(int)(startdigitcopy - i - 1L)] + carry); if (resultnum->digits[(int)(startdigitcopy - i - 1L)] >= 10) { resultnum->digits[(int)(startdigitcopy - i - 1L)] -= 10; carry = 1; } else { carry = 0; } if (trailingzeros == 1) { if (resultnum->digits[(int)(startdigitcopy - i - 1L)] == '\0') { zerocount++; } else { trailingzeros = 0; } } } // If we've got trailing zeros, subtract them from the final count of // sig_digs. Also, if we have a carry, we need to shift everything... resultnum->sig_digs -= zerocount; if (carry > 0) { transfertotal = resultnum->sig_digs; if (transfertotal == resultnum->precision) { transfertotal--; } startdigitcopy = transfertotal - 1L; for (i = 0L; i < transfertotal; i++) { if (startdigitcopy >= 0L) { resultnum->digits[(int)(startdigitcopy + 1L)] = resultnum->digits[(int)startdigitcopy]; } else if ((startdigitcopy + 1L) >= 0L) { resultnum->digits[(int)(startdigitcopy + 1L)] = '\0'; } startdigitcopy--; } resultnum->digits[0] = carry; resultnum->power++; resultnum->sig_digs++; } if (resultnum->sig_digs > resultnum->precision) { resultnum->sig_digs = resultnum->precision; } return 1; } } // A convenience wrapper that temporarily creates a new bignum out of the // given integer, calls bignum_add with it and the other operand, and deletes // the temporary bignum before exiting. Any problems that bignum_add encounters // are passed back up through this function and returned to the caller. __device__ int bignum_add_int_gpu(bignum * resultnum, bignum * leftnum, long int rightint) { bignum_reset_gpu(resultnum); if ((rightint == 0L) && (leftnum->sig_digs > 0L)) { bignum_set_gpu(resultnum, leftnum); return 1; } else if ((leftnum->sig_digs == 0L) && (rightint > 0L)) { return bignum_set_int_gpu(resultnum, rightint); } else if ((leftnum->sig_digs == 0L) && (rightint == 0L)) { return 1; } else { bignum * tempnum = bignum_init_gpu(resultnum->precision); if (tempnum == 0) { return 0; } if (bignum_set_int_gpu(tempnum, rightint) == 0) { bignum_clear_gpu(tempnum); return 0; } int retval = bignum_add_gpu(resultnum, leftnum, tempnum); bignum_clear_gpu(tempnum); return retval; } } // Multiplies two bignums together and stores the result. Like add, uses // functions to reset and set the location of the result, and returns 1 upon // success or 0 if an error occurred. A special shortcut is taken if either // operand is zero, since the result will thus also be zero. Note that we assume // the precision of all three operands is the same. If it's not, something // terrible like a seg fault or incorrect answer will probably occur. Most // importantly, the result operand CANNOT be the same as one of the input // operands, since the result is clobbered immediately and used as a scratchpad. // Also, note that this is unsigned: it assumes both operands are positive. 
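// For example: 1.5 is stored as digits {1,5} with power 0 and sig_digs 2,
// while 30 is stored as digits {3} with power 1 and sig_digs 1. Multiplying
// them yields digits {4,5}, power 1, sig_digs 2, which reads as 4.5 x 10^1 = 45.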
__device__ int bignum_mult_gpu(bignum * resultnum, bignum * leftnum, bignum * rightnum) { bignum_reset_gpu(resultnum); if ((leftnum->sig_digs == 0L) || (rightnum->sig_digs == 0L)) { return 1; } else { // Initialize the scratchpad and find the digit limits char * temp_word = (char *)malloc((int)(2L * (resultnum->precision) * sizeof(char))); if (temp_word == 0) { return 0; } long int i; for (i = 0; i < (2L * resultnum->precision); i++) { temp_word[(int)i] = '\0'; } bignum * bigger; bignum * smaller; if (((signed long int)leftnum->sig_digs - (signed long int)rightnum->sig_digs) >= 0L) { bigger = leftnum; smaller = rightnum; } else if ((rightnum->sig_digs - leftnum->sig_digs) > 0L) { bigger = rightnum; smaller = leftnum; } long int bigstart = (bigger->sig_digs) - 1L; long int smallstart = (smaller->sig_digs) - 1L; long int bigcounter, smallcounter; char carry = 0; // Perform the shift-addition loop. We choose to loop over each // digit of the smaller number for fewer overall iterations. If // the current bigloop has a zero, we can just skip that iteration. // Also, record the final carry, power, and sig_digs values. for (bigcounter = 0L; bigcounter < (smaller->sig_digs); bigcounter++) { if (smaller->digits[(int)(smallstart - bigcounter)] != '\0') { carry = 0; for (smallcounter = 0L; smallcounter < (bigger->sig_digs); smallcounter++) { temp_word[(int)((2L * (resultnum->precision)) - smallcounter - bigcounter - 1L)] += (carry + (smaller->digits[(int)(smallstart - bigcounter)] * bigger->digits[(int)(bigstart - smallcounter)])); carry = temp_word[(int)((2L * (resultnum->precision)) - smallcounter - bigcounter - 1L)] / 10; temp_word[(int)((2L * (resultnum->precision)) - smallcounter - bigcounter - 1L)] %= 10; } temp_word[(int)((2L * (resultnum->precision)) - bigcounter - (bigger->sig_digs) - 1L)] = carry; } } resultnum->power = ((bigger->power) + (smaller->power)); resultnum->sig_digs = ((bigger->sig_digs) + (smaller->sig_digs)); // Adjust for lack of a final carry or trailing zeros. if (carry < 1) { (resultnum->sig_digs)--; (resultnum->power)--; } (resultnum->power)++; int trailingzeros = 1; long int zerocount = 0L; i = (2L * (resultnum->precision) - 1L); while (trailingzeros == 1) { if (temp_word[(int)i] == '\0') { zerocount++; } else { trailingzeros = 0; } i--; } resultnum->sig_digs -= zerocount; if ((resultnum->sig_digs) > (resultnum->precision)) { resultnum->sig_digs = (resultnum->precision); } // Finally, copy from the temp word into the result, taking into // account any digits we may lose due to precision. long int tempstart = (2L * (resultnum->precision)) - ((bigger->sig_digs) + (smaller->sig_digs)); if (carry < 1) { tempstart++; } for (i = 0L; i < (resultnum->sig_digs); i++) { resultnum->digits[(int)i] = temp_word[(int)(tempstart + i)]; } free(temp_word); return 1; } } // Like bignum_add_int, a convenience wrapper that creates a temporary bignum // out of the integer and passes it to bignum_mult. Any problems encountered // in client functions are passed back up to the original caller. __device__ int bignum_mult_int_gpu(bignum * resultnum, bignum * leftnum, long int rightint) { bignum_reset_gpu(resultnum); if ((leftnum->sig_digs == 0L) || (rightint == 0L)) { return 1; } else { bignum * tempnum = bignum_init_gpu(resultnum->precision); if (tempnum == 0) { return 0; } if (bignum_set_int_gpu(tempnum, rightint) == 0) { bignum_clear_gpu(tempnum); return 0; } int retval = bignum_mult_gpu(resultnum, leftnum, tempnum); bignum_clear_gpu(tempnum); return retval; } } // Divides two bignums. 
Taken in terms of a fraction, leftnum is the numerator // and rightnum is the denominator. Performs an explicit check to make sure // the denominator is not zero, and returns 0 (an error) if it is. Returns 1 upon // success or 0 if an error occurs. A special shortcut is taken if the numerator is // zero. Note that we assume the precision of all three operands is the same. If it's // not, something terrible like a seg fault or incorrect answer will probably occur. // Most importantly, the result operand CANNOT be the same as one of the input // operands, since the result is clobbered immediately and used as a scratchpad. // Also, note that this is unsigned: it assumes both operands are positive. __device__ int bignum_divide_gpu(bignum * resultnum, bignum * numerator, bignum * denominator) { bignum_reset_gpu(resultnum); if (denominator->sig_digs == 0L) { return 0; } else if (numerator->sig_digs == 0L) { return 1; } else { // Initialize the scratchpad and initially copy the numerator into it. // Also initialize the result's power. char * temp_word = (char *)malloc((int)(2L * (resultnum->precision) + 2L * sizeof(char))); // May only need to be + 1L if (temp_word == 0) { return 0; } long int i; temp_word[0] = '\0'; for (i = 0L; i < numerator->sig_digs; i++) { temp_word[(int)(i + 1L)] = numerator->digits[(int)i]; } for (i = (1L + numerator->sig_digs); i < (2L * resultnum->precision + 2L); i++) { temp_word[(int)i] = '\0'; } resultnum->power = (numerator->power - denominator->power); long int sigdigctr = 0L; long int numeratorindex = 0L; // First see if we need to "shift" the numerator by comparing it. i = ((denominator->sig_digs) - 1L); int denom_bigger = 1; while ((i >= 0L) && (denom_bigger == 1)) { if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) > (temp_word[(int)((denominator->sig_digs) - i)])) { i = 0L; } else if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) < (temp_word[(int)((denominator->sig_digs) - i)])) { denom_bigger = 0; } else if (((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) == (temp_word[(int)((denominator->sig_digs) - i)])) && (i == 0L)) { denom_bigger = 0; } i--; } if (denom_bigger == 1) { numeratorindex++; (resultnum->power)--; } // Now the main division loop. Note that there's two ways to terminate: // either we've filled the entire precision of the result word and are // forced to truncate our result, or our answer divides exactly. In the // second case, once we've exhausted the numerator's significant digits // and our temp word contains nothing but zeros, we can end early since // all subsequent iterations would contribute only zeros as well. Note // that special care will be taken to detect extra zeros at the end of // the result so that the sig_digs is recorded correctly. Also, we don't // round, we truncate, which doesn't minimize error. int nonzero = 1; while ((sigdigctr < (resultnum->precision)) && (nonzero == 1)) { // First run the subtraction loop. char current_digit = 0; int numer_bigger = 1; while (numer_bigger == 1) { // To subtract, first run a comparison to see if the numerator // is bigger. If it is, increment the counter and subtract. 
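// Concretely, the window being compared is temp_word[numeratorindex] through
// temp_word[numeratorindex + denominator->sig_digs], where the leading slot
// holds any remainder carried over from the previous digit position. A nonzero
// leading slot means the window is automatically larger than the denominator;
// otherwise the digits are compared most-significant first. The subtraction
// repeats until the window drops below the denominator, and the number of
// successful subtractions becomes the next quotient digit.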
i = ((denominator->sig_digs) - 1L); denom_bigger = 1; if (temp_word[(int)numeratorindex] > 0) { denom_bigger = 0; } while ((i >= 0L) && (denom_bigger == 1)) { if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) > (temp_word[(int)((denominator->sig_digs) + numeratorindex - i)])) { i = 0L; } else if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) < (temp_word[(int)((denominator->sig_digs) + numeratorindex - i)])) { denom_bigger = 0; } else if (((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) == (temp_word[(int)((denominator->sig_digs) + numeratorindex - i)])) && (i == 0L)) { denom_bigger = 0; } i--; } if (denom_bigger == 1) { numer_bigger = 0; } // Increment counter and perform subtraction loop. if (numer_bigger == 1) { current_digit++; for (i = 0L; i < (denominator->sig_digs); i++) { temp_word[(int)((denominator->sig_digs) + numeratorindex - i)] -= (denominator->digits[ (int)((denominator->sig_digs) - i - 1L)]); if ((temp_word[(int)((denominator->sig_digs) + numeratorindex - i)]) < 0) { temp_word[(int)((denominator->sig_digs) + numeratorindex - i)] += 10L; (temp_word[(int)((denominator->sig_digs) + numeratorindex - i - 1L)]) -= 1L; } } } } // If we're past all of the numerator's significant digits, run // zero detection on it to see if we can end early. if (sigdigctr > (numerator->sig_digs)) { // May only need to be >= long int zerocounter = 0L; i = 0L; while ((i == zerocounter) && (i <= (denominator->sig_digs))) { if ((temp_word[(int)(numeratorindex + i)]) < 1) { zerocounter++; } i++; } if (zerocounter == ((denominator->sig_digs) + 1L)) { nonzero = 0; } } // Once we have obtained the proper digit in the result, save it. if (sigdigctr < resultnum->precision) { resultnum->digits[(int)sigdigctr] = current_digit; } sigdigctr++; numeratorindex++; } // Record the result's sig digs, taking care to detect trailing zeros. resultnum->sig_digs = sigdigctr; int trailingzeros = 1; long int zerocount = 0L; i = sigdigctr - 1L; while (trailingzeros == 1) { if (resultnum->digits[(int)i] == '\0') { zerocount++; } else { trailingzeros = 0; } i--; } (resultnum->sig_digs) -= zerocount; free(temp_word); return 1; } } // A convenience wrapper that creates a temporary bignum out of the integer. // Since division is not commutative, two wrappers are given. Any problems // encountered in client functions are passed back up to the original caller. __device__ int bignum_int_divide_gpu(bignum * resultnum, long int leftint, bignum * rightnum) { bignum_reset_gpu(resultnum); if (rightnum->sig_digs == 0L) { return 0; } else if (leftint == 0L) { return 1; } else { bignum * tempnum = bignum_init_gpu(resultnum->precision); if (tempnum == 0) { return 0; } if (bignum_set_int_gpu(tempnum, leftint) == 0) { bignum_clear_gpu(tempnum); return 0; } int retval = bignum_divide_gpu(resultnum, tempnum, rightnum); bignum_clear_gpu(tempnum); return retval; } } // A convenience wrapper that creates a temporary bignum out of the integer. // Since division is not commutative, two wrappers are given. Any problems // encountered in client functions are passed back up to the original caller. 
__device__ int bignum_divide_int_gpu(bignum * resultnum, bignum * leftnum, long int rightint) { bignum_reset_gpu(resultnum); if (rightint == 0L) { return 0; } else if (leftnum->sig_digs == 0L) { return 1; } else { bignum * tempnum = bignum_init_gpu(resultnum->precision); if (tempnum == 0) { return 0; } if (bignum_set_int_gpu(tempnum, rightint) == 0) { bignum_clear_gpu(tempnum); return 0; } int retval = bignum_divide_gpu(resultnum, leftnum, tempnum); bignum_clear_gpu(tempnum); return retval; } }
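/*
 * An illustrative sketch of how the host-side arithmetic wrappers above can be
 * combined. The helper name example_bignum_division and the 25-digit precision
 * are arbitrary choices, nothing in the program calls this function, and it
 * only uses host helpers defined earlier in this file. It follows the same
 * reset-before-reuse pattern the rest of the program relies on, and keeps the
 * result operand distinct from the inputs as the comments above require.
 */
__host__ void example_bignum_division(void) {
    bignum * result = bignum_init(25L);
    bignum * scratch = bignum_init(25L);
    bignum * three = bignum_init(25L);
    if (result == 0 || scratch == 0 || three == 0) { return; }
    /* 1 / 3: integer numerator divided by a bignum denominator. */
    bignum_set_int(three, 3L);
    bignum_int_divide(result, 1L, three);
    printf("1/3 is approximately ");
    bignum_print(result, 10L);
    printf("\n");
    /* (1/3) * 2, then / 3, giving 2/9: bignum numerator, integer denominator. */
    bignum_mult_int(scratch, result, 2L);
    bignum_reset(result);
    bignum_divide_int(result, scratch, 3L);
    printf("2/9 is approximately ");
    bignum_print(result, 10L);
    printf("\n");
    bignum_clear(result);
    bignum_clear(scratch);
    bignum_clear(three);
}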
111891cb1d89ab47e819b0ebd409aaa93285344a.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" /*This program calculates pi using a Simpson's Rule estimation of the integral of arctangent from 0 to 1. When inputting the number of iterations to perform, more iterations = more precision. The number of iterations is given as a command line argument. If no argument is provided, a default value of 20,000 is used. At 20,000 iterations, the value of pi is guaranteed to be accurate up to 8 decimal places. This version uses NVIDIA CUDA to perform parallel computation of the partial sums on a GPU. The amount of work each core does is given by the two #defines below. These values will need to be tuned for each device this code runs on in order to get maximum performance. For example, on my own personal machine, which has a GeForce GT 650M discrete graphics card, there are now only 12 streaming multiprocessors (SM's), with 32 cores each, for a total of 384 cores. Thus, 384 threads will be created, with each thread performing multiple iterations (total_iterations / (NUM_BLOCKS * THREADS_PER_BLOCK) to be precise). Thus, the more iterations given, the more work each thread does. The number of threads is kept constant in order to make clean-up easier and to not exceed the capabilities (max number of threads or blocks) of any particular GPU device. The last thread might have a few extra iterations if that number doesn't divide evenly. The number of decimal digits to use as the precision of the calculations is also given as a command-line argument. Obviously, the higher the number, the more digits you can successfully calculate. Accuracy still relies on the number of iterations, though: a high number of digits but low number of iterations will still result in a low number of digits of precision. Thus, you should only increase the number of digits when your iterations get too high and you find that your calculations are no longer precise due to internal rounding error. You'll probably find that increasing the digits will decrease performance severely. It is recommended, though, that since error accumulates, the more digits you want to find, the more padding you'll need to add to the end of the word to absorb that error. As a general rule of thumb, if you want to calculate x digits, make your words 2x long. Of course, this also increases the runtime by 2x. Compilation on my own machine actually makes OSC's steps look like child's play. The best way to do this is to download Visual Studio. Then, go to http://developer.nvidia.com/nsight-visual-studio-edition-downloads and follow the steps to install the proper graphics driver, then the CUDA toolkit, then the Nsight Visual Studio Plugin. So, the project you create will be a new CUDA project, which will link to the proper headers. Compile it, then go to NSIGHT in the menu > Start CUDA Debugging. Eventually, the proper result will pop up in the console window that opens. Note that if you want to change the number of iterations/precision, you'll need to edit the VS project settings under PROJECT > "Name" Properties... and then under Configuration Properties > Debugging, add the values you want to the Command Arguments field, since the default is to build the project for Debug, not Release. Also, the sleep function is added just because the debug console window disappears in Visual Studio. Grr. If you want to run this separately... you can't. I tried to compile this from the command line. 
It worked, even though I had to add extra system variables for NVCC to Window's Environment Variables, and then pass the path to VS's C compiler to NVCC when I ran it. Running it, however, did not. It crashed the graphics driver and returned that an error occurred upon trying to copy the results back from the GPU. Even debugging using DEBUG > Start Debugging in VS crashed it that way. Only debugging via NSIGHT actually worked. But, whatever, we got results. Remember that this solution uses dynamic memory allocation on the device, so only CUDA 2.0+ devices will run this code. */ // Includes. Optimum values for my computer are: // NUM_BLOCKS 12 // THREADS_PER_BLOCK 32 #include <stdio.h> #include <stdlib.h> #include <time.h> #include <Windows.h> #define NUM_BLOCKS 12 #define THREADS_PER_BLOCK 32 // A bignum is stored as all its decimal digits, separated into an array. // Really, it's quite terrible for performance, but it allows infinite digits. // Or at least as many as we can store in memory. The power tells us where to // put the decimal point, and the number of significant digits tells us how // many of the digits in the number are actually used. The precision tells us // the maximum number of digits possible for this particular instance. typedef struct { signed long int power; unsigned long int sig_digs; char * digits; unsigned long int precision; } bignum; // Function pointers, mostly for bignum operations. Note that in our use // below, we assume most of the arithmetic functions don't fail and thus // don't check their return values. Hope they're tested well... Notice // now that we have mirrored versions for the GPU, most of which just // have to call the GPU memory allocation functions. __global__ void calculate(long *, long *, char *, long *, long *, char *, long, long); __host__ bignum * bignum_init(long int); __host__ void bignum_reset(bignum *); __host__ void bignum_clear(bignum *); __host__ int bignum_set_int(bignum *, long int); __host__ void bignum_set(bignum *, bignum *); __host__ void bignum_print(bignum *, long int); __host__ int bignum_add(bignum *, bignum *, bignum *); __host__ int bignum_add_int(bignum *, bignum *, long int); __host__ int bignum_mult(bignum *, bignum *, bignum *); __host__ int bignum_mult_int(bignum *, bignum *, long int); __host__ int bignum_divide(bignum *, bignum *, bignum *); __host__ int bignum_int_divide(bignum *, long int, bignum *); __host__ int bignum_divide_int(bignum *, bignum *, long int); __device__ bignum * bignum_init_gpu(long int); __device__ void bignum_reset_gpu(bignum *); __device__ void bignum_clear_gpu(bignum *); __device__ int bignum_set_int_gpu(bignum *, long int); __device__ void bignum_set_gpu(bignum *, bignum *); __device__ int bignum_add_gpu(bignum *, bignum *, bignum *); __device__ int bignum_add_int_gpu(bignum *, bignum *, long int); __device__ int bignum_mult_gpu(bignum *, bignum *, bignum *); __device__ int bignum_mult_int_gpu(bignum *, bignum *, long int); __device__ int bignum_divide_gpu(bignum *, bignum *, bignum *); __device__ int bignum_int_divide_gpu(bignum *, long int, bignum *); __device__ int bignum_divide_int_gpu(bignum *, bignum *, long int); // Main function int main(int argc, char * argv[]) { // Obtain command line arguments long iterations = 20000L; if (argc > 1) { iterations = atol(argv[1]); if (iterations < 1L) { iterations = 20000L; } } long max_digits = 25L; if (argc > 2) { max_digits = atoi(argv[2]); if (max_digits < 1L) { max_digits = 25L; } } // Initialize global storage. 
Notice that we now need extra arrays for data // transfer between the GPU and regular RAM. These will hold the partial // sums that each of the threads calculate. Unfortunately, due to the way // bignums are structured, each of their arguments has to be transferred // separately. Luckily, this only happens once. long clock_start = (long)clock(); long int i, j; if (cudaDeviceSetLimit(cudaLimitMallocHeapSize, (NUM_BLOCKS * THREADS_PER_BLOCK * 16384)) != cudaSuccess) { printf("\nError setting GPU heap size.\n"); return 1; } cudaDeviceSynchronize(); long * hosttrappower = (long *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK), sizeof(long)); long * hosttrapsig_digs = (long *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK), sizeof(long)); char * hosttrapdigits = (char *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits), sizeof(char)); long * hostmidpower = (long *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK), sizeof(long)); long * hostmidsig_digs = (long *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK), sizeof(long)); char * hostmiddigits = (char *)calloc((int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits), sizeof(char)); if ((hosttrappower == 0) || (hosttrapsig_digs == 0) || (hosttrapdigits == 0) || (hostmidpower == 0) || (hostmidsig_digs == 0) || (hostmiddigits == 0)) { printf("\nError allocating memory on the CPU.\n"); return 1; } long * devicetrappower; long * devicetrapsig_digs; char * devicetrapdigits; long * devicemidpower; long * devicemidsig_digs; char * devicemiddigits; if (cudaMalloc((void**)&devicetrappower, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long))) != cudaSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; } if (cudaMalloc((void**)&devicetrapsig_digs, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long))) != cudaSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; } if (cudaMalloc((void**)&devicetrapdigits, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits * sizeof(char))) != cudaSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; } if (cudaMalloc((void**)&devicemidpower, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long))) != cudaSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; } if (cudaMalloc((void**)&devicemidsig_digs, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long))) != cudaSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; } if (cudaMalloc((void**)&devicemiddigits, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits * sizeof(char))) != cudaSuccess) { printf("\nError allocating memory on GPU.\n"); return 1; } cudaDeviceSynchronize(); char * accepted_pi = "3.14159265358979323846264338327950288419716939937510" "58209749445923078164062862089986280348253421170679\0"; char pi_printer[2]; pi_printer[0] = '0'; pi_printer[1] = '\0'; // Split off worker threads. When dividing the work, if the number of // threads does not evenly divide into the desired number of iterations, // give any extra iterations to the final thread. This gives the final // thread at most (num_threads - 1) extra iterations. Notice that this // is a 1D-grid of work, and we use function arguments this time. Also, // remember the number of threads is held constant, thanks to #defines, // at NUM_BLOCKS * THREADS_PER_BLOCK. 
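// An illustrative host-side check of the work split just described (this
// diagnostic is an added example and is not required for the calculation):
// with the default 20,000 iterations and 12 * 32 = 384 threads, each thread
// performs 52 iterations and the final thread also absorbs the remaining 32.
{
    long total_threads = (long)(NUM_BLOCKS * THREADS_PER_BLOCK);
    long per_thread = iterations / total_threads;
    long leftover = iterations - (per_thread * total_threads);
    printf("Work split: %ld threads, %ld iterations each, %ld extra for the last thread\n",
        total_threads, per_thread, leftover);
}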
dim3 numBlocks(NUM_BLOCKS); dim3 threadsPerBlock(THREADS_PER_BLOCK); calculate <<<numBlocks, threadsPerBlock >>>(devicetrappower, devicetrapsig_digs, devicetrapdigits, devicemidpower, devicemidsig_digs, devicemiddigits, iterations, max_digits); cudaDeviceSynchronize(); // Copy results back from GPU if (cudaMemcpy(hosttrappower, devicetrappower, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)), cudaMemcpyDeviceToHost) != cudaSuccess) { printf("\nError copying memory from GPU.\n"); return 3; } if (cudaMemcpy(hosttrapsig_digs, devicetrapsig_digs, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)), cudaMemcpyDeviceToHost) != cudaSuccess) { printf("\nError copying memory from GPU.\n"); return 3; } if (cudaMemcpy(hosttrapdigits, devicetrapdigits, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits * sizeof(char)), cudaMemcpyDeviceToHost) != cudaSuccess) { printf("\nError copying memory from GPU.\n"); return 3; } if (cudaMemcpy(hostmidpower, devicemidpower, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)), cudaMemcpyDeviceToHost) != cudaSuccess) { printf("\nError copying memory from GPU.\n"); return 3; } if (cudaMemcpy(hostmidsig_digs, devicemidsig_digs, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * sizeof(long)), cudaMemcpyDeviceToHost) != cudaSuccess) { printf("\nError copying memory from GPU.\n"); return 3; } if (cudaMemcpy(hostmiddigits, devicemiddigits, (int)(NUM_BLOCKS * THREADS_PER_BLOCK * max_digits * sizeof(char)), cudaMemcpyDeviceToHost) != cudaSuccess) { printf("\nError copying memory from GPU.\n"); return 3; } cudaDeviceSynchronize(); if (cudaFree(devicetrappower) != cudaSuccess) { printf("\nError freeing GPU memory.\n"); return 3; } if (cudaFree(devicetrapsig_digs) != cudaSuccess) { printf("\nError freeing GPU memory.\n"); return 3; } if (cudaFree(devicetrapdigits) != cudaSuccess) { printf("\nError freeing GPU memory.\n"); return 3; } if (cudaFree(devicemidpower) != cudaSuccess) { printf("\nError freeing GPU memory.\n"); return 3; } if (cudaFree(devicemidsig_digs) != cudaSuccess) { printf("\nError freeing GPU memory.\n"); return 3; } if (cudaFree(devicemiddigits) != cudaSuccess) { printf("\nError freeing GPU memory.\n"); return 3; } // After worker threads end, clean up each of the partial sums bignum * trap = bignum_init(max_digits); bignum * mid = bignum_init(max_digits); bignum * temp = bignum_init(max_digits); bignum * simp = bignum_init(max_digits); if (trap == 0 || mid == 0 || temp == 0 || simp == 0) { printf("Error allocating memory. 
Now exiting.\n"); return -1; } for (i = 0L; i < (NUM_BLOCKS * THREADS_PER_BLOCK); i++) { simp->power = hosttrappower[i]; simp->sig_digs = hosttrapsig_digs[i]; for (j = 0L; j < max_digits; j++) { simp->digits[(int)j] = hosttrapdigits[(int)((i * max_digits) + j)]; } bignum_add(temp, trap, simp); bignum_reset(trap); bignum_reset(simp); bignum_set(trap, temp); bignum_reset(temp); simp->power = hostmidpower[i]; simp->sig_digs = hostmidsig_digs[i]; for (j = 0L; j < max_digits; j++) { simp->digits[(int)j] = hostmiddigits[(int)((i * max_digits) + j)]; } bignum_add(temp, mid, simp); bignum_reset(mid); bignum_reset(simp); bignum_set(mid, temp); bignum_reset(temp); } // Finally, Simpson's Rule is applied bignum_mult_int(temp, mid, 2L); bignum_reset(mid); bignum_set(mid, temp); bignum_reset(temp); bignum_add(temp, trap, mid); bignum_reset(trap); bignum_set(trap, temp); bignum_reset(temp); bignum_divide_int(temp, trap, 3L); bignum_reset(trap); bignum_set(trap, temp); bignum_reset(temp); bignum_mult_int(simp, trap, 4L); long clock_end = (long)clock(); printf("The calculated value of pi is "); bignum_print(simp, 0L); printf("\nThe actual value of pi is 3."); for (i = 0L; i < (max_digits - 1L); i++) { // This may print an extra digit or two because, somewhere down in the // code, we're losing our last sig dig during normal math, but it's // bubbling back up, and causing the final result to lose a place or // two. It's not a big deal, and I don't want to do anything about it, // so we'll just have the ends of the numbers not line up. Whatever. pi_printer[0] = accepted_pi[(int)(i + 2L)]; printf("%s", pi_printer); } printf("\nThe time taken to calculate this was %.2f seconds\n", ((float)(clock_end - clock_start)) / (float)CLOCKS_PER_SEC); printf("The number of iterations performed was %ld\n", iterations); Sleep(5000); // Free global storage free(hosttrappower); free(hosttrapsig_digs); free(hosttrapdigits); free(hostmidpower); free(hostmidsig_digs); free(hostmiddigits); bignum_clear(trap); bignum_clear(mid); bignum_clear(simp); bignum_clear(temp); return 0; } // Function executed by each thread to incrementally calculate the overall value __global__ void calculate(long * devicetrappower, long * devicetrapsig_digs, char * devicetrapdigits, long * devicemidpower, long * devicemidsig_digs, char * devicemiddigits, long iterations, long max_digits) { // Initialize needed variables and check for errors long threadid = threadIdx.x + (blockIdx.x * THREADS_PER_BLOCK); long lowlimit = threadid * (iterations / (NUM_BLOCKS * THREADS_PER_BLOCK)); long highlimit = (((threadid + 1L) == (NUM_BLOCKS * THREADS_PER_BLOCK)) ? 
iterations : ((threadid + 1L) * (iterations / (NUM_BLOCKS * THREADS_PER_BLOCK)))); bignum * trap = bignum_init_gpu(max_digits); bignum * mid = bignum_init_gpu(max_digits); bignum * inverseiterations = bignum_init_gpu(max_digits); bignum * temp_holder = bignum_init_gpu(max_digits); bignum * temp_holder2 = bignum_init_gpu(max_digits); bignum * inc = bignum_init_gpu(max_digits); bignum * leftrect = bignum_init_gpu(max_digits); bignum * rightrect = bignum_init_gpu(max_digits); if (trap == 0 || mid == 0 || inverseiterations == 0 || temp_holder == 0 || temp_holder2 == 0 || inc == 0 || leftrect == 0 || rightrect == 0) { return; } // Initialize values of needed variables bignum_set_int_gpu(temp_holder, iterations); bignum_int_divide_gpu(inverseiterations, 1L, temp_holder); bignum_reset_gpu(temp_holder); long i; long k = lowlimit; bignum_divide_int_gpu(temp_holder, inverseiterations, 2L); bignum_set_int_gpu(inc, k); bignum_mult_gpu(temp_holder2, inc, inverseiterations); bignum_reset_gpu(inc); bignum_set_gpu(inc, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_add_gpu(temp_holder2, inc, temp_holder); bignum_reset_gpu(inc); bignum_set_gpu(inc, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_reset_gpu(temp_holder); // Main iteration loop. Note that the values of inverseiterations, inc, // mid, and trap are preserved across loop iterations, as is counter k. // inverseiterations is a constant that is stored for simplicity. Man, // this is looking more and more like assembly... for (i = lowlimit; i < highlimit; i++) { // First, the trapezoid rule is used to estimate pi bignum_reset_gpu(leftrect); bignum_set_int_gpu(leftrect, k); bignum_mult_gpu(temp_holder2, leftrect, inverseiterations); bignum_reset_gpu(leftrect); bignum_set_gpu(leftrect, temp_holder2); bignum_reset_gpu(temp_holder2); k++; bignum_reset_gpu(rightrect); bignum_set_int_gpu(rightrect, k); bignum_mult_gpu(temp_holder2, rightrect, inverseiterations); bignum_reset_gpu(rightrect); bignum_set_gpu(rightrect, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_add_gpu(temp_holder, leftrect, rightrect); bignum_divide_int_gpu(temp_holder2, temp_holder, 2L); bignum_reset_gpu(temp_holder); bignum_set_gpu(temp_holder, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_mult_gpu(temp_holder2, temp_holder, temp_holder); bignum_reset_gpu(temp_holder); bignum_set_gpu(temp_holder, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_add_int_gpu(temp_holder2, temp_holder, 1L); bignum_reset_gpu(temp_holder); bignum_set_gpu(temp_holder, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_int_divide_gpu(temp_holder2, 1L, temp_holder); bignum_reset_gpu(temp_holder); bignum_set_gpu(temp_holder, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_mult_gpu(temp_holder2, temp_holder, inverseiterations); bignum_reset_gpu(temp_holder); bignum_set_gpu(temp_holder, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_add_gpu(temp_holder2, trap, temp_holder); bignum_reset_gpu(trap); bignum_set_gpu(trap, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_reset_gpu(temp_holder); // Next, the midpoint rule is also used to estimate pi bignum_set_gpu(temp_holder, inc); bignum_add_gpu(temp_holder2, inc, inverseiterations); bignum_reset_gpu(inc); bignum_set_gpu(inc, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_mult_gpu(temp_holder2, temp_holder, temp_holder); bignum_reset_gpu(temp_holder); bignum_set_gpu(temp_holder, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_add_int_gpu(temp_holder2, temp_holder, 1L); bignum_reset_gpu(temp_holder); 
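// At this point temp_holder2 holds 1 + x_mid^2 for the current midpoint. The
// calls that follow move it back into temp_holder, take the reciprocal, scale
// it by the interval width (inverseiterations), and accumulate the contribution
// into the running midpoint-rule sum in mid.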
bignum_set_gpu(temp_holder, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_int_divide_gpu(temp_holder2, 1L, temp_holder); bignum_reset_gpu(temp_holder); bignum_set_gpu(temp_holder, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_mult_gpu(temp_holder2, temp_holder, inverseiterations); bignum_reset_gpu(temp_holder); bignum_set_gpu(temp_holder, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_add_gpu(temp_holder2, mid, temp_holder); bignum_reset_gpu(mid); bignum_set_gpu(mid, temp_holder2); bignum_reset_gpu(temp_holder2); bignum_reset_gpu(temp_holder); } // Save partial result, clear memory, and exit devicetrappower[threadid] = trap->power; devicetrapsig_digs[threadid] = trap->sig_digs; for (i = 0; i < max_digits; i++) { devicetrapdigits[(threadid * max_digits) + i] = trap->digits[i]; } devicemidpower[threadid] = mid->power; devicemidsig_digs[threadid] = mid->sig_digs; for (i = 0; i < max_digits; i++) { devicemiddigits[(threadid * max_digits) + i] = mid->digits[i]; } bignum_clear_gpu(trap); bignum_clear_gpu(mid); bignum_clear_gpu(inverseiterations); bignum_clear_gpu(temp_holder); bignum_clear_gpu(temp_holder2); bignum_clear_gpu(inc); bignum_clear_gpu(leftrect); bignum_clear_gpu(rightrect); } // Create space for a bignum with the specified precision. // Technically, it's also initialized if we interpret having zero // significant digits as the number having a value of zero. __host__ bignum * bignum_init(long int precision) { bignum * temp_ptr = (bignum *)calloc(1, sizeof(bignum)); if (temp_ptr == 0) { return temp_ptr; } temp_ptr->digits = (char *)calloc((int)precision, sizeof(char)); if ((temp_ptr->digits) == 0) { free(temp_ptr); return 0; } temp_ptr->precision = precision; return temp_ptr; } // Resets a bignum's value to zero. memcpy isn't used because // why bring the string library into this just for this use? __host__ void bignum_reset(bignum * numval) { if ((numval->sig_digs) > 0L) { long int i; for (i = 0L; i < numval->precision; i++) { numval->digits[(int)i] = '\0'; } numval->power = 0L; numval->sig_digs = 0L; } return; } // Free memory used by a bignum when we're done with it __host__ void bignum_clear(bignum * oldnum) { free(oldnum->digits); free(oldnum); return; } // Set an instance of a bignum to an integer value. Note that if we can't // initialize the temp word we need for copying, we return false (value = 0). // We also assume that the number is non-negative since we only store // unsigned numbers. We assume the result is initialized/reset. Finally, // we handle zero specially by just resetting (again?) the result. Note that // we explicitly assume the number to convert fits within the max number of // digits. If we try to convert a number bigger than we can store, it won't work.
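// For example, bignum_set_int(n, 30L) produces digits {3}, power 1, and
// sig_digs 1, which is read back as 3.0 x 10^1 = 30.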
__host__ int bignum_set_int(bignum * numval, long int intval) { if (intval > 0L) { // Separate out the individual digits (stored backwards) char * temp_word = (char *)calloc((int)(numval->precision), sizeof(char)); if (temp_word == 0) { return 0; } long int temp_int = intval; long int counter = 0L; while (temp_int > 0L) { temp_word[(int)counter] = (char)(temp_int % 10L); temp_int = temp_int / 10L; counter++; } // Detect any trailing zeros that we don't need to store numval->power = counter - 1L; long int leadingzeros = 0L; int hasleading = 1; while (hasleading == 1) { if (temp_word[(int)leadingzeros] != 0) { hasleading = 0; } else { leadingzeros++; } } // Store final result into actual bignum variable for (temp_int = 0L; temp_int < (counter - leadingzeros); temp_int++) { numval->digits[(int)temp_int] = temp_word[(int)(counter - temp_int - 1L)]; } numval->sig_digs = counter - leadingzeros; free(temp_word); return 1; } else { bignum_reset(numval); return 1; } } // Set an instance of a bignum to the value of another bignum. We don't assume // they're both the same precision; just use the precision of the new number. // We do assume that the new number has already been initialized, though. // strncpy is not used since it quits after seeing the first zero. __host__ void bignum_set(bignum * newnum, bignum * oldnum) { if ((oldnum->sig_digs) > 0L) { newnum->power = oldnum->power; newnum->sig_digs = ((oldnum->sig_digs > newnum->precision) ? (newnum->precision) : (oldnum->sig_digs)); long int i; for (i = 0L; i < newnum->sig_digs; i++) { newnum->digits[(int)i] = oldnum->digits[(int)i]; } } else { bignum_reset(newnum); } return; } // Use printf to print the number one digit at a time. There are a few cases: // power > significant digits: pad end with zeros // significant digits > power: fractional digit (non-integer) // power is negative: total value less than 1 // The second argument is the maximum number of significant digits to print. // If it's zero, then all available digits will be printed, maxing out at // the precision of the number (the total amount is could possibly store). // Note that this is different from total digits printed: zeroes after a // decimal point but before the first significant digit don't count, and we // make sure we print at least the integral part of the number (we only // chop off fractional portions). __host__ void bignum_print(bignum * numval, long int maxdigits) { long int i; long int limit = numval->sig_digs; if (numval->sig_digs == 0L) { printf("0"); } else { if ((maxdigits > 0L) && (maxdigits < numval->sig_digs)) { limit = maxdigits; } if (numval->power < 0L) { printf("0."); for (i = 1L; i < (-1L * (numval->power)); i++) { printf("0"); } for (i = 0L; i < limit; i++) { printf("%d", (int)(numval->digits[(int)i])); } } else if (numval->sig_digs >(numval->power + 1L)) { for (i = 0L; i <= numval->power; i++) { printf("%d", (int)(numval->digits[(int)i])); } if (limit >(numval->power + 1L)) { printf("."); } for (i = (numval->power + 1L); i < limit; i++) { printf("%d", (int)(numval->digits[(int)i])); } } else { for (i = 0L; i < numval->sig_digs; i++) { printf("%d", (int)(numval->digits[(int)i])); } } if ((numval->power > 0L) && ((numval->power + 1L) > numval->sig_digs)) { for (i = 0L; i < ((numval->power + 1L) - numval->sig_digs); i++) { printf("0"); } } } fflush(stdout); return; } // Adds two bignums together and stores the result. Uses the functions to // reset and set the location of the result internally, so current contents of // result operand will be overwritten. 
Like bignum_set_int, returns 1 if // addition was successful or 0 if an error occurred. A special shortcut is // taken if either (or both) of the operands are zero. Note that it is possible // for large additions to cause underflow to zero. In that case, special care is // taken to make sure the proper input operand is used. Note that we assume the // precision of all three operands is the same. If it's not, something terrible // like a seg fault or incorrect answer will probably occur. Most importantly, // the result operand CANNOT be the same as one of the input operands, since // the result is clobbered immediately and used as a scratchpad. Note that this // is also unsigned addition: not only does it not accept negative numbers, it // also doesn't do subtraction (which, for that matter, isn't commutative). __host__ int bignum_add(bignum * resultnum, bignum * leftnum, bignum * rightnum) { bignum_reset(resultnum); if ((leftnum->sig_digs == 0L) && (rightnum->sig_digs > 0L)) { bignum_set(resultnum, rightnum); return 1; } else if ((rightnum->sig_digs == 0L) && (leftnum->sig_digs > 0L)) { bignum_set(resultnum, leftnum); return 1; } else if ((leftnum->sig_digs == 0L) && (rightnum->sig_digs == 0L)) { return 1; } else { // First check for overshift: if the larger number's power is too much // bigger than the smaller number's, the smaller will be completely lost, // and we'll just end up with the large number as the result. if ((((leftnum->power - rightnum->power) > 0) && ((leftnum->power - rightnum->power) > resultnum->precision))) { bignum_set(resultnum, leftnum); return 1; } if ((((rightnum->power - leftnum->power) > 0) && ((rightnum->power - leftnum->power) > resultnum->precision))) { bignum_set(resultnum, rightnum); return 1; } // Next, shift the smaller operand to match the larger one by copying // it into the result operand as a partial sum. Also copy over the // power and total significant digits into the result. bignum * bigger; bignum * smaller; if ((leftnum->power - rightnum->power) >= 0L) { bigger = leftnum; smaller = rightnum; } else { bigger = rightnum; smaller = leftnum; } long int difference = bigger->power - smaller->power; long int startdigit = smaller->sig_digs + difference; long int transfertotal = smaller->sig_digs; if (startdigit > resultnum->precision) { startdigit = resultnum->precision - difference; transfertotal = startdigit; } long int startdigitcopy = startdigit; startdigit--; long int i; for (i = 0L; i < transfertotal; i++) { if ((startdigit - difference) >= 0L) { resultnum->digits[(int)startdigit] = smaller->digits[(int)(startdigit - difference)]; } startdigit--; } // Now the main addition loop: loop through each digit and add it. // The carry from the previous digit will add to the current one. // Note that we detect any trailing zeros to take from the sig_digs. 
// Also, copy over the power and significant digits resultnum->power = bigger->power; resultnum->sig_digs = startdigitcopy; if (bigger->sig_digs > resultnum->sig_digs) { resultnum->sig_digs = bigger->sig_digs; startdigitcopy = resultnum->sig_digs; } int trailingzeros = 1; long int zerocount = 0L; char carry = 0; for (i = 0L; i < resultnum->sig_digs; i++) { resultnum->digits[(int)(startdigitcopy - i - 1L)] += (bigger->digits[(int)(startdigitcopy - i - 1L)] + carry); if (resultnum->digits[(int)(startdigitcopy - i - 1L)] >= 10) { resultnum->digits[(int)(startdigitcopy - i - 1L)] -= 10; carry = 1; } else { carry = 0; } if (trailingzeros == 1) { if (resultnum->digits[(int)(startdigitcopy - i - 1L)] == '\0') { zerocount++; } else { trailingzeros = 0; } } } // If we've got trailing zeros, subtract them from the final count of // sig_digs. Also, if we have a carry, we need to shift everything... resultnum->sig_digs -= zerocount; if (carry > 0) { transfertotal = resultnum->sig_digs; if (transfertotal == resultnum->precision) { transfertotal--; } startdigitcopy = transfertotal - 1L; for (i = 0L; i < transfertotal; i++) { if (startdigitcopy >= 0L) { resultnum->digits[(int)(startdigitcopy + 1L)] = resultnum->digits[(int)startdigitcopy]; } else if ((startdigitcopy + 1L) >= 0L) { resultnum->digits[(int)(startdigitcopy + 1L)] = 0; } startdigitcopy--; } resultnum->digits[0] = carry; resultnum->power++; resultnum->sig_digs++; } if (resultnum->sig_digs > resultnum->precision) { resultnum->sig_digs = resultnum->precision; } return 1; } } // A convenience wrapper that temporarily creates a new bignum out of the // given integer, calls bignum_add with it and the other operand, and deletes // the temporary bignum before exiting. Any problems that bignum_add encounters // are passed back up through this function and returned to the caller. __host__ int bignum_add_int(bignum * resultnum, bignum * leftnum, long int rightint) { bignum_reset(resultnum); if ((rightint == 0L) && (leftnum->sig_digs > 0L)) { bignum_set(resultnum, leftnum); return 1; } else if ((leftnum->sig_digs == 0L) && (rightint > 0L)) { return bignum_set_int(resultnum, rightint); } else if ((leftnum->sig_digs == 0L) && (rightint == 0L)) { return 1; } else { bignum * tempnum = bignum_init(resultnum->precision); if (tempnum == 0) { return 0; } if (bignum_set_int(tempnum, rightint) == 0) { bignum_clear(tempnum); return 0; } int retval = bignum_add(resultnum, leftnum, tempnum); bignum_clear(tempnum); return retval; } } // Multiplies two bignums together and stores the result. Like add, uses // functions to reset and set the location of the result, and returns 1 upon // success or 0 if an error occurred. A special shortcut is taken if either // operand is zero, since the result will thus also be zero. Note that we assume // the precision of all three operands is the same. If it's not, something // terrible like a seg fault or incorrect answer will probably occur. Most // importantly, the result operand CANNOT be the same as one of the input // operands, since the result is clobbered immediately and used as a scratchpad. // Also, note that this is unsigned: it assumes both operands are positive. 
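// For example: 1.5 is stored as digits {1,5} with power 0 and sig_digs 2,
// while 30 is stored as digits {3} with power 1 and sig_digs 1. Multiplying
// them yields digits {4,5}, power 1, sig_digs 2, which reads as 4.5 x 10^1 = 45.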
__host__ int bignum_mult(bignum * resultnum, bignum * leftnum, bignum * rightnum) { bignum_reset(resultnum); if ((leftnum->sig_digs == 0L) || (rightnum->sig_digs == 0L)) { return 1; } else { // Initialize the scratchpad and find the digit limits char * temp_word = (char *)calloc((int)(2L * (resultnum->precision)), sizeof(char)); if (temp_word == 0) { return 0; } bignum * bigger; bignum * smaller; if (((signed long int)leftnum->sig_digs - (signed long int)rightnum->sig_digs) >= 0L) { bigger = leftnum; smaller = rightnum; } else if ((rightnum->sig_digs - leftnum->sig_digs) > 0L) { bigger = rightnum; smaller = leftnum; } long int bigstart = (bigger->sig_digs) - 1L; long int smallstart = (smaller->sig_digs) - 1L; long int bigcounter, smallcounter; char carry = 0; // Perform the shift-addition loop. We choose to loop over each // digit of the smaller number for fewer overall iterations. If // the current bigloop has a zero, we can just skip that iteration. // Also, record the final carry, power, and sig_digs values. for (bigcounter = 0L; bigcounter < (smaller->sig_digs); bigcounter++) { if (smaller->digits[(int)(smallstart - bigcounter)] != '\0') { carry = 0; for (smallcounter = 0L; smallcounter < (bigger->sig_digs); smallcounter++) { temp_word[(int)((2L * (resultnum->precision)) - smallcounter - bigcounter - 1L)] += (carry + (smaller->digits[(int)(smallstart - bigcounter)] * bigger->digits[(int)(bigstart - smallcounter)])); carry = temp_word[(int)((2L * (resultnum->precision)) - smallcounter - bigcounter - 1L)] / 10; temp_word[(int)((2L * (resultnum->precision)) - smallcounter - bigcounter - 1L)] %= 10; } temp_word[(int)((2L * (resultnum->precision)) - bigcounter - (bigger->sig_digs) - 1L)] = carry; } } resultnum->power = ((bigger->power) + (smaller->power)); resultnum->sig_digs = ((bigger->sig_digs) + (smaller->sig_digs)); // Adjust for lack of a final carry or trailing zeros. if (carry < 1) { (resultnum->sig_digs)--; (resultnum->power)--; } (resultnum->power)++; int trailingzeros = 1; long int zerocount = 0L; long int i = (2L * (resultnum->precision) - 1L); while (trailingzeros == 1) { if (temp_word[(int)i] == '\0') { zerocount++; } else { trailingzeros = 0; } i--; } resultnum->sig_digs -= zerocount; if ((resultnum->sig_digs) > (resultnum->precision)) { resultnum->sig_digs = (resultnum->precision); } // Finally, copy from the temp word into the result, taking into // account any digits we may lose due to precision. long int tempstart = (2L * (resultnum->precision)) - ((bigger->sig_digs) + (smaller->sig_digs)); if (carry < 1) { tempstart++; } for (i = 0L; i < (resultnum->sig_digs); i++) { resultnum->digits[(int)i] = temp_word[(int)(tempstart + i)]; } free(temp_word); return 1; } } // Like bignum_add_int, a convenience wrapper that creates a temporary bignum // out of the integer and passes it to bignum_mult. Any problems encountered // in client functions are passed back up to the original caller. __host__ int bignum_mult_int(bignum * resultnum, bignum * leftnum, long int rightint) { bignum_reset(resultnum); if ((leftnum->sig_digs == 0L) || (rightint == 0L)) { return 1; } else { bignum * tempnum = bignum_init(resultnum->precision); if (tempnum == 0) { return 0; } if (bignum_set_int(tempnum, rightint) == 0) { bignum_clear(tempnum); return 0; } int retval = bignum_mult(resultnum, leftnum, tempnum); bignum_clear(tempnum); return retval; } } // Divides two bignums. Taken in terms of a fraction, leftnum is the numerator // and rightnum is the denominator. 
Performs an explicit check to make sure // the denominator is not zero, and returns 0 (an error) if it is. Returns 1 upon // success or 0 if an error occurs. A special shortcut is taken if the numerator is // zero. Note that we assume the precision of all three operands is the same. If it's // not, something terrible like a seg fault or incorrect answer will probably occur. // Most importantly, the result operand CANNOT be the same as one of the input // operands, since the result is clobbered immediately and used as a scratchpad. // Also, note that this is unsigned: it assumes both operands are positive. __host__ int bignum_divide(bignum * resultnum, bignum * numerator, bignum * denominator) { bignum_reset(resultnum); if (denominator->sig_digs == 0L) { return 0; } else if (numerator->sig_digs == 0L) { return 1; } else { // Initialize the scratchpad and initially copy the numerator into it. // Also initialize the result's power. char * temp_word = (char *)calloc((int)(2L * (resultnum->precision) + 2L), sizeof(char)); // May only need to be + 1L if (temp_word == 0) { return 0; } long int i; for (i = 0L; i < numerator->sig_digs; i++) { temp_word[(int)(i + 1L)] = numerator->digits[(int)i]; } resultnum->power = (numerator->power - denominator->power); long int sigdigctr = 0L; long int numeratorindex = 0L; // First see if we need to "shift" the numerator by comparing it. i = ((denominator->sig_digs) - 1L); int denom_bigger = 1; while ((i >= 0L) && (denom_bigger == 1)) { if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) > (temp_word[(int)((denominator->sig_digs) - i)])) { i = 0L; } else if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) < (temp_word[(int)((denominator->sig_digs) - i)])) { denom_bigger = 0; } else if (((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) == (temp_word[(int)((denominator->sig_digs) - i)])) && (i == 0L)) { denom_bigger = 0; } i--; } if (denom_bigger == 1) { numeratorindex++; (resultnum->power)--; } // Now the main division loop. Note that there's two ways to terminate: // either we've filled the entire precision of the result word and are // forced to truncate our result, or our answer divides exactly. In the // second case, once we've exhausted the numerator's significant digits // and our temp word contains nothing but zeros, we can end early since // all subsequent iterations would contribute only zeros as well. Note // that special care will be taken to detect extra zeros at the end of // the result so that the sig_digs is recorded correctly. Also, we don't // round, we truncate, which doesn't minimize error. int nonzero = 1; while ((sigdigctr < (resultnum->precision)) && (nonzero == 1)) { // First run the subtraction loop. char current_digit = 0; int numer_bigger = 1; while (numer_bigger == 1) { // To subtract, first run a comparison to see if the numerator // is bigger. If it is, increment the counter and subtract. 
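// Concretely, the window being compared is temp_word[numeratorindex] through
// temp_word[numeratorindex + denominator->sig_digs], where the leading slot
// holds any remainder carried over from the previous digit position. A nonzero
// leading slot means the window is automatically larger than the denominator;
// otherwise the digits are compared most-significant first. The subtraction
// repeats until the window drops below the denominator, and the number of
// successful subtractions becomes the next quotient digit.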
i = ((denominator->sig_digs) - 1L); denom_bigger = 1; if (temp_word[(int)numeratorindex] > 0) { denom_bigger = 0; } while ((i >= 0L) && (denom_bigger == 1)) { if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) > (temp_word[(int)((denominator->sig_digs) + numeratorindex - i)])) { i = 0L; } else if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) < (temp_word[(int)((denominator->sig_digs) + numeratorindex - i)])) { denom_bigger = 0; } else if (((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) == (temp_word[(int)((denominator->sig_digs) + numeratorindex - i)])) && (i == 0L)) { denom_bigger = 0; } i--; } if (denom_bigger == 1) { numer_bigger = 0; } // Increment counter and perform subtraction loop. if (numer_bigger == 1) { current_digit++; for (i = 0L; i < (denominator->sig_digs); i++) { temp_word[(int)((denominator->sig_digs) + numeratorindex - i)] -= (denominator->digits[ (int)((denominator->sig_digs) - i - 1L)]); if ((temp_word[(int)((denominator->sig_digs) + numeratorindex - i)]) < 0) { temp_word[(int)((denominator->sig_digs) + numeratorindex - i)] += 10L; (temp_word[(int)((denominator->sig_digs) + numeratorindex - i - 1L)]) -= 1L; } } } } // If we're past all of the numerator's significant digits, run // zero detection on it to see if we can end early. if (sigdigctr > (numerator->sig_digs)) { // May only need to be >= long int zerocounter = 0L; i = 0L; while ((i == zerocounter) && (i <= (denominator->sig_digs))) { if ((temp_word[(int)(numeratorindex + i)]) < 1) { zerocounter++; } i++; } if (zerocounter == ((denominator->sig_digs) + 1L)) { nonzero = 0; } } // Once we have obtained the proper digit in the result, save it. if (sigdigctr < resultnum->precision) { resultnum->digits[(int)sigdigctr] = current_digit; } sigdigctr++; numeratorindex++; } // Record the result's sig digs, taking care to detect trailing zeros. resultnum->sig_digs = sigdigctr; int trailingzeros = 1; long int zerocount = 0L; i = sigdigctr - 1L; while (trailingzeros == 1) { if (resultnum->digits[(int)i] == '\0') { zerocount++; } else { trailingzeros = 0; } i--; } (resultnum->sig_digs) -= zerocount; free(temp_word); return 1; } } // A convenience wrapper that creates a temporary bignum out of the integer. // Since division is not commutative, two wrappers are given. Any problems // encountered in client functions are passed back up to the original caller. __host__ int bignum_int_divide(bignum * resultnum, long int leftint, bignum * rightnum) { bignum_reset(resultnum); if (rightnum->sig_digs == 0L) { return 0; } else if (leftint == 0L) { return 1; } else { bignum * tempnum = bignum_init(resultnum->precision); if (tempnum == 0) { return 0; } if (bignum_set_int(tempnum, leftint) == 0) { bignum_clear(tempnum); return 0; } int retval = bignum_divide(resultnum, tempnum, rightnum); bignum_clear(tempnum); return retval; } } // A convenience wrapper that creates a temporary bignum out of the integer. // Since division is not commutative, two wrappers are given. Any problems // encountered in client functions are passed back up to the original caller. 
__host__ int bignum_divide_int(bignum * resultnum, bignum * leftnum, long int rightint) { bignum_reset(resultnum); if (rightint == 0L) { return 0; } else if (leftnum->sig_digs == 0L) { return 1; } else { bignum * tempnum = bignum_init(resultnum->precision); if (tempnum == 0) { return 0; } if (bignum_set_int(tempnum, rightint) == 0) { bignum_clear(tempnum); return 0; } int retval = bignum_divide(resultnum, leftnum, tempnum); bignum_clear(tempnum); return retval; } } // Create space for a bignum with the specified precision. // Technically, it's also initialized if we interpret having zero // significant digits as the number having a value of zero. __device__ bignum * bignum_init_gpu(long int precision) { bignum * temp_ptr = (bignum *)malloc(sizeof(bignum)); if (temp_ptr == 0) { return temp_ptr; } temp_ptr->digits = (char *)malloc((int)(precision * sizeof(char))); if ((temp_ptr->digits) == 0) { temp_ptr = 0; return temp_ptr; } int i; for (i = 0; i < precision; i++) { temp_ptr->digits[i] = '\0'; } temp_ptr->power = 0L; temp_ptr->sig_digs = 0L; temp_ptr->precision = precision; return temp_ptr; } // Resets a bignum's value to zero. memcpy isn't used because // why bring the string library into this just for this use? __device__ void bignum_reset_gpu(bignum * numval) { if ((numval->sig_digs) > 0L) { long int i; for (i = 0L; i < numval->precision; i++) { numval->digits[(int)i] = '\0'; } numval->power = 0L; numval->sig_digs = 0L; } return; } // Free memory used by a bignum when we're done with it __device__ void bignum_clear_gpu(bignum * oldnum) { free(oldnum->digits); free(oldnum); return; } // Set an instance of a bignum to an integer value. Note that if we can't // initialize the temp word we need for copying, we return false (value = 0). // We also assume that the number is non-negative since we only store // unsigned numbers. We assume the result is initialized/reset. Finally, // we handle zero specially by just resetting (again?) the result. Note that // we explicitly assume the number to convert fits within the max number of // digits. If we try to convert a number bigger than we can store, it won't work. __device__ int bignum_set_int_gpu(bignum * numval, long int intval) { if (intval > 0L) { // Separate out the individual digits (stored backwards) char * temp_word = (char *)malloc((int)(numval->precision * sizeof(char))); if (temp_word == 0) { return 0; } long int i; for (i = 0; i < numval->precision; i++) { temp_word[(int)i] = '\0'; } long int temp_int = intval; long int counter = 0L; while (temp_int > 0L) { temp_word[(int)counter] = (char)(temp_int % 10L); temp_int = temp_int / 10L; counter++; } // Detect any trailing zeros that we don't need to store numval->power = counter - 1L; long int leadingzeros = 0L; int hasleading = 1; while (hasleading == 1) { if (temp_word[(int)leadingzeros] != 0) { hasleading = 0; } else { leadingzeros++; } } // Store final result into actual bignum variable for (temp_int = 0L; temp_int < (counter - leadingzeros); temp_int++) { numval->digits[(int)temp_int] = temp_word[(int)(counter - temp_int - 1L)]; } numval->sig_digs = counter - leadingzeros; free(temp_word); return 1; } else { bignum_reset_gpu(numval); return 1; } } // Set an instance of a bignum to the value of another bignum. We don't assume // they're both the same precision; just use the precision of the new number. // We do assume that the new number has already been initialized, though. // strncpy is not used since it quits after seeing the first zero. 
__device__ void bignum_set_gpu(bignum * newnum, bignum * oldnum) { if ((oldnum->sig_digs) > 0L) { newnum->power = oldnum->power; newnum->sig_digs = ((oldnum->sig_digs > newnum->precision) ? (newnum->precision) : (oldnum->sig_digs)); long int i; for (i = 0L; i < newnum->sig_digs; i++) { newnum->digits[(int)i] = oldnum->digits[(int)i]; } } else { bignum_reset_gpu(newnum); } return; } // Adds two bignums together and stores the result. Uses the functions to // reset and set the location of the result internally, so current contents of // result operand will be overwritten. Like bignum_set_int, returns 1 if // addition was successful or 0 if an error occurred. A special shortcut is // taken if either (or both) of the operands are zero. Note that it is possible // for large additions to cause underflow to zero. In that case, special care is // taken to make sure the proper input operand is used. Note that we assume the // precision of all three operands is the same. If it's not, something terrible // like a seg fault or incorrect answer will probably occur. Most importantly, // the result operand CANNOT be the same as one of the input operands, since // the result is clobbered immediately and used as a scratchpad. Note that this // is also unsigned addition: not only does it not accept negative numbers, it // also doesn't do subtraction (which, for that matter, isn't commutative). __device__ int bignum_add_gpu(bignum * resultnum, bignum * leftnum, bignum * rightnum) { bignum_reset_gpu(resultnum); if ((leftnum->sig_digs == 0L) && (rightnum->sig_digs > 0L)) { bignum_set_gpu(resultnum, rightnum); return 1; } else if ((rightnum->sig_digs == 0L) && (leftnum->sig_digs > 0L)) { bignum_set_gpu(resultnum, leftnum); return 1; } else if ((leftnum->sig_digs == 0L) && (rightnum->sig_digs == 0L)) { return 1; } else { // First check for overshift: if the larger number's power is too much // bigger than the smaller number's, the smaller will be completely lost, // and we'll just end up with the large number as the result. if ((((leftnum->power - rightnum->power) > 0) && ((leftnum->power - rightnum->power) > resultnum->precision))) { bignum_set_gpu(resultnum, leftnum); return 1; } if ((((rightnum->power - leftnum->power) > 0) && ((rightnum->power - leftnum->power) > resultnum->precision))) { bignum_set_gpu(resultnum, rightnum); return 1; } // Next, shift the smaller operand to match the larger one by copying // it into the result operand as a partial sum. Also copy over the // power and total significant digits into the result. bignum * bigger; bignum * smaller; if ((leftnum->power - rightnum->power) >= 0L) { bigger = leftnum; smaller = rightnum; } else { bigger = rightnum; smaller = leftnum; } long int difference = bigger->power - smaller->power; long int startdigit = smaller->sig_digs + difference; long int transfertotal = smaller->sig_digs; if (startdigit > resultnum->precision) { startdigit = resultnum->precision - difference; transfertotal = startdigit; } long int startdigitcopy = startdigit; startdigit--; long int i; for (i = 0L; i < transfertotal; i++) { if ((startdigit - difference) >= 0L) { resultnum->digits[(int)startdigit] = smaller->digits[(int)(startdigit - difference)]; } startdigit--; } // Now the main addition loop: loop through each digit and add it. // The carry from the previous digit will add to the current one. // Note that we detect any trailing zeros to take from the sig_digs. 
// Also, copy over the power and significant digits resultnum->power = bigger->power; resultnum->sig_digs = startdigitcopy; if (bigger->sig_digs > resultnum->sig_digs) { resultnum->sig_digs = bigger->sig_digs; startdigitcopy = resultnum->sig_digs; } int trailingzeros = 1; long int zerocount = 0L; char carry = 0; for (i = 0L; i < resultnum->sig_digs; i++) { resultnum->digits[(int)(startdigitcopy - i - 1L)] += (bigger->digits[(int)(startdigitcopy - i - 1L)] + carry); if (resultnum->digits[(int)(startdigitcopy - i - 1L)] >= 10) { resultnum->digits[(int)(startdigitcopy - i - 1L)] -= 10; carry = 1; } else { carry = 0; } if (trailingzeros == 1) { if (resultnum->digits[(int)(startdigitcopy - i - 1L)] == '\0') { zerocount++; } else { trailingzeros = 0; } } } // If we've got trailing zeros, subtract them from the final count of // sig_digs. Also, if we have a carry, we need to shift everything... resultnum->sig_digs -= zerocount; if (carry > 0) { transfertotal = resultnum->sig_digs; if (transfertotal == resultnum->precision) { transfertotal--; } startdigitcopy = transfertotal - 1L; for (i = 0L; i < transfertotal; i++) { if (startdigitcopy >= 0L) { resultnum->digits[(int)(startdigitcopy + 1L)] = resultnum->digits[(int)startdigitcopy]; } else if ((startdigitcopy + 1L) >= 0L) { resultnum->digits[(int)(startdigitcopy + 1L)] = '\0'; } startdigitcopy--; } resultnum->digits[0] = carry; resultnum->power++; resultnum->sig_digs++; } if (resultnum->sig_digs > resultnum->precision) { resultnum->sig_digs = resultnum->precision; } return 1; } } // A convenience wrapper that temporarily creates a new bignum out of the // given integer, calls bignum_add with it and the other operand, and deletes // the temporary bignum before exiting. Any problems that bignum_add encounters // are passed back up through this function and returned to the caller. __device__ int bignum_add_int_gpu(bignum * resultnum, bignum * leftnum, long int rightint) { bignum_reset_gpu(resultnum); if ((rightint == 0L) && (leftnum->sig_digs > 0L)) { bignum_set_gpu(resultnum, leftnum); return 1; } else if ((leftnum->sig_digs == 0L) && (rightint > 0L)) { return bignum_set_int_gpu(resultnum, rightint); } else if ((leftnum->sig_digs == 0L) && (rightint == 0L)) { return 1; } else { bignum * tempnum = bignum_init_gpu(resultnum->precision); if (tempnum == 0) { return 0; } if (bignum_set_int_gpu(tempnum, rightint) == 0) { bignum_clear_gpu(tempnum); return 0; } int retval = bignum_add_gpu(resultnum, leftnum, tempnum); bignum_clear_gpu(tempnum); return retval; } } // Multiplies two bignums together and stores the result. Like add, uses // functions to reset and set the location of the result, and returns 1 upon // success or 0 if an error occurred. A special shortcut is taken if either // operand is zero, since the result will thus also be zero. Note that we assume // the precision of all three operands is the same. If it's not, something // terrible like a seg fault or incorrect answer will probably occur. Most // importantly, the result operand CANNOT be the same as one of the input // operands, since the result is clobbered immediately and used as a scratchpad. // Also, note that this is unsigned: it assumes both operands are positive. 
__device__ int bignum_mult_gpu(bignum * resultnum, bignum * leftnum, bignum * rightnum) { bignum_reset_gpu(resultnum); if ((leftnum->sig_digs == 0L) || (rightnum->sig_digs == 0L)) { return 1; } else { // Initialize the scratchpad and find the digit limits char * temp_word = (char *)malloc((int)(2L * (resultnum->precision) * sizeof(char))); if (temp_word == 0) { return 0; } long int i; for (i = 0; i < (2L * resultnum->precision); i++) { temp_word[(int)i] = '\0'; } bignum * bigger; bignum * smaller; if (((signed long int)leftnum->sig_digs - (signed long int)rightnum->sig_digs) >= 0L) { bigger = leftnum; smaller = rightnum; } else if ((rightnum->sig_digs - leftnum->sig_digs) > 0L) { bigger = rightnum; smaller = leftnum; } long int bigstart = (bigger->sig_digs) - 1L; long int smallstart = (smaller->sig_digs) - 1L; long int bigcounter, smallcounter; char carry = 0; // Perform the shift-addition loop. We choose to loop over each // digit of the smaller number for fewer overall iterations. If // the current bigloop has a zero, we can just skip that iteration. // Also, record the final carry, power, and sig_digs values. for (bigcounter = 0L; bigcounter < (smaller->sig_digs); bigcounter++) { if (smaller->digits[(int)(smallstart - bigcounter)] != '\0') { carry = 0; for (smallcounter = 0L; smallcounter < (bigger->sig_digs); smallcounter++) { temp_word[(int)((2L * (resultnum->precision)) - smallcounter - bigcounter - 1L)] += (carry + (smaller->digits[(int)(smallstart - bigcounter)] * bigger->digits[(int)(bigstart - smallcounter)])); carry = temp_word[(int)((2L * (resultnum->precision)) - smallcounter - bigcounter - 1L)] / 10; temp_word[(int)((2L * (resultnum->precision)) - smallcounter - bigcounter - 1L)] %= 10; } temp_word[(int)((2L * (resultnum->precision)) - bigcounter - (bigger->sig_digs) - 1L)] = carry; } } resultnum->power = ((bigger->power) + (smaller->power)); resultnum->sig_digs = ((bigger->sig_digs) + (smaller->sig_digs)); // Adjust for lack of a final carry or trailing zeros. if (carry < 1) { (resultnum->sig_digs)--; (resultnum->power)--; } (resultnum->power)++; int trailingzeros = 1; long int zerocount = 0L; i = (2L * (resultnum->precision) - 1L); while (trailingzeros == 1) { if (temp_word[(int)i] == '\0') { zerocount++; } else { trailingzeros = 0; } i--; } resultnum->sig_digs -= zerocount; if ((resultnum->sig_digs) > (resultnum->precision)) { resultnum->sig_digs = (resultnum->precision); } // Finally, copy from the temp word into the result, taking into // account any digits we may lose due to precision. long int tempstart = (2L * (resultnum->precision)) - ((bigger->sig_digs) + (smaller->sig_digs)); if (carry < 1) { tempstart++; } for (i = 0L; i < (resultnum->sig_digs); i++) { resultnum->digits[(int)i] = temp_word[(int)(tempstart + i)]; } free(temp_word); return 1; } } // Like bignum_add_int, a convenience wrapper that creates a temporary bignum // out of the integer and passes it to bignum_mult. Any problems encountered // in client functions are passed back up to the original caller. __device__ int bignum_mult_int_gpu(bignum * resultnum, bignum * leftnum, long int rightint) { bignum_reset_gpu(resultnum); if ((leftnum->sig_digs == 0L) || (rightint == 0L)) { return 1; } else { bignum * tempnum = bignum_init_gpu(resultnum->precision); if (tempnum == 0) { return 0; } if (bignum_set_int_gpu(tempnum, rightint) == 0) { bignum_clear_gpu(tempnum); return 0; } int retval = bignum_mult_gpu(resultnum, leftnum, tempnum); bignum_clear_gpu(tempnum); return retval; } } // Divides two bignums. 
Taken in terms of a fraction, leftnum is the numerator // and rightnum is the denominator. Performs an explicit check to make sure // the denominator is not zero, and returns 0 (an error) if it is. Returns 1 upon // success or 0 if an error occurs. A special shortcut is taken if the numerator is // zero. Note that we assume the precision of all three operands is the same. If it's // not, something terrible like a seg fault or incorrect answer will probably occur. // Most importantly, the result operand CANNOT be the same as one of the input // operands, since the result is clobbered immediately and used as a scratchpad. // Also, note that this is unsigned: it assumes both operands are positive. __device__ int bignum_divide_gpu(bignum * resultnum, bignum * numerator, bignum * denominator) { bignum_reset_gpu(resultnum); if (denominator->sig_digs == 0L) { return 0; } else if (numerator->sig_digs == 0L) { return 1; } else { // Initialize the scratchpad and initially copy the numerator into it. // Also initialize the result's power. char * temp_word = (char *)malloc((int)(2L * (resultnum->precision) + 2L * sizeof(char))); // May only need to be + 1L if (temp_word == 0) { return 0; } long int i; temp_word[0] = '\0'; for (i = 0L; i < numerator->sig_digs; i++) { temp_word[(int)(i + 1L)] = numerator->digits[(int)i]; } for (i = (1L + numerator->sig_digs); i < (2L * resultnum->precision + 2L); i++) { temp_word[(int)i] = '\0'; } resultnum->power = (numerator->power - denominator->power); long int sigdigctr = 0L; long int numeratorindex = 0L; // First see if we need to "shift" the numerator by comparing it. i = ((denominator->sig_digs) - 1L); int denom_bigger = 1; while ((i >= 0L) && (denom_bigger == 1)) { if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) > (temp_word[(int)((denominator->sig_digs) - i)])) { i = 0L; } else if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) < (temp_word[(int)((denominator->sig_digs) - i)])) { denom_bigger = 0; } else if (((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) == (temp_word[(int)((denominator->sig_digs) - i)])) && (i == 0L)) { denom_bigger = 0; } i--; } if (denom_bigger == 1) { numeratorindex++; (resultnum->power)--; } // Now the main division loop. Note that there's two ways to terminate: // either we've filled the entire precision of the result word and are // forced to truncate our result, or our answer divides exactly. In the // second case, once we've exhausted the numerator's significant digits // and our temp word contains nothing but zeros, we can end early since // all subsequent iterations would contribute only zeros as well. Note // that special care will be taken to detect extra zeros at the end of // the result so that the sig_digs is recorded correctly. Also, we don't // round, we truncate, which doesn't minimize error. int nonzero = 1; while ((sigdigctr < (resultnum->precision)) && (nonzero == 1)) { // First run the subtraction loop. char current_digit = 0; int numer_bigger = 1; while (numer_bigger == 1) { // To subtract, first run a comparison to see if the numerator // is bigger. If it is, increment the counter and subtract. 
i = ((denominator->sig_digs) - 1L); denom_bigger = 1; if (temp_word[(int)numeratorindex] > 0) { denom_bigger = 0; } while ((i >= 0L) && (denom_bigger == 1)) { if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) > (temp_word[(int)((denominator->sig_digs) + numeratorindex - i)])) { i = 0L; } else if ((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) < (temp_word[(int)((denominator->sig_digs) + numeratorindex - i)])) { denom_bigger = 0; } else if (((denominator->digits[(int)((denominator->sig_digs) - i - 1L)]) == (temp_word[(int)((denominator->sig_digs) + numeratorindex - i)])) && (i == 0L)) { denom_bigger = 0; } i--; } if (denom_bigger == 1) { numer_bigger = 0; } // Increment counter and perform subtraction loop. if (numer_bigger == 1) { current_digit++; for (i = 0L; i < (denominator->sig_digs); i++) { temp_word[(int)((denominator->sig_digs) + numeratorindex - i)] -= (denominator->digits[ (int)((denominator->sig_digs) - i - 1L)]); if ((temp_word[(int)((denominator->sig_digs) + numeratorindex - i)]) < 0) { temp_word[(int)((denominator->sig_digs) + numeratorindex - i)] += 10L; (temp_word[(int)((denominator->sig_digs) + numeratorindex - i - 1L)]) -= 1L; } } } } // If we're past all of the numerator's significant digits, run // zero detection on it to see if we can end early. if (sigdigctr > (numerator->sig_digs)) { // May only need to be >= long int zerocounter = 0L; i = 0L; while ((i == zerocounter) && (i <= (denominator->sig_digs))) { if ((temp_word[(int)(numeratorindex + i)]) < 1) { zerocounter++; } i++; } if (zerocounter == ((denominator->sig_digs) + 1L)) { nonzero = 0; } } // Once we have obtained the proper digit in the result, save it. if (sigdigctr < resultnum->precision) { resultnum->digits[(int)sigdigctr] = current_digit; } sigdigctr++; numeratorindex++; } // Record the result's sig digs, taking care to detect trailing zeros. resultnum->sig_digs = sigdigctr; int trailingzeros = 1; long int zerocount = 0L; i = sigdigctr - 1L; while (trailingzeros == 1) { if (resultnum->digits[(int)i] == '\0') { zerocount++; } else { trailingzeros = 0; } i--; } (resultnum->sig_digs) -= zerocount; free(temp_word); return 1; } } // A convenience wrapper that creates a temporary bignum out of the integer. // Since division is not commutative, two wrappers are given. Any problems // encountered in client functions are passed back up to the original caller. __device__ int bignum_int_divide_gpu(bignum * resultnum, long int leftint, bignum * rightnum) { bignum_reset_gpu(resultnum); if (rightnum->sig_digs == 0L) { return 0; } else if (leftint == 0L) { return 1; } else { bignum * tempnum = bignum_init_gpu(resultnum->precision); if (tempnum == 0) { return 0; } if (bignum_set_int_gpu(tempnum, leftint) == 0) { bignum_clear_gpu(tempnum); return 0; } int retval = bignum_divide_gpu(resultnum, tempnum, rightnum); bignum_clear_gpu(tempnum); return retval; } } // A convenience wrapper that creates a temporary bignum out of the integer. // Since division is not commutative, two wrappers are given. Any problems // encountered in client functions are passed back up to the original caller. 
__device__ int bignum_divide_int_gpu(bignum * resultnum, bignum * leftnum, long int rightint) { bignum_reset_gpu(resultnum); if (rightint == 0L) { return 0; } else if (leftnum->sig_digs == 0L) { return 1; } else { bignum * tempnum = bignum_init_gpu(resultnum->precision); if (tempnum == 0) { return 0; } if (bignum_set_int_gpu(tempnum, rightint) == 0) { bignum_clear_gpu(tempnum); return 0; } int retval = bignum_divide_gpu(resultnum, leftnum, tempnum); bignum_clear_gpu(tempnum); return retval; } }
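// ---------------------------------------------------------------------------
// Usage sketch (not part of the original library): a minimal illustration of
// how the __device__ bignum routines above might be driven from a kernel.
// Everything here beyond the bignum struct and the *_gpu functions defined in
// this file is hypothetical -- the kernel name, EXAMPLE_DIGITS, and the output
// layout were chosen only for the example. Each thread approximates
// 1/(threadIdx + 2) to EXAMPLE_DIGITS significant digits and writes the raw
// digits (values 0-9, not ASCII) plus the decimal power into its own slot.
// digits_out is assumed to hold gridDim.x*blockDim.x*EXAMPLE_DIGITS bytes and
// power_out one long int per thread. Because bignum_init_gpu uses device-side
// malloc, the device malloc heap may need to be enlarged before launch (e.g.
// via the runtime's DeviceSetLimit call for the malloc heap size).
// ---------------------------------------------------------------------------
#define EXAMPLE_DIGITS 32L

__global__ void bignum_reciprocal_example(char * digits_out, long int * power_out) {
    const int tid = blockIdx.x * blockDim.x + threadIdx.x;
    // Per-thread working storage, allocated from the device heap.
    bignum * result = bignum_init_gpu(EXAMPLE_DIGITS);
    if (result == 0) { return; }
    bignum * denom = bignum_init_gpu(EXAMPLE_DIGITS);
    if (denom == 0) { bignum_clear_gpu(result); return; }
    // result = 1 / (tid + 2), truncated to EXAMPLE_DIGITS significant digits.
    // For tid == 0 this yields digits {5}, power -1 (i.e. 0.5).
    if ((bignum_set_int_gpu(denom, (long int)(tid + 2)) == 1) &&
        (bignum_int_divide_gpu(result, 1L, denom) == 1)) {
        long int i;
        for (i = 0L; i < EXAMPLE_DIGITS; i++) {
            digits_out[(int)(tid * EXAMPLE_DIGITS + i)] =
                ((i < result->sig_digs) ? result->digits[(int)i] : 0);
        }
        power_out[tid] = result->power;
    }
    bignum_clear_gpu(denom);
    bignum_clear_gpu(result);
}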
16427e54b7707702fa86be027023dafbd4fd017e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Code to do one dimensional spiking model from Mayte's note */ #include <iostream> #include <cstdlib> #include <cmath> #include "parameters.hpp" #include "hip/hip_vector_types.h" #define CUDA_ERROR_CHECK #define CUDA_CALL( err) __cudaCall( err, __FILE__, __LINE__ ) #define CURAND_CALL( err) __curandCall( err, __FILE__, __LINE__) inline void __cudaCall( hipError_t err, const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK if ( hipSuccess != err ) { fprintf( stderr, "cudaCall() failed at %s:%i : %s\n", file, line, hipGetErrorString( err ) ); exit( -1 ); } #endif return; } inline void __cudaCheckError( const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK hipError_t err = hipGetLastError(); if ( hipSuccess != err ) { fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, hipGetErrorString( err ) ); exit( -1 ); } #endif } struct firing { float time; unsigned int index; unsigned short cross; }; __device__ float fun1( float t, float v0, float n0, float u0, float y0) { return v0*expf(-t/tau) +(beta_left*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h) -(beta_left*gh*tau*expf(-t/tau))/(tau-tau_h) -(gh*n0*tau_h*expf(-t/tau_h))/(tau-tau_h) +(gh*n0*tau_h*expf(-t/tau))/(tau-tau_h) +(gs*expf(-alpha*t)*(alpha*tau*u0-u0-alpha*t*y0+alpha*tau*y0+powf(alpha,2)*t*tau*y0))/powf(alpha*tau-1,2) -(gs*expf(-t/tau)*(alpha*tau*u0-u0+alpha*tau*y0))/powf(alpha*tau-1,2) -I+I*expf(-t/tau)-V_left; } __device__ float fun2( float t, float v0, float n0, float u0, float y0, float thresh) { return 1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(-v0+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*
-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*
tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*ta
u*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(-1.0/2.0)+(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*((tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0+tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0)*(n0+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau
_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centr
e*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/4.0))/(gamma_centre*tau) - thresh; } __device__ float fun3( float t, float v0, float n0, float u0, float y0, float thresh) { return v0*expf(-t/tau) +(beta_right*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h) -(beta_right*gh*tau*expf(-t/tau))/(tau-tau_h) -(gh*n0*tau_h*expf(-t/tau_h))/(tau-tau_h) +(gh*n0*tau_h*expf(-t/tau))/(tau-tau_h) +(gs*expf(-alpha*t)*(alpha*tau*u0-u0-alpha*t*y0+alpha*tau*y0+powf(alpha,2)*t*tau*y0))/powf(alpha*tau-1,2) -(gs*expf(-t/tau)*(alpha*tau*u0-u0+alpha*tau*y0))/powf(alpha*tau-1,2) -I+I*expf(-t/tau) - thresh; } __device__ float dfun1( float t, float v0, float n0, float u0, float y0) { return (beta_right*gh*exp(-t/tau))/(tau-tau_h)-(v0*exp(-t/tau))/tau-(I*exp(-t/tau))/tau-(beta_right*gh*exp(-t/tau_h))/(tau-tau_h)+(gh*n0*exp(-t/tau_h))/(tau-tau_h)-(gs*exp(-alpha*t)*(-tau*y0*alpha*alpha+y0*alpha))/powf(alpha*tau-1.0f,2.0f)+(gs*exp(-t/tau)*(alpha*tau*u0-u0+alpha*tau*y0))/(tau*powf(alpha*tau-1.0f,2.0f))-(alpha*gs*exp(-alpha*t)*(alpha*tau*u0-u0-alpha*t*y0+alpha*tau*y0+alpha*alpha*t*tau*y0))/powf(alpha*tau-1.0f,2.0f)-(gh*n0*tau_h*exp(-t/tau))/(tau*(tau-tau_h)); } __device__ float dfun2( float t, float v0, float n0, float u0, float y0) { return 
1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(I*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(-1.0/2.0)+(I*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(1.0/2.0))/tau+(I*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(1.0/2.0))/tau+(gs*u0*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(1.0/2.0))/tau+(I*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/gamma_centre-(beta_centre*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+gs*u0*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)-alpha*gs*y0*((exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))
/(tau*tau_h))*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)-(alpha*gs*y0*((exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0))/tau+(I*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-alpha*gs*y0*((exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*y0*((exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0))/tau-(gs*tau_h*u0*exp(-alpha*t)*(alpha*exp(alpha*t)-(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*u0*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(beta_centre*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/4.0))/(gamma_centre*tau*tau_h)-(I*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_
h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/4.0))/(gamma_centre*tau_h)-(beta_centre*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/4.0))/(gamma_centre*tau)+(beta_centre*tau*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau_h*(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0))+(beta_centre*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0))+(beta_centre*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/4.0))/(gamma_centre*tau*tau_h)+(alpha*gs*tau_h*y0*((exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(alpha*gs*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*u0*exp(-alpha*t)*(alpha*exp(alpha*t)-(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(alpha*gs*tau_h*y0*((exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*
tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(alpha*gs*(tau_h*tau_h)*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(gs*tau*tau_h*u0*exp(-alpha*t)*(alpha*exp(alpha*t)-(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(alpha*gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(-1.0/2.0)+1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*((exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau-(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau_h-(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau+(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau_h+(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)+(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(-v0+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0
+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_
h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau
_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0)+(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*((tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)
*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0+tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0)*(beta_centre*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(-1.0/2.0)+(beta_centre*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(1.0/2.0))/tau_h+(beta_centre*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(1.0/2.0))/tau_h+(beta_centre*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+I*gamma_centre*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-gamma_centre*gs*u0*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau_h+alpha*gamma_centre*gs*y0*((exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(beta_centre*tau*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.
0))*(1.0/2.0))/(tau_h*(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*y0*((exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(gamma_centre*gs*tau*tau_h*u0*exp(-alpha*t)*(alpha*exp(alpha*t)-(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(alpha*gamma_centre*gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/4.0))/(gamma_centre*tau)+(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau_h+(tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau-(tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau_h-(tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau+(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(tau*tau_h*-2.0+tau*tau+ta
u_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)-(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(n0+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1
.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/4.0))/(gamma_centre*tau); } __device__ float dfun3( float t, float v0, float n0, float u0, float y0) { return (beta_right*gh*exp(-t/tau))/(tau-tau_h)-(v0*exp(-t/tau))/tau-(I*exp(-t/tau))/tau-(beta_right*gh*exp(-t/tau_h))/(tau-tau_h)+(gh*n0*exp(-t/tau_h))/(tau-tau_h)-(gs*exp(-alpha*t)*(-tau*y0*alpha*alpha+y0*alpha))/powf(alpha*tau-1.0f,2.0f)+(gs*exp(-t/tau)*(alpha*tau*u0-u0+alpha*tau*y0))/(tau*powf(alpha*tau-1.0f,2.0f))-(alpha*gs*exp(-alpha*t)*(alpha*tau*u0-u0-alpha*t*y0+alpha*tau*y0+alpha*alpha*t*tau*y0))/powf(alpha*tau-1.0f,2.0f)-(gh*n0*tau_h*exp(-t/tau))/(tau*(tau-tau_h)); } __device__ float eventTimeZone1( float v0, float n0, float u0, float y0) { float f, df, estimatedTime = 0.0f; f = fun1( estimatedTime, v0, n0, u0, y0); df = dfun1( estimatedTime, v0, n0, u0, y0); while (fabs(f)>tol) { estimatedTime -= f/df; f = fun1( estimatedTime, v0, n0, u0, y0); df = dfun1( estimatedTime, v0, n0, u0, y0); } return estimatedTime; } __device__ void eventTimeZone2( float v0, float n0, float u0, float y0, float *t, unsigned short *cross) { float f, df; float estimatedTimeLeft = 0.0f; float estimatedTimeRight = 0.0f; f = fun2( estimatedTimeLeft, v0, n0, u0, y0, V_left); df = dfun2( estimatedTimeLeft, v0, n0, u0, y0); while (fabs(f)>tol) { estimatedTimeLeft -= f/df; f = fun2( estimatedTimeLeft, v0, n0, u0, y0, V_left); df = dfun2( estimatedTimeLeft, v0, n0, u0, y0); } f = fun2( estimatedTimeRight, v0, n0, u0, y0, V_right); df = dfun2( estimatedTimeRight, v0, n0, u0, y0); while (fabs(f)>tol) { estimatedTimeRight -= f/df; f = fun2( estimatedTimeRight, v0, n0, u0, y0, V_right); df = dfun2( estimatedTimeRight, v0, n0, u0, y0); } *cross = 2; if (estimatedTimeRight<estimatedTimeLeft) { estimatedTimeLeft = estimatedTimeRight; *cross = 3; } *t = estimatedTimeLeft; } __device__ void eventTimeZone3( float v0, float n0, float u0, float y0, float *t, unsigned short *cross) { float f, df; float estimatedTimeLeft = 0.0f; float estimatedTimeRight = 0.0f; f = fun3( estimatedTimeLeft, v0, n0, u0, y0, V_right); df = dfun3( estimatedTimeLeft, v0, n0, u0, y0); while (fabs(f)>tol) { estimatedTimeLeft -= f/df; f = fun3( estimatedTimeLeft, v0, n0, u0, y0, V_right); df = dfun3( estimatedTimeLeft, v0, n0, u0, y0); } f = fun3( estimatedTimeRight, v0, n0, u0, y0, V_th); df = dfun3( estimatedTimeRight, 
v0, n0, u0, y0);
  while (fabs(f)>tol)
  {
    estimatedTimeRight -= f/df;
    f  = fun3( estimatedTimeRight, v0, n0, u0, y0, V_th);
    df = dfun3( estimatedTimeRight, v0, n0, u0, y0);
  }
  *cross = 4;
  if (estimatedTimeRight<estimatedTimeLeft)
  {
    estimatedTimeLeft = estimatedTimeRight;
    *cross = 5;
  }
  *t = estimatedTimeLeft;
}

__global__ void eventTimeZone1Kernel( const float4* pGlobal_state, const unsigned short* pGlobalZone, struct firing* pVal)
{
  unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
  bool correct_zone = (pGlobalZone[index] == 1);
  // Give every entry a large sentinel time so neurons handled by the other
  // zone kernels are well defined for the minimum reduction
  pVal[index].time = 100000.0f;
  if (correct_zone)
  {
    float4 local_state = pGlobal_state[index];
    pVal[index].time  = eventTimeZone1(local_state.x,local_state.y,local_state.z,local_state.w);
    pVal[index].index = index;
    pVal[index].cross = 1;
  }
}

__global__ void eventTimeZone2Kernel( const float4* pGlobal_state, const unsigned short* pGlobalZone, struct firing* pVal)
{
  unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
  bool correct_zone = (pGlobalZone[index] == 2);
  // Unpack the packed (v,n,u,y) state
  float4 local_state = pGlobal_state[index];
  float local_v = local_state.x;
  float local_n = local_state.y;
  float local_u = local_state.z;
  float local_y = local_state.w;
  float local_time = 1000000.0f;
  unsigned short cross;
  if (correct_zone)
  {
    eventTimeZone2(local_v,local_n,local_u,local_y,&local_time,&cross);
    pVal[index].time  = local_time;
    pVal[index].index = index;
    pVal[index].cross = cross;
  }
}

__global__ void eventTimeZone3Kernel( const float4* pGlobal_state, const unsigned short* pGlobalZone, struct firing* pVal)
{
  unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
  bool correct_zone = (pGlobalZone[index] == 3);
  float4 local_state = pGlobal_state[index];
  float local_v = local_state.x;
  float local_n = local_state.y;
  float local_u = local_state.z;
  float local_y = local_state.w;
  float local_time = 1000000.0f;
  unsigned short cross;
  if (correct_zone)
  {
    eventTimeZone3(local_v,local_n,local_u,local_y,&local_time,&cross);
    pVal[index].time  = local_time;
    pVal[index].index = index;
    pVal[index].cross = cross;
  }
}

__global__ void eventTimeZone4Kernel( const float *pRefractTime, const unsigned short* pGlobalZone, struct firing* pVal)
{
  unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
  bool correct_zone = (pGlobalZone[index] == 4);
  if (correct_zone)
  {
    pVal[index].time  = tau_r-pRefractTime[index];
    pVal[index].index = index;
    pVal[index].cross = 6;
  }
}

// Warp-level minimum of firing times via shuffle-down reduction
__inline__ __device__ struct firing warpReduceMin( struct firing val)
{
  float dummyTime;
  unsigned int dummyIndex;
  unsigned short dummyCross;
  for (int offset = warpSize/2; offset>0; offset/=2)
  {
    dummyTime  = __shfl_down( val.time, offset);
    dummyIndex = __shfl_down( val.index, offset);
    dummyCross = __shfl_down( val.cross, offset);
    if (dummyTime<val.time)
    {
      val.time  = dummyTime;
      val.index = dummyIndex;
      val.cross = dummyCross;
    }
  }
  return val;
}

// Block-level minimum: per-warp minima are staged in shared memory and
// reduced again by the first warp
__inline__ __device__ struct firing blockReduceMin( struct firing val)
{
  __shared__ struct firing shared[32];
  int lane = threadIdx.x % warpSize;
  int wid  = threadIdx.x / warpSize;
  val = warpReduceMin( val);
  if (lane==0)
  {
    shared[wid] = val;
  }
  __syncthreads();
  // Inactive lanes carry a large sentinel time so they cannot win the minimum
  val.time  = (threadIdx.x<blockDim.x/warpSize) ? shared[lane].time  : 1000000.0f;
  val.index = (threadIdx.x<blockDim.x/warpSize) ? shared[lane].index : 0;
  val.cross = (threadIdx.x<blockDim.x/warpSize) ? shared[lane].cross : 0;
  if (wid==0)
  {
    val = warpReduceMin( val);
  }
  return val;
}

__global__ void deviceReduceMinKernel( const struct firing* in, const unsigned int npts, struct firing* out)
{
  float time = 1000000.0f;
  struct firing dummy;
  struct firing val;
  val.time  = time;
  val.index = 0;
  val.cross = 0;
  // reduce multiple elements per thread
  for (int i=blockIdx.x*blockDim.x+threadIdx.x;i<npts;i+=blockDim.x*gridDim.x)
  {
    dummy = in[i];
    if (dummy.time < time)
    {
      val  = dummy;
      time = dummy.time;
    }
  }
  val = blockReduceMin( val);
  if (threadIdx.x==0)
  {
    out[blockIdx.x] = val;
  }
}

// Post-event state update: the body is left unimplemented here. The parameter
// list is assumed to be the packed float4 state, the zone array the body
// references, and the event time.
__global__ void updateZone1Kernel( float4* pGlobal_state, const unsigned short* pGlobalZone, float eventTime)
{
  unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
  bool correct_zone = (pGlobalZone[index] == 1);
  if (correct_zone)
  {
  }
}

int main( int argc , char *argv[])
{
  // Allocate memory
  float4* p_global_state;
  float* p_refract_time;
  struct firing* p_firing_val;
  struct firing* p_firing_val_temp;
  unsigned short* p_global_zone;
  unsigned int noThreads = 512;
  unsigned int noBlocks  = (N+noThreads-1)/noThreads;

  CUDA_CALL( hipMalloc( &p_global_state, N*sizeof(float4)));
  CUDA_CALL( hipMalloc( &p_refract_time, N*sizeof(float)));
  CUDA_CALL( hipMalloc( &p_global_zone, N*sizeof(unsigned short)));
  CUDA_CALL( hipMalloc( &p_firing_val, N*sizeof(firing)));
  CUDA_CALL( hipMalloc( &p_firing_val_temp, noBlocks*sizeof(firing)));

  // Assumption: InitialiseKernel fills the packed (v,n,u,y) state vector
  hipLaunchKernelGGL(( InitialiseKernel), dim3(noBlocks),dim3(noThreads), 0, 0, p_global_state);

  float finalTime   = 100.0f;
  float currentTime = 0.0f;
  // use pinned memory for this (see the commented sketch after main())
  struct firing min_event;

  while (currentTime<finalTime)
  {
    hipLaunchKernelGGL(( eventTimeZone1Kernel), dim3(noBlocks),dim3(noThreads), 0, 0, p_global_state, p_global_zone, p_firing_val);
    hipLaunchKernelGGL(( eventTimeZone2Kernel), dim3(noBlocks),dim3(noThreads), 0, 0, p_global_state, p_global_zone, p_firing_val);
    hipLaunchKernelGGL(( eventTimeZone3Kernel), dim3(noBlocks),dim3(noThreads), 0, 0, p_global_state, p_global_zone, p_firing_val);
    hipLaunchKernelGGL(( eventTimeZone4Kernel), dim3(noBlocks),dim3(noThreads), 0, 0, p_refract_time, p_global_zone, p_firing_val);
    // Find minimum spike time
    hipLaunchKernelGGL(( deviceReduceMinKernel), dim3(noBlocks),dim3(noThreads), 0, 0, p_firing_val, N, p_firing_val_temp);
    hipLaunchKernelGGL(( deviceReduceMinKernel), dim3(1),dim3(noThreads), 0, 0, p_firing_val_temp, noBlocks, p_firing_val_temp);
    // Update - assume transfer to page-locked memory
    // Assumed minimal completion: copy the global minimum event back, launch
    // the (still empty) update kernel and advance time to the event; the
    // per-cross-type handling remains to be written.
    CUDA_CALL( hipMemcpy( &min_event, p_firing_val_temp, sizeof(struct firing), hipMemcpyDeviceToHost));
    hipLaunchKernelGGL(( updateZone1Kernel), dim3(noBlocks),dim3(noThreads), 0, 0, p_global_state, p_global_zone, min_event.time);
    currentTime += min_event.time;
  }

  hipFree( p_global_state);
  hipFree( p_refract_time);
  hipFree( p_global_zone);
  hipFree( p_firing_val);
  hipFree( p_firing_val_temp);
}
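/*
 * Sketch (an assumption, not taken from this file): the "use pinned memory"
 * note in main() can be realised by staging the reduced minimum firing event
 * in page-locked host memory, so the per-iteration device-to-host copy can be
 * issued asynchronously. With standard HIP calls this would look roughly like:
 *
 *   struct firing* h_min_event;
 *   CUDA_CALL( hipHostMalloc( (void**)&h_min_event, sizeof(struct firing),
 *                             hipHostMallocDefault));
 *   ...
 *   CUDA_CALL( hipMemcpyAsync( h_min_event, p_firing_val_temp,
 *                              sizeof(struct firing),
 *                              hipMemcpyDeviceToHost, 0));
 *   CUDA_CALL( hipStreamSynchronize( 0));
 *   // h_min_event->time, ->index and ->cross then drive the update step
 *   ...
 *   CUDA_CALL( hipHostFree( h_min_event));
 *
 * The plain hipMemcpy used in main() above is the simpler synchronous stand-in.
 */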
16427e54b7707702fa86be027023dafbd4fd017e.cu
/* Code to do one dimensional spiking model from Mayte's note */ #include <iostream> #include <cstdlib> #include <cmath> #include "parameters.hpp" #include "vector_types.h" #define CUDA_ERROR_CHECK #define CUDA_CALL( err) __cudaCall( err, __FILE__, __LINE__ ) #define CURAND_CALL( err) __curandCall( err, __FILE__, __LINE__) inline void __cudaCall( cudaError err, const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK if ( cudaSuccess != err ) { fprintf( stderr, "cudaCall() failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } #endif return; } inline void __cudaCheckError( const char *file, const int line ) { #ifdef CUDA_ERROR_CHECK cudaError err = cudaGetLastError(); if ( cudaSuccess != err ) { fprintf( stderr, "cudaCheckError() failed at %s:%i : %s\n", file, line, cudaGetErrorString( err ) ); exit( -1 ); } #endif } struct firing { float time; unsigned int index; unsigned short cross; }; __device__ float fun1( float t, float v0, float n0, float u0, float y0) { return v0*expf(-t/tau) +(beta_left*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h) -(beta_left*gh*tau*expf(-t/tau))/(tau-tau_h) -(gh*n0*tau_h*expf(-t/tau_h))/(tau-tau_h) +(gh*n0*tau_h*expf(-t/tau))/(tau-tau_h) +(gs*expf(-alpha*t)*(alpha*tau*u0-u0-alpha*t*y0+alpha*tau*y0+powf(alpha,2)*t*tau*y0))/powf(alpha*tau-1,2) -(gs*expf(-t/tau)*(alpha*tau*u0-u0+alpha*tau*y0))/powf(alpha*tau-1,2) -I+I*expf(-t/tau)-V_left; } __device__ float fun2( float t, float v0, float n0, float u0, float y0, float thresh) { return 1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(-v0+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+t
au_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*t
au_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_ce
ntre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(-1.0/2.0)+(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*((tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0+tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0)*(n0+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(ta
u+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*t
au*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/4.0))/(gamma_centre*tau) - thresh; } __device__ float fun3( float t, float v0, float n0, float u0, float y0, float thresh) { return v0*expf(-t/tau) +(beta_right*gh*(tau-tau_h+tau_h*expf(-t/tau_h)))/(tau-tau_h) -(beta_right*gh*tau*expf(-t/tau))/(tau-tau_h) -(gh*n0*tau_h*expf(-t/tau_h))/(tau-tau_h) +(gh*n0*tau_h*expf(-t/tau))/(tau-tau_h) +(gs*expf(-alpha*t)*(alpha*tau*u0-u0-alpha*t*y0+alpha*tau*y0+powf(alpha,2)*t*tau*y0))/powf(alpha*tau-1,2) -(gs*expf(-t/tau)*(alpha*tau*u0-u0+alpha*tau*y0))/powf(alpha*tau-1,2) -I+I*expf(-t/tau) - thresh; } __device__ float dfun1( float t, float v0, float n0, float u0, float y0) { return (beta_right*gh*exp(-t/tau))/(tau-tau_h)-(v0*exp(-t/tau))/tau-(I*exp(-t/tau))/tau-(beta_right*gh*exp(-t/tau_h))/(tau-tau_h)+(gh*n0*exp(-t/tau_h))/(tau-tau_h)-(gs*exp(-alpha*t)*(-tau*y0*alpha*alpha+y0*alpha))/powf(alpha*tau-1.0f,2.0f)+(gs*exp(-t/tau)*(alpha*tau*u0-u0+alpha*tau*y0))/(tau*powf(alpha*tau-1.0f,2.0f))-(alpha*gs*exp(-alpha*t)*(alpha*tau*u0-u0-alpha*t*y0+alpha*tau*y0+alpha*alpha*t*tau*y0))/powf(alpha*tau-1.0f,2.0f)-(gh*n0*tau_h*exp(-t/tau))/(tau*(tau-tau_h)); } __device__ float dfun2( float t, float v0, float n0, float u0, float y0) { return 
1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)))*(I*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(-1.0/2.0)+(I*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(1.0/2.0))/tau+(I*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(1.0/2.0))/tau+(gs*u0*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(1.0/2.0))/tau+(I*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/gamma_centre-(beta_centre*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+gs*u0*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)-alpha*gs*y0*((exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))
/(tau*tau_h))*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)-(alpha*gs*y0*((exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0))/tau+(I*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-alpha*gs*y0*((exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*y0*((exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0))/tau-(gs*tau_h*u0*exp(-alpha*t)*(alpha*exp(alpha*t)-(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*u0*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(beta_centre*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/4.0))/(gamma_centre*tau*tau_h)-(I*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_
h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/4.0))/(gamma_centre*tau_h)-(beta_centre*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/4.0))/(gamma_centre*tau)+(beta_centre*tau*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau_h*(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0))+(beta_centre*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0))+(beta_centre*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/4.0))/(gamma_centre*tau*tau_h)+(alpha*gs*tau_h*y0*((exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(alpha*gs*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*u0*exp(-alpha*t)*(alpha*exp(alpha*t)-(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(alpha*gs*tau_h*y0*((exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*
tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(alpha*gs*(tau_h*tau_h)*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(gs*tau*tau_h*u0*exp(-alpha*t)*(alpha*exp(alpha*t)-(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(alpha*gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(-1.0/2.0)+1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*((exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau-(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau_h-(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau+(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau_h+(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)+(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(-v0+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0
+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*-2.0+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)+(I*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(I*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*(tau_h*tau_h)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(-gamma_centre*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+gamma_centre*tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(gs*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_
h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0)+(alpha*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*(1.0/2.0))/tau+(I*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(I*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gs*(tau_h*tau_h)*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))-(beta_centre*(tau_h*tau_h)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau
_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(alpha*gs*tau_h*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau-(alpha*gs*tau_h*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau+(gs*tau*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gs*(tau_h*tau_h)*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(gamma_centre*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))+(gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/2.0)+(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*((tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau*tau)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))+(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-(tau_h*tau_h)*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))-exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)
*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0+tau*tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*2.0)*(beta_centre*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(-1.0/2.0)+(beta_centre*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(1.0/2.0))/tau_h+(beta_centre*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(1.0/2.0))/tau_h+(beta_centre*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+I*gamma_centre*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-gamma_centre*gs*u0*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/tau_h+alpha*gamma_centre*gs*y0*((exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(beta_centre*tau*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.
0))*(1.0/2.0))/(tau_h*(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*y0*((exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(gamma_centre*gs*tau*tau_h*u0*exp(-alpha*t)*(alpha*exp(alpha*t)-(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)+(alpha*gamma_centre*gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/4.0))/(gamma_centre*tau)+(1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau_h+(tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau-(tau*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau_h-(tau_h*exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/tau+(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)-t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(tau*tau_h*-2.0+tau*tau+ta
u_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h)-(exp((t*tau*(-1.0/2.0)-t*tau_h*(1.0/2.0)+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*(1.0/2.0))/(tau*tau_h))*(n0+(beta_centre*tau*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+(beta_centre*tau*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*(tau*tau)*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0))/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(beta_centre*(tau*tau)*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(beta_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))+alpha*gamma_centre*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0+tau*tau_h*exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(t*tau+t*tau_h-tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*gamma_centre*gs*y0*((tau*tau)*(tau_h*tau_h)*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*4.0-tau*tau_h*exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))*1.0/pow(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0,2.0)*(-t*tau-t*tau_h+tau*tau_h*2.0+t*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+alpha*t*tau*tau_h*2.0)*2.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1
.0)*2.0)/(tau*tau_h*2.0+tau*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)+tau_h*sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-tau*tau-tau_h*tau_h-gamma_centre*gh*tau*tau_h*4.0)+(I*gamma_centre*tau*tau_h*(exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))-(gamma_centre*gs*tau*tau_h*u0*(exp((t*(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)*(1.0/2.0))/(tau*tau_h))-1.0)*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h-sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0)-(gamma_centre*gs*tau*tau_h*u0*exp(-alpha*t)*(exp(alpha*t)-exp((t*(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0))*(1.0/2.0))/(tau*tau_h)))*1.0/sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)*2.0)/(tau+tau_h+sqrt(tau*tau_h*-2.0+tau*tau+tau_h*tau_h+gamma_centre*gh*tau*tau_h*4.0)-alpha*tau*tau_h*2.0))*(1.0/4.0))/(gamma_centre*tau); } __device__ float dfun3( float t, float v0, float n0, float u0, float y0) { return (beta_right*gh*exp(-t/tau))/(tau-tau_h)-(v0*exp(-t/tau))/tau-(I*exp(-t/tau))/tau-(beta_right*gh*exp(-t/tau_h))/(tau-tau_h)+(gh*n0*exp(-t/tau_h))/(tau-tau_h)-(gs*exp(-alpha*t)*(-tau*y0*alpha*alpha+y0*alpha))/powf(alpha*tau-1.0f,2.0f)+(gs*exp(-t/tau)*(alpha*tau*u0-u0+alpha*tau*y0))/(tau*powf(alpha*tau-1.0f,2.0f))-(alpha*gs*exp(-alpha*t)*(alpha*tau*u0-u0-alpha*t*y0+alpha*tau*y0+alpha*alpha*t*tau*y0))/powf(alpha*tau-1.0f,2.0f)-(gh*n0*tau_h*exp(-t/tau))/(tau*(tau-tau_h)); } __device__ float eventTimeZone1( float v0, float n0, float u0, float y0) { float f, df, estimatedTime = 0.0f; f = fun1( estimatedTime, v0, n0, u0, y0); df = dfun1( estimatedTime, v0, n0, u0, y0); while (fabs(f)>tol) { estimatedTime -= f/df; f = fun1( estimatedTime, v0, n0, u0, y0); df = dfun1( estimatedTime, v0, n0, u0, y0); } return estimatedTime; } __device__ void eventTimeZone2( float v0, float n0, float u0, float y0, float *t, unsigned short *cross) { float f, df; float estimatedTimeLeft = 0.0f; float estimatedTimeRight = 0.0f; f = fun2( estimatedTimeLeft, v0, n0, u0, y0, V_left); df = dfun2( estimatedTimeLeft, v0, n0, u0, y0); while (fabs(f)>tol) { estimatedTimeLeft -= f/df; f = fun2( estimatedTimeLeft, v0, n0, u0, y0, V_left); df = dfun2( estimatedTimeLeft, v0, n0, u0, y0); } f = fun2( estimatedTimeRight, v0, n0, u0, y0, V_right); df = dfun2( estimatedTimeRight, v0, n0, u0, y0); while (fabs(f)>tol) { estimatedTimeRight -= f/df; f = fun2( estimatedTimeRight, v0, n0, u0, y0, V_right); df = dfun2( estimatedTimeRight, v0, n0, u0, y0); } *cross = 2; if (estimatedTimeRight<estimatedTimeLeft) { estimatedTimeLeft = estimatedTimeRight; *cross = 3; } *t = estimatedTimeLeft; } __device__ void eventTimeZone3( float v0, float n0, float u0, float y0, float *t, unsigned short *cross) { float f, df; float estimatedTimeLeft = 0.0f; float estimatedTimeRight = 0.0f; f = fun3( estimatedTimeLeft, v0, n0, u0, y0, V_right); df = dfun3( estimatedTimeLeft, v0, n0, u0, y0); while (fabs(f)>tol) { estimatedTimeLeft -= f/df; f = fun3( estimatedTimeLeft, v0, n0, u0, y0, V_right); df = dfun3( estimatedTimeLeft, v0, n0, u0, y0); } f = fun3( estimatedTimeRight, v0, n0, u0, y0, V_th); df = dfun3( estimatedTimeRight, 
v0, n0, u0, y0);
  while (fabs(f)>tol) {
    estimatedTimeRight -= f/df;
    f = fun3( estimatedTimeRight, v0, n0, u0, y0, V_th);
    df = dfun3( estimatedTimeRight, v0, n0, u0, y0);
  }
  *cross = 4;
  if (estimatedTimeRight<estimatedTimeLeft) {
    estimatedTimeLeft = estimatedTimeRight;
    *cross = 5;
  }
  *t = estimatedTimeLeft;
}

// Each zone kernel computes, for neurons currently in its zone, the time of the
// next boundary crossing and records it (with index and crossing type) in pVal.
__global__ void eventTimeZone1Kernel( const float4* pGlobal_state,
                                      const unsigned short* pGlobalZone,
                                      struct firing* pVal)
{
  unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
  bool correct_zone = (pGlobalZone[index] == 1);
  pVal[index].time = 100000.0f; // initialise this thread's entry so it cannot win the reduction by default
  if (correct_zone) {
    float4 local_state = pGlobal_state[index];
    pVal[index].time = eventTimeZone1(local_state.x,local_state.y,local_state.z,local_state.w);
    pVal[index].index = index;
    pVal[index].cross = 1;
  }
}

__global__ void eventTimeZone2Kernel( const float4* pGlobal_state,
                                      const unsigned short* pGlobalZone,
                                      struct firing* pVal)
{
  unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
  bool correct_zone = (pGlobalZone[index] == 2);
  // Unpack the packed float4 state (v, n, u, y).
  float4 local_state = pGlobal_state[index];
  float local_v = local_state.x;
  float local_n = local_state.y;
  float local_u = local_state.z;
  float local_y = local_state.w;
  float local_time = 1000000.0f;
  unsigned short cross;
  if (correct_zone) {
    eventTimeZone2(local_v,local_n,local_u,local_y,&local_time,&cross);
    pVal[index].time = local_time;
    pVal[index].index = index;
    pVal[index].cross = cross;
  }
}

__global__ void eventTimeZone3Kernel( const float4* pGlobal_state,
                                      const unsigned short* pGlobalZone,
                                      struct firing* pVal)
{
  unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
  bool correct_zone = (pGlobalZone[index] == 3);
  // Unpack the packed float4 state (v, n, u, y).
  float4 local_state = pGlobal_state[index];
  float local_v = local_state.x;
  float local_n = local_state.y;
  float local_u = local_state.z;
  float local_y = local_state.w;
  float local_time = 1000000.0f;
  unsigned short cross;
  if (correct_zone) {
    eventTimeZone3(local_v,local_n,local_u,local_y,&local_time,&cross);
    pVal[index].time = local_time;
    pVal[index].index = index;
    pVal[index].cross = cross;
  }
}

__global__ void eventTimeZone4Kernel( const float *pRefractTime,
                                      const unsigned short* pGlobalZone,
                                      struct firing* pVal)
{
  unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
  bool correct_zone = (pGlobalZone[index] == 4);
  if (correct_zone) {
    pVal[index].time = tau_r-pRefractTime[index];
    pVal[index].index = index;
    pVal[index].cross = 6;
  }
}

// Warp-level minimum over firing times, carrying index and crossing type along.
__inline__ __device__ struct firing warpReduceMin( struct firing val)
{
  float dummyTime;
  unsigned int dummyIndex;
  unsigned short dummyCross;
  for (int offset = warpSize/2; offset>0; offset/=2) {
    dummyTime = __shfl_down( val.time, offset);
    dummyIndex = __shfl_down( val.index, offset);
    dummyCross = __shfl_down( val.cross, offset);
    if (dummyTime<val.time) {
      val.time = dummyTime;
      val.index = dummyIndex;
      val.cross = dummyCross;
    }
  }
  return val;
}

__inline__ __device__ struct firing blockReduceMin( struct firing val)
{
  __shared__ struct firing shared[32];
  int lane = threadIdx.x % warpSize;
  int wid = threadIdx.x / warpSize;
  val = warpReduceMin( val);
  if (lane==0) {
    shared[wid] = val;
  }
  __syncthreads();
  // Threads beyond the number of warps load a large sentinel time so they
  // cannot win the minimum.
  val.time = (threadIdx.x<blockDim.x/warpSize) ? shared[lane].time : 1000000.0f;
  val.index = (threadIdx.x<blockDim.x/warpSize) ? shared[lane].index : 0;
  val.cross = (threadIdx.x<blockDim.x/warpSize) ? shared[lane].cross : 0;
  if (wid==0) {
    val = warpReduceMin( val);
  }
  return val;
}
// Grid-stride pass over all per-neuron firing records followed by a
// block-level reduction; each block writes its minimum to out[blockIdx.x].
__global__ void deviceReduceMinKernel( const struct firing* in,
                                       const unsigned int npts,
                                       struct firing* out)
{
  float time = 1000000.0f;
  struct firing val;
  val.time = time;
  val.index = 0;
  val.cross = 0;
  struct firing dummy;
  // reduce multiple elements per thread
  for (unsigned int i=blockIdx.x*blockDim.x+threadIdx.x; i<npts; i+=blockDim.x*gridDim.x) {
    dummy = in[i];
    if (dummy.time < time) {
      val = dummy;
      time = dummy.time;
    }
  }
  val = blockReduceMin( val);
  if (threadIdx.x==0) {
    out[blockIdx.x] = val;
  }
}

__global__ void updateZone1Kernel( float* p_global_v, float* p_global_n,
                                   float* p_global_u, float* p_global_y,
                                   const unsigned short* pGlobalZone,
                                   float eventTime)
{
  unsigned int index = threadIdx.x+blockDim.x*blockIdx.x;
  bool correct_zone = (pGlobalZone[index] == 1);
  if (correct_zone) {
    // Zone 1 state update over eventTime (body not implemented in this source).
  }
}

int main( int argc , char *argv[])
{
  // Allocate memory
  float4* p_global_state;
  float* p_refract_time;
  struct firing* p_firing_val;
  struct firing* p_firing_val_temp;
  unsigned short* p_global_zone;
  unsigned int noThreads = 512;
  unsigned int noBlocks = (N+noThreads-1)/noThreads;

  // Allocate memory
  CUDA_CALL( cudaMalloc( &p_global_state, N*sizeof(float4)));
  CUDA_CALL( cudaMalloc( &p_refract_time, N*sizeof(float)));
  CUDA_CALL( cudaMalloc( &p_global_zone, N*sizeof(unsigned short)));
  CUDA_CALL( cudaMalloc( &p_firing_val, N*sizeof(firing)));
  CUDA_CALL( cudaMalloc( &p_firing_val_temp, noBlocks*sizeof(firing)));

  // Assumption: InitialiseKernel fills the packed float4 state array
  // (v, n, u, y) used by the zone kernels above.
  InitialiseKernel<<<noBlocks,noThreads>>>( p_global_state);

  float finalTime = 100.0f;
  float currentTime = 0.0f;

  // use pinned memory for this
  while (currentTime<finalTime) {
    eventTimeZone1Kernel<<<noBlocks,noThreads>>>( p_global_state, p_global_zone, p_firing_val);
    eventTimeZone2Kernel<<<noBlocks,noThreads>>>( p_global_state, p_global_zone, p_firing_val);
    eventTimeZone3Kernel<<<noBlocks,noThreads>>>( p_global_state, p_global_zone, p_firing_val);
    eventTimeZone4Kernel<<<noBlocks,noThreads>>>( p_refract_time, p_global_zone, p_firing_val);

    // Find minimum spike time
    deviceReduceMinKernel<<<noBlocks,noThreads>>>( p_firing_val, N, p_firing_val_temp);
    deviceReduceMinKernel<<<1,noThreads>>>( p_firing_val_temp, noBlocks, p_firing_val_temp);

    // Update - assume transfer to page-locked memory. The dispatch on the
    // crossing type is not implemented here:
    //   updateZone1Kernel<<<noBlocks,noThreads>>> ( if (crossType!=
  }

  cudaFree( p_global_state);
  cudaFree( p_refract_time);
  cudaFree( p_global_zone);
  cudaFree( p_firing_val);
  cudaFree( p_firing_val_temp);
}
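// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original source): the event loop above
// notes that the reduced firing record should be staged through page-locked
// (pinned) host memory before the update step. Assuming the second
// deviceReduceMinKernel pass leaves the global minimum in d_min[0], and that
// h_min points to a buffer obtained with cudaMallocHost(), the copy could be
// as small as the helper below; d_min and h_min are hypothetical names.
void copyMinFiringToHost( const struct firing* d_min, struct firing* h_min)
{
  cudaMemcpy( h_min, d_min, sizeof(struct firing), cudaMemcpyDeviceToHost);
}
// Possible use inside the event loop, after the two reduction launches:
//   copyMinFiringToHost( p_firing_val_temp, h_min);
//   currentTime += h_min->time;               // advance to the next event
//   unsigned short crossType = h_min->cross;  // selects which update kernel to run
// ---------------------------------------------------------------------------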
bb2656164457b2bf8ff1c0b4624a0b4444bf0d40.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "KerSortDataParticles.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned n = XSIZE*YSIZE; unsigned pini = 1; const unsigned *sortpart = NULL; hipMalloc(&sortpart, XSIZE*YSIZE); const float4 *a = NULL; hipMalloc(&a, XSIZE*YSIZE); float4 *a2 = NULL; hipMalloc(&a2, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( KerSortDataParticles), dim3(gridBlock),dim3(threadBlock), 0, 0, n,pini,sortpart,a,a2); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( KerSortDataParticles), dim3(gridBlock),dim3(threadBlock), 0, 0, n,pini,sortpart,a,a2); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( KerSortDataParticles), dim3(gridBlock),dim3(threadBlock), 0, 0, n,pini,sortpart,a,a2); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
bb2656164457b2bf8ff1c0b4624a0b4444bf0d40.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "KerSortDataParticles.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; unsigned n = XSIZE*YSIZE; unsigned pini = 1; const unsigned *sortpart = NULL; cudaMalloc(&sortpart, XSIZE*YSIZE); const float4 *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); float4 *a2 = NULL; cudaMalloc(&a2, XSIZE*YSIZE); int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); KerSortDataParticles<<<gridBlock,threadBlock>>>(n,pini,sortpart,a,a2); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { KerSortDataParticles<<<gridBlock,threadBlock>>>(n,pini,sortpart,a,a2); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { KerSortDataParticles<<<gridBlock,threadBlock>>>(n,pini,sortpart,a,a2); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
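// ---------------------------------------------------------------------------
// Note on the timing pattern above (sketch, not part of the original harness):
// kernel launches are asynchronous, so the steady_clock stamps mostly capture
// launch overhead unless the device is drained first. If wall-clock kernel
// time is the goal, one common variant synchronizes before each stamp:
//
//   cudaDeviceSynchronize();                  // drain prior work
//   auto start = steady_clock::now();
//   for (int i = 0; i < 1000; i++) {
//     KerSortDataParticles<<<gridBlock,threadBlock>>>(n,pini,sortpart,a,a2);
//   }
//   cudaDeviceSynchronize();                  // wait for the timed kernels
//   auto end = steady_clock::now();
// ---------------------------------------------------------------------------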
96427ab62d8b9acaeb3360786bc2fdd8c0b1bb5b.hip
// !!! This is a file automatically generated by hipify!!!
#include "ATen/ATen.h"
#include "ATen/hip/HIPContext.h"
#include "ATen/native/hip/Resize.cuh"

namespace at { namespace native {

Tensor& resize_cuda_(Tensor& self, IntList size) {
  auto* self_ = self.unsafeGetTensorImpl();
  resize_impl_cuda_(self_, size, /*strides=*/c10::nullopt);
  self_->maybe_zero_dim(size.size() == 0);
  return self;
}

}}
96427ab62d8b9acaeb3360786bc2fdd8c0b1bb5b.cu
#include "ATen/ATen.h"
#include "ATen/cuda/CUDAContext.h"
#include "ATen/native/cuda/Resize.cuh"

namespace at { namespace native {

Tensor& resize_cuda_(Tensor& self, IntList size) {
  auto* self_ = self.unsafeGetTensorImpl();
  resize_impl_cuda_(self_, size, /*strides=*/c10::nullopt);
  self_->maybe_zero_dim(size.size() == 0);
  return self;
}

}}
edb2fde83fb07a6a6eb03f326469da81034daff6.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 2015-2016 NVIDIA Corporation. All rights reserved. * * Sample to demonstrate use of NVlink CUPTI APIs * * This version is significantly changed to use PAPI and the CUDA component to * handle access and reporting. As of 10/05/2018, I have deleted all CUPTI_ONLY * references, for clarity. The file nvlink_bandwidth_cupti_only.cu contains * the cupti-only code. I also deleted the #if PAPI; there is no option * without PAPI. Also, before my changes, the makefile did not even have a * build option that set CUPTI_ONLY for this file. * * -TonyC. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <hip/hip_runtime.h> #include <cupti.h> #include "papi.h" // THIS MACRO EXITS if the papi call does not return PAPI_OK. Do not use for routines that // return anything else; e.g. PAPI_num_components, PAPI_get_component_info, PAPI_library_init. #define CALL_PAPI_OK(papi_routine) \ do { \ int _papiret = papi_routine; \ if (_papiret != PAPI_OK) { \ fprintf(stderr, "%s:%d: PAPI Error: function %s failed with ret=%d [%s].\n", \ __FILE__, __LINE__, #papi_routine, _papiret, PAPI_strerror(_papiret));\ exit(-1); \ } \ } while (0); #define CUPTI_CALL(call) \ do { \ CUptiResult _status = call; \ if (_status != CUPTI_SUCCESS) { \ const char *errstr; \ cuptiGetResultString(_status, &errstr); \ fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \ __FILE__, __LINE__, #call, errstr); \ exit(-1); \ } \ } while (0); #define DRIVER_API_CALL(apiFuncCall) \ do { \ hipError_t _status = apiFuncCall; \ if (_status != hipSuccess) { \ const char *errName=NULL, *errStr=NULL; \ hipError_t _e1 = hipGetErrorName(_status, &errName); \ hipError_t _e2 = hipGetErrorString(_status, &errStr); \ fprintf(stderr, "%s:%d: error: function %s failed with error %d [%s]='%s'.\n", \ __FILE__, __LINE__, #apiFuncCall, _status, errName, errStr); \ exit(-1); \ } \ } while (0); #define RUNTIME_API_CALL(apiFuncCall) \ do { \ hipError_t _status = apiFuncCall; \ if (_status != hipSuccess) { \ fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \ __FILE__, __LINE__, #apiFuncCall, hipGetErrorString(_status)); \ exit(-1); \ } \ } while (0); #define MEMORY_ALLOCATION_CALL(var) \ do { \ if (var == NULL) { \ fprintf(stderr, "%s:%d: Error: Memory Allocation Failed \n", \ __FILE__, __LINE__); \ exit(-1); \ } \ } while (0); #define MAX_DEVICES (32) #define BLOCK_SIZE (1024) #define GRID_SIZE (512) #define BUF_SIZE (32 * 1024) #define ALIGN_SIZE (8) #define SUCCESS (0) #define MAX_SIZE (64*1024*1024) // 64 MB int Streams; // Number of physical copy engines to use; taken from Device Properties asyncEngineCount. int cpuToGpu = 0; int gpuToGpu = 0; //----------------------------------------------------------------------------- // This is the GPU routine to move a block from dst (on one GPU) to src (on // another GPU. This is no longer used in this code; we use hipMemcpyAsync(). 
// Typical invocation (depends on #defines above): // // for(i = 0; i < Streams; i++) { // hipLaunchKernelGGL(( test_nvlink_bandwidth) , dim3(GRID_SIZE), dim3(BLOCK_SIZE) , 0, 0, (float *) pDevBuffer1[i], (float *) pDevBuffer0[i]); // printf("test_nvlink_bandwidth stream %d \n", i); // } //----------------------------------------------------------------------------- extern "C" __global__ void test_nvlink_bandwidth(float *src, float *dst) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx % 2) dst[idx] = src[idx] * 2.0f; else dst[idx] = src[idx] * 1.5f; // dst[idx] = src[idx] * 2.0f; } // end routine #define DIM(x) (sizeof(x)/sizeof(*(x))) //----------------------------------------------------------------------------- // Return a text version with B, KB, MB, GB or TB. //----------------------------------------------------------------------------- void calculateSize(char *result, uint64_t size) { int i; const char *sizes[] = { "TB", "GB", "MB", "KB", "B" }; uint64_t exbibytes = 1024ULL * 1024ULL * 1024ULL * 1024ULL; uint64_t multiplier = exbibytes; for(i = 0; (unsigned) i < DIM(sizes); i++, multiplier /= (uint64_t) 1024) { if(size < multiplier) continue; sprintf(result, "%.1f %s", (float) size / multiplier, sizes[i]); return; } strcpy(result, "0"); return; } // end routine //----------------------------------------------------------------------------- // We use Async copies (returns while operation is still in progress) with // multiple streams; hipDeviceSynchronize waits for them to complete. //----------------------------------------------------------------------------- void testCpuToGpu(CUpti_EventGroup * eventGroup, hipDeviceptr_t * pDevBuffer, float **pHostBuffer, size_t bufferSize, hipStream_t * cudaStreams, uint64_t * timeDuration, int numEventGroup) { int i; fprintf(stderr, "Streams = %d.\n", Streams); // Unidirectional copy H2D (Host to Device). for(i = 0; i < Streams; i++) { // RUNTIME_API_CALL(hipMemcpyAsync((void *) pDevBuffer[i], pHostBuffer[i], bufferSize, hipMemcpyHostToDevice, cudaStreams[i])); hipLaunchKernelGGL(( test_nvlink_bandwidth) , dim3(GRID_SIZE), dim3(BLOCK_SIZE) , 0, 0, (float *) pDevBuffer[i], (float *) pHostBuffer[i]); } RUNTIME_API_CALL(hipDeviceSynchronize()); // Unidirectional copy D2H (Device to Host). for(i = 0; i < Streams; i++) { // RUNTIME_API_CALL(hipMemcpyAsync(pHostBuffer[i], (void *) pDevBuffer[i], bufferSize, hipMemcpyDeviceToHost, cudaStreams[i])); hipLaunchKernelGGL(( test_nvlink_bandwidth) , dim3(GRID_SIZE), dim3(BLOCK_SIZE) , 0, 0, (float *) pHostBuffer[i], (float *) pDevBuffer[i]); } RUNTIME_API_CALL(hipDeviceSynchronize()); // Bidirectional copy for(i = 0; i < Streams; i += 2) { RUNTIME_API_CALL(hipMemcpyAsync((void *) pDevBuffer[i], pHostBuffer[i], bufferSize, hipMemcpyHostToDevice, cudaStreams[i])); RUNTIME_API_CALL(hipMemcpyAsync(pHostBuffer[i + 1], (void *) pDevBuffer[i + 1], bufferSize, hipMemcpyDeviceToHost, cudaStreams[i + 1])); } RUNTIME_API_CALL(hipDeviceSynchronize()); } // end routine. //----------------------------------------------------------------------------- // Copy buffers from the host to each device, in preparation for a transfer // between devices. // We use Async copies (returns while operation is still in progress) with // multiple streams; hipDeviceSynchronize waits for them to complete. 
//----------------------------------------------------------------------------- void testGpuToGpu_part1(CUpti_EventGroup * eventGroup, hipDeviceptr_t * pDevBuffer0, hipDeviceptr_t * pDevBuffer1, float **pHostBuffer, size_t bufferSize, hipStream_t * cudaStreams, uint64_t * timeDuration, int numEventGroup) { int i; RUNTIME_API_CALL(hipSetDevice(0)); RUNTIME_API_CALL(hipDeviceEnablePeerAccess(1, 0)); RUNTIME_API_CALL(hipSetDevice(1)); RUNTIME_API_CALL(hipDeviceEnablePeerAccess(0, 0)); // Unidirectional copy H2D for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMemcpyAsync((void *) pDevBuffer0[i], pHostBuffer[i], bufferSize, hipMemcpyHostToDevice, cudaStreams[i])); } RUNTIME_API_CALL(hipDeviceSynchronize()); for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMemcpyAsync((void *) pDevBuffer1[i], pHostBuffer[i], bufferSize, hipMemcpyHostToDevice, cudaStreams[i])); } RUNTIME_API_CALL(hipDeviceSynchronize()); } // end routine. //----------------------------------------------------------------------------- // Copy from device zero to device 1, then from device 1 to device 0. //----------------------------------------------------------------------------- void testGpuToGpu_part2(CUpti_EventGroup * eventGroup, hipDeviceptr_t * pDevBuffer0, hipDeviceptr_t * pDevBuffer1, float **pHostBuffer, size_t bufferSize, hipStream_t * cudaStreams, uint64_t * timeDuration, int numEventGroup) { int i; for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMemcpyAsync((void *) pDevBuffer0[i], (void *) pDevBuffer1[i], bufferSize, hipMemcpyDeviceToDevice, cudaStreams[i])); printf("Copy %zu stream %d to devBuffer0 from devBuffer1 \n", bufferSize, i); } RUNTIME_API_CALL(hipDeviceSynchronize()); for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMemcpyAsync((void *) pDevBuffer1[i], (void *) pDevBuffer0[i], bufferSize, hipMemcpyDeviceToDevice, cudaStreams[i])); printf("Copy %zu stream %d to devBuffer1 from devBuffer0 \n", bufferSize, i); } RUNTIME_API_CALL(hipDeviceSynchronize()); } // end routine. //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- static void printUsage() { printf("usage: Demonstrate use of NVlink CUPTI APIs\n"); printf(" -help : display help message\n"); printf(" --cpu-to-gpu : Show results for data transfer between CPU and GPU \n"); printf(" --gpu-to-gpu : Show results for data transfer between two GPUs \n"); } // end routine. //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- void parseCommandLineArgs(int argc, char *argv[]) { if(argc != 2) { printf("Invalid number of options\n"); exit(0); } if(strcmp(argv[1], "--cpu-to-gpu") == 0) { cpuToGpu = 1; } else if(strcmp(argv[1], "--gpu-to-gpu") == 0) { gpuToGpu = 1; } else if((strcmp(argv[1], "--help") == 0) || (strcmp(argv[1], "-help") == 0) || (strcmp(argv[1], "-h") == 0)) { printUsage(); exit(0); } else { cpuToGpu = 1; } } // end routine. //----------------------------------------------------------------------------- // Main program. 
//----------------------------------------------------------------------------- int main(int argc, char *argv[]) { int deviceCount = 0, i = 0, numEventGroup = 0, eventsRead=0; size_t bufferSize = 0, freeMemory = 0, totalMemory = 0; char str[64]; hipDeviceptr_t *pDevBuffer0 = NULL; hipDeviceptr_t *pDevBuffer1 = NULL; float **pHostBuffer = NULL; hipStream_t *cudaStreams = NULL; hipDeviceProp_t prop[MAX_DEVICES]; uint64_t timeDuration; CUpti_EventGroup eventGroup[32]; // This contains the original set of 'bandwidth' events to read. This // program attempts to read them together within a single EventSet. PAPI // allows them all, and the component does not complain, but it always // returns 0 for the two "receive" events below. That is not the result // when we read each of these separately for the same program; which is the // approach taken in the program nvlink_all.cu. I suspect nvlink has a // conflict of some sort, but haven't tracked down the documentation to // prove that. -Tony C. #define NUM_METRIC ( 4) const char *MetricBase[NUM_METRIC] = { "cuda:::metric:nvlink_total_data_transmitted" , // okay Group NVLINK. "cuda:::metric:nvlink_transmit_throughput" , // okay Group NVLINK. "cuda:::metric:nvlink_total_data_received" , // okay Group NVLINK. "cuda:::metric:nvlink_receive_throughput" , // okay Group NVLINK. // "cuda:::metric:inst_per_warp" , // okay group A. // "cuda:::metric:warp_execution_efficiency" , // okay Group A. // "cuda:::metric:warp_nonpred_execution_efficiency" , // okay Group A. // "cuda:::metric:shared_load_transactions_per_request" , // okay Group A. // "cuda:::metric:shared_store_transactions_per_request", // okay Group A. // "cuda:::metric:shared_store_transactions" , // okay Group A. // "cuda:::metric:shared_load_transactions" , // okay Group A. // "cuda:::metric:inst_replay_overhead" , // Group B // "cuda:::metric:local_load_transactions" , // Group B. // "cuda:::metric:local_load_transactions_per_request" , // Group NONE. Bad Combo, even by itself requires 2 passes. // "cuda:::metric:local_store_transactions_per_request" , // Group NONE. Bad Combo, even by itself. // "cuda:::metric:gld_transactions_per_request" , // Group NONE. Bad Combo, even by itself. // "cuda:::metric:gst_transactions_per_request" , // Group NONE. Bad Combo, even by itself. // "cuda:::event:active_cycles" , // "cuda:::event:active_warps" , // "cuda:::event:active_cycles" , // "cuda:::event:active_warps" , // "cuda:::event:inst_executed" , // "cuda:::event:warps_launched" , // "cuda:::metric:branch_efficiency" , // Even by itself, causes signal 11 (seg fault) on SECOND read. }; // Parse command line arguments parseCommandLineArgs(argc, argv); if (cpuToGpu) printf("TEST: CPU to GPU transfer.\n"); else printf("TEST: GPU to GPU transfer.\n"); DRIVER_API_CALL(hipInit(0)); RUNTIME_API_CALL(hipGetDeviceCount(&deviceCount)); printf("There are %d devices.\n", deviceCount); if(deviceCount == 0) { printf("There is no device supporting CUDA.\n"); exit(-1); } Streams = 1; // Always use at least ONE stream. for(i = 0; i < deviceCount; i++) { RUNTIME_API_CALL(hipGetDeviceProperties(&prop[i], i)); printf("CUDA Device %d Name: %s", i, prop[i].name); printf(", AsyncEngineCount=%i", prop[i].asyncEngineCount); printf(", MultiProcessors=%i", prop[i].multiProcessorCount); printf(", MaxThreadsPerMP=%i", prop[i].maxThreadsPerMultiProcessor); printf("\n"); if (prop[i].asyncEngineCount > Streams) { // If a new high, Streams = prop[i].asyncEngineCount; // Always use the maximum. 
} } printf("Streams to use: %i (= max Copy Engines).\n", Streams); // allocate space pDevBuffer0 = (hipDeviceptr_t*) calloc(Streams, sizeof(hipDeviceptr_t)); pDevBuffer1 = (hipDeviceptr_t*) calloc(Streams, sizeof(hipDeviceptr_t)); pHostBuffer = (float **) calloc(Streams, sizeof(float*)); cudaStreams = (hipStream_t*) calloc(Streams, sizeof(hipStream_t)); // Set memcpy size based on available device memory RUNTIME_API_CALL(hipMemGetInfo(&freeMemory, &totalMemory)); printf("Total Device Memory available : "); calculateSize(str, (uint64_t) totalMemory); printf("%s\n", str); bufferSize = MAX_SIZE < (freeMemory / 4) ? MAX_SIZE : (freeMemory / 4); bufferSize = bufferSize/2; printf("Memcpy size is set to %llu B (%llu MB)\n", (unsigned long long) bufferSize, (unsigned long long) bufferSize / (1024 * 1024)); for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipStreamCreate(&cudaStreams[i])); } RUNTIME_API_CALL(hipDeviceSynchronize()); // Nvlink-topology Records are generated even before hipMemcpy API is called. CUPTI_CALL(cuptiActivityFlushAll(0x7fffffff)); // flag covers every kind of record. fprintf(stderr, "Setup PAPI counters internally (PAPI)\n"); int EventSet = PAPI_NULL; long long values[MAX_DEVICES * NUM_METRIC]; char *EventName[MAX_DEVICES * NUM_METRIC]; int eventCount; int retval, ee; int k, cid=-1; /* PAPI Initialization */ retval = PAPI_library_init(PAPI_VER_CURRENT); if(retval != PAPI_VER_CURRENT) { fprintf(stderr, "PAPI_library_init failed, ret=%i [%s]\n", retval, PAPI_strerror(retval)); exit(-1); } fprintf(stderr, "PAPI version: %d.%d.%d\n", PAPI_VERSION_MAJOR(PAPI_VERSION), PAPI_VERSION_MINOR(PAPI_VERSION), PAPI_VERSION_REVISION(PAPI_VERSION)); // Find cuda component index. k = PAPI_num_components(); // get number of components. for (i=0; i<k && cid<0; i++) { // while not found, PAPI_component_info_t *aComponent = (PAPI_component_info_t*) PAPI_get_component_info(i); // get the component info. if (aComponent == NULL) { // if we failed, fprintf(stderr, "PAPI_get_component_info(%i) failed, " "returned NULL. %i components reported.\n", i,k); exit(-1); } if (strcmp("cuda", aComponent->name) == 0) cid=i; // If we found our match, record it. } // end search components. if (cid < 0) { // if no PCP component found, fprintf(stderr, "Failed to find pcp component among %i " "reported components.\n", k); exit(-1); } fprintf(stderr, "Found CUDA Component at id %d\n",cid); CALL_PAPI_OK(PAPI_create_eventset(&EventSet)); CALL_PAPI_OK(PAPI_assign_eventset_component(EventSet, cid)); // ===== Allocate Memory ===================================== for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(hipMalloc((void **) &pDevBuffer0[i], bufferSize)); pHostBuffer[i] = (float *) malloc(bufferSize); MEMORY_ALLOCATION_CALL(pHostBuffer[i]); } // Add events at a GPU specific level ... eg cuda:::metric:nvlink_total_data_transmitted:device=0 char tmpEventName[1024]; eventCount = 0; for(i = 0; i < deviceCount; i++) { // Profile all devices. fprintf(stderr, "Set device to %d\n", i); for(ee = 0; ee < NUM_METRIC; ee++) { snprintf(tmpEventName, 1024, "%s:device=%d\0", MetricBase[ee], i); retval = PAPI_add_named_event(EventSet, tmpEventName); // Don't want to fail program if name not found... if(retval == PAPI_OK) { EventName[eventCount] = strdup(tmpEventName); eventCount++; } else { fprintf(stderr, "Failed to add event %s to GPU %i; ret=%d [%s].\n", tmpEventName, i, retval, PAPI_strerror(retval)); } } } if (eventCount > 0) { // If we have events... for(i = 0; i < eventCount; i++) values[i] = -1; // init. 
if(cpuToGpu) { RUNTIME_API_CALL(hipSetDevice(1)); for(i = 0; i < Streams; i++) RUNTIME_API_CALL(hipMalloc((void **) &pDevBuffer1[i], bufferSize)); CALL_PAPI_OK(PAPI_start(EventSet)); // Start event counters. RUNTIME_API_CALL(hipSetDevice(0)); testCpuToGpu(eventGroup, pDevBuffer0, pHostBuffer, bufferSize, cudaStreams, &timeDuration, numEventGroup); RUNTIME_API_CALL(hipSetDevice(1)); testCpuToGpu(eventGroup, pDevBuffer1, pHostBuffer, bufferSize, cudaStreams, &timeDuration, numEventGroup); RUNTIME_API_CALL(hipSetDevice(0)); CALL_PAPI_OK(PAPI_stop(EventSet, values)); // Stop and read values. } else if(gpuToGpu) { RUNTIME_API_CALL(hipSetDevice(1)); for(i = 0; i < Streams; i++) RUNTIME_API_CALL(hipMalloc((void **) &pDevBuffer1[i], bufferSize)); // Prepare the copy, load up buffers on each device from the host. testGpuToGpu_part1(eventGroup, pDevBuffer0, pDevBuffer1, pHostBuffer, bufferSize, cudaStreams, &timeDuration, numEventGroup); // Copy from device 0->1, then device 1->0. CALL_PAPI_OK(PAPI_start(EventSet)); // Start event counters. testGpuToGpu_part2(eventGroup, pDevBuffer0, pDevBuffer1, pHostBuffer, bufferSize, cudaStreams, &timeDuration, numEventGroup); CALL_PAPI_OK(PAPI_stop(EventSet, values)); // Stop and read values. } // report each event counted. for(i = 0; i < eventCount; i++) { if (values[i] >= 0) { // If not still -1, eventsRead++; // .. count and report. calculateSize(str, (uint64_t) values[i] ); printf("PAPI %64s: %s \n", EventName[i], str); } } } // Program cleanup. CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // Delete all events in set. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // Release PAPI memory. PAPI_shutdown(); // Has no return. if (eventsRead > 0) { // If we succeeded with any, report. printf("%i bandwidth events successfully reported.\n", eventsRead); return(0); // exit OK. } printf("Failed to read any bandwidth events.\n"); // report a failure. return (-1); // Exit with error. } // end MAIN.
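// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original sample): the counters printed
// above for nvlink_total_data_transmitted/received are byte totals. If an
// approximate rate is also wanted, one option is to bracket the PAPI_start()/
// PAPI_stop() pair with PAPI_get_real_usec() and divide; startUsec and
// stopUsec below are hypothetical variables captured that way.
void reportApproxRate( long long bytes, long long startUsec, long long stopUsec)
{
  double seconds = (double)(stopUsec - startUsec) * 1.0e-6;
  if (seconds > 0.0 && bytes >= 0) {
    char str[64];
    calculateSize(str, (uint64_t)((double)bytes / seconds));
    printf("Approximate rate: %s/s\n", str);
  }
}
// ---------------------------------------------------------------------------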
edb2fde83fb07a6a6eb03f326469da81034daff6.cu
/* * Copyright 2015-2016 NVIDIA Corporation. All rights reserved. * * Sample to demonstrate use of NVlink CUPTI APIs * * This version is significantly changed to use PAPI and the CUDA component to * handle access and reporting. As of 10/05/2018, I have deleted all CUPTI_ONLY * references, for clarity. The file nvlink_bandwidth_cupti_only.cu contains * the cupti-only code. I also deleted the #if PAPI; there is no option * without PAPI. Also, before my changes, the makefile did not even have a * build option that set CUPTI_ONLY for this file. * * -TonyC. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <cuda.h> #include <cupti.h> #include "papi.h" // THIS MACRO EXITS if the papi call does not return PAPI_OK. Do not use for routines that // return anything else; e.g. PAPI_num_components, PAPI_get_component_info, PAPI_library_init. #define CALL_PAPI_OK(papi_routine) \ do { \ int _papiret = papi_routine; \ if (_papiret != PAPI_OK) { \ fprintf(stderr, "%s:%d: PAPI Error: function %s failed with ret=%d [%s].\n", \ __FILE__, __LINE__, #papi_routine, _papiret, PAPI_strerror(_papiret));\ exit(-1); \ } \ } while (0); #define CUPTI_CALL(call) \ do { \ CUptiResult _status = call; \ if (_status != CUPTI_SUCCESS) { \ const char *errstr; \ cuptiGetResultString(_status, &errstr); \ fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \ __FILE__, __LINE__, #call, errstr); \ exit(-1); \ } \ } while (0); #define DRIVER_API_CALL(apiFuncCall) \ do { \ CUresult _status = apiFuncCall; \ if (_status != CUDA_SUCCESS) { \ const char *errName=NULL, *errStr=NULL; \ CUresult _e1 = cuGetErrorName(_status, &errName); \ CUresult _e2 = cuGetErrorString(_status, &errStr); \ fprintf(stderr, "%s:%d: error: function %s failed with error %d [%s]='%s'.\n", \ __FILE__, __LINE__, #apiFuncCall, _status, errName, errStr); \ exit(-1); \ } \ } while (0); #define RUNTIME_API_CALL(apiFuncCall) \ do { \ cudaError_t _status = apiFuncCall; \ if (_status != cudaSuccess) { \ fprintf(stderr, "%s:%d: error: function %s failed with error %s.\n", \ __FILE__, __LINE__, #apiFuncCall, cudaGetErrorString(_status)); \ exit(-1); \ } \ } while (0); #define MEMORY_ALLOCATION_CALL(var) \ do { \ if (var == NULL) { \ fprintf(stderr, "%s:%d: Error: Memory Allocation Failed \n", \ __FILE__, __LINE__); \ exit(-1); \ } \ } while (0); #define MAX_DEVICES (32) #define BLOCK_SIZE (1024) #define GRID_SIZE (512) #define BUF_SIZE (32 * 1024) #define ALIGN_SIZE (8) #define SUCCESS (0) #define MAX_SIZE (64*1024*1024) // 64 MB int Streams; // Number of physical copy engines to use; taken from Device Properties asyncEngineCount. int cpuToGpu = 0; int gpuToGpu = 0; //----------------------------------------------------------------------------- // This is the GPU routine to move a block from dst (on one GPU) to src (on // another GPU. This is no longer used in this code; we use cudaMemcpyAsync(). 
// Typical invocation (depends on #defines above): // // for(i = 0; i < Streams; i++) { // test_nvlink_bandwidth <<< GRID_SIZE, BLOCK_SIZE >>> ((float *) pDevBuffer1[i], (float *) pDevBuffer0[i]); // printf("test_nvlink_bandwidth stream %d \n", i); // } //----------------------------------------------------------------------------- extern "C" __global__ void test_nvlink_bandwidth(float *src, float *dst) { int idx = blockIdx.x * blockDim.x + threadIdx.x; if (idx % 2) dst[idx] = src[idx] * 2.0f; else dst[idx] = src[idx] * 1.5f; // dst[idx] = src[idx] * 2.0f; } // end routine #define DIM(x) (sizeof(x)/sizeof(*(x))) //----------------------------------------------------------------------------- // Return a text version with B, KB, MB, GB or TB. //----------------------------------------------------------------------------- void calculateSize(char *result, uint64_t size) { int i; const char *sizes[] = { "TB", "GB", "MB", "KB", "B" }; uint64_t exbibytes = 1024ULL * 1024ULL * 1024ULL * 1024ULL; uint64_t multiplier = exbibytes; for(i = 0; (unsigned) i < DIM(sizes); i++, multiplier /= (uint64_t) 1024) { if(size < multiplier) continue; sprintf(result, "%.1f %s", (float) size / multiplier, sizes[i]); return; } strcpy(result, "0"); return; } // end routine //----------------------------------------------------------------------------- // We use Async copies (returns while operation is still in progress) with // multiple streams; cudaDeviceSynchronize waits for them to complete. //----------------------------------------------------------------------------- void testCpuToGpu(CUpti_EventGroup * eventGroup, CUdeviceptr * pDevBuffer, float **pHostBuffer, size_t bufferSize, cudaStream_t * cudaStreams, uint64_t * timeDuration, int numEventGroup) { int i; fprintf(stderr, "Streams = %d.\n", Streams); // Unidirectional copy H2D (Host to Device). for(i = 0; i < Streams; i++) { // RUNTIME_API_CALL(cudaMemcpyAsync((void *) pDevBuffer[i], pHostBuffer[i], bufferSize, cudaMemcpyHostToDevice, cudaStreams[i])); test_nvlink_bandwidth <<< GRID_SIZE, BLOCK_SIZE >>> ((float *) pDevBuffer[i], (float *) pHostBuffer[i]); } RUNTIME_API_CALL(cudaDeviceSynchronize()); // Unidirectional copy D2H (Device to Host). for(i = 0; i < Streams; i++) { // RUNTIME_API_CALL(cudaMemcpyAsync(pHostBuffer[i], (void *) pDevBuffer[i], bufferSize, cudaMemcpyDeviceToHost, cudaStreams[i])); test_nvlink_bandwidth <<< GRID_SIZE, BLOCK_SIZE >>> ((float *) pHostBuffer[i], (float *) pDevBuffer[i]); } RUNTIME_API_CALL(cudaDeviceSynchronize()); // Bidirectional copy for(i = 0; i < Streams; i += 2) { RUNTIME_API_CALL(cudaMemcpyAsync((void *) pDevBuffer[i], pHostBuffer[i], bufferSize, cudaMemcpyHostToDevice, cudaStreams[i])); RUNTIME_API_CALL(cudaMemcpyAsync(pHostBuffer[i + 1], (void *) pDevBuffer[i + 1], bufferSize, cudaMemcpyDeviceToHost, cudaStreams[i + 1])); } RUNTIME_API_CALL(cudaDeviceSynchronize()); } // end routine. //----------------------------------------------------------------------------- // Copy buffers from the host to each device, in preparation for a transfer // between devices. // We use Async copies (returns while operation is still in progress) with // multiple streams; cudaDeviceSynchronize waits for them to complete. 
//----------------------------------------------------------------------------- void testGpuToGpu_part1(CUpti_EventGroup * eventGroup, CUdeviceptr * pDevBuffer0, CUdeviceptr * pDevBuffer1, float **pHostBuffer, size_t bufferSize, cudaStream_t * cudaStreams, uint64_t * timeDuration, int numEventGroup) { int i; RUNTIME_API_CALL(cudaSetDevice(0)); RUNTIME_API_CALL(cudaDeviceEnablePeerAccess(1, 0)); RUNTIME_API_CALL(cudaSetDevice(1)); RUNTIME_API_CALL(cudaDeviceEnablePeerAccess(0, 0)); // Unidirectional copy H2D for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(cudaMemcpyAsync((void *) pDevBuffer0[i], pHostBuffer[i], bufferSize, cudaMemcpyHostToDevice, cudaStreams[i])); } RUNTIME_API_CALL(cudaDeviceSynchronize()); for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(cudaMemcpyAsync((void *) pDevBuffer1[i], pHostBuffer[i], bufferSize, cudaMemcpyHostToDevice, cudaStreams[i])); } RUNTIME_API_CALL(cudaDeviceSynchronize()); } // end routine. //----------------------------------------------------------------------------- // Copy from device zero to device 1, then from device 1 to device 0. //----------------------------------------------------------------------------- void testGpuToGpu_part2(CUpti_EventGroup * eventGroup, CUdeviceptr * pDevBuffer0, CUdeviceptr * pDevBuffer1, float **pHostBuffer, size_t bufferSize, cudaStream_t * cudaStreams, uint64_t * timeDuration, int numEventGroup) { int i; for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(cudaMemcpyAsync((void *) pDevBuffer0[i], (void *) pDevBuffer1[i], bufferSize, cudaMemcpyDeviceToDevice, cudaStreams[i])); printf("Copy %zu stream %d to devBuffer0 from devBuffer1 \n", bufferSize, i); } RUNTIME_API_CALL(cudaDeviceSynchronize()); for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(cudaMemcpyAsync((void *) pDevBuffer1[i], (void *) pDevBuffer0[i], bufferSize, cudaMemcpyDeviceToDevice, cudaStreams[i])); printf("Copy %zu stream %d to devBuffer1 from devBuffer0 \n", bufferSize, i); } RUNTIME_API_CALL(cudaDeviceSynchronize()); } // end routine. //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- static void printUsage() { printf("usage: Demonstrate use of NVlink CUPTI APIs\n"); printf(" -help : display help message\n"); printf(" --cpu-to-gpu : Show results for data transfer between CPU and GPU \n"); printf(" --gpu-to-gpu : Show results for data transfer between two GPUs \n"); } // end routine. //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- //----------------------------------------------------------------------------- void parseCommandLineArgs(int argc, char *argv[]) { if(argc != 2) { printf("Invalid number of options\n"); exit(0); } if(strcmp(argv[1], "--cpu-to-gpu") == 0) { cpuToGpu = 1; } else if(strcmp(argv[1], "--gpu-to-gpu") == 0) { gpuToGpu = 1; } else if((strcmp(argv[1], "--help") == 0) || (strcmp(argv[1], "-help") == 0) || (strcmp(argv[1], "-h") == 0)) { printUsage(); exit(0); } else { cpuToGpu = 1; } } // end routine. //----------------------------------------------------------------------------- // Main program. 
//----------------------------------------------------------------------------- int main(int argc, char *argv[]) { int deviceCount = 0, i = 0, numEventGroup = 0, eventsRead=0; size_t bufferSize = 0, freeMemory = 0, totalMemory = 0; char str[64]; CUdeviceptr *pDevBuffer0 = NULL; CUdeviceptr *pDevBuffer1 = NULL; float **pHostBuffer = NULL; cudaStream_t *cudaStreams = NULL; cudaDeviceProp prop[MAX_DEVICES]; uint64_t timeDuration; CUpti_EventGroup eventGroup[32]; // This contains the original set of 'bandwidth' events to read. This // program attempts to read them together within a single EventSet. PAPI // allows them all, and the component does not complain, but it always // returns 0 for the two "receive" events below. That is not the result // when we read each of these separately for the same program; which is the // approach taken in the program nvlink_all.cu. I suspect nvlink has a // conflict of some sort, but haven't tracked down the documentation to // prove that. -Tony C. #define NUM_METRIC ( 4) const char *MetricBase[NUM_METRIC] = { "cuda:::metric:nvlink_total_data_transmitted" , // okay Group NVLINK. "cuda:::metric:nvlink_transmit_throughput" , // okay Group NVLINK. "cuda:::metric:nvlink_total_data_received" , // okay Group NVLINK. "cuda:::metric:nvlink_receive_throughput" , // okay Group NVLINK. // "cuda:::metric:inst_per_warp" , // okay group A. // "cuda:::metric:warp_execution_efficiency" , // okay Group A. // "cuda:::metric:warp_nonpred_execution_efficiency" , // okay Group A. // "cuda:::metric:shared_load_transactions_per_request" , // okay Group A. // "cuda:::metric:shared_store_transactions_per_request", // okay Group A. // "cuda:::metric:shared_store_transactions" , // okay Group A. // "cuda:::metric:shared_load_transactions" , // okay Group A. // "cuda:::metric:inst_replay_overhead" , // Group B // "cuda:::metric:local_load_transactions" , // Group B. // "cuda:::metric:local_load_transactions_per_request" , // Group NONE. Bad Combo, even by itself requires 2 passes. // "cuda:::metric:local_store_transactions_per_request" , // Group NONE. Bad Combo, even by itself. // "cuda:::metric:gld_transactions_per_request" , // Group NONE. Bad Combo, even by itself. // "cuda:::metric:gst_transactions_per_request" , // Group NONE. Bad Combo, even by itself. // "cuda:::event:active_cycles" , // "cuda:::event:active_warps" , // "cuda:::event:active_cycles" , // "cuda:::event:active_warps" , // "cuda:::event:inst_executed" , // "cuda:::event:warps_launched" , // "cuda:::metric:branch_efficiency" , // Even by itself, causes signal 11 (seg fault) on SECOND read. }; // Parse command line arguments parseCommandLineArgs(argc, argv); if (cpuToGpu) printf("TEST: CPU to GPU transfer.\n"); else printf("TEST: GPU to GPU transfer.\n"); DRIVER_API_CALL(cuInit(0)); RUNTIME_API_CALL(cudaGetDeviceCount(&deviceCount)); printf("There are %d devices.\n", deviceCount); if(deviceCount == 0) { printf("There is no device supporting CUDA.\n"); exit(-1); } Streams = 1; // Always use at least ONE stream. for(i = 0; i < deviceCount; i++) { RUNTIME_API_CALL(cudaGetDeviceProperties(&prop[i], i)); printf("CUDA Device %d Name: %s", i, prop[i].name); printf(", AsyncEngineCount=%i", prop[i].asyncEngineCount); printf(", MultiProcessors=%i", prop[i].multiProcessorCount); printf(", MaxThreadsPerMP=%i", prop[i].maxThreadsPerMultiProcessor); printf("\n"); if (prop[i].asyncEngineCount > Streams) { // If a new high, Streams = prop[i].asyncEngineCount; // Always use the maximum. 
} } printf("Streams to use: %i (= max Copy Engines).\n", Streams); // allocate space pDevBuffer0 = (CUdeviceptr*) calloc(Streams, sizeof(CUdeviceptr)); pDevBuffer1 = (CUdeviceptr*) calloc(Streams, sizeof(CUdeviceptr)); pHostBuffer = (float **) calloc(Streams, sizeof(float*)); cudaStreams = (cudaStream_t*) calloc(Streams, sizeof(cudaStream_t)); // Set memcpy size based on available device memory RUNTIME_API_CALL(cudaMemGetInfo(&freeMemory, &totalMemory)); printf("Total Device Memory available : "); calculateSize(str, (uint64_t) totalMemory); printf("%s\n", str); bufferSize = MAX_SIZE < (freeMemory / 4) ? MAX_SIZE : (freeMemory / 4); bufferSize = bufferSize/2; printf("Memcpy size is set to %llu B (%llu MB)\n", (unsigned long long) bufferSize, (unsigned long long) bufferSize / (1024 * 1024)); for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(cudaStreamCreate(&cudaStreams[i])); } RUNTIME_API_CALL(cudaDeviceSynchronize()); // Nvlink-topology Records are generated even before cudaMemcpy API is called. CUPTI_CALL(cuptiActivityFlushAll(0x7fffffff)); // flag covers every kind of record. fprintf(stderr, "Setup PAPI counters internally (PAPI)\n"); int EventSet = PAPI_NULL; long long values[MAX_DEVICES * NUM_METRIC]; char *EventName[MAX_DEVICES * NUM_METRIC]; int eventCount; int retval, ee; int k, cid=-1; /* PAPI Initialization */ retval = PAPI_library_init(PAPI_VER_CURRENT); if(retval != PAPI_VER_CURRENT) { fprintf(stderr, "PAPI_library_init failed, ret=%i [%s]\n", retval, PAPI_strerror(retval)); exit(-1); } fprintf(stderr, "PAPI version: %d.%d.%d\n", PAPI_VERSION_MAJOR(PAPI_VERSION), PAPI_VERSION_MINOR(PAPI_VERSION), PAPI_VERSION_REVISION(PAPI_VERSION)); // Find cuda component index. k = PAPI_num_components(); // get number of components. for (i=0; i<k && cid<0; i++) { // while not found, PAPI_component_info_t *aComponent = (PAPI_component_info_t*) PAPI_get_component_info(i); // get the component info. if (aComponent == NULL) { // if we failed, fprintf(stderr, "PAPI_get_component_info(%i) failed, " "returned NULL. %i components reported.\n", i,k); exit(-1); } if (strcmp("cuda", aComponent->name) == 0) cid=i; // If we found our match, record it. } // end search components. if (cid < 0) { // if no PCP component found, fprintf(stderr, "Failed to find pcp component among %i " "reported components.\n", k); exit(-1); } fprintf(stderr, "Found CUDA Component at id %d\n",cid); CALL_PAPI_OK(PAPI_create_eventset(&EventSet)); CALL_PAPI_OK(PAPI_assign_eventset_component(EventSet, cid)); // ===== Allocate Memory ===================================== for(i = 0; i < Streams; i++) { RUNTIME_API_CALL(cudaMalloc((void **) &pDevBuffer0[i], bufferSize)); pHostBuffer[i] = (float *) malloc(bufferSize); MEMORY_ALLOCATION_CALL(pHostBuffer[i]); } // Add events at a GPU specific level ... eg cuda:::metric:nvlink_total_data_transmitted:device=0 char tmpEventName[1024]; eventCount = 0; for(i = 0; i < deviceCount; i++) { // Profile all devices. fprintf(stderr, "Set device to %d\n", i); for(ee = 0; ee < NUM_METRIC; ee++) { snprintf(tmpEventName, 1024, "%s:device=%d\0", MetricBase[ee], i); retval = PAPI_add_named_event(EventSet, tmpEventName); // Don't want to fail program if name not found... if(retval == PAPI_OK) { EventName[eventCount] = strdup(tmpEventName); eventCount++; } else { fprintf(stderr, "Failed to add event %s to GPU %i; ret=%d [%s].\n", tmpEventName, i, retval, PAPI_strerror(retval)); } } } if (eventCount > 0) { // If we have events... for(i = 0; i < eventCount; i++) values[i] = -1; // init. 
if(cpuToGpu) { RUNTIME_API_CALL(cudaSetDevice(1)); for(i = 0; i < Streams; i++) RUNTIME_API_CALL(cudaMalloc((void **) &pDevBuffer1[i], bufferSize)); CALL_PAPI_OK(PAPI_start(EventSet)); // Start event counters. RUNTIME_API_CALL(cudaSetDevice(0)); testCpuToGpu(eventGroup, pDevBuffer0, pHostBuffer, bufferSize, cudaStreams, &timeDuration, numEventGroup); RUNTIME_API_CALL(cudaSetDevice(1)); testCpuToGpu(eventGroup, pDevBuffer1, pHostBuffer, bufferSize, cudaStreams, &timeDuration, numEventGroup); RUNTIME_API_CALL(cudaSetDevice(0)); CALL_PAPI_OK(PAPI_stop(EventSet, values)); // Stop and read values. } else if(gpuToGpu) { RUNTIME_API_CALL(cudaSetDevice(1)); for(i = 0; i < Streams; i++) RUNTIME_API_CALL(cudaMalloc((void **) &pDevBuffer1[i], bufferSize)); // Prepare the copy, load up buffers on each device from the host. testGpuToGpu_part1(eventGroup, pDevBuffer0, pDevBuffer1, pHostBuffer, bufferSize, cudaStreams, &timeDuration, numEventGroup); // Copy from device 0->1, then device 1->0. CALL_PAPI_OK(PAPI_start(EventSet)); // Start event counters. testGpuToGpu_part2(eventGroup, pDevBuffer0, pDevBuffer1, pHostBuffer, bufferSize, cudaStreams, &timeDuration, numEventGroup); CALL_PAPI_OK(PAPI_stop(EventSet, values)); // Stop and read values. } // report each event counted. for(i = 0; i < eventCount; i++) { if (values[i] >= 0) { // If not still -1, eventsRead++; // .. count and report. calculateSize(str, (uint64_t) values[i] ); printf("PAPI %64s: %s \n", EventName[i], str); } } } // Program cleanup. CALL_PAPI_OK(PAPI_cleanup_eventset(EventSet)); // Delete all events in set. CALL_PAPI_OK(PAPI_destroy_eventset(&EventSet)); // Release PAPI memory. PAPI_shutdown(); // Has no return. if (eventsRead > 0) { // If we succeeded with any, report. printf("%i bandwidth events successfully reported.\n", eventsRead); return(0); // exit OK. } printf("Failed to read any bandwidth events.\n"); // report a failure. return (-1); // Exit with error. } // end MAIN.
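The copy loops in testCpuToGpu() were originally built around cudaMemcpyAsync (the calls are still visible above, commented out, with a kernel launch substituted). When a plain GB/s figure is wanted instead of NVLink counters, the same multi-stream copy pattern can be timed with CUDA events, roughly as sketched below. The pinned host buffers (cudaMallocHost) are an assumption on top of the original, which uses pageable malloc, and timedH2D with its parameters is an illustrative name, not part of the sample.

#include <cstdio>
#include <cuda_runtime.h>

// Sketch: time a unidirectional H2D copy spread across several streams and
// report bandwidth. Mirrors the buffer/stream layout of main() above, but
// assumes the host buffers were allocated with cudaMallocHost so the async
// copies can actually overlap.
static float timedH2D(float **devBuf, float **pinnedBuf, size_t bytes,
                      cudaStream_t *streams, int nStreams)
{
    cudaEvent_t t0, t1;
    cudaEventCreate(&t0);
    cudaEventCreate(&t1);

    cudaEventRecord(t0, 0);
    for (int i = 0; i < nStreams; i++)
        cudaMemcpyAsync(devBuf[i], pinnedBuf[i], bytes,
                        cudaMemcpyHostToDevice, streams[i]);
    cudaDeviceSynchronize();               // wait for every stream's copy
    cudaEventRecord(t1, 0);
    cudaEventSynchronize(t1);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, t0, t1);
    double gb = (double) bytes * nStreams / (1024.0 * 1024.0 * 1024.0);
    printf("H2D: %.3f GB in %.3f ms -> %.2f GB/s\n", gb, ms, gb / (ms * 1e-3));

    cudaEventDestroy(t0);
    cudaEventDestroy(t1);
    return ms;
}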
d38211cc9467ca1e481c0672659fd4477827281e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* 2013 * Maciej Szeptuch * IIUWr */ #include <cstdlib> #include <cstdio> #include <GL/glew.h> #include <GL/glut.h> #include <cuda_gl_interop.h> #define EPS 0.00001 #define cudaErr(ans) { cudaAssert((ans), __FILE__, __LINE__); } inline void cudaAssert(hipError_t code, char *file, int line) { if(code != hipSuccess) { fprintf(stderr,"%s:%d CUDA: %s(%d)\n", file, line, hipGetErrorString(code), code); exit(code); } } inline int divup(int a, int b) { return (a + b - 1) / b; } // RANDOM __device__ unsigned int TausStep(unsigned int &z, unsigned int S1, unsigned int S2, unsigned int S3, unsigned int M); __device__ unsigned int LCGStep(unsigned int &z, unsigned int A, unsigned int C); __device__ float HybridTaus(unsigned int &z1, unsigned int &z2, unsigned int &z3, unsigned int &z4); __device__ unsigned int HybridTausInt(unsigned int &z1, unsigned int &z2, unsigned int &z3, unsigned int &z4); __device__ unsigned int rand(unsigned int salt); // GLUT and drawing stuff void menuDraw(void); void cudaDraw(void); void cudaInit(void); void glutDisplayCallback(void); void glutKeyboardCallback(unsigned char key, int, int); void glutReshapeCallback(int w, int h); void cleanup(void); __global__ void draw(int *picture, int width, int height, float scale, int steps, int posX, int posY, float *_matrix); __device__ __host__ void multiply(float &dx, float &dy, float sx, float sy, float *_matrix); int width = 800, height = 600, steps = 50, posX = 0, posY = 0; float scale = 10; float matrix[12] = { -0.40, 0.00, -1.00, 0.00, -0.40, 0.10, 0.76, -0.40, 0.00, 0.40, 0.76, 0.00, }; int *picture; GLuint data; int *cudaData; float *cudaMatrix; struct ActEdit { int m; int x; int y; } actEdit; dim3 blockSize(16,16); dim3 gridSize; int main(int argc, char *argv[]) { glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE); glutInitWindowSize(width, height); glutCreateWindow("CUDA GL IFS"); glutDisplayFunc(glutDisplayCallback); glutKeyboardFunc(glutKeyboardCallback); glutReshapeFunc(glutReshapeCallback); glewInit(); if(!glewIsSupported("GL_VERSION_2_1")) { fprintf(stderr, "OpenGL >= 2.1 required\n"); return 2; } cudaInit(); atexit(cleanup); glutMainLoop(); return 0; } void cleanup(void) { hipGLUnregisterBufferObject(data); glDeleteBuffers(1, &data); delete[] picture; hipFree(cudaMatrix); } void glutReshapeCallback(int w, int h) { width = w; height = h; cudaInit(); glViewport(0, 0, w, h); glLoadIdentity(); glOrtho(0.0, 1.0, 0.0, 1.0, 0.0, 1.0); } void glutKeyboardCallback(unsigned char key, int, int) { switch(key) { case '\e': case 'q': case 'Q': exit(3); break; case '\t': ++ actEdit.x; if(actEdit.x == 3) { ++ actEdit.y; actEdit.x = 0; } if(actEdit.y == 2) { ++ actEdit.m; actEdit.y = 0; } if(actEdit.m == 2) actEdit.m = 0; break; case '+': matrix[actEdit.m * 6 + actEdit.y * 3 + actEdit.x] += 0.01; cudaErr(hipMemcpy(cudaMatrix, matrix, 2 * 2 * 3 * sizeof(float), hipMemcpyHostToDevice)); break; case '-': matrix[actEdit.m * 6 + actEdit.y * 3 + actEdit.x] -= 0.01; cudaErr(hipMemcpy(cudaMatrix, matrix, 2 * 2 * 3 * sizeof(float), hipMemcpyHostToDevice)); break; case '[': scale += 0.1; break; case ']': scale -= 0.1; break; case ',': steps -= 1; break; case '.': steps += 1; break; case 'w': posY += 5; break; case 's': posY -= 5; break; case 'a': posX -= 5; break; case 'd': posX += 5; break; } menuDraw(); glutPostRedisplay(); } void glutDisplayCallback(void) { menuDraw(); cudaDraw(); hipDeviceSynchronize(); glClear(GL_COLOR_BUFFER_BIT); 
glDisable(GL_DEPTH_TEST); glRasterPos2i(0, 0); glDrawPixels(width, height, GL_RGBA, GL_UNSIGNED_BYTE, 0); glutSwapBuffers(); glutReportErrors(); } void cudaInit(void) { if(data) { hipGLUnregisterBufferObject(data); glDeleteBuffers(1, &data); delete[] picture; hipFree(cudaMatrix); } glGenBuffers(1, &data); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, data); glBufferData(GL_PIXEL_UNPACK_BUFFER, width * height * sizeof(GLubyte) * 4, 0, GL_STREAM_DRAW); picture = new int[width * height]; memset(picture, 0, width * height * sizeof(int)); cudaErr(hipGLRegisterBufferObject(data)); gridSize = dim3(divup(width, blockSize.x), divup(height, blockSize.y)); cudaErr(hipMalloc(&cudaMatrix, 2 * 2 * 3 * sizeof(float))); cudaErr(hipMemcpy(cudaMatrix, matrix, 2 * 2 * 3 * sizeof(float), hipMemcpyHostToDevice)); } void cudaDraw(void) { hipEvent_t start, end; hipEventCreate(&start); hipEventCreate(&end); hipEventRecord(start, NULL); cudaErr(hipGLMapBufferObject__((void **) &cudaData, data)); cudaErr(hipMemcpy(cudaData, picture, width * height * sizeof(int), hipMemcpyHostToDevice)); hipLaunchKernelGGL(( draw), dim3(gridSize), dim3(blockSize), 0, 0, cudaData, width, height, scale, steps, posX, posY, cudaMatrix); cudaErr(hipPeekAtLastError()); cudaErr(hipDeviceSynchronize()); cudaErr(hipGLUnmapBufferObject(data)); hipEventRecord(end, NULL); hipEventSynchronize(end); float gputotal = 0; hipEventElapsedTime(&gputotal, start, end); printf("========== ][ Kernel took: %5.2f ][ ==========\n", gputotal); } __device__ __host__ void multiply(float &dx, float &dy, float sx, float sy, float *_matrix) { dx = sx * _matrix[0] + sy * _matrix[1] + _matrix[2]; dy = sx * _matrix[3] + sy * _matrix[4] + _matrix[5]; } __global__ void draw(int *picture, int width, int height, float scale, int steps, int posX, int posY, float *_matrix) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int id = y * width + x; int salt = rand(id); if(x >= width || y >= height) return; float px = x - width / 2, py = y - height / 2, lx = 0.0, ly = 0.0; for(int t = 0; t < 32; ++ t) { multiply(px, py, px, py, _matrix + (salt < 0) * 6); salt = rand(salt); } for(int t = 0; t < steps; ++ t) { multiply(px, py, px, py, _matrix + (salt < 0) * 6); salt = rand(salt); if(abs(px - lx) < EPS && abs(py - ly) < EPS) break; int _x = px / scale * width + width / 2 - posX; int _y = py / scale * height + height / 2 - posY; if(0 <= _x && _x < width && 0 <= _y && _y < height) picture[_y * width + _x] = 0xFFFFFF; lx = px; ly = py; } } void menuDraw(void) { system("clear"); puts("========== ][ CUDA IFS ][ =========="); printf("Resolution: %dx%d | Position (%d, %d)\n", width, height, posX, posY); printf("Scale: %4.1f | Steps: %3d\n", 10. 
/ scale, steps); puts("Matrices: "); for(int m = 0; m < 2; ++ m) { puts(""); for(int y = 0; y < 2; ++ y) { printf("|"); for(int x = 0; x < 3; ++ x) { if(actEdit.m == m && actEdit.y == y && actEdit.x == x) printf("*%5.2f*", matrix[m * 6 + y * 3 + x]); else printf(" %5.2f ", matrix[m * 6 + y * 3 + x]); if(x == 1) printf("| |"); } puts("|"); } } puts(""); } __device__ unsigned int TausStep(unsigned int &z, unsigned int S1, unsigned int S2, unsigned int S3, unsigned int M) { unsigned int b = (((z << S1) ^ z) >> S2); return z = (((z & M) << S3) ^ b); } __device__ unsigned int LCGStep(unsigned int &z, unsigned int A, unsigned int C) { return z = (A * z + C); } __device__ float HybridTaus(unsigned int &z1, unsigned int &z2, unsigned int &z3, unsigned int &z4) { return 2.3283064365387e-10 * ( TausStep(z1, 13, 19, 12, 4294967294UL) ^ TausStep(z2, 2, 25, 4, 4294967288UL) ^ TausStep(z3, 3, 11, 17, 4294967280UL) ^ LCGStep( z4, 1664525, 1013904223UL) ); } __device__ unsigned int HybridTausInt(unsigned int &z1, unsigned int &z2, unsigned int &z3, unsigned int &z4) { return ( TausStep(z1, 13, 19, 12, 4294967294UL) ^ LCGStep( z4, 1664525, 1013904223UL) ); } __device__ unsigned int rand(unsigned int salt) { return HybridTausInt(salt, salt, salt, salt); }
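Stripped of the GPU and OpenGL plumbing, the draw kernel above is a plain iterated-function-system loop: each point is repeatedly passed through one of the two 2x3 affine maps in matrix[], with the map chosen by the sign of a Taus/LCG hash, and every visited point is plotted. A host-side sketch of that core loop follows; rand() stands in for the kernel's HybridTaus hash, and iterate_ifs is an illustrative name, not part of the program.

#include <cstdio>
#include <cstdlib>

// Host-side sketch of the per-pixel iteration done in draw()/multiply().
// 'maps' holds the same two 2x3 affine maps as the global matrix[] above;
// which map is applied each step is picked at random.
static void iterate_ifs(const float *maps, int steps, float px, float py)
{
    for (int t = 0; t < steps; ++t) {
        const float *m = maps + (rand() & 1) * 6;  // map 0 or map 1
        float nx = px * m[0] + py * m[1] + m[2];   // same form as multiply()
        float ny = px * m[3] + py * m[4] + m[5];
        px = nx;
        py = ny;
        printf("%f %f\n", px, py);                 // point that would be plotted
    }
}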
d38211cc9467ca1e481c0672659fd4477827281e.cu
/* 2013 * Maciej Szeptuch * IIUWr */ #include <cstdlib> #include <cstdio> #include <GL/glew.h> #include <GL/glut.h> #include <cuda_gl_interop.h> #define EPS 0.00001 #define cudaErr(ans) { cudaAssert((ans), __FILE__, __LINE__); } inline void cudaAssert(cudaError_t code, char *file, int line) { if(code != cudaSuccess) { fprintf(stderr,"%s:%d CUDA: %s(%d)\n", file, line, cudaGetErrorString(code), code); exit(code); } } inline int divup(int a, int b) { return (a + b - 1) / b; } // RANDOM __device__ unsigned int TausStep(unsigned int &z, unsigned int S1, unsigned int S2, unsigned int S3, unsigned int M); __device__ unsigned int LCGStep(unsigned int &z, unsigned int A, unsigned int C); __device__ float HybridTaus(unsigned int &z1, unsigned int &z2, unsigned int &z3, unsigned int &z4); __device__ unsigned int HybridTausInt(unsigned int &z1, unsigned int &z2, unsigned int &z3, unsigned int &z4); __device__ unsigned int rand(unsigned int salt); // GLUT and drawing stuff void menuDraw(void); void cudaDraw(void); void cudaInit(void); void glutDisplayCallback(void); void glutKeyboardCallback(unsigned char key, int, int); void glutReshapeCallback(int w, int h); void cleanup(void); __global__ void draw(int *picture, int width, int height, float scale, int steps, int posX, int posY, float *_matrix); __device__ __host__ void multiply(float &dx, float &dy, float sx, float sy, float *_matrix); int width = 800, height = 600, steps = 50, posX = 0, posY = 0; float scale = 10; float matrix[12] = { -0.40, 0.00, -1.00, 0.00, -0.40, 0.10, 0.76, -0.40, 0.00, 0.40, 0.76, 0.00, }; int *picture; GLuint data; int *cudaData; float *cudaMatrix; struct ActEdit { int m; int x; int y; } actEdit; dim3 blockSize(16,16); dim3 gridSize; int main(int argc, char *argv[]) { glutInit(&argc, argv); glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE); glutInitWindowSize(width, height); glutCreateWindow("CUDA GL IFS"); glutDisplayFunc(glutDisplayCallback); glutKeyboardFunc(glutKeyboardCallback); glutReshapeFunc(glutReshapeCallback); glewInit(); if(!glewIsSupported("GL_VERSION_2_1")) { fprintf(stderr, "OpenGL >= 2.1 required\n"); return 2; } cudaInit(); atexit(cleanup); glutMainLoop(); return 0; } void cleanup(void) { cudaGLUnregisterBufferObject(data); glDeleteBuffers(1, &data); delete[] picture; cudaFree(cudaMatrix); } void glutReshapeCallback(int w, int h) { width = w; height = h; cudaInit(); glViewport(0, 0, w, h); glLoadIdentity(); glOrtho(0.0, 1.0, 0.0, 1.0, 0.0, 1.0); } void glutKeyboardCallback(unsigned char key, int, int) { switch(key) { case '\e': case 'q': case 'Q': exit(3); break; case '\t': ++ actEdit.x; if(actEdit.x == 3) { ++ actEdit.y; actEdit.x = 0; } if(actEdit.y == 2) { ++ actEdit.m; actEdit.y = 0; } if(actEdit.m == 2) actEdit.m = 0; break; case '+': matrix[actEdit.m * 6 + actEdit.y * 3 + actEdit.x] += 0.01; cudaErr(cudaMemcpy(cudaMatrix, matrix, 2 * 2 * 3 * sizeof(float), cudaMemcpyHostToDevice)); break; case '-': matrix[actEdit.m * 6 + actEdit.y * 3 + actEdit.x] -= 0.01; cudaErr(cudaMemcpy(cudaMatrix, matrix, 2 * 2 * 3 * sizeof(float), cudaMemcpyHostToDevice)); break; case '[': scale += 0.1; break; case ']': scale -= 0.1; break; case ',': steps -= 1; break; case '.': steps += 1; break; case 'w': posY += 5; break; case 's': posY -= 5; break; case 'a': posX -= 5; break; case 'd': posX += 5; break; } menuDraw(); glutPostRedisplay(); } void glutDisplayCallback(void) { menuDraw(); cudaDraw(); cudaThreadSynchronize(); glClear(GL_COLOR_BUFFER_BIT); glDisable(GL_DEPTH_TEST); glRasterPos2i(0, 0); glDrawPixels(width, height, 
GL_RGBA, GL_UNSIGNED_BYTE, 0); glutSwapBuffers(); glutReportErrors(); } void cudaInit(void) { if(data) { cudaGLUnregisterBufferObject(data); glDeleteBuffers(1, &data); delete[] picture; cudaFree(cudaMatrix); } glGenBuffers(1, &data); glBindBuffer(GL_PIXEL_UNPACK_BUFFER, data); glBufferData(GL_PIXEL_UNPACK_BUFFER, width * height * sizeof(GLubyte) * 4, 0, GL_STREAM_DRAW); picture = new int[width * height]; memset(picture, 0, width * height * sizeof(int)); cudaErr(cudaGLRegisterBufferObject(data)); gridSize = dim3(divup(width, blockSize.x), divup(height, blockSize.y)); cudaErr(cudaMalloc(&cudaMatrix, 2 * 2 * 3 * sizeof(float))); cudaErr(cudaMemcpy(cudaMatrix, matrix, 2 * 2 * 3 * sizeof(float), cudaMemcpyHostToDevice)); } void cudaDraw(void) { cudaEvent_t start, end; cudaEventCreate(&start); cudaEventCreate(&end); cudaEventRecord(start, NULL); cudaErr(cudaGLMapBufferObject((void **) &cudaData, data)); cudaErr(cudaMemcpy(cudaData, picture, width * height * sizeof(int), cudaMemcpyHostToDevice)); draw<<<gridSize, blockSize>>>(cudaData, width, height, scale, steps, posX, posY, cudaMatrix); cudaErr(cudaPeekAtLastError()); cudaErr(cudaDeviceSynchronize()); cudaErr(cudaGLUnmapBufferObject(data)); cudaEventRecord(end, NULL); cudaEventSynchronize(end); float gputotal = 0; cudaEventElapsedTime(&gputotal, start, end); printf("========== ][ Kernel took: %5.2f ][ ==========\n", gputotal); } __device__ __host__ void multiply(float &dx, float &dy, float sx, float sy, float *_matrix) { dx = sx * _matrix[0] + sy * _matrix[1] + _matrix[2]; dy = sx * _matrix[3] + sy * _matrix[4] + _matrix[5]; } __global__ void draw(int *picture, int width, int height, float scale, int steps, int posX, int posY, float *_matrix) { int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; int id = y * width + x; int salt = rand(id); if(x >= width || y >= height) return; float px = x - width / 2, py = y - height / 2, lx = 0.0, ly = 0.0; for(int t = 0; t < 32; ++ t) { multiply(px, py, px, py, _matrix + (salt < 0) * 6); salt = rand(salt); } for(int t = 0; t < steps; ++ t) { multiply(px, py, px, py, _matrix + (salt < 0) * 6); salt = rand(salt); if(abs(px - lx) < EPS && abs(py - ly) < EPS) break; int _x = px / scale * width + width / 2 - posX; int _y = py / scale * height + height / 2 - posY; if(0 <= _x && _x < width && 0 <= _y && _y < height) picture[_y * width + _x] = 0xFFFFFF; lx = px; ly = py; } } void menuDraw(void) { system("clear"); puts("========== ][ CUDA IFS ][ =========="); printf("Resolution: %dx%d | Position (%d, %d)\n", width, height, posX, posY); printf("Scale: %4.1f | Steps: %3d\n", 10. 
/ scale, steps); puts("Matrices: "); for(int m = 0; m < 2; ++ m) { puts(""); for(int y = 0; y < 2; ++ y) { printf("|"); for(int x = 0; x < 3; ++ x) { if(actEdit.m == m && actEdit.y == y && actEdit.x == x) printf("*%5.2f*", matrix[m * 6 + y * 3 + x]); else printf(" %5.2f ", matrix[m * 6 + y * 3 + x]); if(x == 1) printf("| |"); } puts("|"); } } puts(""); } __device__ unsigned int TausStep(unsigned int &z, unsigned int S1, unsigned int S2, unsigned int S3, unsigned int M) { unsigned int b = (((z << S1) ^ z) >> S2); return z = (((z & M) << S3) ^ b); } __device__ unsigned int LCGStep(unsigned int &z, unsigned int A, unsigned int C) { return z = (A * z + C); } __device__ float HybridTaus(unsigned int &z1, unsigned int &z2, unsigned int &z3, unsigned int &z4) { return 2.3283064365387e-10 * ( TausStep(z1, 13, 19, 12, 4294967294UL) ^ TausStep(z2, 2, 25, 4, 4294967288UL) ^ TausStep(z3, 3, 11, 17, 4294967280UL) ^ LCGStep( z4, 1664525, 1013904223UL) ); } __device__ unsigned int HybridTausInt(unsigned int &z1, unsigned int &z2, unsigned int &z3, unsigned int &z4) { return ( TausStep(z1, 13, 19, 12, 4294967294UL) ^ LCGStep( z4, 1664525, 1013904223UL) ); } __device__ unsigned int rand(unsigned int salt) { return HybridTausInt(salt, salt, salt, salt); }
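cudaGLRegisterBufferObject, cudaGLMapBufferObject and cudaGLUnmapBufferObject, used in cudaInit() and cudaDraw() above, are the old interop entry points and are deprecated in current toolkits. The same pixel-unpack buffer can be shared through the cudaGraphicsResource API instead; the sketch below reuses the file's cudaErr macro and assumes the same GLuint buffer object. It is an optional modernization, not a required change.

#include <cuda_gl_interop.h>

// Sketch: share the GL pixel-unpack buffer with CUDA via cudaGraphicsResource,
// the non-deprecated replacement for cudaGLRegisterBufferObject.
static cudaGraphicsResource_t pboResource = NULL;

static void registerPBO(GLuint pbo)                 // once, after glBufferData
{
    cudaErr(cudaGraphicsGLRegisterBuffer(&pboResource, pbo,
                                         cudaGraphicsRegisterFlagsNone));
}

static int *mapPBO(void)                            // per frame, before the kernel
{
    int *devPtr = NULL;
    size_t mappedSize = 0;
    cudaErr(cudaGraphicsMapResources(1, &pboResource, 0));
    cudaErr(cudaGraphicsResourceGetMappedPointer((void **) &devPtr,
                                                 &mappedSize, pboResource));
    return devPtr;                                  // valid until unmapPBO()
}

static void unmapPBO(void)                          // per frame, after the kernel
{
    cudaErr(cudaGraphicsUnmapResources(1, &pboResource, 0));
}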
8eb09e11cf6db3c718002f6379193d14ab2c793b.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
 * =====================================================================================
 *
 *       Filename:  glsltest_cuda.cu
 *
 *    Description:
 *
 *        Version:  1.0
 *        Created:  2016-08-11 18:15:02
 *       Revision:  none
 *       Compiler:  gcc
 *
 *         Author:  YOUR NAME (),
 *   Organization:
 *
 * =====================================================================================
 */

__global__ void moveVAO_cuda_kernel(float* v)
{
    unsigned int tx = threadIdx.x;
    int i = tx * 3 + 2;
    v[i] += 0.025;
    v[i] = ( v[i] > 1.0 ) ? -1.0f : v[i];
}

void moveVAO_cuda(float* v, int vc)
{
    hipLaunchKernelGGL(( moveVAO_cuda_kernel), dim3(1), dim3(vc), 0, 0, v);
}
8eb09e11cf6db3c718002f6379193d14ab2c793b.cu
/*
 * =====================================================================================
 *
 *       Filename:  glsltest_cuda.cu
 *
 *    Description:
 *
 *        Version:  1.0
 *        Created:  2016-08-11 18:15:02
 *       Revision:  none
 *       Compiler:  gcc
 *
 *         Author:  YOUR NAME (),
 *   Organization:
 *
 * =====================================================================================
 */

__global__ void moveVAO_cuda_kernel(float* v)
{
    unsigned int tx = threadIdx.x;
    int i = tx * 3 + 2;
    v[i] += 0.025;
    v[i] = ( v[i] > 1.0 ) ? -1.0f : v[i];
}

void moveVAO_cuda(float* v, int vc)
{
    moveVAO_cuda_kernel<<<1, vc>>>(v);
}
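moveVAO_cuda() launches a single block of vc threads, so the vertex count is limited by the per-block thread cap (1024 on current GPUs) and there is no bounds check. A multi-block, bounds-checked variant of the same z-update could look like the sketch below; the 3-floats-per-vertex stride and the wrap past 1.0 are taken from the kernel above, while the 256-thread block size and the "2" names are arbitrary illustrations.

// Sketch: same vertex update, but safe for any vertex count.
__global__ void moveVAO_cuda_kernel2(float* v, int vc)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= vc) return;                    // guard the last partial block
    int i = idx * 3 + 2;                      // z component of vertex idx
    v[i] += 0.025f;
    v[i] = ( v[i] > 1.0f ) ? -1.0f : v[i];    // wrap back to the bottom
}

void moveVAO_cuda2(float* v, int vc)
{
    int block = 256;                          // any value <= 1024 works
    int grid  = (vc + block - 1) / block;
    moveVAO_cuda_kernel2<<<grid, block>>>(v, vc);
}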
80e25ef6ccd7bd9e20f206375657b981176ca01f.hip
// !!! This is a file automatically generated by hipify!!! /* * --------------------------------------------------------------------------- * Copyright 2014 Nervana Systems Inc. All rights reserved. * --------------------------------------------------------------------------- */ #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION #include <Python.h> #include <arrayobject.h> #include <assert.h> #include <helper_cuda.h> #include <rocblas.h> #include <time.h> #include <vector> #include <execinfo.h> #include <signal.h> #include "../../util/include/matrix.h" #include "../../util/include/queue.h" #include "../../nvmatrix/include/nvmatrix.cuh" #include "../../cudaconv3/include/cudaconv2.cuh" #include "../include/cudanetmat.cuh" #include "../../cudaconvnet/include/layer_kernels.cuh" extern "C" { int elementwise_check3(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target); int elementwise_check2(cudanetmat* mat1, cudanetmat* target); /* ------------------------------ CUBLAS init/shutdown ------------------------------ */ inline bool check_cublas_error() { cublasStatus status = hipblasGetError(); return status != HIPBLAS_STATUS_SUCCESS; } inline bool checkCUDAError() { hipError_t err = hipGetLastError(); if (hipSuccess != err) printf("%s\n", hipGetErrorString( err)); return hipSuccess != err; } extern const char* get_last_cuda_error() { hipError_t err = hipGetLastError(); return hipGetErrorString( err); } extern int cublas_init() { if (NVMatrix::getNumCublasHandles() == 0) { NVMatrix::initCublas(); } if (check_cublas_error()) return CUBLAS_ERROR; else return 0; } extern int cublas_shutdown() { if (NVMatrix::getNumCublasHandles() > 0) { NVMatrix::destroyCublas(); hipDeviceReset(); } return 0; } extern void init_random(unsigned long long seed) { if (!NVMatrix::isRndInitialized()) NVMatrix::initRandom(seed); } extern void init_random_no_seed() { if (!NVMatrix::isRndInitialized()) NVMatrix::initRandom(); } extern void destroy_random() { if (NVMatrix::isRndInitialized()) NVMatrix::destroyRandom(); } extern int get_device_id() { // DEVICE_HOST is -1 // DEVICE_NULL is -2 return NVMatrix::getDeviceID(); } extern void sync_stream() { NVMatrix::syncStream(NVMatrix::getDefaultStream()); } extern void set_device_id(int d) { NVMatrix::setDeviceID(d); } extern int get_peer_access(int srcDevice, int tgtDevice) { return NVMatrix::canAccessPeer(srcDevice, tgtDevice); } extern int cuda_set_device(int deviceId) { hipSetDevice(deviceId); if (checkCUDAError()) return CUDA_ERROR; else return 0; } extern int get_num_devices(int* err_code) { int numd; *err_code = hipGetDeviceCount(&numd); return numd; } /* ------------------------------ Utility routines ------------------------------ */ int elementwise_check3(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target) { if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; return 0; } int elementwise_check2(cudanetmat* mat, cudanetmat* target) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; return 0; } extern int get_leading_dimension(cudanetmat* mat) { return mat->is_trans ? 
mat->size[1] : mat->size[0]; } extern int get_nonleading_dimension(cudanetmat* mat) { return mat->is_trans ? mat->size[0] : mat->size[1]; } extern void set_transpose(cudanetmat* mat, int is_trans) { mat->is_trans = is_trans; } inline char get_transpose_char(cudanetmat* mat) { return mat->is_trans ? 't' : 'n'; } extern void cuda_sync_threads() { hipDeviceSynchronize(); } /* ------------------------------ Allocating/moving data ------------------------------ */ extern int allocate_device_memory(cudanetmat* mat) { mat->data_device = new NVMatrix(mat->size[0], mat->size[1], mat->is_trans ? true : false); mat->on_device = 1; return 0; } extern int copy_to_host(cudanetmat* mat) { if (mat->on_device) { mat->data_device->copyToHost(*(mat->data_host)); } else return ERROR_NOT_ON_DEVICE; return 0; } extern int set_host_mat(cudanetmat* mat, float *data) { if (mat->data_host) delete mat->data_host; mat->data_host = new Matrix(data, (int64) mat->size[0], (int64) mat->size[1], mat->is_trans); return 0; } extern int get_data_device_id(cudanetmat* mat) { if (!mat->on_device) { return ERROR_NOT_ON_DEVICE; } return mat->data_device->getDataDeviceID(); } extern int copy_to_device(cudanetmat* mat) { if (!mat->on_device) { allocate_device_memory(mat); mat->data_device->copyFromHost(*(mat->data_host), true); } else { mat->data_device->copyFromHost(*(mat->data_host), true); } mat->is_trans = mat->data_device->isTrans(); mat->size[0] = mat->data_device->getNumRows(); mat->size[1] = mat->data_device->getNumCols(); return 0; } extern int copy_from(cudanetmat* mat, float* data, bool is_trans) { Matrix mat_data(data, (int64) mat->size[0], (int64) mat->size[1], is_trans); mat->data_device->copyFromHost(mat_data, false); return 0; } extern int copy_on_device(cudanetmat* mat1, cudanetmat* mat2) { if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat1->data_device->copy(*(mat2->data_device)); return 0; } extern void init_from_array(cudanetmat* mat, float* data, int m, int n) { mat->data_host = new Matrix(data, (int64) m, (int64) n, false); mat->size[0] = m; mat->size[1] = n; mat->on_device = 0; mat->on_host = 1; mat->is_trans = 0; mat->owns_data = 1; } extern int init_empty(cudanetmat* mat, int m, int n) { mat->size[0] = m; mat->size[1] = n; mat->on_device = 0; mat->on_host = 0; mat->is_trans = 0; mat->owns_data = 1; return allocate_device_memory(mat); } extern int assign_scalar(cudanetmat* mat, float alpha) { if (!mat->on_device) return ERROR_NOT_ON_DEVICE; mat->data_device->assign(alpha); return 0; } extern int add_scalar(cudanetmat* mat, float alpha, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->addScalar(alpha, *(target->data_device)); return 0; } extern int add_elementwise(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target) { int errcheck = elementwise_check3(mat1, mat2, target); if (errcheck !=0) return errcheck; mat1->data_device->add(*(mat2->data_device), *(target->data_device)); return 0; } extern int subtract_elementwise(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target) { int errcheck = elementwise_check3(mat1, mat2, target); if (errcheck !=0) return errcheck; mat1->data_device->subtract(*(mat2->data_device), *(target->data_device)); return 0; } extern int divide_elementwise(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target) { int errcheck = elementwise_check3(mat1, mat2, target); if (errcheck !=0) return errcheck; 
mat1->data_device->eltwiseDivide(*(mat2->data_device), *(target->data_device)); return 0; } /* Elementwise multiplication of 2 matrices */ extern int mult_elementwise(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target) { int errcheck = elementwise_check3(mat1, mat2, target); if (errcheck !=0) return errcheck; mat1->data_device->eltwiseMult(*(mat2->data_device), *(target->data_device)); return 0; } extern int mult_by_scalar(cudanetmat* mat, float alpha, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->scale(alpha, *(target->data_device)); return 0; } extern int divide_by_scalar(cudanetmat* mat, float alpha, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::DivByScalar(alpha), *(target->data_device)); return 0; } extern int sign(cudanetmat* mat, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::Sign(), *(target->data_device)); return 0; } extern int apply_sigmoid(cudanetmat* mat, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::Logistic(), *(target->data_device)); return 0; } extern int apply_tanh(cudanetmat* mat, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::Tanh(), *(target->data_device)); return 0; } extern int apply_soft_threshold(cudanetmat* mat, float alpha, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::SoftThreshold(alpha), *(target->data_device)); return 0; } extern int apply_abs(cudanetmat* mat, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::Abs(), *(target->data_device)); return 0; } extern int apply_log_1_plus_exp(cudanetmat* mat, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::Log1PlusExp(), *(target->data_device)); return 0; } extern int apply_log(cudanetmat* mat, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::Log(), *(target->data_device)); return 0; } extern int apply_clip_range(cudanetmat* mat, cudanetmat* target, float lower, float upper) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; mat->data_device->apply(NVMatrixOps::ClipUpperLower(lower, upper), *(target->data_device)); return 0; } extern int apply_exp(cudanetmat* mat, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::Exp(), *(target->data_device)); return 0; } extern int apply_gamma(cudanetmat* mat, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); mat->data_device->apply(NVMatrixOps::Gamma(), *(target->data_device)); return 0; } extern int apply_lgamma(cudanetmat* mat, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); mat->data_device->apply(NVMatrixOps::LGamma(), *(target->data_device)); return 0; } extern int apply_sqrt(cudanetmat* mat, cudanetmat* target) { int errcheck = 
elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::Sqrt(), *(target->data_device)); return 0; } extern int apply_pow(cudanetmat* mat, float pow, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::Pow(pow),*(target->data_device)); return 0; } // For convolution, krizhevsky expects // Weights as (K1xK2xC) Rows x F Columns in 'C' order // Images as (D1xD2xC) Rows x (N) Columns in 'C' order // Target as (OD1xOD2xF) Rows x (N) Columsn in 'C' order extern int convolution(cudanetmat* wts, cudanetmat* imgs, cudanetmat* targets, int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride, int numImgColors, int numGroups, bool localconv) { // int numFilterColors = numImgColors / numGroups; int numFilters = wts->size[1]; int numModules = numModulesX * numModulesY; int numImages = imgs->size[1]; int imgPixels = imgs->size[0]/numImgColors; int imgSizeX = imgPixels / imgSizeY; if (wts->is_trans || imgs->is_trans || targets->is_trans) { return ERROR_TRANSPOSEDNESS; } if (imgPixels != imgSizeY*imgSizeX) return ERROR_CONV_DIMENSION; if (numFilters % 16 != 0) return ERROR_CONV_NUM_FILTERS; if (targets->size[0] != numFilters * numModules || targets->size[1] != numImages) return ERROR_INCOMPATIBLE_DIMENSIONS; if (!localconv) { convFilterActs(*(imgs->data_device), *(wts->data_device), *(targets->data_device), imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride, numImgColors, numGroups); } else { localFilterActs(*(imgs->data_device), *(wts->data_device), *(targets->data_device), imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride, numImgColors, numGroups); } return 0; } extern int convolution_back_weights(cudanetmat* hidActs, cudanetmat* imgs, cudanetmat* targets, int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int sumWidth, float scaleTargets, float scaleOutputs, bool localconv) { // int numFilterColors = numImgColors / numGroups; int numFilters = targets->size[1]; // int numModules = numModulesX * numModulesX; // int numImages = imgs->size[1]; int imgPixels = imgs->size[0]/numImgColors; int imgSizeX = imgPixels / imgSizeY; int filterPixels = filterSize*filterSize; int filterChannels = numImgColors/numGroups; int outWidth = DIVUP(numModulesX, sumWidth); int outChunks = outWidth * outWidth; if (hidActs->is_trans || imgs->is_trans || targets->is_trans) { return ERROR_TRANSPOSEDNESS; } if (imgPixels != imgSizeY*imgSizeX) return ERROR_CONV_DIMENSION; if (numFilters % 16 != 0) return ERROR_CONV_NUM_FILTERS; if (!localconv) { if (targets->size[0] != filterChannels * filterPixels || targets->size[1] != numFilters) return ERROR_INCOMPATIBLE_DIMENSIONS; bool doPartialSum = sumWidth < numModulesX; NVMatrix _weightGradTmp; NVMatrix& tgt = doPartialSum ? _weightGradTmp : *(targets->data_device); convWeightActs(*(imgs->data_device), *(hidActs->data_device), tgt, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, sumWidth, doPartialSum ? 
0 : scaleTargets, scaleOutputs); if (doPartialSum) { int pScaleTargets = scaleTargets > 0; // TODO determine whether this makes sense _weightGradTmp.reshape(outChunks, filterChannels * filterPixels * numFilters); targets->data_device->addSum(_weightGradTmp, 0, pScaleTargets, 1); targets->data_device->reshape(filterChannels * filterPixels, numFilters); } } else { localWeightActs(*(imgs->data_device), *(hidActs->data_device), *(targets->data_device), imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutputs); } return 0; } extern int convolution_back_errors(cudanetmat* wts, cudanetmat* imgs, cudanetmat* targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, bool localconv) { int numFilterColors = numImgColors / numGroups; int numFilters = wts->size[1]; // int numModules = numModulesX * numModulesX; int numImages = imgs->size[1]; int numModules = imgs->size[0]/numFilters; if (wts->is_trans || imgs->is_trans || targets->is_trans) { return ERROR_TRANSPOSEDNESS; } int filterModuleMult = localconv ? 1 : numModules; int filterPixels = wts->size[0] / (filterModuleMult * numFilterColors); int filterSize = sqrt(filterPixels); int imgPixels = imgSizeY * imgSizeX; if (numFilters % 16 != 0) return ERROR_CONV_NUM_FILTERS; if (targets->size[0] != numImgColors * imgPixels || targets->size[1] != numImages) return ERROR_INCOMPATIBLE_DIMENSIONS; if (!localconv) { convImgActs(*(imgs->data_device), *(wts->data_device), *(targets->data_device), imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, 1); } else { localImgActs(*(imgs->data_device), *(wts->data_device), *(targets->data_device), imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, 1); } return 0; } extern int dot(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target, float beta, float alpha) { if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (get_leading_dimension(mat1) != get_leading_dimension(target) || get_nonleading_dimension(mat2) != get_nonleading_dimension(target) || get_nonleading_dimension(mat1) != get_leading_dimension(mat2)) { return ERROR_INCOMPATIBLE_DIMENSIONS; } int m = get_leading_dimension(mat1), k = get_leading_dimension(mat2), n = get_nonleading_dimension(mat2); // cublas, why? 
// had to do some weirdness here to avoid forcing target to transpose (added function to nvmatrix to handle row major matrices) target->data_device->addProductRM(*(mat1->data_device), *(mat2->data_device), beta, alpha, mat1->is_trans, mat2->is_trans); return 0; } extern float vdot(cudanetmat* mat1, cudanetmat* mat2, int* err_code) { float res; if (!mat1->on_device || !mat2->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) { *err_code = ERROR_TRANSPOSEDNESS; return 0; } if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) { *err_code = ERROR_INCOMPATIBLE_DIMENSIONS; return 0; } res = mat1->data_device->dotProduct(*(mat2->data_device)); *err_code = 0; return res; } extern int add_vector(cudanetmat* mat, cudanetmat* vec, float scaleVec, cudanetmat* target) { if (!mat->on_device || !vec->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; if (target == vec) return ERROR_UNSUPPORTED; if (vec->size[0] != 1 && vec->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS; if (vec->size[0] != mat->size[0] && vec->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->addVector(*(vec->data_device), scaleVec, *(target->data_device)); return 0; } extern int mat_vector_op(cudanetmat* mat, cudanetmat* vec, float scaleVec, cudanetmat* target, char opchar) { if (!mat->on_device || !vec->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; if (target == vec) return ERROR_UNSUPPORTED; if (vec->size[0] != 1 && vec->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS; if (vec->size[0] != mat->size[0] && vec->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; switch (opchar) { case 'a' : mat->data_device->addVector(*(vec->data_device), scaleVec, *(target->data_device)); break; case 's' : mat->data_device->addVector(*(vec->data_device), -scaleVec, *(target->data_device)); break; case 'm' : mat->data_device->eltwiseMultByVector(*(vec->data_device), *(target->data_device)); break; case 'd' : mat->data_device->eltwiseDivideByVector(*(vec->data_device), *(target->data_device)); break; case 'e' : mat->data_device->equalsVector(*(vec->data_device), *(target->data_device)); break; default: { printf("This char is unsupported: %c\n", opchar); return ERROR_UNSUPPORTED; } } return 0; } extern int quantize(cudanetmat* mat, int intwidth, int totalwidth) { if (!mat->on_device) return ERROR_NOT_ON_DEVICE; mat->data_device->quantizeValues(intwidth, abs(intwidth-totalwidth)); return 0; } extern int randomize_gaussian(cudanetmat* mat, float mean, float stdev) { if (!mat->on_device) return ERROR_NOT_ON_DEVICE; if (!NVMatrix::isRndInitialized()) return ERROR_RND_NOT_INITIALIZED; mat->data_device->randomizeGaussian(mean, stdev); return 0; } extern int randomize_uniform(cudanetmat* mat) { if (!mat->on_device) return ERROR_NOT_ON_DEVICE; if (!NVMatrix::isRndInitialized()) return ERROR_RND_NOT_INITIALIZED; mat->data_device->randomizeUniform(); return 0; } extern int randomize_uniform_thresh(cudanetmat* mat, float thresh) { if (!mat->on_device) return ERROR_NOT_ON_DEVICE; if (!NVMatrix::isRndInitialized()) return ERROR_RND_NOT_INITIALIZED; mat->data_device->randomizeUniform(); mat->data_device->apply(NVMatrixOps::DropoutKernelOperator(thresh)); return 0; } extern int randomize_binary(cudanetmat* mat) { if (!mat->on_device) return ERROR_NOT_ON_DEVICE; if 
(!NVMatrix::isRndInitialized()) return ERROR_RND_NOT_INITIALIZED; mat->data_device->binarizeProbs(); return 0; } extern int add_noise_gaussian(cudanetmat* mat, float stdev) { if (!mat->on_device) return ERROR_NOT_ON_DEVICE; if (!NVMatrix::isRndInitialized()) return ERROR_RND_NOT_INITIALIZED; mat->data_device->addGaussianNoise(stdev); return 0; } extern int add_noise_uniform(cudanetmat* mat, float minRange, float maxRange) { if (!mat->on_device) return ERROR_NOT_ON_DEVICE; if (!NVMatrix::isRndInitialized()) return ERROR_RND_NOT_INITIALIZED; mat->data_device->addUniformNoise(minRange, maxRange); return 0; } extern int unpool_forward(cudanetmat* smallMat, cudanetmat* largeMat, int channels, int sizeX, int smallX, int largeX) { if (!smallMat->on_device || !largeMat->on_device) return ERROR_NOT_ON_DEVICE; if (smallMat->is_trans || largeMat->is_trans) return ERROR_TRANSPOSEDNESS; convLocalUnpoolForward(*(smallMat->data_device), *(largeMat->data_device), channels, sizeX, smallX, largeX); largeMat->size[0] = largeMat->data_device->getNumRows(); largeMat->size[1] = largeMat->data_device->getNumCols(); return 0; } extern int unpool_backward(cudanetmat* largeMat, cudanetmat* smallMat, int channels, int sizeX, int smallX, int largeX) { if (!smallMat->on_device || !largeMat->on_device) return ERROR_NOT_ON_DEVICE; if (smallMat->is_trans || largeMat->is_trans) return ERROR_TRANSPOSEDNESS; convLocalUnpoolBackward(*(largeMat->data_device), *(smallMat->data_device), channels, sizeX, smallX, largeX); smallMat->size[0] = smallMat->data_device->getNumRows(); smallMat->size[1] = smallMat->data_device->getNumCols(); return 0; } extern int max_pool(cudanetmat* mat, cudanetmat* target, int channels, int sizeX, int start, int stride, int outputsX) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; convLocalPool(*(mat->data_device), *(target->data_device), channels, sizeX, start, stride, outputsX, MaxPooler()); target->size[0] = target->data_device->getNumRows(); target->size[1] = target->data_device->getNumCols(); return 0; } extern int max_abs_pool(cudanetmat* mat, cudanetmat* target, int channels, int sizeX, int start, int stride, int outputsX) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; convLocalPool(*(mat->data_device), *(target->data_device), channels, sizeX, start, stride, outputsX, MaxAbsPooler()); target->size[0] = target->data_device->getNumRows(); target->size[1] = target->data_device->getNumCols(); return 0; } extern int avg_pool(cudanetmat* mat, cudanetmat* target, int channels, int sizeX, int start, int stride, int outputsX) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; convLocalPool(*(mat->data_device), *(target->data_device), channels, sizeX, start, stride, outputsX, AvgPooler()); target->size[0] = target->data_device->getNumRows(); target->size[1] = target->data_device->getNumCols(); return 0; } extern int l2_pool(cudanetmat* mat, cudanetmat* target, int channels, int sizeX, int start, int stride, int outputsX) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; convLocalPool(*(mat->data_device), *(target->data_device), channels, sizeX, start, stride, outputsX, L2Pooler()); target->size[0] = target->data_device->getNumRows(); 
target->size[1] = target->data_device->getNumCols(); return 0; } extern int max_pool_undo(cudanetmat* imgs, cudanetmat* maxGrads, cudanetmat* maxActs, cudanetmat* target, int sizeX, int start, int stride, int outputsX) { if (!imgs->on_device || !maxGrads->on_device || !maxActs->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (imgs->is_trans || maxGrads->is_trans || maxActs->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; if (maxGrads->size[0]!=maxActs->size[0] || maxGrads->size[1] != maxActs->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (imgs->size[0]!=target->size[0] || imgs->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; convLocalMaxUndo(*(imgs->data_device), *(maxGrads->data_device), *(maxActs->data_device), *(target->data_device), sizeX, start, stride, outputsX); target->size[0] = target->data_device->getNumRows(); target->size[1] = target->data_device->getNumCols(); return 0; } extern int avg_pool_undo(cudanetmat* avgGrads, cudanetmat* target, int sizeX, int start, int stride, int outputsX, int imgSizeX) { if (!avgGrads->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (avgGrads->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; convLocalAvgUndo(*(avgGrads->data_device), *(target->data_device), sizeX, start, stride, outputsX, imgSizeX); target->size[0] = target->data_device->getNumRows(); target->size[1] = target->data_device->getNumCols(); return 0; } extern int l2_pool_undo(cudanetmat* imgs, cudanetmat* l2Grads, cudanetmat* l2Acts, cudanetmat* target, int sizeX, int start, int stride, int outputsX) { if (!imgs->on_device || !l2Grads->on_device || !l2Acts->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (imgs->is_trans || l2Grads->is_trans || l2Acts->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; if (l2Grads->size[0]!=l2Acts->size[0] || l2Grads->size[1] != l2Acts->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (imgs->size[0]!=target->size[0] || imgs->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; convLocalL2Undo(*(imgs->data_device), *(l2Grads->data_device), *(l2Acts->data_device), *(target->data_device), sizeX, start, stride, outputsX); target->size[0] = target->data_device->getNumRows(); target->size[1] = target->data_device->getNumCols(); return 0; } extern int crossmap_response_norm(cudanetmat* mat, cudanetmat* target, int channels, int sizeX, float scale, float power) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; convResponseNormCrossMap(*(mat->data_device), *(target->data_device), channels, sizeX, scale, power, 1.0, false); target->size[0] = target->data_device->getNumRows(); target->size[1] = target->data_device->getNumCols(); return 0; } // v = respGrads, inputs = imgs, getActs = respActs // convResponseNormUndo(v, _denoms, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); extern int crossmap_response_norm_undo(cudanetmat* imgs, cudanetmat* respGrads, cudanetmat* respActs, cudanetmat* target, int channels, int sizeX, float scale, float power, float scaleTargets) { if (!imgs->on_device || !respGrads->on_device || !respActs->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (imgs->is_trans || respGrads->is_trans || respActs->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; if (respGrads->size[0]!=respActs->size[0] || respGrads->size[1] != respActs->size[1]) return 
ERROR_INCOMPATIBLE_DIMENSIONS; if (imgs->size[0]!=target->size[0] || imgs->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; convResponseNormCrossMapUndo(*(respGrads->data_device), *(imgs->data_device), *(respActs->data_device), *(target->data_device), channels, sizeX, scale, power, 1.0, false, scaleTargets, 1); target->size[0] = target->data_device->getNumRows(); target->size[1] = target->data_device->getNumCols(); return 0; } extern int local_contrast_norm(cudanetmat* mat, cudanetmat* meanDiffs, cudanetmat *denoms, cudanetmat* target, int imgSizeX, int channels, int sizeX, float scale, float power) { if (!meanDiffs->on_device || !denoms->on_device || !mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; convLocalPool(*(mat->data_device), *(meanDiffs->data_device), channels, sizeX, -sizeX/2, 1, imgSizeX, AvgPooler()); meanDiffs->data_device->add(*(mat->data_device), -1, 1); convContrastNorm(*(mat->data_device), *(meanDiffs->data_device), *(denoms->data_device), *(target->data_device), channels, sizeX, scale, power, 1.0); target->size[0] = target->data_device->getNumRows(); target->size[1] = target->data_device->getNumCols(); return 0; } // convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); extern int local_contrast_norm_undo(cudanetmat* meanDiffs, cudanetmat *denoms, cudanetmat* respGrads, cudanetmat* respActs, cudanetmat* target, int channels, int sizeX, float scale, float power, float scaleTargets) { if (!meanDiffs->on_device || !denoms->on_device || !respGrads->on_device || !respActs->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (respGrads->is_trans || respActs->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; if (respGrads->size[0]!=respActs->size[0] || respGrads->size[1] != respActs->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (meanDiffs->size[0]!=target->size[0] || meanDiffs->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; convContrastNormUndo(*(respGrads->data_device), *(denoms->data_device), *(meanDiffs->data_device), *(respActs->data_device), *(target->data_device), channels, sizeX, scale, power, scaleTargets, 1); target->size[0] = target->data_device->getNumRows(); target->size[1] = target->data_device->getNumCols(); return 0; } extern int adadelta_update(cudanetmat* grads, cudanetmat* eGradSq, cudanetmat* eDeltSq, cudanetmat* deltX, float rho, float eps){ int errcheck = elementwise_check3(grads, eGradSq, eDeltSq); if (errcheck !=0) return errcheck; errcheck = elementwise_check2(grads, deltX); if (errcheck !=0) return errcheck; // This operator is used to compute the decay updates: a(t) = a(t-1) * rho + b(t)*b(t) * (1-rho) NVMatrixBinaryOps::AxPBysq sqwadd = NVMatrixBinaryOps::AxPBysq(rho, 1-rho); NVMatrixTernaryOps::SqrtRatioMult srmult = NVMatrixTernaryOps::SqrtRatioMult(eps); eGradSq->data_device->applyBinary(sqwadd, *(grads->data_device)); eDeltSq->data_device->applyTernary(srmult, *(eGradSq->data_device), *(grads->data_device), *(deltX->data_device)); eDeltSq->data_device->applyBinary(sqwadd, *(deltX->data_device)); return 0; } extern int get_vector_slice(cudanetmat* source, cudanetmat* target, unsigned int first_ind, unsigned int last_ind) { // source must be a vector if (source->size[0] > 1 && source->size[1] > 1) return ERROR_GENERIC; if (source->is_trans) return ERROR_TRANSPOSED; if (!source->on_device) return 
ERROR_NOT_ON_DEVICE; if (first_ind >= last_ind) return ERROR_INCOMPATIBLE_DIMENSIONS; if (source->size[0] > 1) { //source is a column vect if (last_ind > source->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; target->size[0] = last_ind - first_ind; target->size[1] = 1; target->data_device = &(source->data_device->slice(first_ind, last_ind, 0,1)); } else { if (last_ind > source->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; //source is a row vect target->size[0] = 1; target->size[1] = last_ind - first_ind; target->data_device = &(source->data_device->slice(0,1,first_ind, last_ind)); } target->on_device = 1; target->on_host = 0; target->is_trans = 0; target->owns_data = 0; return 0; } extern int get_slice_view(cudanetmat* source, cudanetmat* target, unsigned int first_row, unsigned int last_row, unsigned int first_col, unsigned int last_col) { if (source->is_trans) return ERROR_TRANSPOSED; if (!source->on_device) return ERROR_NOT_ON_DEVICE; if (last_col > source->size[1] || (first_col >= last_col)) return ERROR_INCOMPATIBLE_DIMENSIONS; if (last_row > source->size[0] || (first_row >= last_row)) return ERROR_INCOMPATIBLE_DIMENSIONS; target->data_device = &(source->data_device->slice(first_row, last_row, first_col, last_col)); target->data_host = NULL; target->on_device = 1; target->on_host = 0; target->size[0] = last_row - first_row; target->size[1] = last_col - first_col; target->is_trans = 0; target->owns_data = 0; return 0; } extern int get_col_slice_view(cudanetmat* source, cudanetmat* target, unsigned int first_col, unsigned int last_col) { return get_slice_view(source, target, 0, source->size[0], first_col, last_col); } extern int get_row_slice_view(cudanetmat* source, cudanetmat* target, unsigned int first_row, unsigned int last_row) { return get_slice_view(source, target, first_row, last_row, 0, source->size[1]); } extern int get_col_slice_copy(cudanetmat* source, cudanetmat* target, unsigned int first_col, unsigned int last_col) { if (source->is_trans) return ERROR_TRANSPOSED; if (!source->on_device) return ERROR_NOT_ON_DEVICE; if (last_col > source->size[1] || (first_col >= last_col)) return ERROR_INCOMPATIBLE_DIMENSIONS; source->data_device->sliceCols(first_col, last_col, *(target->data_device)); target->on_device = 1; target->on_host = 0; target->size[0] = source->size[0]; target->size[1] = last_col - first_col; target->is_trans = 0; target->owns_data = 1; return 0; } extern int get_row_slice_copy(cudanetmat* source, cudanetmat* target, unsigned int first_row, unsigned int last_row) { if (source->is_trans) return ERROR_TRANSPOSED; if (!source->on_device) return ERROR_NOT_ON_DEVICE; if (last_row > source->size[0] || (first_row >= last_row)) return ERROR_INCOMPATIBLE_DIMENSIONS; source->data_device->sliceRows(first_row, last_row, *(target->data_device)); target->on_device = 1; target->on_host = 0; target->size[1] = source->size[1]; target->size[0] = last_row - first_row; target->is_trans = 0; target->owns_data = 1; return 0; } extern int add_mult(cudanetmat* mat1, cudanetmat* mat2, float alpha, float beta) { if (!mat1->on_device || !mat2->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat1->data_device->add(*(mat2->data_device), alpha, beta); return 0; } extern int set_col_slice(cudanetmat* source, cudanetmat* target, unsigned int start, unsigned int end) { int height = target->size[0]; int width = target->size[1]; if 
((end - start) != source->size[1] || source->size[0] != height || start >= end || end > width) return ERROR_INCOMPATIBLE_DIMENSIONS; source->data_device->copy(*(target->data_device), 0, source->size[0], 0, source->size[1], 0, start); return 0; } extern int set_row_slice(cudanetmat* source, cudanetmat* target, unsigned int start, unsigned int end) { int height = target->size[0]; int width = target->size[1]; if ((end - start) != source->size[0] || source->size[1] != width || start >= end || end > height) return ERROR_INCOMPATIBLE_DIMENSIONS; source->data_device->copy(*(target->data_device), 0, source->size[0], 0, source->size[1], start, 0); return 0; } extern int assign_col_slice(cudanetmat* source, unsigned int start, unsigned int end, float val) { int height = source->size[0]; int width = source->size[1]; if (start >= end || end > width) return ERROR_INCOMPATIBLE_DIMENSIONS; source->data_device->assignSlice(0, height, start, end, val); return 0; } extern int assign_row_slice(cudanetmat* source, unsigned int start, unsigned int end, float val) { int height = source->size[0]; int width = source->size[1]; if (start >= end || end > height) return ERROR_INCOMPATIBLE_DIMENSIONS; source->data_device->assignSlice(start, end, 0, width, val); return 0; } extern int apply_pow_matrix(cudanetmat* mat, cudanetmat* pow, cudanetmat* target) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->applyBinary(NVMatrixBinaryOps::Power(), *(pow->data_device), *(target->data_device)); return 0; } extern int print_devmat(cudanetmat* mat) { if (!mat->on_device) return ERROR_NOT_ON_DEVICE; mat->data_device->print(0, mat->data_device->getNumRows(), 0, mat->data_device->getNumCols()); printf("stride: %d ld: %d, fd:%d\n", mat->data_device->getStride(), mat->data_device->getLeadingDim(), mat->data_device->getFollowingDim()); return 0; } // extern int apply_pow_matrix(cudanetmat* mat, cudanetmat* pow, cudanetmat* target) { // int errcheck = elementwise_check2(mat, target); // mat->data_device->func(*(target->data_device)); // } extern int reciprocal(cudanetmat* mat, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::Reciprocal(), *(target->data_device)); return 0; } extern int free_device_memory(cudanetmat* mat) { if (mat->owns_data && mat->on_device) { delete mat->data_device; mat->data_device = NULL; mat->on_device = 0; } return 0; } extern float euclid_norm(cudanetmat* mat, int* err_code) { if (!mat->on_device) { *err_code = ERROR_NOT_ON_DEVICE; return -1.; } float res = mat->data_device->norm(); *err_code = 0; return res; } extern float manhattan_norm(cudanetmat* mat, int* err_code) { if (!mat->on_device) { *err_code = ERROR_NOT_ON_DEVICE; return -1.; } float res = mat->data_device->sumabs(); *err_code = 0; return res; } extern int less_than(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target) { if (!mat1->on_device || !mat2->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat1->data_device->applyBinary(NVMatrixBinaryOps::SmallerThan(), 
*(mat2->data_device), *(target->data_device)); return 0; } extern int less_than_scalar(cudanetmat* mat, float val, cudanetmat* target) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->apply(NVMatrixOps::SmallerThanScalar(val), *(target->data_device)); return 0; } extern int greater_than(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target) { if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat1->data_device->applyBinary(NVMatrixBinaryOps::BiggerThan(), *(mat2->data_device), *(target->data_device)); return 0; } extern int greater_than_scalar(cudanetmat* mat, float val, cudanetmat* target) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->apply(NVMatrixOps::BiggerThanScalar(val), *(target->data_device)); return 0; } extern int equals(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target) { if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat1->data_device->applyBinary(NVMatrixBinaryOps::Equals(), *(mat2->data_device), *(target->data_device)); return 0; } extern int equals_scalar(cudanetmat* mat, float val, cudanetmat* target) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->apply(NVMatrixOps::EqualsScalar(val), *(target->data_device)); return 0; } extern int where(cudanetmat* condition_mat, cudanetmat* if_mat, cudanetmat* else_mat, cudanetmat* target) { if (!condition_mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (condition_mat->size[0] != target->size[0] || condition_mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (condition_mat->size[0] != if_mat->size[0] || condition_mat->size[1] != if_mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (condition_mat->size[0] != else_mat->size[0] || condition_mat->size[1] != else_mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; condition_mat->data_device->applyTernary(NVMatrixTernaryOps::Where(), *(if_mat->data_device), *(else_mat->data_device), *(target->data_device)); return 0; } extern int minimum(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target) { if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return 
ERROR_INCOMPATIBLE_DIMENSIONS; mat1->data_device->applyBinary(NVMatrixBinaryOps::Minimum(), *(mat2->data_device), *(target->data_device)); return 0; } extern int minimum_scalar(cudanetmat* mat, float val, cudanetmat* target) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->apply(NVMatrixOps::MinWithScalar(val), *(target->data_device)); return 0; } extern int maximum(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target) { if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat1->data_device->applyBinary(NVMatrixBinaryOps::Maximum(), *(mat2->data_device), *(target->data_device)); return 0; } extern int maximum_scalar(cudanetmat* mat, float val, cudanetmat* target) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->apply(NVMatrixOps::MaxWithScalar(val), *(target->data_device)); return 0; } extern int reshape(cudanetmat* mat, unsigned int m, unsigned int n) { if (mat->size[0] * mat->size[1] != m * n) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->on_device) mat->data_device->resize(m,n); mat->size[0] = m; mat->size[1] = n; return 0; } extern int add_col_vec(cudanetmat* mat, cudanetmat* vec, cudanetmat* target) { if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->addVector(*(vec->data_device), *(target->data_device)); return 0; } extern int add_col_mult(cudanetmat* mat, cudanetmat* vec, cudanetmat* target, float mult) { if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->addVector(*(vec->data_device), mult, *(target->data_device)); return 0; } extern int add_row_vec(cudanetmat* mat, cudanetmat* vec, cudanetmat* target) { if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[1] != vec->size[1] || vec->size[0] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->addVector(*(vec->data_device), *(target->data_device)); return 0; } extern int mult_by_col_vec(cudanetmat* mat, cudanetmat* vec, cudanetmat* target) { if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->eltwiseMultByVector(*(vec->data_device), *(target->data_device)); 
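// eltwiseMultByVector broadcasts the column vector across the columns of mat: target(i, j) = mat(i, j) * vec(i).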
return 0; } extern int mult_by_row_vec(cudanetmat* mat, cudanetmat* vec, cudanetmat* target) { if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[1] != vec->size[1] || vec->size[0] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->eltwiseMultByVector(*(vec->data_device), *(target->data_device)); return 0; } extern int divide_by_col_vec(cudanetmat* mat, cudanetmat* vec, cudanetmat* target) { if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->eltwiseDivideByVector(*(vec->data_device), *(target->data_device)); return 0; } extern int divide_by_row_vec(cudanetmat* mat, cudanetmat* vec, cudanetmat* target) { if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[1] != vec->size[1] || vec->size[0] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->eltwiseDivideByVector(*(vec->data_device), *(target->data_device)); return 0; } extern int max_by_axis(cudanetmat* mat, cudanetmat* target, int axis) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (axis == -1) { if (target->size[0] != 1 || target->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS; target->data_device->assign(mat->data_device->max()); } else if (axis == 0) { if (target->size[0] != 1 || target->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->max(0, *(target->data_device)); } else { if (target->size[1] != 1 || target->size[0] != mat->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->max(1, *(target->data_device)); } return 0; } extern int min_by_axis(cudanetmat* mat, cudanetmat* target, int axis) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (axis == -1) { if (target->size[0] != 1 || target->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS; target->data_device->assign(mat->data_device->min()); } else if (axis == 0) { if (target->size[0] != 1 || target->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->min(0, *(target->data_device)); } else { if (target->size[1] != 1 || target->size[0] != mat->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->min(1, *(target->data_device)); } return 0; } extern int sum(cudanetmat* mat, cudanetmat* target, int axis) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (axis == -1) { if (target->size[0] != 1 || target->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS; target->data_device->assign(mat->data_device->sum()); } else if (axis == 0) { if (target->size[0] != 1 || target->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->sum(0, *(target->data_device)); } else { if (target->size[1] != 1 || target->size[0] != mat->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->sum(1, *(target->data_device)); } return 0; } extern int sumsq(cudanetmat* mat, cudanetmat* target, int axis) { if (!mat->on_device || 
!target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (axis == -1) { if (target->size[0] != 1 || target->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS; NVMatrix tmp; mat->data_device->sumOfSquares(0, tmp); target->data_device->assign(tmp.sum()); } else if (axis == 0) { if (target->size[0] != 1 || target->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->sumOfSquares(0, *(target->data_device)); } else { if (target->size[1] != 1 || target->size[0] != mat->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->sumOfSquares(1, *(target->data_device)); } return 0; } extern int mean(cudanetmat* mat, cudanetmat* target, int axis) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (axis == -1) { if (target->size[0] != 1 || target->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS; target->data_device->assign(mat->data_device->mean()); } else if (axis == 0) { if (target->size[0] != 1 || target->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->mean(0, *(target->data_device)); } else { if (target->size[1] != 1 || target->size[0] != mat->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->mean(1, *(target->data_device)); } return 0; } extern int var(cudanetmat* mat, cudanetmat* mean, cudanetmat* target, int axis) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (axis == -1) { if (target->size[0] != 1 || target->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS; return ERROR_UNSUPPORTED; } else if (axis == 0) { if (target->size[0] != 1 || target->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->var(0, *(mean->data_device), *(target->data_device)); } else { if (target->size[1] != 1 || target->size[0] != mat->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->var(1, *(mean->data_device), *(target->data_device)); } return 0; } extern int mean_norm(cudanetmat* mat, cudanetmat* target, int axis) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (axis == -1) { float mval = mat->data_device->mean(); mat->data_device->addScalar(-mval, *(target->data_device)); } else if (axis == 0 || axis == 1) { NVMatrix mvals; mat->data_device->mean(axis, mvals); mat->data_device->addVector(mvals, -1.0, *(target->data_device)); } else { return ERROR_UNSUPPORTED; } return 0; } extern int argmax_by_axis(cudanetmat* mat, cudanetmat* target, int axis) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (axis == 0) { if (target->size[0] != 1 || target->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->argmax(0, *(target->data_device)); } else { if (target->size[1] != 1 || target->size[0] != mat->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->argmax(1, *(target->data_device)); } return 0; } extern int argmin_by_axis(cudanetmat* mat, cudanetmat* target, int axis) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (axis == 0) { if (target->size[0] != 1 || target->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->argmin(0, 
*(target->data_device)); } else { if (target->size[1] != 1 || target->size[0] != mat->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->argmin(1, *(target->data_device)); } return 0; } extern int copy_transpose(cudanetmat* source, cudanetmat* target) { if (source->size[0] != target->size[1] || source->size[1] != target->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; source->data_device->transpose(*(target->data_device)); return 0; } extern int xcov(cudanetmat* X, cudanetmat* Y, cudanetmat* covMat, int normX, int normY, float normAll) { if (!X->on_device || !Y->on_device || !covMat->on_device) return ERROR_NOT_ON_DEVICE; if (X->is_trans || Y->is_trans || covMat->is_trans) return ERROR_TRANSPOSED; if (get_nonleading_dimension(Y) != get_nonleading_dimension(X) || get_leading_dimension(X) != get_leading_dimension(covMat) || get_leading_dimension(Y) != get_nonleading_dimension(covMat)) { return ERROR_INCOMPATIBLE_DIMENSIONS; } // Mean normalize each input matrix along major axis (for _cudanet, this is along 1) matrices are K x N // Xmean and Ymean are K-dim row vectors NVMatrix Xmean, Ymean; X->data_device->mean(1, Xmean); Y->data_device->mean(1, Ymean); // Now normalize in each NVMatrix Xnorm, Ynorm; X->data_device->addVector(Xmean, -1*normX, Xnorm); Y->data_device->addVector(Ymean, -1*normY, Ynorm); // Now calc the norm into covMat covMat->data_device->addProductRM(Xnorm, Ynorm, 0, 1/normAll, 0 /* trans of X */, 1 /* non-trans of Y*/); return 0; } extern unsigned long int get_gpu_pointer(cudanetmat* source) { return (unsigned long int) source->data_device->getDevData(); } extern PyObject* get_gpu_pythonbuf(cudanetmat* source) { PyObject* py_buf = PyBuffer_FromReadWriteMemory((void *) (source->data_device->getDevData()), source->data_device->getNumElements() * sizeof(float)); Py_INCREF(py_buf); return py_buf; } extern int multi_ranked_error(cudanetmat* probs, cudanetmat* labels, cudanetmat *labellogprob, cudanetmat* top1probs, cudanetmat* topkprobs, int topk) { NVMatrix _maxProbs; probs->data_device->max(0, _maxProbs); computeMultiSoftmaxCost(*(labels->data_device), *(probs->data_device), _maxProbs, *(labellogprob->data_device), *(top1probs->data_device), *(topkprobs->data_device), topk); return 0; } // If axis == 0, then mat is K x N where K is number of outputs, N is number of examples // If axis == 1, then mat is N x K where K is number of outputs, N is number of examples // Cudanet convention is axis = 0, so extern int softmax(cudanetmat* mat, cudanetmat* target, int axis) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans || target->is_trans) return ERROR_TRANSPOSED; NVMatrix _max, _sum; NVMatrix& input = *(mat->data_device); NVMatrix& tgt = *(target->data_device); input.max(axis, _max); input.addVector(_max, -1, tgt); tgt.apply(NVMatrixOps::Exp()); tgt.sum(axis, _sum); tgt.eltwiseDivideByVector(_sum); return 0; } // acts, actsGrad, and target are all numOut x BatchSize extern int softmax_grad(cudanetmat* acts, cudanetmat* actsGrad, cudanetmat* target) { if (!acts->on_device || !actsGrad->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (acts->is_trans || actsGrad->is_trans || target->is_trans) return ERROR_TRANSPOSED; int errcheck = elementwise_check3(acts, actsGrad, target); if (errcheck !=0) return errcheck; acts->data_device->transpose(true); actsGrad->data_device->transpose(true); target->data_device->transpose(true); //Change assertion in computeSoftmaxgrad to just ensure that acts and actsGrad are same 
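// The transpose(true) calls above only toggle the matrices' transposedness flag so that computeSoftmaxGrad sees the numOut x batchSize activations as their transposed views; the transpose(false) calls below restore the original orientation.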
computeSoftmaxGrad(*(acts->data_device), *(actsGrad->data_device), *(target->data_device), 0, 1); acts->data_device->transpose(false); actsGrad->data_device->transpose(false); target->data_device->transpose(false); return 0; } // labels and outputs are numOut x BatchSize, target is 1 x BatchSize extern int crossent_cost(cudanetmat* labels, cudanetmat* outputs, cudanetmat* target) { if (!labels->on_device || !outputs->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (labels->is_trans || outputs->is_trans || target->is_trans) return ERROR_TRANSPOSED; int errcheck = elementwise_check2(labels, outputs); if (errcheck !=0) return errcheck; NVMatrix correctProbs_out; // This gets resized in cost call computeCrossEntCost(*(labels->data_device), *(outputs->data_device), *(target->data_device), correctProbs_out); return 0; } extern int crossent_cost_grad(cudanetmat* labels, cudanetmat* outputs, cudanetmat* target) { if (!labels->on_device || !outputs->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (labels->is_trans || outputs->is_trans || target->is_trans) return ERROR_TRANSPOSED; int errcheck = elementwise_check2(labels, outputs); if (errcheck !=0) return errcheck; computeCrossEntGrad(*(labels->data_device), *(outputs->data_device), *(target->data_device), 0, 1); return 0; } extern int weight_norm_along_axis(cudanetmat* weights, cudanetmat* target, int axis, float norm) { // checks if the l2 norm of weights along axis is greater than norm -- if so, scale so l2norm(weights) is norm if (!weights->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (weights->is_trans || target->is_trans) return ERROR_TRANSPOSED; if (axis!=0 && axis!=1) return ERROR_UNSUPPORTED; NVMatrix normVect; weights->data_device->sumOfSquares(axis, normVect); normVect.apply(MaxWeightConstraintOperator(norm)); weights->data_device->eltwiseMultByVector(normVect, *(target->data_device)); return 0; } extern PyObject *test_make_tuple(int nval) { PyObject *t; t = Py_BuildValue("(iis)", nval, nval, "three"); return t; } // These are still to do // Weight column norm // softmax grad // cross entropy multi-class cost // }
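/* ---------------------------------------------------------------------------
 * Illustrative usage sketch: a minimal host-side routine showing the typical
 * calling sequence for the extern "C" wrappers above -- wrap host buffers,
 * upload them, run an element-wise op on the GPU, and download the result.
 * This assumes the cudanetmat struct and wrapper prototypes are visible (e.g.
 * via cudanetmat.cuh), that the wrapper structs start zero-initialized, and
 * that cublas_init() has already been called; example_elementwise_add is a
 * hypothetical helper name, not part of the library API.
 * --------------------------------------------------------------------------- */
static int example_elementwise_add(float* a, float* b, float* out, int m, int n) {
    cudanetmat A = {0}, B = {0}, C = {0};
    init_from_array(&A, a, m, n);                    // host-side wrapper only, no copy yet
    init_from_array(&B, b, m, n);
    int err = copy_to_device(&A);                    // allocates the NVMatrix and uploads
    if (err) return err;
    if ((err = copy_to_device(&B))) return err;
    if ((err = init_empty(&C, m, n))) return err;    // device-only result matrix
    if ((err = add_elementwise(&A, &B, &C))) return err;   // C = A + B on the GPU
    set_host_mat(&C, out);                           // attach the output buffer to C
    err = copy_to_host(&C);                          // download the result into out
    free_device_memory(&A); free_device_memory(&B); free_device_memory(&C);
    return err;
}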
80e25ef6ccd7bd9e20f206375657b981176ca01f.cu
/* * --------------------------------------------------------------------------- * Copyright 2014 Nervana Systems Inc. All rights reserved. * --------------------------------------------------------------------------- */ #define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION #include <Python.h> #include <arrayobject.h> #include <assert.h> #include <helper_cuda.h> #include <cublas.h> #include <time.h> #include <vector> #include <execinfo.h> #include <signal.h> #include "../../util/include/matrix.h" #include "../../util/include/queue.h" #include "../../nvmatrix/include/nvmatrix.cuh" #include "../../cudaconv3/include/cudaconv2.cuh" #include "../include/cudanetmat.cuh" #include "../../cudaconvnet/include/layer_kernels.cuh" extern "C" { int elementwise_check3(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target); int elementwise_check2(cudanetmat* mat1, cudanetmat* target); /* ------------------------------ CUBLAS init/shutdown ------------------------------ */ inline bool check_cublas_error() { cublasStatus status = cublasGetError(); return status != CUBLAS_STATUS_SUCCESS; } inline bool checkCUDAError() { cudaError_t err = cudaGetLastError(); if (cudaSuccess != err) printf("%s\n", cudaGetErrorString( err)); return cudaSuccess != err; } extern const char* get_last_cuda_error() { cudaError_t err = cudaGetLastError(); return cudaGetErrorString( err); } extern int cublas_init() { if (NVMatrix::getNumCublasHandles() == 0) { NVMatrix::initCublas(); } if (check_cublas_error()) return CUBLAS_ERROR; else return 0; } extern int cublas_shutdown() { if (NVMatrix::getNumCublasHandles() > 0) { NVMatrix::destroyCublas(); cudaThreadExit(); } return 0; } extern void init_random(unsigned long long seed) { if (!NVMatrix::isRndInitialized()) NVMatrix::initRandom(seed); } extern void init_random_no_seed() { if (!NVMatrix::isRndInitialized()) NVMatrix::initRandom(); } extern void destroy_random() { if (NVMatrix::isRndInitialized()) NVMatrix::destroyRandom(); } extern int get_device_id() { // DEVICE_HOST is -1 // DEVICE_NULL is -2 return NVMatrix::getDeviceID(); } extern void sync_stream() { NVMatrix::syncStream(NVMatrix::getDefaultStream()); } extern void set_device_id(int d) { NVMatrix::setDeviceID(d); } extern int get_peer_access(int srcDevice, int tgtDevice) { return NVMatrix::canAccessPeer(srcDevice, tgtDevice); } extern int cuda_set_device(int deviceId) { cudaSetDevice(deviceId); if (checkCUDAError()) return CUDA_ERROR; else return 0; } extern int get_num_devices(int* err_code) { int numd; *err_code = cudaGetDeviceCount(&numd); return numd; } /* ------------------------------ Utility routines ------------------------------ */ int elementwise_check3(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target) { if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; return 0; } int elementwise_check2(cudanetmat* mat, cudanetmat* target) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; return 0; } extern int get_leading_dimension(cudanetmat* mat) { return mat->is_trans ? mat->size[1] : mat->size[0]; } extern int get_nonleading_dimension(cudanetmat* mat) { return mat->is_trans ? 
mat->size[0] : mat->size[1]; } extern void set_transpose(cudanetmat* mat, int is_trans) { mat->is_trans = is_trans; } inline char get_transpose_char(cudanetmat* mat) { return mat->is_trans ? 't' : 'n'; } extern void cuda_sync_threads() { cudaThreadSynchronize(); } /* ------------------------------ Allocating/moving data ------------------------------ */ extern int allocate_device_memory(cudanetmat* mat) { mat->data_device = new NVMatrix(mat->size[0], mat->size[1], mat->is_trans ? true : false); mat->on_device = 1; return 0; } extern int copy_to_host(cudanetmat* mat) { if (mat->on_device) { mat->data_device->copyToHost(*(mat->data_host)); } else return ERROR_NOT_ON_DEVICE; return 0; } extern int set_host_mat(cudanetmat* mat, float *data) { if (mat->data_host) delete mat->data_host; mat->data_host = new Matrix(data, (int64) mat->size[0], (int64) mat->size[1], mat->is_trans); return 0; } extern int get_data_device_id(cudanetmat* mat) { if (!mat->on_device) { return ERROR_NOT_ON_DEVICE; } return mat->data_device->getDataDeviceID(); } extern int copy_to_device(cudanetmat* mat) { if (!mat->on_device) { allocate_device_memory(mat); mat->data_device->copyFromHost(*(mat->data_host), true); } else { mat->data_device->copyFromHost(*(mat->data_host), true); } mat->is_trans = mat->data_device->isTrans(); mat->size[0] = mat->data_device->getNumRows(); mat->size[1] = mat->data_device->getNumCols(); return 0; } extern int copy_from(cudanetmat* mat, float* data, bool is_trans) { Matrix mat_data(data, (int64) mat->size[0], (int64) mat->size[1], is_trans); mat->data_device->copyFromHost(mat_data, false); return 0; } extern int copy_on_device(cudanetmat* mat1, cudanetmat* mat2) { if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat1->data_device->copy(*(mat2->data_device)); return 0; } extern void init_from_array(cudanetmat* mat, float* data, int m, int n) { mat->data_host = new Matrix(data, (int64) m, (int64) n, false); mat->size[0] = m; mat->size[1] = n; mat->on_device = 0; mat->on_host = 1; mat->is_trans = 0; mat->owns_data = 1; } extern int init_empty(cudanetmat* mat, int m, int n) { mat->size[0] = m; mat->size[1] = n; mat->on_device = 0; mat->on_host = 0; mat->is_trans = 0; mat->owns_data = 1; return allocate_device_memory(mat); } extern int assign_scalar(cudanetmat* mat, float alpha) { if (!mat->on_device) return ERROR_NOT_ON_DEVICE; mat->data_device->assign(alpha); return 0; } extern int add_scalar(cudanetmat* mat, float alpha, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->addScalar(alpha, *(target->data_device)); return 0; } extern int add_elementwise(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target) { int errcheck = elementwise_check3(mat1, mat2, target); if (errcheck !=0) return errcheck; mat1->data_device->add(*(mat2->data_device), *(target->data_device)); return 0; } extern int subtract_elementwise(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target) { int errcheck = elementwise_check3(mat1, mat2, target); if (errcheck !=0) return errcheck; mat1->data_device->subtract(*(mat2->data_device), *(target->data_device)); return 0; } extern int divide_elementwise(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target) { int errcheck = elementwise_check3(mat1, mat2, target); if (errcheck !=0) return errcheck; mat1->data_device->eltwiseDivide(*(mat2->data_device), *(target->data_device)); return 0; } /* Elementwise multiplication of 2 matrices */ extern int 
mult_elementwise(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target) { int errcheck = elementwise_check3(mat1, mat2, target); if (errcheck !=0) return errcheck; mat1->data_device->eltwiseMult(*(mat2->data_device), *(target->data_device)); return 0; } extern int mult_by_scalar(cudanetmat* mat, float alpha, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->scale(alpha, *(target->data_device)); return 0; } extern int divide_by_scalar(cudanetmat* mat, float alpha, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::DivByScalar(alpha), *(target->data_device)); return 0; } extern int sign(cudanetmat* mat, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::Sign(), *(target->data_device)); return 0; } extern int apply_sigmoid(cudanetmat* mat, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::Logistic(), *(target->data_device)); return 0; } extern int apply_tanh(cudanetmat* mat, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::Tanh(), *(target->data_device)); return 0; } extern int apply_soft_threshold(cudanetmat* mat, float alpha, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::SoftThreshold(alpha), *(target->data_device)); return 0; } extern int apply_abs(cudanetmat* mat, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::Abs(), *(target->data_device)); return 0; } extern int apply_log_1_plus_exp(cudanetmat* mat, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::Log1PlusExp(), *(target->data_device)); return 0; } extern int apply_log(cudanetmat* mat, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::Log(), *(target->data_device)); return 0; } extern int apply_clip_range(cudanetmat* mat, cudanetmat* target, float lower, float upper) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; mat->data_device->apply(NVMatrixOps::ClipUpperLower(lower, upper), *(target->data_device)); return 0; } extern int apply_exp(cudanetmat* mat, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::Exp(), *(target->data_device)); return 0; } extern int apply_gamma(cudanetmat* mat, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::Gamma(), *(target->data_device)); return 0; } extern int apply_lgamma(cudanetmat* mat, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::LGamma(), *(target->data_device)); return 0; } extern int apply_sqrt(cudanetmat* mat, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::Sqrt(), *(target->data_device)); return 0; } extern 
int apply_pow(cudanetmat* mat, float pow, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::Pow(pow),*(target->data_device)); return 0; } // For convolution, krizhevsky expects // Weights as (K1xK2xC) Rows x F Columns in 'C' order // Images as (D1xD2xC) Rows x (N) Columns in 'C' order // Target as (OD1xOD2xF) Rows x (N) Columsn in 'C' order extern int convolution(cudanetmat* wts, cudanetmat* imgs, cudanetmat* targets, int imgSizeY, int numModulesY, int numModulesX, int paddingStart, int moduleStride, int numImgColors, int numGroups, bool localconv) { // int numFilterColors = numImgColors / numGroups; int numFilters = wts->size[1]; int numModules = numModulesX * numModulesY; int numImages = imgs->size[1]; int imgPixels = imgs->size[0]/numImgColors; int imgSizeX = imgPixels / imgSizeY; if (wts->is_trans || imgs->is_trans || targets->is_trans) { return ERROR_TRANSPOSEDNESS; } if (imgPixels != imgSizeY*imgSizeX) return ERROR_CONV_DIMENSION; if (numFilters % 16 != 0) return ERROR_CONV_NUM_FILTERS; if (targets->size[0] != numFilters * numModules || targets->size[1] != numImages) return ERROR_INCOMPATIBLE_DIMENSIONS; if (!localconv) { convFilterActs(*(imgs->data_device), *(wts->data_device), *(targets->data_device), imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride, numImgColors, numGroups); } else { localFilterActs(*(imgs->data_device), *(wts->data_device), *(targets->data_device), imgSizeY, numModulesY, numModulesX, paddingStart, moduleStride, numImgColors, numGroups); } return 0; } extern int convolution_back_weights(cudanetmat* hidActs, cudanetmat* imgs, cudanetmat* targets, int imgSizeY, int numModulesY, int numModulesX, int filterSize, int paddingStart, int moduleStride, int numImgColors, int numGroups, int sumWidth, float scaleTargets, float scaleOutputs, bool localconv) { // int numFilterColors = numImgColors / numGroups; int numFilters = targets->size[1]; // int numModules = numModulesX * numModulesX; // int numImages = imgs->size[1]; int imgPixels = imgs->size[0]/numImgColors; int imgSizeX = imgPixels / imgSizeY; int filterPixels = filterSize*filterSize; int filterChannels = numImgColors/numGroups; int outWidth = DIVUP(numModulesX, sumWidth); int outChunks = outWidth * outWidth; if (hidActs->is_trans || imgs->is_trans || targets->is_trans) { return ERROR_TRANSPOSEDNESS; } if (imgPixels != imgSizeY*imgSizeX) return ERROR_CONV_DIMENSION; if (numFilters % 16 != 0) return ERROR_CONV_NUM_FILTERS; if (!localconv) { if (targets->size[0] != filterChannels * filterPixels || targets->size[1] != numFilters) return ERROR_INCOMPATIBLE_DIMENSIONS; bool doPartialSum = sumWidth < numModulesX; NVMatrix _weightGradTmp; NVMatrix& tgt = doPartialSum ? _weightGradTmp : *(targets->data_device); convWeightActs(*(imgs->data_device), *(hidActs->data_device), tgt, imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, sumWidth, doPartialSum ? 
0 : scaleTargets, scaleOutputs); if (doPartialSum) { int pScaleTargets = scaleTargets > 0; // TODO determine whether this makes sense _weightGradTmp.reshape(outChunks, filterChannels * filterPixels * numFilters); targets->data_device->addSum(_weightGradTmp, 0, pScaleTargets, 1); targets->data_device->reshape(filterChannels * filterPixels, numFilters); } } else { localWeightActs(*(imgs->data_device), *(hidActs->data_device), *(targets->data_device), imgSizeY, numModulesY, numModulesX, filterSize, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, scaleOutputs); } return 0; } extern int convolution_back_errors(cudanetmat* wts, cudanetmat* imgs, cudanetmat* targets, int imgSizeY, int imgSizeX, int numModulesY, int paddingStart, int moduleStride, int numImgColors, int numGroups, float scaleTargets, bool localconv) { int numFilterColors = numImgColors / numGroups; int numFilters = wts->size[1]; // int numModules = numModulesX * numModulesX; int numImages = imgs->size[1]; int numModules = imgs->size[0]/numFilters; if (wts->is_trans || imgs->is_trans || targets->is_trans) { return ERROR_TRANSPOSEDNESS; } int filterModuleMult = localconv ? 1 : numModules; int filterPixels = wts->size[0] / (filterModuleMult * numFilterColors); int filterSize = sqrt(filterPixels); int imgPixels = imgSizeY * imgSizeX; if (numFilters % 16 != 0) return ERROR_CONV_NUM_FILTERS; if (targets->size[0] != numImgColors * imgPixels || targets->size[1] != numImages) return ERROR_INCOMPATIBLE_DIMENSIONS; if (!localconv) { convImgActs(*(imgs->data_device), *(wts->data_device), *(targets->data_device), imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, 1); } else { localImgActs(*(imgs->data_device), *(wts->data_device), *(targets->data_device), imgSizeY, imgSizeX, numModulesY, paddingStart, moduleStride, numImgColors, numGroups, scaleTargets, 1); } return 0; } extern int dot(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target, float beta, float alpha) { if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (get_leading_dimension(mat1) != get_leading_dimension(target) || get_nonleading_dimension(mat2) != get_nonleading_dimension(target) || get_nonleading_dimension(mat1) != get_leading_dimension(mat2)) { return ERROR_INCOMPATIBLE_DIMENSIONS; } int m = get_leading_dimension(mat1), k = get_leading_dimension(mat2), n = get_nonleading_dimension(mat2); // cublas, why? 
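// (CUBLAS assumes column-major storage while cudanet keeps its matrices row-major; addProductRM below was added to NVMatrix to run the GEMM directly on row-major operands, so the target never has to be physically transposed.)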
// had to do some weirdness here to avoid forcing target to transpose (added function to nvmatrix to handle row major matrices) target->data_device->addProductRM(*(mat1->data_device), *(mat2->data_device), beta, alpha, mat1->is_trans, mat2->is_trans); return 0; } extern float vdot(cudanetmat* mat1, cudanetmat* mat2, int* err_code) { float res; if (!mat1->on_device || !mat2->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) { *err_code = ERROR_TRANSPOSEDNESS; return 0; } if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) { *err_code = ERROR_INCOMPATIBLE_DIMENSIONS; return 0; } res = mat1->data_device->dotProduct(*(mat2->data_device)); *err_code = 0; return res; } extern int add_vector(cudanetmat* mat, cudanetmat* vec, float scaleVec, cudanetmat* target) { if (!mat->on_device || !vec->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; if (target == vec) return ERROR_UNSUPPORTED; if (vec->size[0] != 1 && vec->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS; if (vec->size[0] != mat->size[0] && vec->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->addVector(*(vec->data_device), scaleVec, *(target->data_device)); return 0; } extern int mat_vector_op(cudanetmat* mat, cudanetmat* vec, float scaleVec, cudanetmat* target, char opchar) { if (!mat->on_device || !vec->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; if (target == vec) return ERROR_UNSUPPORTED; if (vec->size[0] != 1 && vec->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS; if (vec->size[0] != mat->size[0] && vec->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; switch (opchar) { case 'a' : mat->data_device->addVector(*(vec->data_device), scaleVec, *(target->data_device)); break; case 's' : mat->data_device->addVector(*(vec->data_device), -scaleVec, *(target->data_device)); break; case 'm' : mat->data_device->eltwiseMultByVector(*(vec->data_device), *(target->data_device)); break; case 'd' : mat->data_device->eltwiseDivideByVector(*(vec->data_device), *(target->data_device)); break; case 'e' : mat->data_device->equalsVector(*(vec->data_device), *(target->data_device)); break; default: { printf("This char is unsupported: %c\n", opchar); return ERROR_UNSUPPORTED; } } return 0; } extern int quantize(cudanetmat* mat, int intwidth, int totalwidth) { if (!mat->on_device) return ERROR_NOT_ON_DEVICE; mat->data_device->quantizeValues(intwidth, abs(intwidth-totalwidth)); return 0; } extern int randomize_gaussian(cudanetmat* mat, float mean, float stdev) { if (!mat->on_device) return ERROR_NOT_ON_DEVICE; if (!NVMatrix::isRndInitialized()) return ERROR_RND_NOT_INITIALIZED; mat->data_device->randomizeGaussian(mean, stdev); return 0; } extern int randomize_uniform(cudanetmat* mat) { if (!mat->on_device) return ERROR_NOT_ON_DEVICE; if (!NVMatrix::isRndInitialized()) return ERROR_RND_NOT_INITIALIZED; mat->data_device->randomizeUniform(); return 0; } extern int randomize_uniform_thresh(cudanetmat* mat, float thresh) { if (!mat->on_device) return ERROR_NOT_ON_DEVICE; if (!NVMatrix::isRndInitialized()) return ERROR_RND_NOT_INITIALIZED; mat->data_device->randomizeUniform(); mat->data_device->apply(NVMatrixOps::DropoutKernelOperator(thresh)); return 0; } extern int randomize_binary(cudanetmat* mat) { if (!mat->on_device) return ERROR_NOT_ON_DEVICE; if 
(!NVMatrix::isRndInitialized()) return ERROR_RND_NOT_INITIALIZED; mat->data_device->binarizeProbs(); return 0; } extern int add_noise_gaussian(cudanetmat* mat, float stdev) { if (!mat->on_device) return ERROR_NOT_ON_DEVICE; if (!NVMatrix::isRndInitialized()) return ERROR_RND_NOT_INITIALIZED; mat->data_device->addGaussianNoise(stdev); return 0; } extern int add_noise_uniform(cudanetmat* mat, float minRange, float maxRange) { if (!mat->on_device) return ERROR_NOT_ON_DEVICE; if (!NVMatrix::isRndInitialized()) return ERROR_RND_NOT_INITIALIZED; mat->data_device->addUniformNoise(minRange, maxRange); return 0; } extern int unpool_forward(cudanetmat* smallMat, cudanetmat* largeMat, int channels, int sizeX, int smallX, int largeX) { if (!smallMat->on_device || !largeMat->on_device) return ERROR_NOT_ON_DEVICE; if (smallMat->is_trans || largeMat->is_trans) return ERROR_TRANSPOSEDNESS; convLocalUnpoolForward(*(smallMat->data_device), *(largeMat->data_device), channels, sizeX, smallX, largeX); largeMat->size[0] = largeMat->data_device->getNumRows(); largeMat->size[1] = largeMat->data_device->getNumCols(); return 0; } extern int unpool_backward(cudanetmat* largeMat, cudanetmat* smallMat, int channels, int sizeX, int smallX, int largeX) { if (!smallMat->on_device || !largeMat->on_device) return ERROR_NOT_ON_DEVICE; if (smallMat->is_trans || largeMat->is_trans) return ERROR_TRANSPOSEDNESS; convLocalUnpoolBackward(*(largeMat->data_device), *(smallMat->data_device), channels, sizeX, smallX, largeX); smallMat->size[0] = smallMat->data_device->getNumRows(); smallMat->size[1] = smallMat->data_device->getNumCols(); return 0; } extern int max_pool(cudanetmat* mat, cudanetmat* target, int channels, int sizeX, int start, int stride, int outputsX) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; convLocalPool(*(mat->data_device), *(target->data_device), channels, sizeX, start, stride, outputsX, MaxPooler()); target->size[0] = target->data_device->getNumRows(); target->size[1] = target->data_device->getNumCols(); return 0; } extern int max_abs_pool(cudanetmat* mat, cudanetmat* target, int channels, int sizeX, int start, int stride, int outputsX) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; convLocalPool(*(mat->data_device), *(target->data_device), channels, sizeX, start, stride, outputsX, MaxAbsPooler()); target->size[0] = target->data_device->getNumRows(); target->size[1] = target->data_device->getNumCols(); return 0; } extern int avg_pool(cudanetmat* mat, cudanetmat* target, int channels, int sizeX, int start, int stride, int outputsX) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; convLocalPool(*(mat->data_device), *(target->data_device), channels, sizeX, start, stride, outputsX, AvgPooler()); target->size[0] = target->data_device->getNumRows(); target->size[1] = target->data_device->getNumCols(); return 0; } extern int l2_pool(cudanetmat* mat, cudanetmat* target, int channels, int sizeX, int start, int stride, int outputsX) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; convLocalPool(*(mat->data_device), *(target->data_device), channels, sizeX, start, stride, outputsX, L2Pooler()); target->size[0] = target->data_device->getNumRows(); 
target->size[1] = target->data_device->getNumCols(); return 0; } extern int max_pool_undo(cudanetmat* imgs, cudanetmat* maxGrads, cudanetmat* maxActs, cudanetmat* target, int sizeX, int start, int stride, int outputsX) { if (!imgs->on_device || !maxGrads->on_device || !maxActs->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (imgs->is_trans || maxGrads->is_trans || maxActs->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; if (maxGrads->size[0]!=maxActs->size[0] || maxGrads->size[1] != maxActs->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (imgs->size[0]!=target->size[0] || imgs->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; convLocalMaxUndo(*(imgs->data_device), *(maxGrads->data_device), *(maxActs->data_device), *(target->data_device), sizeX, start, stride, outputsX); target->size[0] = target->data_device->getNumRows(); target->size[1] = target->data_device->getNumCols(); return 0; } extern int avg_pool_undo(cudanetmat* avgGrads, cudanetmat* target, int sizeX, int start, int stride, int outputsX, int imgSizeX) { if (!avgGrads->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (avgGrads->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; convLocalAvgUndo(*(avgGrads->data_device), *(target->data_device), sizeX, start, stride, outputsX, imgSizeX); target->size[0] = target->data_device->getNumRows(); target->size[1] = target->data_device->getNumCols(); return 0; } extern int l2_pool_undo(cudanetmat* imgs, cudanetmat* l2Grads, cudanetmat* l2Acts, cudanetmat* target, int sizeX, int start, int stride, int outputsX) { if (!imgs->on_device || !l2Grads->on_device || !l2Acts->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (imgs->is_trans || l2Grads->is_trans || l2Acts->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; if (l2Grads->size[0]!=l2Acts->size[0] || l2Grads->size[1] != l2Acts->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (imgs->size[0]!=target->size[0] || imgs->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; convLocalL2Undo(*(imgs->data_device), *(l2Grads->data_device), *(l2Acts->data_device), *(target->data_device), sizeX, start, stride, outputsX); target->size[0] = target->data_device->getNumRows(); target->size[1] = target->data_device->getNumCols(); return 0; } extern int crossmap_response_norm(cudanetmat* mat, cudanetmat* target, int channels, int sizeX, float scale, float power) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; convResponseNormCrossMap(*(mat->data_device), *(target->data_device), channels, sizeX, scale, power, 1.0, false); target->size[0] = target->data_device->getNumRows(); target->size[1] = target->data_device->getNumCols(); return 0; } // v = respGrads, inputs = imgs, getActs = respActs // convResponseNormUndo(v, _denoms, *_inputs[0], getActs(), _prev[replicaIdx][0]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); extern int crossmap_response_norm_undo(cudanetmat* imgs, cudanetmat* respGrads, cudanetmat* respActs, cudanetmat* target, int channels, int sizeX, float scale, float power, float scaleTargets) { if (!imgs->on_device || !respGrads->on_device || !respActs->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (imgs->is_trans || respGrads->is_trans || respActs->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; if (respGrads->size[0]!=respActs->size[0] || respGrads->size[1] != respActs->size[1]) return 
ERROR_INCOMPATIBLE_DIMENSIONS; if (imgs->size[0]!=target->size[0] || imgs->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; convResponseNormCrossMapUndo(*(respGrads->data_device), *(imgs->data_device), *(respActs->data_device), *(target->data_device), channels, sizeX, scale, power, 1.0, false, scaleTargets, 1); target->size[0] = target->data_device->getNumRows(); target->size[1] = target->data_device->getNumCols(); return 0; } extern int local_contrast_norm(cudanetmat* mat, cudanetmat* meanDiffs, cudanetmat *denoms, cudanetmat* target, int imgSizeX, int channels, int sizeX, float scale, float power) { if (!meanDiffs->on_device || !denoms->on_device || !mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; convLocalPool(*(mat->data_device), *(meanDiffs->data_device), channels, sizeX, -sizeX/2, 1, imgSizeX, AvgPooler()); meanDiffs->data_device->add(*(mat->data_device), -1, 1); convContrastNorm(*(mat->data_device), *(meanDiffs->data_device), *(denoms->data_device), *(target->data_device), channels, sizeX, scale, power, 1.0); target->size[0] = target->data_device->getNumRows(); target->size[1] = target->data_device->getNumCols(); return 0; } // convContrastNormUndo(v, _denoms, _meanDiffs, getActs(), _prev[replicaIdx][inpIdx]->getActsGrad(), _channels, _size, _scale, _pow, scaleTargets, 1); extern int local_contrast_norm_undo(cudanetmat* meanDiffs, cudanetmat *denoms, cudanetmat* respGrads, cudanetmat* respActs, cudanetmat* target, int channels, int sizeX, float scale, float power, float scaleTargets) { if (!meanDiffs->on_device || !denoms->on_device || !respGrads->on_device || !respActs->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (respGrads->is_trans || respActs->is_trans || target->is_trans) return ERROR_TRANSPOSEDNESS; if (respGrads->size[0]!=respActs->size[0] || respGrads->size[1] != respActs->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (meanDiffs->size[0]!=target->size[0] || meanDiffs->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; convContrastNormUndo(*(respGrads->data_device), *(denoms->data_device), *(meanDiffs->data_device), *(respActs->data_device), *(target->data_device), channels, sizeX, scale, power, scaleTargets, 1); target->size[0] = target->data_device->getNumRows(); target->size[1] = target->data_device->getNumCols(); return 0; } extern int adadelta_update(cudanetmat* grads, cudanetmat* eGradSq, cudanetmat* eDeltSq, cudanetmat* deltX, float rho, float eps){ int errcheck = elementwise_check3(grads, eGradSq, eDeltSq); if (errcheck !=0) return errcheck; errcheck = elementwise_check2(grads, deltX); if (errcheck !=0) return errcheck; // This operator is used to compute the decay updates: a(t) = a(t-1) * rho + b(t)*b(t) * (1-rho) NVMatrixBinaryOps::AxPBysq sqwadd = NVMatrixBinaryOps::AxPBysq(rho, 1-rho); NVMatrixTernaryOps::SqrtRatioMult srmult = NVMatrixTernaryOps::SqrtRatioMult(eps); eGradSq->data_device->applyBinary(sqwadd, *(grads->data_device)); eDeltSq->data_device->applyTernary(srmult, *(eGradSq->data_device), *(grads->data_device), *(deltX->data_device)); eDeltSq->data_device->applyBinary(sqwadd, *(deltX->data_device)); return 0; } extern int get_vector_slice(cudanetmat* source, cudanetmat* target, unsigned int first_ind, unsigned int last_ind) { // source must be a vector if (source->size[0] > 1 && source->size[1] > 1) return ERROR_GENERIC; if (source->is_trans) return ERROR_TRANSPOSED; if (!source->on_device) return 
ERROR_NOT_ON_DEVICE; if (first_ind >= last_ind) return ERROR_INCOMPATIBLE_DIMENSIONS; if (source->size[0] > 1) { //source is a column vect if (last_ind > source->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; target->size[0] = last_ind - first_ind; target->size[1] = 1; target->data_device = &(source->data_device->slice(first_ind, last_ind, 0,1)); } else { if (last_ind > source->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; //source is a row vect target->size[0] = 1; target->size[1] = last_ind - first_ind; target->data_device = &(source->data_device->slice(0,1,first_ind, last_ind)); } target->on_device = 1; target->on_host = 0; target->is_trans = 0; target->owns_data = 0; return 0; } extern int get_slice_view(cudanetmat* source, cudanetmat* target, unsigned int first_row, unsigned int last_row, unsigned int first_col, unsigned int last_col) { if (source->is_trans) return ERROR_TRANSPOSED; if (!source->on_device) return ERROR_NOT_ON_DEVICE; if (last_col > source->size[1] || (first_col >= last_col)) return ERROR_INCOMPATIBLE_DIMENSIONS; if (last_row > source->size[0] || (first_row >= last_row)) return ERROR_INCOMPATIBLE_DIMENSIONS; target->data_device = &(source->data_device->slice(first_row, last_row, first_col, last_col)); target->data_host = NULL; target->on_device = 1; target->on_host = 0; target->size[0] = last_row - first_row; target->size[1] = last_col - first_col; target->is_trans = 0; target->owns_data = 0; return 0; } extern int get_col_slice_view(cudanetmat* source, cudanetmat* target, unsigned int first_col, unsigned int last_col) { return get_slice_view(source, target, 0, source->size[0], first_col, last_col); } extern int get_row_slice_view(cudanetmat* source, cudanetmat* target, unsigned int first_row, unsigned int last_row) { return get_slice_view(source, target, first_row, last_row, 0, source->size[1]); } extern int get_col_slice_copy(cudanetmat* source, cudanetmat* target, unsigned int first_col, unsigned int last_col) { if (source->is_trans) return ERROR_TRANSPOSED; if (!source->on_device) return ERROR_NOT_ON_DEVICE; if (last_col > source->size[1] || (first_col >= last_col)) return ERROR_INCOMPATIBLE_DIMENSIONS; source->data_device->sliceCols(first_col, last_col, *(target->data_device)); target->on_device = 1; target->on_host = 0; target->size[0] = source->size[0]; target->size[1] = last_col - first_col; target->is_trans = 0; target->owns_data = 1; return 0; } extern int get_row_slice_copy(cudanetmat* source, cudanetmat* target, unsigned int first_row, unsigned int last_row) { if (source->is_trans) return ERROR_TRANSPOSED; if (!source->on_device) return ERROR_NOT_ON_DEVICE; if (last_row > source->size[0] || (first_row >= last_row)) return ERROR_INCOMPATIBLE_DIMENSIONS; source->data_device->sliceRows(first_row, last_row, *(target->data_device)); target->on_device = 1; target->on_host = 0; target->size[1] = source->size[1]; target->size[0] = last_row - first_row; target->is_trans = 0; target->owns_data = 1; return 0; } extern int add_mult(cudanetmat* mat1, cudanetmat* mat2, float alpha, float beta) { if (!mat1->on_device || !mat2->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat1->data_device->add(*(mat2->data_device), alpha, beta); return 0; } extern int set_col_slice(cudanetmat* source, cudanetmat* target, unsigned int start, unsigned int end) { int height = target->size[0]; int width = target->size[1]; if 
((end - start) != source->size[1] || source->size[0] != height || start >= end || end > width) return ERROR_INCOMPATIBLE_DIMENSIONS; source->data_device->copy(*(target->data_device), 0, source->size[0], 0, source->size[1], 0, start); return 0; } extern int set_row_slice(cudanetmat* source, cudanetmat* target, unsigned int start, unsigned int end) { int height = target->size[0]; int width = target->size[1]; if ((end - start) != source->size[0] || source->size[1] != width || start >= end || end > height) return ERROR_INCOMPATIBLE_DIMENSIONS; source->data_device->copy(*(target->data_device), 0, source->size[0], 0, source->size[1], start, 0); return 0; } extern int assign_col_slice(cudanetmat* source, unsigned int start, unsigned int end, float val) { int height = source->size[0]; int width = source->size[1]; if (start >= end || end > width) return ERROR_INCOMPATIBLE_DIMENSIONS; source->data_device->assignSlice(0, height, start, end, val); return 0; } extern int assign_row_slice(cudanetmat* source, unsigned int start, unsigned int end, float val) { int height = source->size[0]; int width = source->size[1]; if (start >= end || end > height) return ERROR_INCOMPATIBLE_DIMENSIONS; source->data_device->assignSlice(start, end, 0, width, val); return 0; } extern int apply_pow_matrix(cudanetmat* mat, cudanetmat* pow, cudanetmat* target) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->size[0] != pow->size[0] || mat->size[1] != pow->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->applyBinary(NVMatrixBinaryOps::Power(), *(pow->data_device), *(target->data_device)); return 0; } extern int print_devmat(cudanetmat* mat) { if (!mat->on_device) return ERROR_NOT_ON_DEVICE; mat->data_device->print(0, mat->data_device->getNumRows(), 0, mat->data_device->getNumCols()); printf("stride: %d ld: %d, fd:%d\n", mat->data_device->getStride(), mat->data_device->getLeadingDim(), mat->data_device->getFollowingDim()); return 0; } // extern int apply_pow_matrix(cudanetmat* mat, cudanetmat* pow, cudanetmat* target) { // int errcheck = elementwise_check2(mat, target); // mat->data_device->func(*(target->data_device)); // } extern int reciprocal(cudanetmat* mat, cudanetmat* target) { int errcheck = elementwise_check2(mat, target); if (errcheck !=0) return errcheck; mat->data_device->apply(NVMatrixOps::Reciprocal(), *(target->data_device)); return 0; } extern int free_device_memory(cudanetmat* mat) { if (mat->owns_data && mat->on_device) { delete mat->data_device; mat->data_device = NULL; mat->on_device = 0; } return 0; } extern float euclid_norm(cudanetmat* mat, int* err_code) { if (!mat->on_device) { *err_code = ERROR_NOT_ON_DEVICE; return -1.; } float res = mat->data_device->norm(); *err_code = 0; return res; } extern float manhattan_norm(cudanetmat* mat, int* err_code) { if (!mat->on_device) { *err_code = ERROR_NOT_ON_DEVICE; return -1.; } float res = mat->data_device->sumabs(); *err_code = 0; return res; } extern int less_than(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target) { if (!mat1->on_device || !mat2->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat1->data_device->applyBinary(NVMatrixBinaryOps::SmallerThan(), 
*(mat2->data_device), *(target->data_device)); return 0; } extern int less_than_scalar(cudanetmat* mat, float val, cudanetmat* target) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->apply(NVMatrixOps::SmallerThanScalar(val), *(target->data_device)); return 0; } extern int greater_than(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target) { if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat1->data_device->applyBinary(NVMatrixBinaryOps::BiggerThan(), *(mat2->data_device), *(target->data_device)); return 0; } extern int greater_than_scalar(cudanetmat* mat, float val, cudanetmat* target) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->apply(NVMatrixOps::BiggerThanScalar(val), *(target->data_device)); return 0; } extern int equals(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target) { if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat1->data_device->applyBinary(NVMatrixBinaryOps::Equals(), *(mat2->data_device), *(target->data_device)); return 0; } extern int equals_scalar(cudanetmat* mat, float val, cudanetmat* target) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->apply(NVMatrixOps::EqualsScalar(val), *(target->data_device)); return 0; } extern int where(cudanetmat* condition_mat, cudanetmat* if_mat, cudanetmat* else_mat, cudanetmat* target) { if (!condition_mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (condition_mat->size[0] != target->size[0] || condition_mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (condition_mat->size[0] != if_mat->size[0] || condition_mat->size[1] != if_mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (condition_mat->size[0] != else_mat->size[0] || condition_mat->size[1] != else_mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; condition_mat->data_device->applyTernary(NVMatrixTernaryOps::Where(), *(if_mat->data_device), *(else_mat->data_device), *(target->data_device)); return 0; } extern int minimum(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target) { if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return 
ERROR_INCOMPATIBLE_DIMENSIONS; mat1->data_device->applyBinary(NVMatrixBinaryOps::Minimum(), *(mat2->data_device), *(target->data_device)); return 0; } extern int minimum_scalar(cudanetmat* mat, float val, cudanetmat* target) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->apply(NVMatrixOps::MinWithScalar(val), *(target->data_device)); return 0; } extern int maximum(cudanetmat* mat1, cudanetmat* mat2, cudanetmat* target) { if (!mat1->on_device || !mat2->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat1->is_trans != mat2->is_trans) return ERROR_TRANSPOSEDNESS; if (mat1->size[0] != mat2->size[0] || mat1->size[1] != mat2->size[1] || mat1->size[0] != target->size[0] || mat1->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat1->data_device->applyBinary(NVMatrixBinaryOps::Maximum(), *(mat2->data_device), *(target->data_device)); return 0; } extern int maximum_scalar(cudanetmat* mat, float val, cudanetmat* target) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans != target->is_trans) return ERROR_TRANSPOSEDNESS; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->apply(NVMatrixOps::MaxWithScalar(val), *(target->data_device)); return 0; } extern int reshape(cudanetmat* mat, unsigned int m, unsigned int n) { if (mat->size[0] * mat->size[1] != m * n) return ERROR_INCOMPATIBLE_DIMENSIONS; if (mat->on_device) mat->data_device->resize(m,n); mat->size[0] = m; mat->size[1] = n; return 0; } extern int add_col_vec(cudanetmat* mat, cudanetmat* vec, cudanetmat* target) { if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->addVector(*(vec->data_device), *(target->data_device)); return 0; } extern int add_col_mult(cudanetmat* mat, cudanetmat* vec, cudanetmat* target, float mult) { if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->addVector(*(vec->data_device), mult, *(target->data_device)); return 0; } extern int add_row_vec(cudanetmat* mat, cudanetmat* vec, cudanetmat* target) { if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[1] != vec->size[1] || vec->size[0] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->addVector(*(vec->data_device), *(target->data_device)); return 0; } extern int mult_by_col_vec(cudanetmat* mat, cudanetmat* vec, cudanetmat* target) { if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->eltwiseMultByVector(*(vec->data_device), *(target->data_device)); 
return 0; } extern int mult_by_row_vec(cudanetmat* mat, cudanetmat* vec, cudanetmat* target) { if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[1] != vec->size[1] || vec->size[0] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->eltwiseMultByVector(*(vec->data_device), *(target->data_device)); return 0; } extern int divide_by_col_vec(cudanetmat* mat, cudanetmat* vec, cudanetmat* target) { if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != vec->size[0] || vec->size[1] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->eltwiseDivideByVector(*(vec->data_device), *(target->data_device)); return 0; } extern int divide_by_row_vec(cudanetmat* mat, cudanetmat* vec, cudanetmat* target) { if (!mat->on_device || !vec->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[1] != vec->size[1] || vec->size[0] != 1 || mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->eltwiseDivideByVector(*(vec->data_device), *(target->data_device)); return 0; } extern int max_by_axis(cudanetmat* mat, cudanetmat* target, int axis) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (axis == -1) { if (target->size[0] != 1 || target->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS; target->data_device->assign(mat->data_device->max()); } else if (axis == 0) { if (target->size[0] != 1 || target->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->max(0, *(target->data_device)); } else { if (target->size[1] != 1 || target->size[0] != mat->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->max(1, *(target->data_device)); } return 0; } extern int min_by_axis(cudanetmat* mat, cudanetmat* target, int axis) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (axis == -1) { if (target->size[0] != 1 || target->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS; target->data_device->assign(mat->data_device->min()); } else if (axis == 0) { if (target->size[0] != 1 || target->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->min(0, *(target->data_device)); } else { if (target->size[1] != 1 || target->size[0] != mat->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->min(1, *(target->data_device)); } return 0; } extern int sum(cudanetmat* mat, cudanetmat* target, int axis) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (axis == -1) { if (target->size[0] != 1 || target->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS; target->data_device->assign(mat->data_device->sum()); } else if (axis == 0) { if (target->size[0] != 1 || target->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->sum(0, *(target->data_device)); } else { if (target->size[1] != 1 || target->size[0] != mat->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->sum(1, *(target->data_device)); } return 0; } extern int sumsq(cudanetmat* mat, cudanetmat* target, int axis) { if (!mat->on_device || 
!target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (axis == -1) { if (target->size[0] != 1 || target->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS; NVMatrix tmp; mat->data_device->sumOfSquares(0, tmp); target->data_device->assign(tmp.sum()); } else if (axis == 0) { if (target->size[0] != 1 || target->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->sumOfSquares(0, *(target->data_device)); } else { if (target->size[1] != 1 || target->size[0] != mat->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->sumOfSquares(1, *(target->data_device)); } return 0; } extern int mean(cudanetmat* mat, cudanetmat* target, int axis) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (axis == -1) { if (target->size[0] != 1 || target->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS; target->data_device->assign(mat->data_device->mean()); } else if (axis == 0) { if (target->size[0] != 1 || target->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->mean(0, *(target->data_device)); } else { if (target->size[1] != 1 || target->size[0] != mat->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->mean(1, *(target->data_device)); } return 0; } extern int var(cudanetmat* mat, cudanetmat* mean, cudanetmat* target, int axis) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (axis == -1) { if (target->size[0] != 1 || target->size[1] != 1) return ERROR_INCOMPATIBLE_DIMENSIONS; return ERROR_UNSUPPORTED; } else if (axis == 0) { if (target->size[0] != 1 || target->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->var(0, *(mean->data_device), *(target->data_device)); } else { if (target->size[1] != 1 || target->size[0] != mat->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->var(1, *(mean->data_device), *(target->data_device)); } return 0; } extern int mean_norm(cudanetmat* mat, cudanetmat* target, int axis) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (mat->size[0] != target->size[0] || mat->size[1] != target->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; if (axis == -1) { float mval = mat->data_device->mean(); mat->data_device->addScalar(-mval, *(target->data_device)); } else if (axis == 0 || axis == 1) { NVMatrix mvals; mat->data_device->mean(axis, mvals); mat->data_device->addVector(mvals, -1.0, *(target->data_device)); } else { return ERROR_UNSUPPORTED; } return 0; } extern int argmax_by_axis(cudanetmat* mat, cudanetmat* target, int axis) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (axis == 0) { if (target->size[0] != 1 || target->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->argmax(0, *(target->data_device)); } else { if (target->size[1] != 1 || target->size[0] != mat->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->argmax(1, *(target->data_device)); } return 0; } extern int argmin_by_axis(cudanetmat* mat, cudanetmat* target, int axis) { if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE; if (mat->is_trans) return ERROR_TRANSPOSED; if (axis == 0) { if (target->size[0] != 1 || target->size[1] != mat->size[1]) return ERROR_INCOMPATIBLE_DIMENSIONS; mat->data_device->argmin(0, 
*(target->data_device));
    } else {
        if (target->size[1] != 1 || target->size[0] != mat->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS;
        mat->data_device->argmin(1, *(target->data_device));
    }
    return 0;
}

extern int copy_transpose(cudanetmat* source, cudanetmat* target) {
    if (source->size[0] != target->size[1] || source->size[1] != target->size[0]) return ERROR_INCOMPATIBLE_DIMENSIONS;
    source->data_device->transpose(*(target->data_device));
    return 0;
}

extern int xcov(cudanetmat* X, cudanetmat* Y, cudanetmat* covMat, int normX, int normY, float normAll) {
    if (!X->on_device || !Y->on_device || !covMat->on_device) return ERROR_NOT_ON_DEVICE;
    if (X->is_trans || Y->is_trans || covMat->is_trans) return ERROR_TRANSPOSED;
    if (get_nonleading_dimension(Y) != get_nonleading_dimension(X) || get_leading_dimension(X) != get_leading_dimension(covMat) || get_leading_dimension(Y) != get_nonleading_dimension(covMat)) {
        return ERROR_INCOMPATIBLE_DIMENSIONS;
    }
    // Mean normalize each input matrix along major axis (for _cudanet, this is along 1) matrices are K x N
    // Xmean and Ymean are K-dim row vectors
    NVMatrix Xmean, Ymean;
    X->data_device->mean(1, Xmean);
    Y->data_device->mean(1, Ymean);
    // Now normalize in each
    NVMatrix Xnorm, Ynorm;
    X->data_device->addVector(Xmean, -1*normX, Xnorm);
    Y->data_device->addVector(Ymean, -1*normY, Ynorm);
    // Now calc the norm into covMat
    covMat->data_device->addProductRM(Xnorm, Ynorm, 0, 1/normAll, 0 /* trans of X */, 1 /* non-trans of Y*/);
    return 0;
}

extern unsigned long int get_gpu_pointer(cudanetmat* source) {
    return (unsigned long int) source->data_device->getDevData();
}

extern PyObject* get_gpu_pythonbuf(cudanetmat* source) {
    PyObject* py_buf = PyBuffer_FromReadWriteMemory((void *) (source->data_device->getDevData()), source->data_device->getNumElements() * sizeof(float));
    Py_INCREF(py_buf);
    return py_buf;
}

extern int multi_ranked_error(cudanetmat* probs, cudanetmat* labels, cudanetmat *labellogprob, cudanetmat* top1probs, cudanetmat* topkprobs, int topk) {
    NVMatrix _maxProbs;
    probs->data_device->max(0, _maxProbs);
    computeMultiSoftmaxCost(*(labels->data_device), *(probs->data_device), _maxProbs, *(labellogprob->data_device), *(top1probs->data_device), *(topkprobs->data_device), topk);
    return 0;
}

// If axis == 0, then mat is K x N where K is number of outputs, N is number of examples
// If axis == 1, then mat is N x K where K is number of outputs, N is number of examples
// Cudanet convention is axis = 0, so
extern int softmax(cudanetmat* mat, cudanetmat* target, int axis) {
    if (!mat->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE;
    if (mat->is_trans || target->is_trans) return ERROR_TRANSPOSED;
    NVMatrix _max, _sum;
    NVMatrix& input = *(mat->data_device);
    NVMatrix& tgt = *(target->data_device);
    input.max(axis, _max);
    input.addVector(_max, -1, tgt);
    tgt.apply(NVMatrixOps::Exp());
    tgt.sum(axis, _sum);
    tgt.eltwiseDivideByVector(_sum);
    return 0;
}

// acts, actsGrad, and target are all numOut x BatchSize
extern int softmax_grad(cudanetmat* acts, cudanetmat* actsGrad, cudanetmat* target) {
    if (!acts->on_device || !actsGrad->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE;
    if (acts->is_trans || actsGrad->is_trans || target->is_trans) return ERROR_TRANSPOSED;
    int errcheck = elementwise_check3(acts, actsGrad, target);
    if (errcheck !=0) return errcheck;
    acts->data_device->transpose(true);
    actsGrad->data_device->transpose(true);
    target->data_device->transpose(true);
    //Change assertion in computeSoftmaxgrad to just ensure that acts and actsGrad are same
    computeSoftmaxGrad(*(acts->data_device), *(actsGrad->data_device), *(target->data_device), 0, 1);
    acts->data_device->transpose(false);
    actsGrad->data_device->transpose(false);
    target->data_device->transpose(false);
    return 0;
}

// labels and outputs are numOut x BatchSize, target is 1 x BatchSize
extern int crossent_cost(cudanetmat* labels, cudanetmat* outputs, cudanetmat* target) {
    if (!labels->on_device || !outputs->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE;
    if (labels->is_trans || outputs->is_trans || target->is_trans) return ERROR_TRANSPOSED;
    int errcheck = elementwise_check2(labels, outputs);
    if (errcheck !=0) return errcheck;
    NVMatrix correctProbs_out; // This gets resized in cost call
    computeCrossEntCost(*(labels->data_device), *(outputs->data_device), *(target->data_device), correctProbs_out);
    return 0;
}

extern int crossent_cost_grad(cudanetmat* labels, cudanetmat* outputs, cudanetmat* target) {
    if (!labels->on_device || !outputs->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE;
    if (labels->is_trans || outputs->is_trans || target->is_trans) return ERROR_TRANSPOSED;
    int errcheck = elementwise_check2(labels, outputs);
    if (errcheck !=0) return errcheck;
    computeCrossEntGrad(*(labels->data_device), *(outputs->data_device), *(target->data_device), 0, 1);
    return 0;
}

extern int weight_norm_along_axis(cudanetmat* weights, cudanetmat* target, int axis, float norm) {
    // checks if the l2 norm of weights along axis is greater than norm -- if so, scale so l2norm(weights) is norm
    if (!weights->on_device || !target->on_device) return ERROR_NOT_ON_DEVICE;
    if (weights->is_trans || target->is_trans) return ERROR_TRANSPOSED;
    if (axis!=0 && axis!=1) return ERROR_UNSUPPORTED;
    NVMatrix normVect;
    weights->data_device->sumOfSquares(axis, normVect);
    normVect.apply(MaxWeightConstraintOperator(norm));
    weights->data_device->eltwiseMultByVector(normVect, *(target->data_device));
    return 0;
}

extern PyObject *test_make_tuple(int nval) {
    PyObject *t;
    t = Py_BuildValue("(iis)", nval, nval, "three");
    return t;
}

// These are still to do
// Weight column norm
// softmax grad
// cross entropy multi-class cost
//
}
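/*
 * Calling convention of the wrappers above: each routine returns 0 on success or one
 * of the ERROR_* codes (ERROR_NOT_ON_DEVICE, ERROR_TRANSPOSEDNESS,
 * ERROR_INCOMPATIBLE_DIMENSIONS, ...), so the caller is expected to check every
 * return value; the scalar-returning entry points (vdot, euclid_norm, manhattan_norm)
 * additionally report errors through their int* err_code argument. A minimal
 * caller-side sketch (variable names here are hypothetical):
 *
 *     int rc = add_vector(&acts, &bias, 1.0f, &acts);
 *     if (rc != 0) { ... handle the ERROR_* code ... }
 */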
341fc4ac02f57839761f611bdb379c146bd8f712.hip
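/*
 * Boruvka's minimum spanning tree on the GPU using CUDPP primitives. Each call to
 * boruvka() performs one contraction round: (1) mergeEdgeAndWeight packs each edge's
 * weight and destination vertex into one 32-bit key (weight in the upper bits) and a
 * segmented min-scan over per-vertex edge segments finds the cheapest outgoing edge,
 * (2) createSuccArray/eliminateCycles build a successor array and break 2-cycles,
 * (3) pointer doubling (propagateID), a radix sort and a prefix scan collapse the
 * components into supervertices, and (4) edgeCompression/BuildVertexList compact the
 * edge and vertex lists for the next round. main() repeats boruvka() until a single
 * vertex remains; edges chosen for the MST are flagged in d_MSTOutput.
 */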
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<math.h>
#include<cudpp.h>
#include<limits.h>
#include <sys/time.h>

#define NO_OF_THREADS_PER_BLOCK 1024

float f;
unsigned int noOfEdges;
unsigned int noOfVertices;
unsigned int *vertices;
unsigned int *edges;
unsigned int *weights;
unsigned int *d_size;
unsigned int *d_edgeListSize;
unsigned int *d_vertexListSize;
unsigned int *segmentedMinScanInput;
unsigned int *d_segmentedMinScanInput;
unsigned int *d_segmentedMinScanOutput;
unsigned int *d_previousIDs;
unsigned int *d_successorArray;
unsigned int *d_successorArrayTemp;
unsigned int *d_indices;
unsigned int *d_edgeMap;
unsigned int *d_edgeMapCopy;
unsigned int *d_edgesCopy;
unsigned int *d_edgeIndices;
unsigned int *d_superVertexID;
unsigned int *d_superEdgeId;
unsigned int *d_MSTOutput;
unsigned int *h_MSTOutput;
unsigned int *d_edges;
unsigned int *d_vertices;
unsigned int *d_weights;
unsigned int *d_edgeFlagArray;
unsigned int *d_vertexFlagArray;
unsigned int noOfEdgesOriginal;
unsigned int noOfVerticesOriginal;
int *d_pickArray;

CUDPPHandle theCudpp;
CUDPPHandle segmentedScanPlan_min;
CUDPPConfiguration segmented_min_scan_config;
CUDPPHandle scanPlan;
CUDPPConfiguration scan_config;
CUDPPHandle sortPlan;
CUDPPConfiguration config_sort;

/* Append vertexid and edge into a single integer of an array*/
__global__ void mergeEdgeAndWeight(unsigned int *d_segmentedMinScanInput, unsigned int *d_vertices, unsigned int *d_weight, unsigned int *d_edges, unsigned int noOfEdges) {
    unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
    if(index < noOfEdges) {
        unsigned int temp = d_weight[index];
        d_segmentedMinScanInput[index] = (temp<<22) | d_edges[index];
    }
}

/* initialise all entries of array pointed by d_array of given size to 0*/
__global__ void initArray(unsigned int *d_Array, unsigned int size) {
    unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
    if(index < size) {
        d_Array[index] = 0;
    }
}

__global__ void initArray1(unsigned int *d_Array, unsigned int size, int t) {
    unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
    if(index < size && index >= t)
        d_Array[index] = 0;
}

__global__ void printArr(unsigned int *d_arr, unsigned int size) {
    unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
    if (index < size) {
        printf("%d ", d_arr[index]);
    }
    printf("\n");
}

/* creates a flag array for segmented scan. Sets to 1 the index from where outgoing vertex starts*/
__global__ void markSegment(unsigned int *d_edgeFlagArray, unsigned int *d_vertex, unsigned int *d_edges, unsigned int noOfVertices) {
    unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
    if(index < noOfVertices) {
        d_edgeFlagArray[d_vertex[index]] = 1;
    }
}

/*prints new edge and vertex size*/
__global__ void print(unsigned int *d_edgeListSize, unsigned int *d_vertexListSize) {
    printf("Edges: %d, Vertices %d \n", *d_edgeListSize, *d_vertexListSize);
}

/*creates successor array*/
__global__ void createSuccArray(unsigned int *d_successorArray, unsigned int *d_vertices, unsigned int *d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int noOfVertices, unsigned int noOfEdges) {
    unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
    unsigned int minEdgeIndex;
    if(index < noOfVertices) {
        //index is same as vertex ID
        if (index == noOfVertices-1)
            minEdgeIndex = noOfEdges - 1;
        else
            minEdgeIndex = d_vertices[index+1] - 1; // min value is stored in loc of last neighbour
        unsigned int val = d_segmentedMinScanOutput[minEdgeIndex];
        //unsigned int minWeight = val >> 22;
        unsigned int minVertex = val & (unsigned int)(pow(2.0,22)-1);
        d_successorArray[index] = minVertex;
    }
}

/*removes cycles from successor array*/
__global__ void eliminateCycles(unsigned int *d_successor, unsigned int noOfVertices) {
    unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
    if(index < noOfVertices) {
        unsigned int succIndex = d_successor[d_successor[index]];
        if(index == succIndex) {
            if(index < d_successor[index]) {
                d_successor[index] = index;
            } else {
                d_successor[d_successor[index]]= d_successor[index];
            }
        }
    }
}

/* hybrid implementation of markSegment function */
__global__ void markSegment1(unsigned int *d_edgeFlagArray, unsigned int *d_vertex, unsigned int noOfVertices) {
    unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
    if(index < noOfVertices && index > 0) {
        d_edgeFlagArray[d_vertex[index]] = 1;
    }
}

/*This function is to determine which edges are actually needed*/
__global__ void populatePArray(int *d_pickArray, unsigned int *d_vertices, unsigned int *d_successor, unsigned int *d_preIDs, unsigned int noOfVertices, unsigned int noOfEdges) {
    unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
    if(index < noOfEdges) {
        if(d_preIDs[index] != d_successor[d_preIDs[index]]) {
            if(d_preIDs[index] < (noOfVertices - 1))
                d_pickArray[index] = d_vertices[d_preIDs[index]+1] - 1;
            else
                d_pickArray[index] = noOfEdges - 1;
        }
        else
            d_pickArray[index] = -1;
    }
}

/*This function determines which edges will be part of output*/
__global__ void AppendOutputEdges(int *d_pickArray, unsigned int * d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int *d_MSTOutput, unsigned int *d_edgeMap, unsigned int noOfEdges) {
    unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x;
    if(index < noOfEdges && d_pickArray[index] >= 0) {
        unsigned int edgeid = d_edgeMap[index];
        unsigned int prev = 0;
        int temp = -1;
        unsigned int segmentedOutput = d_segmentedMinScanOutput[d_pickArray[index]];
        unsigned int currIndex = d_segmentedMinScanOutput[index];
        if(index > 0) {
            temp = d_pickArray[index-1];
            prev = d_segmentedMinScanOutput[index-1];
        }
        if(d_pickArray[index] != temp) {
            if(currIndex == segmentedOutput) {
                d_MSTOutput[edgeid]=1;
            }
        } else {
            if(currIndex != prev && currIndex == segmentedOutput) {
                d_MSTOutput[edgeid]=1;
            }
        }
    }
}

/*This function sets each value of array equal
to its index*/ __global__ void setIndices(unsigned int *d_arr,unsigned int size) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < size) d_arr[index] = index; } /* This function copies data from original successorArray so that it can be used for new computation*/ __global__ void setIndices1(unsigned int *d_arr,unsigned int size, int l) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if (index < size && index >= l) d_arr[index] = index; } __global__ void makeTempSuccCopy(unsigned int *d_successorArray, unsigned int* d_vertex, unsigned int *d_successorArrayTemp, unsigned int noOfVertices) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfVertices) { unsigned int t = d_successorArray[index]; d_successorArrayTemp[index] = t; } } /* This function copies data from temporary successorArray so that it can be updated with correct value */ __global__ void updateSuccArray(unsigned int *d_successorArray, unsigned int* d_vertex, unsigned int *d_successorArrayTemp, unsigned int noOfVertices) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfVertices) { unsigned int t = d_successorArrayTemp[index]; d_successorArray[index] = t; } } /* This function uses pointer doubling to assign representative id to each vertex*/ __global__ void propagateRepVertexID(unsigned int *d_successorArray, bool *d_isSuccUpdated, unsigned int *d_previousIDs, unsigned int *d_successorArrayTemp, unsigned int noOfVertices) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfVertices) { unsigned int successor = d_successorArray[index]; if(successor != d_successorArray[successor]) { //Eindex = 2 and end = 6 and u = 2 and succ[u] = 2 *d_isSuccUpdated=true; d_successorArrayTemp[index] = d_successorArray[successor]; } } } /* This function iteratively sets s(s(u)) = u and propogates representative vertex id*/ void propagateID(unsigned int noOfBlocks_vertices, unsigned int noOfThreads_vertices) { bool succchange; bool *d_isSuccUpdated; hipMalloc(&d_successorArrayTemp, sizeof(int)*noOfVertices); hipMalloc((void**)&d_isSuccUpdated, sizeof(bool)); do { succchange=false; hipMemcpy(d_isSuccUpdated, &succchange, sizeof(bool), hipMemcpyHostToDevice); hipLaunchKernelGGL(( makeTempSuccCopy), dim3(noOfBlocks_vertices),dim3(noOfThreads_vertices), 0, 0, d_successorArray, d_vertices, d_successorArrayTemp, noOfVertices); hipLaunchKernelGGL(( propagateRepVertexID), dim3(noOfBlocks_vertices),dim3(noOfThreads_vertices), 0, 0, d_successorArray, d_isSuccUpdated, d_previousIDs,d_successorArrayTemp, noOfVertices); hipLaunchKernelGGL(( updateSuccArray), dim3(noOfBlocks_vertices),dim3(noOfThreads_vertices), 0, 0, d_successorArray, d_vertices, d_successorArrayTemp, noOfVertices); hipMemcpy(&succchange, d_isSuccUpdated, sizeof(bool), hipMemcpyDeviceToHost); }while(succchange); hipFree(d_successorArrayTemp); hipFree(d_isSuccUpdated); } /*This function creates scan flag*/ void __global__ createScanFlag(unsigned int *d_vertexFlagArray, unsigned int *d_successorArray, unsigned int noOfVertices) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfVertices && index > 0) { unsigned int prev_val = d_successorArray[index-1]; unsigned int curr_val = d_successorArray[index]; if (prev_val != curr_val) { d_vertexFlagArray[index] = 1; } } } /*This function assigns supervertex id to each vertex*/ __global__ void assignSuperVertexID(unsigned int *d_superVertex, 
unsigned int *d_indices, unsigned int *d_vertexFlagArray,unsigned int *d_previousIDs,unsigned int noOfVertices) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfVertices) { d_vertexFlagArray[d_indices[index]] = d_superVertex[index]; } } /* This function updates supervertexid */ __global__ void updateSuperVertexID(unsigned int *d_superVertex,unsigned int *d_arr,unsigned int *d_vertexFlagArray, unsigned int noOfVertices) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfVertices) { unsigned int newId = d_vertexFlagArray[index]; d_superVertex[index] = newId; } } /* This function removes self edges after successor array is created */ __global__ void removeSelfEdges(unsigned int *d_edges, unsigned int *d_prevIds,unsigned int *d_superVertexID, unsigned int noOfEdges) { unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfEdges) { unsigned int uid = d_superVertexID[d_prevIds[index]]; //vause d_prevIds[index] is 1 to 6 but we need 0 to 5 unsigned int vid = d_superVertexID[d_edges[index]]; if(uid == vid) { d_edges[index]=INT_MAX; } } } /* This function is to assign new super edge id*/ __global__ void assignSuperEdgeId(unsigned int *d_superEdgeId, unsigned int *d_previousIds, unsigned int *d_superVertexId, unsigned int *d_edge, unsigned int noOfEdges) { unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfEdges) { unsigned int x = d_previousIds[index]; unsigned int id = INT_MAX; if (x != INT_MAX && d_edge[index] != INT_MAX) { id = d_superVertexId[x]; } d_superEdgeId[index] = id; } } /* This function is to compress the edge list*/ __global__ void edgeCompression(unsigned int *d_edges, unsigned int *d_weights, unsigned int *d_vertex, unsigned int *d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int *d_superVertexID, unsigned int *d_edgeMap, unsigned int *d_edgeMapCopy, unsigned int *d_edgeFlagArray, unsigned int *d_superEdgeId, unsigned int * d_edgeIndices, int *d_pickArray, unsigned int *d_size, unsigned int *d_edgeListSize, unsigned int *d_vertexListSize) { unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < *d_size) { unsigned int id = d_edgeIndices[index]; if(d_superEdgeId[index] != INT_MAX && d_edges[id] != INT_MAX) { if(index == *d_size-1) { *d_edgeListSize = index + 1; *d_vertexListSize = d_superEdgeId[index] + 1; } d_segmentedMinScanOutput[index] = d_weights[id]; d_segmentedMinScanInput[index] = d_superVertexID[d_edges[id]]; d_pickArray[index] = d_superEdgeId[index]; d_edgeMapCopy[index] = d_edgeMap[id]; } } } /*This function copies the temporary array to arrays which will be actually used*/ __global__ void copyArrays(unsigned int *d_edges, unsigned int *d_weights, unsigned int *vertex, unsigned int *d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int *d_edgeMap, unsigned int *d_edgeMapCopy, unsigned int *d_edgeCopy, unsigned int *d_size) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < *d_size) { unsigned int p = d_segmentedMinScanInput[index]; d_edges[index] = p; unsigned int wt = d_segmentedMinScanOutput[index]; d_weights[index] = wt; unsigned int mapVal = d_edgeMapCopy[index]; d_edgeMap[index] = mapVal; } } /*This function determines the new edge list*/ __global__ void makeEdgeList(unsigned int *d_edgeFlagArray, unsigned int *d_edges, unsigned int *d_superEdgeId, unsigned int *d_size, unsigned int noOfEdges) { unsigned int 
index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index == 0) { d_edgeFlagArray[index] = 1; } else if(index < noOfEdges && index > 0) { if(d_superEdgeId[index-1] != INT_MAX && d_superEdgeId[index] == INT_MAX) { *d_size = index; } if(d_superEdgeId[index] > d_superEdgeId[index-1]) { d_edgeFlagArray[index] = 1; } } } /*This function helps in creating new vertices list for next iteration*/ __global__ void CreateVertexListFlag(unsigned int *d_edgeFlagArray, unsigned int *d_vertices, int *d_pickArray, unsigned int noOfEdges) { unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index == 0) { d_edgeFlagArray[index] = 1; } else if(index < noOfEdges && index > 0) { if(d_pickArray[index] > d_pickArray[index-1]) { d_edgeFlagArray[index] = 1; } } } /*This function helps to build new vertex list*/ __global__ void BuildVertexList(unsigned int *d_vertices, unsigned int *d_edges, int *d_pickArray, unsigned int *d_edgeFlagArray, unsigned int noOfEdges) { unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfEdges && d_edgeFlagArray[index] == 1) { d_vertices[d_pickArray[index]] = index; } } /* Parse the input file to setup our graph * we set the relevant arrays here */ void parseInputFile(char *fileName) { unsigned int x,temp; unsigned int edgeNo, weightOfEdge; FILE *fp; fp = fopen(fileName,"r"); printf("\n Parsing Input File: \n"); fscanf(fp,"%d",&noOfVertices); vertices = (unsigned int *)malloc(sizeof(unsigned int) * noOfVertices); int i; for (i=0; i<noOfVertices; i++) { fscanf(fp,"%d %d",&x, &temp); vertices[i] = x; } fscanf(fp,"%d",&temp); fscanf(fp,"%d",&noOfEdges); edges = (unsigned int *)malloc(sizeof(unsigned int)*noOfEdges); weights = (unsigned int *)malloc(sizeof(unsigned int)*noOfEdges); for(i=0; i<noOfEdges; i++) { fscanf(fp,"%d %d",&edgeNo, &weightOfEdge); edges[i] = edgeNo; weights[i] = weightOfEdge; } printf("No. of Vertices in Input: %d\n",noOfVertices); printf("No. 
of Edges in Input: %d\n", noOfEdges); fclose(fp); } /* this is to setup configuration parameters for various primitives*/ void setupPlan() { cudppCreate(&theCudpp); scan_config.algorithm = CUDPP_SCAN; scan_config.op = CUDPP_ADD; scan_config.datatype = CUDPP_UINT; scan_config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_INCLUSIVE; segmented_min_scan_config.algorithm = CUDPP_SEGMENTED_SCAN; segmented_min_scan_config.op = CUDPP_MIN; segmented_min_scan_config.datatype = CUDPP_UINT; segmented_min_scan_config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_INCLUSIVE; config_sort.algorithm = CUDPP_SORT_RADIX; config_sort.datatype = CUDPP_UINT; config_sort.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_KEY_VALUE_PAIRS; f = 0.05; } /* Dynamically allocate necessary arrays*/ void mallocArr() { hipMalloc(&d_segmentedMinScanInput, sizeof(unsigned int )*noOfEdges); hipMalloc(&d_weights, sizeof(unsigned int )*noOfEdges); hipMalloc(&d_edges, sizeof(unsigned int )*noOfEdges); hipMalloc(&d_vertices, sizeof(unsigned int )*noOfVertices); hipMalloc(&d_edgeFlagArray, sizeof(unsigned int )*noOfEdges); hipMalloc(&d_segmentedMinScanOutput, sizeof(unsigned int )*noOfEdges); hipMalloc(&d_successorArray, sizeof(unsigned int )*noOfVertices); hipMalloc(&d_previousIDs, sizeof(unsigned int )*noOfEdges); hipMalloc(&d_pickArray, sizeof(int )*noOfEdges); hipMalloc(&d_superVertexID, sizeof(unsigned int )*noOfVertices); hipMalloc(&d_MSTOutput, sizeof(unsigned int )*noOfEdges); hipMalloc(&d_indices, sizeof(unsigned int )*noOfVertices); hipMalloc(&d_vertexFlagArray, sizeof(unsigned int )*noOfVertices); hipMalloc(&d_superVertexID, sizeof(unsigned int )*noOfVertices); hipMalloc(&d_size, sizeof(unsigned int )); hipMalloc(&d_superEdgeId, sizeof(unsigned int )*noOfEdges); hipMalloc(&d_edgeIndices, sizeof(unsigned int )*noOfEdges); hipMalloc(&d_edgeListSize, sizeof(unsigned int )); hipMalloc(&d_vertexListSize, sizeof(unsigned int )); hipMalloc(&d_edgeMapCopy, sizeof(unsigned int )*noOfEdges); hipMalloc(&d_edgeMap, sizeof(unsigned int )*noOfEdges); h_MSTOutput = (unsigned int *)malloc(sizeof(unsigned int )*noOfEdges); } /*Free the dynamically allocated memory. 
Do other cleanup here*/
void cleanUp() {
    hipFree(d_edgeIndices);
    hipFree(d_superEdgeId);
    hipFree(d_edgeMap);
    hipFree(d_edgeMapCopy);
    hipFree(d_superVertexID);
    hipFree(d_vertexFlagArray);
    hipFree(d_indices);
    hipFree(d_MSTOutput);
    hipFree(d_previousIDs);
    hipFree(d_pickArray);
    hipFree(d_successorArray);
    hipFree(d_segmentedMinScanOutput);
    hipFree(d_edgeFlagArray);
    hipFree(d_vertices);
    hipFree(d_edges);
    hipFree(d_weights);
    hipFree(d_segmentedMinScanInput);
    hipFree(d_size);
    hipFree(d_edgeListSize);
    hipFree(d_vertexListSize);
    cudppDestroy(theCudpp);
    free(h_MSTOutput);
    free(edges);
    free(vertices);
    free(weights);
}

/* Do basic initialization*/
void initialize() {
    unsigned int i;
    hipMemcpy(d_vertices, vertices, sizeof(unsigned int)*noOfVertices, hipMemcpyHostToDevice);
    hipMemcpy(d_edges, edges, sizeof(unsigned int)*noOfEdges, hipMemcpyHostToDevice);
    hipMemcpy(d_weights, weights, sizeof(unsigned int)*noOfEdges, hipMemcpyHostToDevice);
    unsigned int *temp = (unsigned int *)malloc(sizeof(unsigned int)*noOfEdges);
    for(i=0; i<noOfEdges; i++)
        temp[i] = 0;
    hipMemcpy(d_MSTOutput, temp, sizeof(unsigned int )*noOfEdges, hipMemcpyHostToDevice);
    for(i=0; i<noOfEdges; i++)
        temp[i]=i;
    hipMemcpy(d_edgeMap, temp, sizeof(unsigned int)*noOfEdges, hipMemcpyHostToDevice);
    free(temp);
}

/* Helper function to determine no of threads to be used */
unsigned int getNoOfThreads(unsigned int size) {
    unsigned int threadsPerBlock;
    if (size <= 1024)
        threadsPerBlock = size;
    else
        threadsPerBlock = 1024;
    return threadsPerBlock;
}

void boruvka() {
    int t;
    unsigned int noOfThreads_edge = getNoOfThreads(noOfEdges);
    unsigned int noOfBlocks_edge = (noOfEdges+1024)/noOfThreads_edge;
    unsigned int noOfThreads_vertices = getNoOfThreads(noOfVertices);
    unsigned int noOfBlocks_vertices = (noOfVertices+1024)/noOfThreads_vertices;
    hipError_t error;

    hipLaunchKernelGGL(( mergeEdgeAndWeight), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_segmentedMinScanInput, d_vertices, d_weights, d_edges, noOfEdges);
    error = hipGetLastError();
    if(error != hipSuccess) {
        printf("0.1 CUDA error: %s\n", hipGetErrorString(error));
        exit(-1);
    }

    t = noOfEdges * f;
    if (noOfEdges >= 200) {
        unsigned int *temp_h_efa = (unsigned int *)malloc(t*sizeof(unsigned int));
        hipLaunchKernelGGL(( initArray1), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray, noOfEdges,t);
        int i;
        for(i = 0; i<t;i++)
            temp_h_efa[i] = 0;
        hipDeviceSynchronize();
        hipMemcpy(d_edgeFlagArray, temp_h_efa, sizeof(unsigned int )*t, hipMemcpyHostToDevice);
        free(temp_h_efa);
    } else
        hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray, noOfEdges);
    hipDeviceSynchronize();
    error = hipGetLastError();
    if(error != hipSuccess) {
        printf("At line 577 CUDA error: %s\n", hipGetErrorString(error));
        exit(-1);
    }

    hipLaunchKernelGGL(( markSegment), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_edgeFlagArray, d_vertices, d_edges, noOfVertices);
    error = hipGetLastError();
    if(error != hipSuccess) {
        printf("3 CUDA error: %s\n", hipGetErrorString(error));
        exit(-1);
    }

    cudppPlan(theCudpp, &segmentedScanPlan_min,segmented_min_scan_config, noOfEdges, 1, 0 ); //Make the segmented min scan plan
    cudppSegmentedScan(segmentedScanPlan_min, d_segmentedMinScanOutput, d_segmentedMinScanInput, (const unsigned int *)d_edgeFlagArray, noOfEdges);
    cudppDestroyPlan(segmentedScanPlan_min);
    error = hipGetLastError();
    if(error != hipSuccess) {
        printf("CUDA error: %s\n", hipGetErrorString(error));
        // exit(-1);
    }

    hipLaunchKernelGGL(( createSuccArray),
dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_successorArray, d_vertices, d_segmentedMinScanInput, d_segmentedMinScanOutput, noOfVertices, noOfEdges); hipLaunchKernelGGL(( eliminateCycles), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_successorArray, noOfVertices); t = noOfEdges * f; if (noOfEdges >= 200) { unsigned int *temp_h_efa = (unsigned int *)malloc(t*sizeof(unsigned int)); hipLaunchKernelGGL(( initArray1), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray, noOfEdges,t); int i; for(i = 0; i<t;i++) temp_h_efa[i] = 0; hipDeviceSynchronize(); hipMemcpy(d_edgeFlagArray, temp_h_efa, sizeof(unsigned int )*t, hipMemcpyHostToDevice); free(temp_h_efa); } else hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray, noOfEdges); hipDeviceSynchronize(); hipLaunchKernelGGL(( markSegment1), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_edgeFlagArray, d_vertices, noOfVertices); cudppPlan(theCudpp, &scanPlan, scan_config, noOfEdges, 1, 0); cudppScan(scanPlan, d_previousIDs, d_edgeFlagArray, noOfEdges); cudppDestroyPlan(scanPlan); error = hipGetLastError(); if(error != hipSuccess) { printf("At line 662 CUDA error: %s\n", hipGetErrorString(error)); } t = noOfEdges * f; if(noOfEdges >= 200) { unsigned int *temp_h_pa = (unsigned int *)malloc(t*sizeof(unsigned int)); hipLaunchKernelGGL(( initArray1), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, (unsigned int*)d_pickArray, noOfEdges, t); int i; for(i = 0; i<t;i++) temp_h_pa[i] = 0; hipDeviceSynchronize(); hipMemcpy(d_pickArray, temp_h_pa, sizeof(unsigned int )*t, hipMemcpyHostToDevice); free(temp_h_pa); } else hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, (unsigned int*)d_pickArray, noOfEdges); hipDeviceSynchronize(); hipLaunchKernelGGL(( populatePArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_pickArray, d_vertices, d_successorArray, d_previousIDs, noOfVertices, noOfEdges); hipLaunchKernelGGL(( AppendOutputEdges), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_pickArray, d_segmentedMinScanInput, d_segmentedMinScanOutput, d_MSTOutput, d_edgeMap, noOfEdges); error = hipGetLastError(); if(error != hipSuccess) { printf("At line 691 CUDA error: %s\n", hipGetErrorString(error)); } propagateID(noOfBlocks_vertices, noOfThreads_vertices); t = noOfVertices*f; if(noOfVertices >= 20) { unsigned int *temp_h_setIndices = (unsigned int *)malloc(t*sizeof(unsigned int)); hipLaunchKernelGGL(( setIndices1), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_indices, noOfVertices, t); int i; for(i = 0; i<t;i++) temp_h_setIndices[i] = i; hipDeviceSynchronize(); hipMemcpy(d_indices, temp_h_setIndices, sizeof(unsigned int )*t, hipMemcpyHostToDevice); free(temp_h_setIndices); } else hipLaunchKernelGGL(( setIndices1), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_indices, noOfVertices, 0); hipDeviceSynchronize(); cudppPlan(theCudpp, &sortPlan, config_sort, noOfVertices, 1, 0); cudppRadixSort(sortPlan, d_successorArray, d_indices, noOfVertices); cudppDestroyPlan(sortPlan); hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_vertexFlagArray,noOfVertices); hipLaunchKernelGGL(( createScanFlag), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_vertexFlagArray, d_successorArray,noOfVertices); cudppPlan(theCudpp, &scanPlan, scan_config, noOfVertices, 1, 0); cudppScan(scanPlan, d_superVertexID, d_vertexFlagArray, 
noOfVertices); cudppDestroyPlan(scanPlan); error = hipGetLastError(); if(error != hipSuccess) { printf("At line 726 CUDA error: %s\n", hipGetErrorString(error)); } hipLaunchKernelGGL(( assignSuperVertexID), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_superVertexID,d_indices,d_vertexFlagArray,d_previousIDs,noOfVertices); hipLaunchKernelGGL(( updateSuperVertexID), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_superVertexID,d_indices,d_vertexFlagArray, noOfVertices); hipLaunchKernelGGL(( removeSelfEdges), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edges,d_previousIDs,d_superVertexID,noOfEdges); hipLaunchKernelGGL(( assignSuperEdgeId), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_superEdgeId,d_previousIDs, d_superVertexID, d_edges, noOfEdges); t = noOfEdges*f; //printf("noOfVertices = %d and point = %d\n",noOfVertices, t); if (noOfEdges >= 200) { unsigned int *temp_h_setIndices = (unsigned int *)malloc(t*sizeof(unsigned int)); hipLaunchKernelGGL(( setIndices1), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeIndices, noOfEdges, t); int i; for(i = 0; i<t;i++) temp_h_setIndices[i] = i; hipDeviceSynchronize(); hipMemcpy(d_edgeIndices, temp_h_setIndices, sizeof(unsigned int )*t, hipMemcpyHostToDevice); free(temp_h_setIndices); } else hipLaunchKernelGGL(( setIndices1), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeIndices,noOfEdges,0); hipDeviceSynchronize(); cudppPlan(theCudpp, &sortPlan, config_sort, noOfEdges, 1, 0); cudppRadixSort(sortPlan, d_superEdgeId, d_edgeIndices, noOfEdges); cudppDestroyPlan(sortPlan); t = noOfEdges * f; if (noOfEdges >= 200) { unsigned int *temp_h_efa = (unsigned int *)malloc(t*sizeof(unsigned int)); hipLaunchKernelGGL(( initArray1), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, (unsigned int*)d_edgeFlagArray, noOfEdges, t); int i; for(i = 0; i<t;i++) temp_h_efa[i] = 0; hipDeviceSynchronize(); hipMemcpy(d_edgeFlagArray, temp_h_efa, sizeof(unsigned int )*t, hipMemcpyHostToDevice); free(temp_h_efa); } else hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray,noOfEdges); hipDeviceSynchronize(); unsigned int h_size = noOfEdges + 1; hipMemcpy(d_size,&h_size,sizeof(unsigned int ), hipMemcpyHostToDevice); hipLaunchKernelGGL(( makeEdgeList), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray, d_edges, d_superEdgeId, d_size, noOfEdges); error = hipGetLastError(); if(error != hipSuccess) { printf("At line 779 CUDA error: %s\n", hipGetErrorString(error)); } unsigned int zero = 0; hipMemcpy(d_edgeListSize, &zero, sizeof(unsigned int ), hipMemcpyHostToDevice); hipMemcpy(d_vertexListSize, &zero, sizeof(unsigned int ), hipMemcpyHostToDevice); t = noOfEdges * f; if (noOfEdges >= 200) { unsigned int *temp_arr = (unsigned int *)malloc(t*sizeof(unsigned int)); hipLaunchKernelGGL(( initArray1), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_segmentedMinScanInput, noOfEdges,t); hipLaunchKernelGGL(( initArray1), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_segmentedMinScanOutput, noOfEdges, t); hipLaunchKernelGGL(( initArray1), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, (unsigned int*)d_pickArray, noOfEdges, t); hipLaunchKernelGGL(( initArray1), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeMapCopy, noOfEdges, t); int i; for(i = 0; i<t;i++) temp_arr[i] = 0; hipDeviceSynchronize(); hipMemcpy(d_segmentedMinScanInput, temp_arr, sizeof(unsigned int )*t, hipMemcpyHostToDevice); 
hipMemcpy(d_segmentedMinScanOutput, temp_arr, sizeof(unsigned int )*t, hipMemcpyHostToDevice); hipMemcpy(d_pickArray, temp_arr, sizeof(unsigned int )*t, hipMemcpyHostToDevice); hipMemcpy(d_edgeMapCopy, temp_arr, sizeof(unsigned int )*t, hipMemcpyHostToDevice); free(temp_arr); } else { hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_segmentedMinScanInput, noOfEdges); hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_segmentedMinScanOutput, noOfEdges); hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, (unsigned int*)d_pickArray, noOfEdges); hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeMapCopy, noOfEdges); } hipDeviceSynchronize(); hipMemcpy(&h_size,d_size,sizeof(unsigned int ), hipMemcpyDeviceToHost); unsigned int noOfThreads_new = getNoOfThreads(h_size); unsigned int noOfBlocks_new = (h_size+1024)/noOfThreads_new; hipLaunchKernelGGL(( edgeCompression), dim3(noOfBlocks_new), dim3(noOfThreads_new), 0, 0, d_edges, d_weights, d_vertices, d_segmentedMinScanInput, d_segmentedMinScanOutput, d_superVertexID, d_edgeMap, d_edgeMapCopy, d_edgeFlagArray, d_superEdgeId, d_edgeIndices, d_pickArray, d_size, d_edgeListSize, d_vertexListSize); error = hipGetLastError(); if(error != hipSuccess) { printf("At line 825 CUDA error: %s\n", hipGetErrorString(error)); } hipLaunchKernelGGL(( copyArrays), dim3(noOfBlocks_new), dim3(noOfThreads_new), 0, 0, d_edges, d_weights, d_vertices, d_segmentedMinScanInput, d_segmentedMinScanOutput, d_edgeMap, d_edgeMapCopy, d_edgeFlagArray, d_size); hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray, noOfEdges); hipLaunchKernelGGL(( initArray), dim3(noOfBlocks_vertices), dim3(noOfThreads_vertices), 0, 0, d_vertices, noOfVertices); hipLaunchKernelGGL(( CreateVertexListFlag), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_edgeFlagArray, d_vertices, d_pickArray, noOfEdges); hipLaunchKernelGGL(( BuildVertexList), dim3(noOfBlocks_edge), dim3(noOfThreads_edge), 0, 0, d_vertices, d_edges, d_pickArray, d_edgeFlagArray, noOfEdges); error = hipGetLastError(); if(error != hipSuccess) { printf("after build vertex listlast CUDA error: %s\n", hipGetErrorString(error)); } hipMemcpy(&noOfEdges, d_edgeListSize, sizeof(unsigned int ), hipMemcpyDeviceToHost); hipMemcpy(&noOfVertices, d_vertexListSize, sizeof(unsigned int ), hipMemcpyDeviceToHost); printf("for next round, no of edges = %d and no of vertices = %d\n",noOfEdges, noOfVertices); error = hipGetLastError(); if(error != hipSuccess) { printf("last CUDA error: %s\n", hipGetErrorString(error)); } } int main (int argc, char** argv) { unsigned int noOfMSTEdges = 0; unsigned long long int finalMSTWeight = 0; unsigned int i; parseInputFile(argv[1]); noOfVerticesOriginal = noOfVertices; noOfEdgesOriginal = noOfEdges; mallocArr(); initialize(); setupPlan(); struct timeval tv1, tv2; gettimeofday(&tv1, NULL); do { boruvka(); }while(noOfVertices > 1); hipDeviceSynchronize(); gettimeofday(&tv2, NULL); printf ("Total Execution time = %f seconds\n", (double)(tv2.tv_usec - tv1.tv_usec) / 1000000 + (double)(tv2.tv_sec - tv1.tv_sec)); hipMemcpy(h_MSTOutput, d_MSTOutput, sizeof(unsigned int )*noOfEdgesOriginal, hipMemcpyDeviceToHost); for(i=0; i<noOfEdgesOriginal; i++) { if(h_MSTOutput[i] == 1) { //printf("%d %d\n", edges[i], weights[i]); finalMSTWeight += weights[i]; noOfMSTEdges++; } } printf("\nNo. 
of edges in MST [must be equal to (%d-1)]: %d\n", noOfVerticesOriginal, noOfMSTEdges); printf("Final Weight of resultant MST: %llu\n", finalMSTWeight); cleanUp(); return 0; }
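/*
 * A note on the initialisation pattern used repeatedly in boruvka() above (inferred from
 * the initArray1/setIndices1 call sites, not stated in the code): whenever an array of
 * noOfEdges (or noOfVertices) elements has to be cleared or index-filled, the work is
 * split between host and device once the array is large enough (>= 200 edges, >= 20
 * vertices). With t = f * noOfEdges and f = 0.05 (set in setupPlan()), the pattern is
 * roughly the following, where d_arr, temp, blocks and threads stand in for the concrete
 * names used at each call site:
 *
 *   hipLaunchKernelGGL(initArray1, blocks, threads, 0, 0, d_arr, noOfEdges, t);  // device clears d_arr[t .. noOfEdges-1]; launch is asynchronous
 *   for (i = 0; i < t; i++) temp[i] = 0;                                         // host prepares the first t entries while the kernel runs
 *   hipDeviceSynchronize();
 *   hipMemcpy(d_arr, temp, t * sizeof(unsigned int), hipMemcpyHostToDevice);
 *
 * so the host-side fill overlaps with the kernel, and f controls the share of the array
 * handled on the CPU. Small arrays fall back to a single initArray kernel.
 */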
341fc4ac02f57839761f611bdb379c146bd8f712.cu
#include<stdio.h> #include<stdlib.h> #include<math.h> #include<cudpp.h> #include<limits.h> #include <sys/time.h> #define NO_OF_THREADS_PER_BLOCK 1024 float f; unsigned int noOfEdges; unsigned int noOfVertices; unsigned int *vertices; unsigned int *edges; unsigned int *weights; unsigned int *d_size; unsigned int *d_edgeListSize; unsigned int *d_vertexListSize; unsigned int *segmentedMinScanInput; unsigned int *d_segmentedMinScanInput; unsigned int *d_segmentedMinScanOutput; unsigned int *d_previousIDs; unsigned int *d_successorArray; unsigned int *d_successorArrayTemp; unsigned int *d_indices; unsigned int *d_edgeMap; unsigned int *d_edgeMapCopy; unsigned int *d_edgesCopy; unsigned int *d_edgeIndices; unsigned int *d_superVertexID; unsigned int *d_superEdgeId; unsigned int *d_MSTOutput; unsigned int *h_MSTOutput; unsigned int *d_edges; unsigned int *d_vertices; unsigned int *d_weights; unsigned int *d_edgeFlagArray; unsigned int *d_vertexFlagArray; unsigned int noOfEdgesOriginal; unsigned int noOfVerticesOriginal; int *d_pickArray; CUDPPHandle theCudpp; CUDPPHandle segmentedScanPlan_min; CUDPPConfiguration segmented_min_scan_config; CUDPPHandle scanPlan; CUDPPConfiguration scan_config; CUDPPHandle sortPlan; CUDPPConfiguration config_sort; /* Append vertexid and edge into a single integer of an array*/ __global__ void mergeEdgeAndWeight(unsigned int *d_segmentedMinScanInput, unsigned int *d_vertices, unsigned int *d_weight, unsigned int *d_edges, unsigned int noOfEdges) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfEdges) { unsigned int temp = d_weight[index]; d_segmentedMinScanInput[index] = (temp<<22) | d_edges[index]; } } /* initialise all entries of array pointed by d_array of given size to 0*/ __global__ void initArray(unsigned int *d_Array, unsigned int size) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < size) { d_Array[index] = 0; } } __global__ void initArray1(unsigned int *d_Array, unsigned int size, int t) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < size && index >= t) d_Array[index] = 0; } __global__ void printArr(unsigned int *d_arr, unsigned int size) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if (index < size) { printf("%d ", d_arr[index]); } printf("\n"); } /* creates a flag array for segmented scan. 
Sets to 1 the index from where outgoing vertex starts*/ __global__ void markSegment(unsigned int *d_edgeFlagArray, unsigned int *d_vertex, unsigned int *d_edges, unsigned int noOfVertices) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfVertices) { d_edgeFlagArray[d_vertex[index]] = 1; } } /*prints new edge and vertex size*/ __global__ void print(unsigned int *d_edgeListSize, unsigned int *d_vertexListSize) { printf("Edges: %d, Vertices %d \n", *d_edgeListSize, *d_vertexListSize); } /*creates successor array*/ __global__ void createSuccArray(unsigned int *d_successorArray, unsigned int *d_vertices, unsigned int *d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int noOfVertices, unsigned int noOfEdges) { unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; unsigned int minEdgeIndex; if(index < noOfVertices) { //index is same as vertex ID if (index == noOfVertices-1) minEdgeIndex = noOfEdges - 1; else minEdgeIndex = d_vertices[index+1] - 1; // min value is stored in loc of last neighbour unsigned int val = d_segmentedMinScanOutput[minEdgeIndex]; //unsigned int minWeight = val >> 22; unsigned int minVertex = val & (unsigned int)(pow(2.0,22)-1); d_successorArray[index] = minVertex; } } /*removes cycles from successor array*/ __global__ void eliminateCycles(unsigned int *d_successor, unsigned int noOfVertices) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfVertices) { unsigned int succIndex = d_successor[d_successor[index]]; if(index == succIndex) { if(index < d_successor[index]) { d_successor[index] = index; } else { d_successor[d_successor[index]]= d_successor[index]; } } } } /* hybrid implementation of markSegment function */ __global__ void markSegment1(unsigned int *d_edgeFlagArray, unsigned int *d_vertex, unsigned int noOfVertices) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfVertices && index > 0) { d_edgeFlagArray[d_vertex[index]] = 1; } } /*This function is to determine which edges are actually needed*/ __global__ void populatePArray(int *d_pickArray, unsigned int *d_vertices, unsigned int *d_successor, unsigned int *d_preIDs, unsigned int noOfVertices, unsigned int noOfEdges) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfEdges) { if(d_preIDs[index] != d_successor[d_preIDs[index]]) { if(d_preIDs[index] < (noOfVertices - 1)) d_pickArray[index] = d_vertices[d_preIDs[index]+1] - 1; else d_pickArray[index] = noOfEdges - 1; } else d_pickArray[index] = -1; } } /*This function determines which edges will be part of output*/ __global__ void AppendOutputEdges(int *d_pickArray, unsigned int * d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int *d_MSTOutput, unsigned int *d_edgeMap, unsigned int noOfEdges) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfEdges && d_pickArray[index] >= 0) { unsigned int edgeid = d_edgeMap[index]; unsigned int prev = 0; int temp = -1; unsigned int segmentedOutput = d_segmentedMinScanOutput[d_pickArray[index]]; unsigned int currIndex = d_segmentedMinScanOutput[index]; if(index > 0) { temp = d_pickArray[index-1]; prev = d_segmentedMinScanOutput[index-1]; } if(d_pickArray[index] != temp) { if(currIndex == segmentedOutput) { d_MSTOutput[edgeid]=1; } } else { if(currIndex != prev && currIndex == segmentedOutput) { d_MSTOutput[edgeid]=1; } } } } /*This function sets each value of array equal 
to its index*/ __global__ void setIndices(unsigned int *d_arr,unsigned int size) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < size) d_arr[index] = index; } /* This function copies data from original successorArray so that it can be used for new computation*/ __global__ void setIndices1(unsigned int *d_arr,unsigned int size, int l) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if (index < size && index >= l) d_arr[index] = index; } __global__ void makeTempSuccCopy(unsigned int *d_successorArray, unsigned int* d_vertex, unsigned int *d_successorArrayTemp, unsigned int noOfVertices) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfVertices) { unsigned int t = d_successorArray[index]; d_successorArrayTemp[index] = t; } } /* This function copies data from temporary successorArray so that it can be updated with correct value */ __global__ void updateSuccArray(unsigned int *d_successorArray, unsigned int* d_vertex, unsigned int *d_successorArrayTemp, unsigned int noOfVertices) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfVertices) { unsigned int t = d_successorArrayTemp[index]; d_successorArray[index] = t; } } /* This function uses pointer doubling to assign representative id to each vertex*/ __global__ void propagateRepVertexID(unsigned int *d_successorArray, bool *d_isSuccUpdated, unsigned int *d_previousIDs, unsigned int *d_successorArrayTemp, unsigned int noOfVertices) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfVertices) { unsigned int successor = d_successorArray[index]; if(successor != d_successorArray[successor]) { //Eindex = 2 and end = 6 and u = 2 and succ[u] = 2 *d_isSuccUpdated=true; d_successorArrayTemp[index] = d_successorArray[successor]; } } } /* This function iteratively sets s(s(u)) = u and propogates representative vertex id*/ void propagateID(unsigned int noOfBlocks_vertices, unsigned int noOfThreads_vertices) { bool succchange; bool *d_isSuccUpdated; cudaMalloc(&d_successorArrayTemp, sizeof(int)*noOfVertices); cudaMalloc((void**)&d_isSuccUpdated, sizeof(bool)); do { succchange=false; cudaMemcpy(d_isSuccUpdated, &succchange, sizeof(bool), cudaMemcpyHostToDevice); makeTempSuccCopy<<<noOfBlocks_vertices,noOfThreads_vertices>>>(d_successorArray, d_vertices, d_successorArrayTemp, noOfVertices); propagateRepVertexID<<<noOfBlocks_vertices,noOfThreads_vertices>>>(d_successorArray, d_isSuccUpdated, d_previousIDs,d_successorArrayTemp, noOfVertices); updateSuccArray<<<noOfBlocks_vertices,noOfThreads_vertices>>>(d_successorArray, d_vertices, d_successorArrayTemp, noOfVertices); cudaMemcpy(&succchange, d_isSuccUpdated, sizeof(bool), cudaMemcpyDeviceToHost); }while(succchange); cudaFree(d_successorArrayTemp); cudaFree(d_isSuccUpdated); } /*This function creates scan flag*/ void __global__ createScanFlag(unsigned int *d_vertexFlagArray, unsigned int *d_successorArray, unsigned int noOfVertices) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfVertices && index > 0) { unsigned int prev_val = d_successorArray[index-1]; unsigned int curr_val = d_successorArray[index]; if (prev_val != curr_val) { d_vertexFlagArray[index] = 1; } } } /*This function assigns supervertex id to each vertex*/ __global__ void assignSuperVertexID(unsigned int *d_superVertex, unsigned int *d_indices, unsigned int *d_vertexFlagArray,unsigned int *d_previousIDs,unsigned int 
noOfVertices) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfVertices) { d_vertexFlagArray[d_indices[index]] = d_superVertex[index]; } } /* This function updates supervertexid */ __global__ void updateSuperVertexID(unsigned int *d_superVertex,unsigned int *d_arr,unsigned int *d_vertexFlagArray, unsigned int noOfVertices) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfVertices) { unsigned int newId = d_vertexFlagArray[index]; d_superVertex[index] = newId; } } /* This function removes self edges after successor array is created */ __global__ void removeSelfEdges(unsigned int *d_edges, unsigned int *d_prevIds,unsigned int *d_superVertexID, unsigned int noOfEdges) { unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfEdges) { unsigned int uid = d_superVertexID[d_prevIds[index]]; //vause d_prevIds[index] is 1 to 6 but we need 0 to 5 unsigned int vid = d_superVertexID[d_edges[index]]; if(uid == vid) { d_edges[index]=INT_MAX; } } } /* This function is to assign new super edge id*/ __global__ void assignSuperEdgeId(unsigned int *d_superEdgeId, unsigned int *d_previousIds, unsigned int *d_superVertexId, unsigned int *d_edge, unsigned int noOfEdges) { unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfEdges) { unsigned int x = d_previousIds[index]; unsigned int id = INT_MAX; if (x != INT_MAX && d_edge[index] != INT_MAX) { id = d_superVertexId[x]; } d_superEdgeId[index] = id; } } /* This function is to compress the edge list*/ __global__ void edgeCompression(unsigned int *d_edges, unsigned int *d_weights, unsigned int *d_vertex, unsigned int *d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int *d_superVertexID, unsigned int *d_edgeMap, unsigned int *d_edgeMapCopy, unsigned int *d_edgeFlagArray, unsigned int *d_superEdgeId, unsigned int * d_edgeIndices, int *d_pickArray, unsigned int *d_size, unsigned int *d_edgeListSize, unsigned int *d_vertexListSize) { unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < *d_size) { unsigned int id = d_edgeIndices[index]; if(d_superEdgeId[index] != INT_MAX && d_edges[id] != INT_MAX) { if(index == *d_size-1) { *d_edgeListSize = index + 1; *d_vertexListSize = d_superEdgeId[index] + 1; } d_segmentedMinScanOutput[index] = d_weights[id]; d_segmentedMinScanInput[index] = d_superVertexID[d_edges[id]]; d_pickArray[index] = d_superEdgeId[index]; d_edgeMapCopy[index] = d_edgeMap[id]; } } } /*This function copies the temporary array to arrays which will be actually used*/ __global__ void copyArrays(unsigned int *d_edges, unsigned int *d_weights, unsigned int *vertex, unsigned int *d_segmentedMinScanInput, unsigned int *d_segmentedMinScanOutput, unsigned int *d_edgeMap, unsigned int *d_edgeMapCopy, unsigned int *d_edgeCopy, unsigned int *d_size) { unsigned int index = blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < *d_size) { unsigned int p = d_segmentedMinScanInput[index]; d_edges[index] = p; unsigned int wt = d_segmentedMinScanOutput[index]; d_weights[index] = wt; unsigned int mapVal = d_edgeMapCopy[index]; d_edgeMap[index] = mapVal; } } /*This function determines the new edge list*/ __global__ void makeEdgeList(unsigned int *d_edgeFlagArray, unsigned int *d_edges, unsigned int *d_superEdgeId, unsigned int *d_size, unsigned int noOfEdges) { unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index == 0) { d_edgeFlagArray[index] = 
1; } else if(index < noOfEdges && index > 0) { if(d_superEdgeId[index-1] != INT_MAX && d_superEdgeId[index] == INT_MAX) { *d_size = index; } if(d_superEdgeId[index] > d_superEdgeId[index-1]) { d_edgeFlagArray[index] = 1; } } } /*This function helps in creating new vertices list for next iteration*/ __global__ void CreateVertexListFlag(unsigned int *d_edgeFlagArray, unsigned int *d_vertices, int *d_pickArray, unsigned int noOfEdges) { unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index == 0) { d_edgeFlagArray[index] = 1; } else if(index < noOfEdges && index > 0) { if(d_pickArray[index] > d_pickArray[index-1]) { d_edgeFlagArray[index] = 1; } } } /*This function helps to build new vertex list*/ __global__ void BuildVertexList(unsigned int *d_vertices, unsigned int *d_edges, int *d_pickArray, unsigned int *d_edgeFlagArray, unsigned int noOfEdges) { unsigned int index= blockIdx.x * NO_OF_THREADS_PER_BLOCK + threadIdx.x; if(index < noOfEdges && d_edgeFlagArray[index] == 1) { d_vertices[d_pickArray[index]] = index; } } /* Parse the input file to setup our graph * we set the relevant arrays here */ void parseInputFile(char *fileName) { unsigned int x,temp; unsigned int edgeNo, weightOfEdge; FILE *fp; fp = fopen(fileName,"r"); printf("\n Parsing Input File: \n"); fscanf(fp,"%d",&noOfVertices); vertices = (unsigned int *)malloc(sizeof(unsigned int) * noOfVertices); int i; for (i=0; i<noOfVertices; i++) { fscanf(fp,"%d %d",&x, &temp); vertices[i] = x; } fscanf(fp,"%d",&temp); fscanf(fp,"%d",&noOfEdges); edges = (unsigned int *)malloc(sizeof(unsigned int)*noOfEdges); weights = (unsigned int *)malloc(sizeof(unsigned int)*noOfEdges); for(i=0; i<noOfEdges; i++) { fscanf(fp,"%d %d",&edgeNo, &weightOfEdge); edges[i] = edgeNo; weights[i] = weightOfEdge; } printf("No. of Vertices in Input: %d\n",noOfVertices); printf("No. 
of Edges in Input: %d\n", noOfEdges); fclose(fp); } /* this is to setup configuration parameters for various primitives*/ void setupPlan() { cudppCreate(&theCudpp); scan_config.algorithm = CUDPP_SCAN; scan_config.op = CUDPP_ADD; scan_config.datatype = CUDPP_UINT; scan_config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_INCLUSIVE; segmented_min_scan_config.algorithm = CUDPP_SEGMENTED_SCAN; segmented_min_scan_config.op = CUDPP_MIN; segmented_min_scan_config.datatype = CUDPP_UINT; segmented_min_scan_config.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_INCLUSIVE; config_sort.algorithm = CUDPP_SORT_RADIX; config_sort.datatype = CUDPP_UINT; config_sort.options = CUDPP_OPTION_FORWARD | CUDPP_OPTION_KEY_VALUE_PAIRS; f = 0.05; } /* Dynamically allocate necessary arrays*/ void mallocArr() { cudaMalloc(&d_segmentedMinScanInput, sizeof(unsigned int )*noOfEdges); cudaMalloc(&d_weights, sizeof(unsigned int )*noOfEdges); cudaMalloc(&d_edges, sizeof(unsigned int )*noOfEdges); cudaMalloc(&d_vertices, sizeof(unsigned int )*noOfVertices); cudaMalloc(&d_edgeFlagArray, sizeof(unsigned int )*noOfEdges); cudaMalloc(&d_segmentedMinScanOutput, sizeof(unsigned int )*noOfEdges); cudaMalloc(&d_successorArray, sizeof(unsigned int )*noOfVertices); cudaMalloc(&d_previousIDs, sizeof(unsigned int )*noOfEdges); cudaMalloc(&d_pickArray, sizeof(int )*noOfEdges); cudaMalloc(&d_superVertexID, sizeof(unsigned int )*noOfVertices); cudaMalloc(&d_MSTOutput, sizeof(unsigned int )*noOfEdges); cudaMalloc(&d_indices, sizeof(unsigned int )*noOfVertices); cudaMalloc(&d_vertexFlagArray, sizeof(unsigned int )*noOfVertices); cudaMalloc(&d_superVertexID, sizeof(unsigned int )*noOfVertices); cudaMalloc(&d_size, sizeof(unsigned int )); cudaMalloc(&d_superEdgeId, sizeof(unsigned int )*noOfEdges); cudaMalloc(&d_edgeIndices, sizeof(unsigned int )*noOfEdges); cudaMalloc(&d_edgeListSize, sizeof(unsigned int )); cudaMalloc(&d_vertexListSize, sizeof(unsigned int )); cudaMalloc(&d_edgeMapCopy, sizeof(unsigned int )*noOfEdges); cudaMalloc(&d_edgeMap, sizeof(unsigned int )*noOfEdges); h_MSTOutput = (unsigned int *)malloc(sizeof(unsigned int )*noOfEdges); } /*Free the dynamically allocated memory. 
Do other cleanup here*/ void cleanUp() { cudaFree(d_edgeIndices); cudaFree(d_superEdgeId); cudaFree(d_edgeMap); cudaFree(d_edgeMapCopy); cudaFree(d_superVertexID); cudaFree(d_vertexFlagArray); cudaFree(d_indices); cudaFree(d_MSTOutput); cudaFree(d_previousIDs); cudaFree(d_pickArray); cudaFree(d_successorArray); cudaFree(d_segmentedMinScanOutput); cudaFree(d_edgeFlagArray); cudaFree(d_vertices); cudaFree(d_edges); cudaFree(d_weights); cudaFree(d_segmentedMinScanInput); cudaFree(d_size); cudaFree(d_edgeListSize); cudaFree(d_vertexListSize); cudppDestroy(theCudpp); free(h_MSTOutput); free(edges); free(vertices); free(weights); } /* Do basic initialization*/ void initialize() { unsigned int i; cudaMemcpy(d_vertices, vertices, sizeof(unsigned int)*noOfVertices, cudaMemcpyHostToDevice); cudaMemcpy(d_edges, edges, sizeof(unsigned int)*noOfEdges, cudaMemcpyHostToDevice); cudaMemcpy(d_weights, weights, sizeof(unsigned int)*noOfEdges, cudaMemcpyHostToDevice); unsigned int *temp = (unsigned int *)malloc(sizeof(unsigned int)*noOfEdges); for(i=0; i<noOfEdges; i++) temp[i] = 0; cudaMemcpy(d_MSTOutput, temp, sizeof(unsigned int )*noOfEdges, cudaMemcpyHostToDevice); for(i=0; i<noOfEdges; i++) temp[i]=i; cudaMemcpy(d_edgeMap, temp, sizeof(unsigned int)*noOfEdges, cudaMemcpyHostToDevice); free(temp); } /* Helper function to determine no of threads to be used */ unsigned int getNoOfThreads(unsigned int size) { unsigned int threadsPerBlock; if (size <= 1024) threadsPerBlock = size; else threadsPerBlock = 1024; return threadsPerBlock; } void boruvka() { int t; unsigned int noOfThreads_edge = getNoOfThreads(noOfEdges); unsigned int noOfBlocks_edge = (noOfEdges+1024)/noOfThreads_edge; unsigned int noOfThreads_vertices = getNoOfThreads(noOfVertices); unsigned int noOfBlocks_vertices = (noOfVertices+1024)/noOfThreads_vertices; cudaError_t error; mergeEdgeAndWeight<<<noOfBlocks_edge, noOfThreads_edge>>>(d_segmentedMinScanInput, d_vertices, d_weights, d_edges, noOfEdges); error = cudaGetLastError(); if(error != cudaSuccess) { printf("0.1 CUDA error: %s\n", cudaGetErrorString(error)); exit(-1); } t = noOfEdges * f; if (noOfEdges >= 200) { unsigned int *temp_h_efa = (unsigned int *)malloc(t*sizeof(unsigned int)); initArray1<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray, noOfEdges,t); int i; for(i = 0; i<t;i++) temp_h_efa[i] = 0; cudaThreadSynchronize(); cudaMemcpy(d_edgeFlagArray, temp_h_efa, sizeof(unsigned int )*t, cudaMemcpyHostToDevice); free(temp_h_efa); } else initArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray, noOfEdges); cudaThreadSynchronize(); error = cudaGetLastError(); if(error != cudaSuccess) { printf("At line 577 CUDA error: %s\n", cudaGetErrorString(error)); exit(-1); } markSegment<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_edgeFlagArray, d_vertices, d_edges, noOfVertices); error = cudaGetLastError(); if(error != cudaSuccess) { printf("3 CUDA error: %s\n", cudaGetErrorString(error)); exit(-1); } cudppPlan(theCudpp, &segmentedScanPlan_min,segmented_min_scan_config, noOfEdges, 1, 0 ); //Make the segmented min scan plan cudppSegmentedScan(segmentedScanPlan_min, d_segmentedMinScanOutput, d_segmentedMinScanInput, (const unsigned int *)d_edgeFlagArray, noOfEdges); cudppDestroyPlan(segmentedScanPlan_min); error = cudaGetLastError(); if(error != cudaSuccess) { printf("CUDA error: %s\n", cudaGetErrorString(error)); // exit(-1); } createSuccArray<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_successorArray, d_vertices, d_segmentedMinScanInput, d_segmentedMinScanOutput, 
noOfVertices, noOfEdges); eliminateCycles<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_successorArray, noOfVertices); t = noOfEdges * f; if (noOfEdges >= 200) { unsigned int *temp_h_efa = (unsigned int *)malloc(t*sizeof(unsigned int)); initArray1<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray, noOfEdges,t); int i; for(i = 0; i<t;i++) temp_h_efa[i] = 0; cudaThreadSynchronize(); cudaMemcpy(d_edgeFlagArray, temp_h_efa, sizeof(unsigned int )*t, cudaMemcpyHostToDevice); free(temp_h_efa); } else initArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray, noOfEdges); cudaThreadSynchronize(); markSegment1<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_edgeFlagArray, d_vertices, noOfVertices); cudppPlan(theCudpp, &scanPlan, scan_config, noOfEdges, 1, 0); cudppScan(scanPlan, d_previousIDs, d_edgeFlagArray, noOfEdges); cudppDestroyPlan(scanPlan); error = cudaGetLastError(); if(error != cudaSuccess) { printf("At line 662 CUDA error: %s\n", cudaGetErrorString(error)); } t = noOfEdges * f; if(noOfEdges >= 200) { unsigned int *temp_h_pa = (unsigned int *)malloc(t*sizeof(unsigned int)); initArray1<<<noOfBlocks_edge, noOfThreads_edge>>>((unsigned int*)d_pickArray, noOfEdges, t); int i; for(i = 0; i<t;i++) temp_h_pa[i] = 0; cudaThreadSynchronize(); cudaMemcpy(d_pickArray, temp_h_pa, sizeof(unsigned int )*t, cudaMemcpyHostToDevice); free(temp_h_pa); } else initArray<<<noOfBlocks_edge, noOfThreads_edge>>>((unsigned int*)d_pickArray, noOfEdges); cudaThreadSynchronize(); populatePArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_pickArray, d_vertices, d_successorArray, d_previousIDs, noOfVertices, noOfEdges); AppendOutputEdges<<<noOfBlocks_edge, noOfThreads_edge>>>(d_pickArray, d_segmentedMinScanInput, d_segmentedMinScanOutput, d_MSTOutput, d_edgeMap, noOfEdges); error = cudaGetLastError(); if(error != cudaSuccess) { printf("At line 691 CUDA error: %s\n", cudaGetErrorString(error)); } propagateID(noOfBlocks_vertices, noOfThreads_vertices); t = noOfVertices*f; if(noOfVertices >= 20) { unsigned int *temp_h_setIndices = (unsigned int *)malloc(t*sizeof(unsigned int)); setIndices1<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_indices, noOfVertices, t); int i; for(i = 0; i<t;i++) temp_h_setIndices[i] = i; cudaThreadSynchronize(); cudaMemcpy(d_indices, temp_h_setIndices, sizeof(unsigned int )*t, cudaMemcpyHostToDevice); free(temp_h_setIndices); } else setIndices1<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_indices, noOfVertices, 0); cudaThreadSynchronize(); cudppPlan(theCudpp, &sortPlan, config_sort, noOfVertices, 1, 0); cudppRadixSort(sortPlan, d_successorArray, d_indices, noOfVertices); cudppDestroyPlan(sortPlan); initArray<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_vertexFlagArray,noOfVertices); createScanFlag<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_vertexFlagArray, d_successorArray,noOfVertices); cudppPlan(theCudpp, &scanPlan, scan_config, noOfVertices, 1, 0); cudppScan(scanPlan, d_superVertexID, d_vertexFlagArray, noOfVertices); cudppDestroyPlan(scanPlan); error = cudaGetLastError(); if(error != cudaSuccess) { printf("At line 726 CUDA error: %s\n", cudaGetErrorString(error)); } assignSuperVertexID<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_superVertexID,d_indices,d_vertexFlagArray,d_previousIDs,noOfVertices); updateSuperVertexID<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_superVertexID,d_indices,d_vertexFlagArray, noOfVertices); removeSelfEdges<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edges,d_previousIDs,d_superVertexID,noOfEdges); 
assignSuperEdgeId<<<noOfBlocks_edge, noOfThreads_edge>>>(d_superEdgeId,d_previousIDs, d_superVertexID, d_edges, noOfEdges); t = noOfEdges*f; //printf("noOfVertices = %d and point = %d\n",noOfVertices, t); if (noOfEdges >= 200) { unsigned int *temp_h_setIndices = (unsigned int *)malloc(t*sizeof(unsigned int)); setIndices1<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeIndices, noOfEdges, t); int i; for(i = 0; i<t;i++) temp_h_setIndices[i] = i; cudaThreadSynchronize(); cudaMemcpy(d_edgeIndices, temp_h_setIndices, sizeof(unsigned int )*t, cudaMemcpyHostToDevice); free(temp_h_setIndices); } else setIndices1<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeIndices,noOfEdges,0); cudaThreadSynchronize(); cudppPlan(theCudpp, &sortPlan, config_sort, noOfEdges, 1, 0); cudppRadixSort(sortPlan, d_superEdgeId, d_edgeIndices, noOfEdges); cudppDestroyPlan(sortPlan); t = noOfEdges * f; if (noOfEdges >= 200) { unsigned int *temp_h_efa = (unsigned int *)malloc(t*sizeof(unsigned int)); initArray1<<<noOfBlocks_edge, noOfThreads_edge>>>((unsigned int*)d_edgeFlagArray, noOfEdges, t); int i; for(i = 0; i<t;i++) temp_h_efa[i] = 0; cudaThreadSynchronize(); cudaMemcpy(d_edgeFlagArray, temp_h_efa, sizeof(unsigned int )*t, cudaMemcpyHostToDevice); free(temp_h_efa); } else initArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray,noOfEdges); cudaThreadSynchronize(); unsigned int h_size = noOfEdges + 1; cudaMemcpy(d_size,&h_size,sizeof(unsigned int ), cudaMemcpyHostToDevice); makeEdgeList<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray, d_edges, d_superEdgeId, d_size, noOfEdges); error = cudaGetLastError(); if(error != cudaSuccess) { printf("At line 779 CUDA error: %s\n", cudaGetErrorString(error)); } unsigned int zero = 0; cudaMemcpy(d_edgeListSize, &zero, sizeof(unsigned int ), cudaMemcpyHostToDevice); cudaMemcpy(d_vertexListSize, &zero, sizeof(unsigned int ), cudaMemcpyHostToDevice); t = noOfEdges * f; if (noOfEdges >= 200) { unsigned int *temp_arr = (unsigned int *)malloc(t*sizeof(unsigned int)); initArray1<<<noOfBlocks_edge, noOfThreads_edge>>>(d_segmentedMinScanInput, noOfEdges,t); initArray1<<<noOfBlocks_edge, noOfThreads_edge>>>(d_segmentedMinScanOutput, noOfEdges, t); initArray1<<<noOfBlocks_edge, noOfThreads_edge>>>((unsigned int*)d_pickArray, noOfEdges, t); initArray1<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeMapCopy, noOfEdges, t); int i; for(i = 0; i<t;i++) temp_arr[i] = 0; cudaThreadSynchronize(); cudaMemcpy(d_segmentedMinScanInput, temp_arr, sizeof(unsigned int )*t, cudaMemcpyHostToDevice); cudaMemcpy(d_segmentedMinScanOutput, temp_arr, sizeof(unsigned int )*t, cudaMemcpyHostToDevice); cudaMemcpy(d_pickArray, temp_arr, sizeof(unsigned int )*t, cudaMemcpyHostToDevice); cudaMemcpy(d_edgeMapCopy, temp_arr, sizeof(unsigned int )*t, cudaMemcpyHostToDevice); free(temp_arr); } else { initArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_segmentedMinScanInput, noOfEdges); initArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_segmentedMinScanOutput, noOfEdges); initArray<<<noOfBlocks_edge, noOfThreads_edge>>>((unsigned int*)d_pickArray, noOfEdges); initArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeMapCopy, noOfEdges); } cudaThreadSynchronize(); cudaMemcpy(&h_size,d_size,sizeof(unsigned int ), cudaMemcpyDeviceToHost); unsigned int noOfThreads_new = getNoOfThreads(h_size); unsigned int noOfBlocks_new = (h_size+1024)/noOfThreads_new; edgeCompression<<<noOfBlocks_new, noOfThreads_new>>>(d_edges, d_weights, d_vertices, d_segmentedMinScanInput, d_segmentedMinScanOutput, d_superVertexID, d_edgeMap, 
d_edgeMapCopy, d_edgeFlagArray, d_superEdgeId, d_edgeIndices, d_pickArray, d_size, d_edgeListSize, d_vertexListSize); error = cudaGetLastError(); if(error != cudaSuccess) { printf("At line 825 CUDA error: %s\n", cudaGetErrorString(error)); } copyArrays<<<noOfBlocks_new, noOfThreads_new>>>(d_edges, d_weights, d_vertices, d_segmentedMinScanInput, d_segmentedMinScanOutput, d_edgeMap, d_edgeMapCopy, d_edgeFlagArray, d_size); initArray<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray, noOfEdges); initArray<<<noOfBlocks_vertices, noOfThreads_vertices>>>(d_vertices, noOfVertices); CreateVertexListFlag<<<noOfBlocks_edge, noOfThreads_edge>>>(d_edgeFlagArray, d_vertices, d_pickArray, noOfEdges); BuildVertexList<<<noOfBlocks_edge, noOfThreads_edge>>>(d_vertices, d_edges, d_pickArray, d_edgeFlagArray, noOfEdges); error = cudaGetLastError(); if(error != cudaSuccess) { printf("after build vertex listlast CUDA error: %s\n", cudaGetErrorString(error)); } cudaMemcpy(&noOfEdges, d_edgeListSize, sizeof(unsigned int ), cudaMemcpyDeviceToHost); cudaMemcpy(&noOfVertices, d_vertexListSize, sizeof(unsigned int ), cudaMemcpyDeviceToHost); printf("for next round, no of edges = %d and no of vertices = %d\n",noOfEdges, noOfVertices); error = cudaGetLastError(); if(error != cudaSuccess) { printf("last CUDA error: %s\n", cudaGetErrorString(error)); } } int main (int argc, char** argv) { unsigned int noOfMSTEdges = 0; unsigned long long int finalMSTWeight = 0; unsigned int i; parseInputFile(argv[1]); noOfVerticesOriginal = noOfVertices; noOfEdgesOriginal = noOfEdges; mallocArr(); initialize(); setupPlan(); struct timeval tv1, tv2; gettimeofday(&tv1, NULL); do { boruvka(); }while(noOfVertices > 1); cudaThreadSynchronize(); gettimeofday(&tv2, NULL); printf ("Total Execution time = %f seconds\n", (double)(tv2.tv_usec - tv1.tv_usec) / 1000000 + (double)(tv2.tv_sec - tv1.tv_sec)); cudaMemcpy(h_MSTOutput, d_MSTOutput, sizeof(unsigned int )*noOfEdgesOriginal, cudaMemcpyDeviceToHost); for(i=0; i<noOfEdgesOriginal; i++) { if(h_MSTOutput[i] == 1) { //printf("%d %d\n", edges[i], weights[i]); finalMSTWeight += weights[i]; noOfMSTEdges++; } } printf("\nNo. of edges in MST [must be equal to (%d-1)]: %d\n", noOfVerticesOriginal, noOfMSTEdges); printf("Final Weight of resultant MST: %llu\n", finalMSTWeight); cleanUp(); return 0; }
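/*
 * Sketch of the input layout implied by parseInputFile() above (inferred from its fscanf
 * calls; the values below are made up for illustration). The graph is stored CSR-style:
 * vertices[i] is the offset of vertex i's first outgoing arc in the edge list, and the
 * example lists each undirected edge in both directions, the usual adjacency-list form
 * these kernels appear to assume. The second number on each vertex line and the single
 * number before the edge count are read into a temporary and ignored. For a triangle
 * 0-1 (weight 1), 1-2 (weight 2), 0-2 (weight 3):
 *
 *   3          <- noOfVertices
 *   0 0        <- vertex 0: first arc at offset 0 (second column ignored)
 *   2 0        <- vertex 1: first arc at offset 2
 *   4 0        <- vertex 2: first arc at offset 4
 *   0          <- ignored
 *   6          <- noOfEdges (directed arcs)
 *   1 1        <- arcs of vertex 0: to 1 (w=1), to 2 (w=3)
 *   2 3
 *   0 1        <- arcs of vertex 1: to 0 (w=1), to 2 (w=2)
 *   2 2
 *   0 3        <- arcs of vertex 2: to 0 (w=3), to 1 (w=2)
 *   1 2
 *
 * Note that mergeEdgeAndWeight() packs each weight above a 22-bit destination id
 * ((weight << 22) | edge), so vertex ids must fit in 22 bits and weights in the bits above.
 */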
27d3f46cce602acc369e75e1625c99d5c55809a4.hip
// !!! This is a file automatically generated by hipify!!! //////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // // Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ///////////////////////////////////////////////////////////////////////////////// #include "assert.h" #include "matx.h" #include "test_types.h" #include "utilities.h" #include "gtest/gtest.h" using namespace matx; constexpr int m = 100; constexpr int n = 50; template <typename T> class LUSolverTest : public ::testing::Test { protected: void SetUp() override { pb = std::make_unique<detail::MatXPybind>(); pb->InitAndRunTVGenerator<T>("00_solver", "lu", "run", {m, n}); pb->NumpyToTensorView(Av, "A"); pb->NumpyToTensorView(Lv, "L"); pb->NumpyToTensorView(Uv, "U"); } void TearDown() { pb.reset(); } std::unique_ptr<detail::MatXPybind> pb; tensor_t<T, 2> Av{{m, n}}; tensor_t<T, 2> Atv{{n, m}}; tensor_t<int64_t, 1> PivV{{::min(m, n)}}; tensor_t<T, 2> Lv{{m, ::min(m, n)}}; tensor_t<T, 2> Uv{{::min(m, n), n}}; }; template <typename TensorType> class LUSolverTestNonComplexFloatTypes : public LUSolverTest<TensorType> { }; TYPED_TEST_SUITE(LUSolverTestNonComplexFloatTypes, MatXFloatNonComplexNonHalfTypes); TYPED_TEST(LUSolverTestNonComplexFloatTypes, LUBasic) { MATX_ENTER_HANDLER(); // example-begin lu-test-1 (mtie(this->Av, this->PivV) = lu(this->Av)).run(); // example-end lu-test-1 hipStreamSynchronize(0); // The upper and lower triangle components are saved in Av. Python saves them // as separate matrices with the diagonal of the lower matrix set to 0 for (index_t i = 0; i < this->Av.Size(0); i++) { for (index_t j = 0; j < this->Av.Size(1); j++) { if (i > j) { // Lower triangle ASSERT_NEAR(this->Av(i, j), this->Lv(i, j), 0.001); } else { ASSERT_NEAR(this->Av(i, j), this->Uv(i, j), 0.001); } } } MATX_EXIT_HANDLER(); }
27d3f46cce602acc369e75e1625c99d5c55809a4.cu
//////////////////////////////////////////////////////////////////////////////// // BSD 3-Clause License // // Copyright (c) 2021, NVIDIA Corporation // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // 1. Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // 2. Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // 3. Neither the name of the copyright holder nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ///////////////////////////////////////////////////////////////////////////////// #include "assert.h" #include "matx.h" #include "test_types.h" #include "utilities.h" #include "gtest/gtest.h" using namespace matx; constexpr int m = 100; constexpr int n = 50; template <typename T> class LUSolverTest : public ::testing::Test { protected: void SetUp() override { pb = std::make_unique<detail::MatXPybind>(); pb->InitAndRunTVGenerator<T>("00_solver", "lu", "run", {m, n}); pb->NumpyToTensorView(Av, "A"); pb->NumpyToTensorView(Lv, "L"); pb->NumpyToTensorView(Uv, "U"); } void TearDown() { pb.reset(); } std::unique_ptr<detail::MatXPybind> pb; tensor_t<T, 2> Av{{m, n}}; tensor_t<T, 2> Atv{{n, m}}; tensor_t<int64_t, 1> PivV{{std::min(m, n)}}; tensor_t<T, 2> Lv{{m, std::min(m, n)}}; tensor_t<T, 2> Uv{{std::min(m, n), n}}; }; template <typename TensorType> class LUSolverTestNonComplexFloatTypes : public LUSolverTest<TensorType> { }; TYPED_TEST_SUITE(LUSolverTestNonComplexFloatTypes, MatXFloatNonComplexNonHalfTypes); TYPED_TEST(LUSolverTestNonComplexFloatTypes, LUBasic) { MATX_ENTER_HANDLER(); // example-begin lu-test-1 (mtie(this->Av, this->PivV) = lu(this->Av)).run(); // example-end lu-test-1 cudaStreamSynchronize(0); // The upper and lower triangle components are saved in Av. Python saves them // as separate matrices with the diagonal of the lower matrix set to 0 for (index_t i = 0; i < this->Av.Size(0); i++) { for (index_t j = 0; j < this->Av.Size(1); j++) { if (i > j) { // Lower triangle ASSERT_NEAR(this->Av(i, j), this->Lv(i, j), 0.001); } else { ASSERT_NEAR(this->Av(i, j), this->Uv(i, j), 0.001); } } } MATX_EXIT_HANDLER(); }
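/*
 * For reference, the packed layout being checked above, assuming the usual LAPACK getrf
 * convention (which the index comparison in the loop is consistent with): the factors of
 * a 3x3 input would come back in a single matrix
 *
 *   [ u11 u12 u13 ]
 *   [ l21 u22 u23 ]
 *   [ l31 l32 u33 ]
 *
 * i.e. the strictly lower part holds L (whose unit diagonal is implicit and not stored)
 * and the diagonal plus upper part hold U, which is why Av(i, j) is compared against Lv
 * for i > j and against Uv otherwise.
 */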
zmgeellmv.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 __global__ void zmgeellmv_kernel( int num_rows, int num_cols, int num_vecs, int num_cols_per_row, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; extern __shared__ magmaDoubleComplex dot[]; if(row < num_rows ){ for( int i=0; i<num_vecs; i++) dot[ threadIdx.x + i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_cols_per_row * row + n ]; magmaDoubleComplex val = dval [ num_cols_per_row * row + n ]; if( val != 0){ for( int i=0; i<num_vecs; i++) dot[ threadIdx.x + i*blockDim.x ] += val * dx[col + i * num_cols ]; } } for( int i=0; i<num_vecs; i++) dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha + beta * dy [ row + i * num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is ELLPACK. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in ELLPACK @param[in] dcolind magmaIndex_ptr columnindices of A in ELLPACK @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zmgeellmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t nnz_per_row, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( magmaDoubleComplex ); // num_vecs vectors hipLaunchKernelGGL(( zmgeellmv_kernel), dim3(grid), dim3(threads), MEM_SIZE, queue->cuda_stream() , m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); return MAGMA_SUCCESS; }
zmgeellmv.cu
/* -- MAGMA (version 2.5.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2019 @precisions normal z -> c d s */ #include "magmasparse_internal.h" #define BLOCK_SIZE 512 __global__ void zmgeellmv_kernel( int num_rows, int num_cols, int num_vecs, int num_cols_per_row, magmaDoubleComplex alpha, magmaDoubleComplex * dval, magma_index_t * dcolind, magmaDoubleComplex * dx, magmaDoubleComplex beta, magmaDoubleComplex * dy) { int row = blockDim.x * blockIdx.x + threadIdx.x; extern __shared__ magmaDoubleComplex dot[]; if(row < num_rows ){ for( int i=0; i<num_vecs; i++) dot[ threadIdx.x + i*blockDim.x ] = MAGMA_Z_MAKE(0.0, 0.0); for ( int n = 0; n < num_cols_per_row; n++ ) { int col = dcolind [ num_cols_per_row * row + n ]; magmaDoubleComplex val = dval [ num_cols_per_row * row + n ]; if( val != 0){ for( int i=0; i<num_vecs; i++) dot[ threadIdx.x + i*blockDim.x ] += val * dx[col + i * num_cols ]; } } for( int i=0; i<num_vecs; i++) dy[ row + i*num_cols ] = dot[ threadIdx.x + i*blockDim.x ] * alpha + beta * dy [ row + i * num_cols ]; } } /** Purpose ------- This routine computes Y = alpha * A * X + beta * Y for X and Y sets of num_vec vectors on the GPU. Input format is ELLPACK. Arguments --------- @param[in] transA magma_trans_t transposition parameter for A @param[in] m magma_int_t number of rows in A @param[in] n magma_int_t number of columns in A @param[in] num_vecs mama_int_t number of vectors @param[in] nnz_per_row magma_int_t number of elements in the longest row @param[in] alpha magmaDoubleComplex scalar multiplier @param[in] dval magmaDoubleComplex_ptr array containing values of A in ELLPACK @param[in] dcolind magmaIndex_ptr columnindices of A in ELLPACK @param[in] dx magmaDoubleComplex_ptr input vector x @param[in] beta magmaDoubleComplex scalar multiplier @param[out] dy magmaDoubleComplex_ptr input/output vector y @param[in] queue magma_queue_t Queue to execute in. @ingroup magmasparse_zblas ********************************************************************/ extern "C" magma_int_t magma_zmgeellmv( magma_trans_t transA, magma_int_t m, magma_int_t n, magma_int_t num_vecs, magma_int_t nnz_per_row, magmaDoubleComplex alpha, magmaDoubleComplex_ptr dval, magmaIndex_ptr dcolind, magmaDoubleComplex_ptr dx, magmaDoubleComplex beta, magmaDoubleComplex_ptr dy, magma_queue_t queue ) { dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) ); magma_int_t threads = BLOCK_SIZE; unsigned int MEM_SIZE = num_vecs* BLOCK_SIZE * sizeof( magmaDoubleComplex ); // num_vecs vectors zmgeellmv_kernel<<< grid, threads, MEM_SIZE, queue->cuda_stream() >>> ( m, n, num_vecs, nnz_per_row, alpha, dval, dcolind, dx, beta, dy ); return MAGMA_SUCCESS; }
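/*
 * Illustration of the ELLPACK layout this kernel expects (the matrix below is made up;
 * only the indexing comes from zmgeellmv_kernel). Each row gets num_cols_per_row slots,
 * stored row after row, with the value at dval[num_cols_per_row*row + n] and its column
 * at the same position in dcolind; unused slots are padded with a zero value, which the
 * kernel skips via the "val != 0" test. For
 *
 *        [ 4 0 1 ]
 *    A = [ 0 3 0 ]     with num_cols_per_row = 2:
 *        [ 2 0 5 ]
 *
 *    dval    = { 4, 1,   3, 0,   2, 5 }
 *    dcolind = { 0, 2,   1, 0,   0, 2 }
 *
 * The num_vecs input vectors are stored back to back, vector i starting at offset
 * i*num_cols, matching the dx[col + i*num_cols] access above.
 */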
f49ad4880dac9e6a8efe8a6c8c2c489525abb9e4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include "cuda/pagani/demos/new_time_and_call.cuh" /* each thread contributes its global id; blockReduceSum collapses them into one partial sum per block */ __global__ void profile(double* block_results){ size_t tid = threadIdx.x + blockIdx.x * blockDim.x; double val = static_cast<double>(tid); if(blockIdx.x == 0 && threadIdx.x == 0) printf("block %u : %e\n", blockIdx.x, val); __syncthreads(); val = quad::blockReduceSum(val); __syncthreads(); if(threadIdx.x == 0) block_results[blockIdx.x] = val; } /* reference value: the sum of all thread ids 0 .. num_blocks*num_threads-1 */ double compute_expected(size_t num_blocks, size_t num_threads){ size_t res = 0; for(size_t i=0; i < num_blocks * num_threads; ++i) res += i; return static_cast<double>(res); } int main() { size_t num_blocks = 262144*4; size_t num_threads = 64; double* block_res = cuda_malloc<double>(num_blocks); hipLaunchKernelGGL(( profile), dim3(num_blocks), dim3(num_threads), 0, 0, block_res); hipDeviceSynchronize(); double res = reduction<double>(block_res, num_blocks); printf("res:%e expected:%e\n", res, compute_expected(num_blocks, num_threads)); return 0; }
f49ad4880dac9e6a8efe8a6c8c2c489525abb9e4.cu
#include <iostream> #include "cuda/pagani/demos/new_time_and_call.cuh" /* each thread contributes its global id; blockReduceSum collapses them into one partial sum per block */ __global__ void profile(double* block_results){ size_t tid = threadIdx.x + blockIdx.x * blockDim.x; double val = static_cast<double>(tid); if(blockIdx.x == 0 && threadIdx.x == 0) printf("block %u : %e\n", blockIdx.x, val); __syncthreads(); val = quad::blockReduceSum(val); __syncthreads(); if(threadIdx.x == 0) block_results[blockIdx.x] = val; } /* reference value: the sum of all thread ids 0 .. num_blocks*num_threads-1 */ double compute_expected(size_t num_blocks, size_t num_threads){ size_t res = 0; for(size_t i=0; i < num_blocks * num_threads; ++i) res += i; return static_cast<double>(res); } int main() { size_t num_blocks = 262144*4; size_t num_threads = 64; double* block_res = cuda_malloc<double>(num_blocks); profile<<<num_blocks, num_threads>>>(block_res); cudaDeviceSynchronize(); double res = reduction<double>(block_res, num_blocks); printf("res:%e expected:%e\n", res, compute_expected(num_blocks, num_threads)); return 0; }
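/*
 * On the check in main(): with num_blocks*num_threads = 2^26 threads, compute_expected()
 * is just the arithmetic series N*(N-1)/2 = 2^51 - 2^25 (about 2.2518e15). Every partial
 * sum along the way is an integer below 2^53, so it is exactly representable in double,
 * and the printed res should reproduce the expected value exactly, not merely
 * approximately, if the block and grid sums are correct (assuming reduction<double>()
 * simply adds the per-block results).
 */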
f4627971b66bff37c2d2eea9877e3e6f657bbc9a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "gpu_pf.h" #include "gpu_pf_utilities.h" extern "C" ms3_gpu_model ms3_gpu_model_(int nx, int ny, int ne, double* g_lev_2, double* gx_lev_2, double* gxx_lev_2, double* h_lev_1, double* hx_lev_1, double** RQ_L, double* HmatL, double* det_Hmat, double* X0, double** P0_L, double** Pr_ss) { ms3_gpu_model mod; mod.nx = nx; mod.ny = ny; mod.ne = ne; mod.g_lev_2 = g_lev_2; mod.gx_lev_2 = gx_lev_2; mod.gxx_lev_2 = gxx_lev_2; mod.h_lev_1 = h_lev_1; mod.hx_lev_1 = hx_lev_1; mod.RQ_L = RQ_L; mod.HmatL = HmatL; mod.det_Hmat = det_Hmat; mod.X0 = X0; mod.P0_L = P0_L; mod.Pr_ss = Pr_ss; return mod; } void printmat(const double* M, int nrow, int ncol) { int i, j; for (i = 0; i < nrow; i++) { for (j = 0; j < ncol; j++) printf("%+8.4f ", M[ncol*i + j]); printf("\n"); } } void printmat(const int* M, int nrow, int ncol) { int i, j; for (i = 0; i < nrow; i++) { for (j = 0; j < ncol; j++) printf("%+4d ", M[ncol*i + j]); printf("\n"); } } /** * This is a markov-switching version of the ordinary PF. It has 3 (independent) markov switching states, * which are assumed to be unrelated to the transition equation (i.e. the particles move in the same way, * */ extern "C" int ms3_gpu_particle_filter(double* data, ms3_gpu_model* mod, gpu_options* opts, double* LL_out) { //--------------- // useful scalars int i, j, k, t; int verbose = 0; double alpha, beta; int resample = 1; *LL_out = 0.0; // steady state for markov switching double p_s0_ss_1 = mod->Pr_ss[0][0]; double p_s0_ss_2 = mod->Pr_ss[1][0]; double p_s0_ss_3 = mod->Pr_ss[2][0]; //---------------------------------- // importing parameters and matrices int T = opts->T; int N = opts->npart; int nx = mod->nx; int ny = mod->ny; int ne = mod->ne; double det_Hmat = mod->det_Hmat[0]; double log2pi = 1.8378770664093453390819377091; double y_coeff = -0.5 * (ny * log2pi + log(det_Hmat)); // fill in x-intercept double* h_host = (double*) malloc (nx * sizeof(double)); for (i = 0; i < nx; i++) h_host[i] = mod->h_lev_1[i]; double* h_devc; hipMalloc((void**)&h_devc, nx * sizeof(double)); hipMemcpy(h_devc, h_host, nx*sizeof(double), hipMemcpyHostToDevice); free(h_host); // fill in trans mat. using column major (where source is row-major) double* hx_host = (double*) malloc (nx * nx * sizeof(double)); for (i = 0; i < nx; i++) for (j = 0; j < nx; j++) hx_host[nx*i + j] = mod->hx_lev_1[nx*j + i]; double* hx_devc; hipMalloc((void**)&hx_devc, nx * nx * sizeof(double)); hipMemcpy(hx_devc, hx_host, nx*nx*sizeof(double), hipMemcpyHostToDevice); free(hx_host); /** * Since we assume that the Q matrix is different for each of the 8 combinations * of states, we must bake that into our program. My first thought is that I'll * explicitly make 8 matrices, and then just package their pointers. Also, we define * a macro to make life a little easier for ourselves. (I'd like to take a look at * the preprocessor output to make sure this does what I think it does.) 
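 * For reference, SET_SHOCK_VARIANCE(0) should expand (after the ## token pasting) to roughly:
 *
 *   for (i = 0; i < ne; i++)
 *     for (j = i; j < nx; j++)
 *       RQ_L_host[nx*i + j] = mod->RQ_L[0][ne*j + i];
 *   hipMalloc((void**)&RQLD0, nx * nx * sizeof(double));
 *   hipMemcpy(RQLD0, RQ_L_host, nx*ne*sizeof(double), hipMemcpyHostToDevice);
 *   RQ_L_devc_lookup[0] = RQLD0;
 *
 * Note that the nested for loops bind only to the single assignment statement; the
 * hipMalloc/hipMemcpy/lookup lines run once per invocation, filling the RQLD0..RQLD7
 * pointers declared just below.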
*/ double *RQ_L_host = (double*) calloc (nx * ne, sizeof(double)); double **RQ_L_devc_lookup = (double**) malloc (8 * sizeof(double*)); double *RQLD0, *RQLD1, *RQLD2, *RQLD3, *RQLD4, *RQLD5, *RQLD6, *RQLD7; #define SET_SHOCK_VARIANCE(___index___) \ for (i = 0; i < ne; i++) \ for (j = i; j < nx; j++) \ RQ_L_host[nx*i + j] = mod->RQ_L[___index___][ne*j + i]; \ hipMalloc((void**)&RQLD##___index___, nx * nx * sizeof(double)); \ hipMemcpy(RQLD##___index___, RQ_L_host, nx*ne*sizeof(double), hipMemcpyHostToDevice); \ RQ_L_devc_lookup[___index___] = RQLD##___index___; SET_SHOCK_VARIANCE(0) SET_SHOCK_VARIANCE(1) SET_SHOCK_VARIANCE(2) SET_SHOCK_VARIANCE(3) SET_SHOCK_VARIANCE(4) SET_SHOCK_VARIANCE(5) SET_SHOCK_VARIANCE(6) SET_SHOCK_VARIANCE(7) free(RQ_L_host); // fill in M.E. cov. mat. using column major (where source is row-major) double* HmatL_host = (double*) malloc (ny * ny * sizeof(double)); for (i = 0; i < ny; i++) for (j = 0; j < ny; j++) HmatL_host[ny*i + j] = mod->HmatL[ny*j + i]; double* HmatL_devc; hipMalloc((void**)&HmatL_devc, ny * ny * sizeof(double)); hipMemcpy(HmatL_devc, HmatL_host, ny*ny*sizeof(double), hipMemcpyHostToDevice); free(HmatL_host); // fill in gx & gxx using column major (where source is row-major) double* g_host = (double*) malloc (ny * sizeof(double)); double* gx_host = (double*) malloc (ny * nx * sizeof(double)); double* gxx_host = (double*) malloc (ny * nx * nx * sizeof(double)); for (i = 0; i < ny; i++) g_host[i] = mod->g_lev_2[i]; for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) gx_host[ny*i + j] = mod->gx_lev_2[nx*j + i]; for (i = 0; i < nx*nx; i++) for (j = 0; j < ny; j++) gxx_host[ny*i + j] = mod->gxx_lev_2[nx*nx*j + i]; double* g_devc; double* gx_devc; double* gxx_devc; hipMalloc((void**)&g_devc, ny * sizeof(double)); hipMalloc((void**)&gx_devc, ny * nx * sizeof(double)); hipMalloc((void**)&gxx_devc, ny * nx * nx * sizeof(double)); hipMemcpy(g_devc, g_host, ny * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(gx_devc, gx_host, ny * nx * sizeof(double), hipMemcpyHostToDevice); hipMemcpy(gxx_devc, gxx_host, ny * nx * nx * sizeof(double), hipMemcpyHostToDevice); free(g_host); free(gx_host); free(gxx_host); //----------------------------------- // allocating memory for the function double* X_devc; double* Xp_devc; double* Xp_devc_tmp; hipMalloc((void**)&X_devc, N*nx*sizeof(double)); hipMalloc((void**)&Xp_devc, N*nx*sizeof(double)); hipMalloc((void**)&Xp_devc_tmp, N*nx*sizeof(double)); double* XX_devc; hipMalloc((void**)&XX_devc, N*nx*nx*sizeof(double)); double* YmMu_devc; double* HYmMu_devc; hipMalloc((void**)&YmMu_devc, N*ny*sizeof(double)); hipMalloc((void**)&HYmMu_devc, N*ny*sizeof(double)); char* ms1_devc; char* ms2_devc; char* ms3_devc; char* ms_all_devc; char* ms_all_devc_tmp; hipMalloc((void**)&ms1_devc, N*sizeof(char)); hipMalloc((void**)&ms2_devc, N*sizeof(char)); hipMalloc((void**)&ms3_devc, N*sizeof(char)); hipMalloc((void**)&ms_all_devc, N*sizeof(char)); hipMalloc((void**)&ms_all_devc_tmp, N*sizeof(char)); double* data_devc; hipMalloc((void**)&data_devc, ny*sizeof(double)); //---------------------------------- // containers specific to likelihood double* W_host = (double*) malloc ((N+1) * sizeof(double)); W_host[0] = 0.0; double* W_devc; hipMalloc((void**)&W_devc, N*sizeof(double)); double LL_host = 0.0; double* U_host = (double*) malloc (N * sizeof(double)); int* N_host = (int*) malloc (N * sizeof(int)); int* N_devc; hipMalloc((void**)&N_devc, N*sizeof(int)); double d_offset; //------------------ // CUDA Organization int nthread = 128; int nblock = 
(N % nthread == 0) ? N/nthread : (N/nthread) + 1; //------------------------------- // set up random number generator hiprandGenerator_t gen; hiprandCreateGenerator(&gen, HIPRAND_RNG_PSEUDO_XORWOW); double* norms_devc; // for transition double* norms_devc_tmp; // (for masking transition) double* unif_devc; // for states hipMalloc((void**)&norms_devc, N * ne * sizeof(double)); hipMalloc((void**)&norms_devc_tmp, N * ne * sizeof(double)); hipMalloc((void**)&unif_devc, N * sizeof(double)); // seed the GPU P-RNG hiprandSetPseudoRandomGeneratorSeed(gen, opts->seed); // seed the C P-RNG (for resampling) srand(opts->seed); //------------------ // set up for cuBLAS hipblasHandle_t handle; hipsolverDnHandle_t s_handle; hipblasCreate(&handle); hipsolverDnCreate(&s_handle); int* info_devc; hipMalloc((void**)&info_devc, sizeof(int)); //-------------------------- // set the initial particles // first we set the markov-switching states hiprandGenerateUniformDouble(gen, unif_devc, N); hipLaunchKernelGGL(( set_states) , dim3(nblock), dim3(nthread), 0, 0, N, ms1_devc, unif_devc, p_s0_ss_1); hiprandGenerateUniformDouble(gen, unif_devc, N); hipLaunchKernelGGL(( set_states) , dim3(nblock), dim3(nthread), 0, 0, N, ms2_devc, unif_devc, p_s0_ss_2); hiprandGenerateUniformDouble(gen, unif_devc, N); hipLaunchKernelGGL(( set_states) , dim3(nblock), dim3(nthread), 0, 0, N, ms3_devc, unif_devc, p_s0_ss_3); hipLaunchKernelGGL(( set_total_states) , dim3(nblock), dim3(nthread), 0, 0, N, ms_all_devc, ms1_devc, ms2_devc, ms3_devc); // then the initial state vectors (which we assume are the same for all models) double* X0_host = (double*) malloc (nx * sizeof(double)); for (i = 0; i < nx; i++) X0_host[i] = mod->X0[i]; double* X0_devc; hipMalloc((void**)&X0_devc, nx * sizeof(double)); hipMemcpy(X0_devc, X0_host, nx*sizeof(double), hipMemcpyHostToDevice); /** * This is the same type of thing as the RQ_L, except slightly different size, * and you need to follow it up with the classic iteration over sampling, zeroing * out, and adding. */ double *P0_L_host = (double*) calloc (nx * nx, sizeof(double)); double **P0_L_devc_lookup = (double**) malloc (8 * sizeof(double*)); double *P0LD0, *P0LD1, *P0LD2, *P0LD3, *P0LD4, *P0LD5, *P0LD6, *P0LD7; #define SET_SS_COVARIANCE(___index___) \ for (i = 0; i < nx; i++) \ for (j = i; j < nx; j++) \ P0_L_host[nx*i + j] = mod->P0_L[___index___][nx*j + i]; \ hipMalloc((void**)&P0LD##___index___, nx * nx * sizeof(double)); \ hipMemcpy(P0LD##___index___, P0_L_host, nx*nx*sizeof(double), hipMemcpyHostToDevice); \ P0_L_devc_lookup[___index___] = P0LD##___index___; SET_SS_COVARIANCE(0) SET_SS_COVARIANCE(1) SET_SS_COVARIANCE(2) SET_SS_COVARIANCE(3) SET_SS_COVARIANCE(4) SET_SS_COVARIANCE(5) SET_SS_COVARIANCE(6) SET_SS_COVARIANCE(7) free(P0_L_host); /** * Note that this time we're not adding the shocks to an existing matrix, but propagating * a target matrix with the shocks themselves. 
Basically this just means that for the first * loop, we'll zero out the target, but that for all subsequent iterations, we wish to keep * it; this is why beta starts out as 0.0, and then becomes 1.0; */ hiprandGenerateNormalDouble(gen, Xp_devc, N*nx, 0.0, 1.0); alpha = 1.0; beta = 0.0; for (i = 0; i < 2; i++) { for (j = 0; j < 2; j++) { for (k = 0; k < 2; k++) { hipMemcpy(Xp_devc_tmp, Xp_devc, N*nx*sizeof(double), hipMemcpyDeviceToDevice); hipLaunchKernelGGL(( zero_out_shocks) , dim3(nblock), dim3(nthread), 0, 0, N, nx, Xp_devc_tmp, ms_all_devc, k+2*(j+2*i)); hipblasDgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N, nx,N,nx,&alpha, P0_L_devc_lookup[k+2*(j+2*i)],nx, Xp_devc_tmp,nx, &beta, X_devc,nx); beta = 1.0; } } } hipLaunchKernelGGL(( add_mean_vector) , dim3(nblock), dim3(nthread), 0, 0, N, nx, X_devc, X0_devc, 1.0); // set the initial weights as well double W_init = 1.0;// / ((double)N); hipLaunchKernelGGL(( set_weights) , dim3(nblock), dim3(nthread), 0, 0, N, W_devc, W_init); if (verbose == 1) { printf("======================\n"); printf(" Time Log-Likelihood \n"); printf(" ---- -------------- \n\n"); } for (t = 0; t < T; t++) { /** * The first thing we do is sample the states. Since we assume independence of markov * processes, we can simply handle each state one at a time using a Bernoulli random * variable. Subsequently, we set the index of the combination of states, using a row- * major indexing scheme. */ hiprandGenerateUniformDouble(gen, unif_devc, N); hipLaunchKernelGGL(( set_states) , dim3(nblock), dim3(nthread), 0, 0, N, ms1_devc, unif_devc, p_s0_ss_1); hiprandGenerateUniformDouble(gen, unif_devc, N); hipLaunchKernelGGL(( set_states) , dim3(nblock), dim3(nthread), 0, 0, N, ms2_devc, unif_devc, p_s0_ss_2); hiprandGenerateUniformDouble(gen, unif_devc, N); hipLaunchKernelGGL(( set_states) , dim3(nblock), dim3(nthread), 0, 0, N, ms3_devc, unif_devc, p_s0_ss_3); hipLaunchKernelGGL(( set_total_states) , dim3(nblock), dim3(nthread), 0, 0, N, ms_all_devc, ms1_devc, ms2_devc, ms3_devc); //---------------------------------------------------------------------- // sample: x(i) ~ A*x(i) alpha = 1.0; beta = 0.0; hipblasDgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N, nx,N,nx,&alpha, hx_devc,nx, X_devc,nx, &beta, Xp_devc,nx); // get standard normal draws (Big matrix multiplication) hiprandGenerateNormalDouble(gen, norms_devc, N*ne, 0.0, 1.0); /** * This is the part that gets affected by the markov-switching variances, * since the shocks can be of 8 different types. I'm thinking we make a copy * of the shocks, and then we zero out different ones for each of the state * combinations, and then just call the normal multiplication */ alpha = 1.0; beta = 1.0; for (i = 0; i < 2; i++) { for (j = 0; j < 2; j++) { for (k = 0; k < 2; k++) { hipMemcpy(norms_devc_tmp, norms_devc, N*ne*sizeof(double), hipMemcpyDeviceToDevice); hipLaunchKernelGGL(( zero_out_shocks) , dim3(nblock), dim3(nthread), 0, 0, N, ne, norms_devc_tmp, ms_all_devc, k+2*(j+2*i)); hipblasDgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N, nx,N,ne,&alpha, RQ_L_devc_lookup[k+2*(j+2*i)],nx, norms_devc,ne, &beta, Xp_devc,nx); } } } hipLaunchKernelGGL(( add_mean_vector) , dim3(nblock), dim3(nthread), 0, 0, N, nx, Xp_devc, h_devc, 1.0); //---------------------------------------------------------------------- // calculate weights using f(yt | xt) // obtain second order portions, which means making a huge matrix... 
hipLaunchKernelGGL(( set_X_kron_X) , dim3(nblock), dim3(nthread), 0, 0, N, Xp_devc, XX_devc, nx); alpha = 0.5; beta = 0.0; hipblasDgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N, ny,N,nx*nx,&alpha, gxx_devc,ny, XX_devc,nx*nx, &beta, YmMu_devc,ny); alpha = 1.0; beta = 1.0; hipblasDgemm(handle,HIPBLAS_OP_N,HIPBLAS_OP_N, ny,N,nx,&alpha, gx_devc,ny, Xp_devc,nx, &beta, YmMu_devc,ny); hipLaunchKernelGGL(( add_mean_vector) , dim3(nblock), dim3(nthread), 0, 0, N, ny, YmMu_devc, g_devc, 1); hipMemcpy(data_devc, data+(ny*t), ny*sizeof(double), hipMemcpyHostToDevice); hipLaunchKernelGGL(( add_mean_vector) , dim3(nblock), dim3(nthread), 0, 0, N, ny, YmMu_devc, data_devc, -1.0); // now that we have the means, we need to make a copy and then do the product hipMemcpy(HYmMu_devc, YmMu_devc, N*ny*sizeof(double), hipMemcpyDeviceToDevice); hipsolverDnDpotrs(s_handle, HIPBLAS_FILL_MODE_LOWER, ny,N, HmatL_devc,ny, HYmMu_devc,ny, info_devc); // painful dot products, update the weights hipLaunchKernelGGL(( all_dot_products) , dim3(nblock), dim3(nthread), 0, 0, N, YmMu_devc, HYmMu_devc, W_devc, y_coeff, ny); //---------------------------------------------------------------------- // work with the weights // compute likelihood (all positive, so use sum(abs()) ) hipblasDasum(handle, N, W_devc, 1, &LL_host); LL_host /= ((double)N); // obtain the likelihood if (verbose == 1) { printf(" %04d %14.2f \n", t, log(LL_host)); } *LL_out += log( LL_host ); LL_host = 1.0 / LL_host; // scale weights by the likelihood hipblasDscal(handle, N, &LL_host, W_devc, 1); // weights normalized if (resample == 0) { hipMemcpy(X_devc, Xp_devc, N*nx*sizeof(double), hipMemcpyDeviceToDevice); } else { // compute ESS hipblasDnrm2(handle, N, W_devc, 1, &LL_host); LL_host *= LL_host; if (LL_host <= (N * 32.0)) { hipMemcpy(X_devc, Xp_devc, N*nx*sizeof(double), hipMemcpyDeviceToDevice); } else { d_offset = (double)rand() / RAND_MAX; for (i = 0; i < N; i++) U_host[i] = (d_offset + (double)i) / ((double)N); LL_host = 1.0 / ((double)N); hipblasDscal(handle, N, &LL_host, W_devc, 1); // weights normalized gpu_cumsum(N, W_devc); hipMemcpy(W_host+1, W_devc, N*sizeof(double), hipMemcpyDeviceToHost); j = 0; for (i = 0; i < N; i++) { while (U_host[i] >= W_host[j+1]) j++; N_host[i] = j; } hipMemcpy(N_devc, N_host, N*sizeof(int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( reassign_particles) , dim3(nblock), dim3(nthread), 0, 0, N, nx, N_devc, X_devc, Xp_devc); /** * You must also reassign the values for the particles, which we'll do by having * one extra array that gets sampled into, and then gives the info to the different * states. This is basically because we do in-place updating of the markov states, * so if we reassigned all three, then we would need to reassign three times, and * then copy back three times. Idk maybe this would be better. 
*/ hipLaunchKernelGGL(( reassign_particles) , dim3(nblock), dim3(nthread), 0, 0, N, 1, N_devc, ms_all_devc_tmp, ms_all_devc); hipLaunchKernelGGL(( reassign_states) , dim3(nblock), dim3(nthread), 0, 0, N, ms_all_devc_tmp, ms1_devc, ms2_devc, ms3_devc); // lastly, reset the weights hipLaunchKernelGGL(( set_weights) , dim3(nblock), dim3(nthread), 0, 0, N, W_devc, W_init); } } } if (verbose == 1) { printf("======================\n"); } free(W_host); free(U_host); free(N_host); hipFree(hx_devc); hipFree(HmatL_devc); hipFree(RQLD0); hipFree(RQLD1); hipFree(RQLD2); hipFree(RQLD3); hipFree(RQLD4); hipFree(RQLD5); hipFree(RQLD6); hipFree(RQLD7); hipFree(P0LD0); hipFree(P0LD1); hipFree(P0LD2); hipFree(P0LD3); hipFree(P0LD4); hipFree(P0LD5); hipFree(P0LD6); hipFree(P0LD7); hipFree(g_devc); hipFree(gx_devc); hipFree(gxx_devc); hipFree(X_devc); hipFree(Xp_devc); hipFree(Xp_devc_tmp); hipFree(XX_devc); hipFree(ms1_devc); hipFree(ms2_devc); hipFree(ms3_devc); hipFree(ms_all_devc); hipFree(ms_all_devc_tmp); hipFree(YmMu_devc); hipFree(HYmMu_devc); hipFree(data_devc); hipFree(N_devc); hipFree(W_devc); hipFree(norms_devc); return 0; } /*printf("Device Number: %d\n\n", i); printf(" Device name: %s\n", prop.name); printf(" Compute capability: %d.%d\n\n", prop.major, prop.minor); printf(" Total global memory: %lu bytes\n", prop.totalGlobalMem); printf(" Max shared memory per block: %lu bytes\n", prop.sharedMemPerBlock); printf(" Number of doubles: %lu\n", prop.sharedMemPerBlock/8); printf(" Max 32-bit registers per block: %d\n", prop.regsPerBlock); printf(" Warp size: %d\n\n", prop.warpSize); printf(" Max threads / block: %d\n", prop.maxThreadsPerBlock); printf(" Max threads dim: [%d %d %d]\n",prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); printf(" Number of multiprocessors: %d\n\n", prop.multiProcessorCount);*/ /* This was the matlab version of the systematic resampler: function I = resample(W) % This is a function to resample from the weights W using the % systematic resampling approach. N = length(W); U1 = rand/N; U = U1 + ([1:N]-1)/N; W2 = cumsum(W); W1 = [0; W2(1:end-1)]; Ns = NaN(N,1); for i = 1:N Ns(i) = length(find(U >= W1(i) & U <= W2(i))); end I = NaN(N,1); k = 1; for i = 1:N for j = 1:Ns(i) I(k) = i; k = k + 1; end end end */
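/*
 * Illustrative sketch (not part of the archived file above): a standalone CPU
 * version of the systematic resampler that the MATLAB comment and the
 * U_host / W_host / N_host loop in ms3_gpu_particle_filter both describe. It
 * assumes the N weights are already normalized to sum to 1; the function and
 * variable names are hypothetical.
 */
#include <stdlib.h>

static void systematic_resample(int N, const double *W, int *idx)
{
    /* one uniform offset shared by all strata, like d_offset above */
    double offset = (double)rand() / RAND_MAX;

    /* cumulative weights, cum[0] = 0 and cum[N] = 1 */
    double *cum = (double *)malloc((N + 1) * sizeof(double));
    cum[0] = 0.0;
    for (int i = 0; i < N; i++)
        cum[i + 1] = cum[i] + W[i];

    /* walk the strata and the cumulative weights together (both are sorted) */
    int j = 0;
    for (int i = 0; i < N; i++) {
        double U = (offset + (double)i) / (double)N;
        while (j < N - 1 && U >= cum[j + 1])
            j++;
        idx[i] = j;   /* particle i copies its state from particle idx[i] */
    }
    free(cum);
}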
f4627971b66bff37c2d2eea9877e3e6f657bbc9a.cu
#include "gpu_pf.h" #include "gpu_pf_utilities.h" extern "C" ms3_gpu_model ms3_gpu_model_(int nx, int ny, int ne, double* g_lev_2, double* gx_lev_2, double* gxx_lev_2, double* h_lev_1, double* hx_lev_1, double** RQ_L, double* HmatL, double* det_Hmat, double* X0, double** P0_L, double** Pr_ss) { ms3_gpu_model mod; mod.nx = nx; mod.ny = ny; mod.ne = ne; mod.g_lev_2 = g_lev_2; mod.gx_lev_2 = gx_lev_2; mod.gxx_lev_2 = gxx_lev_2; mod.h_lev_1 = h_lev_1; mod.hx_lev_1 = hx_lev_1; mod.RQ_L = RQ_L; mod.HmatL = HmatL; mod.det_Hmat = det_Hmat; mod.X0 = X0; mod.P0_L = P0_L; mod.Pr_ss = Pr_ss; return mod; } void printmat(const double* M, int nrow, int ncol) { int i, j; for (i = 0; i < nrow; i++) { for (j = 0; j < ncol; j++) printf("%+8.4f ", M[ncol*i + j]); printf("\n"); } } void printmat(const int* M, int nrow, int ncol) { int i, j; for (i = 0; i < nrow; i++) { for (j = 0; j < ncol; j++) printf("%+4d ", M[ncol*i + j]); printf("\n"); } } /** * This is a markov-switching version of the ordinary PF. It has 3 (independent) markov switching states, * which are assumed to be unrelated to the transition equation (i.e. the particles move in the same way, * */ extern "C" int ms3_gpu_particle_filter(double* data, ms3_gpu_model* mod, gpu_options* opts, double* LL_out) { //--------------- // useful scalars int i, j, k, t; int verbose = 0; double alpha, beta; int resample = 1; *LL_out = 0.0; // steady state for markov switching double p_s0_ss_1 = mod->Pr_ss[0][0]; double p_s0_ss_2 = mod->Pr_ss[1][0]; double p_s0_ss_3 = mod->Pr_ss[2][0]; //---------------------------------- // importing parameters and matrices int T = opts->T; int N = opts->npart; int nx = mod->nx; int ny = mod->ny; int ne = mod->ne; double det_Hmat = mod->det_Hmat[0]; double log2pi = 1.8378770664093453390819377091; double y_coeff = -0.5 * (ny * log2pi + log(det_Hmat)); // fill in x-intercept double* h_host = (double*) malloc (nx * sizeof(double)); for (i = 0; i < nx; i++) h_host[i] = mod->h_lev_1[i]; double* h_devc; cudaMalloc((void**)&h_devc, nx * sizeof(double)); cudaMemcpy(h_devc, h_host, nx*sizeof(double), cudaMemcpyHostToDevice); free(h_host); // fill in trans mat. using column major (where source is row-major) double* hx_host = (double*) malloc (nx * nx * sizeof(double)); for (i = 0; i < nx; i++) for (j = 0; j < nx; j++) hx_host[nx*i + j] = mod->hx_lev_1[nx*j + i]; double* hx_devc; cudaMalloc((void**)&hx_devc, nx * nx * sizeof(double)); cudaMemcpy(hx_devc, hx_host, nx*nx*sizeof(double), cudaMemcpyHostToDevice); free(hx_host); /** * Since we assume that the Q matrix is different for each of the 8 combinations * of states, we must bake that into our program. My first thought is that I'll * explicitly make 8 matrices, and then just package their pointers. Also, we define * a macro to make life a little easier for ourselves. (I'd like to take a look at * the preprocessor output to make sure this does what I think it does.) 
*/ double *RQ_L_host = (double*) calloc (nx * ne, sizeof(double)); double **RQ_L_devc_lookup = (double**) malloc (8 * sizeof(double*)); double *RQLD0, *RQLD1, *RQLD2, *RQLD3, *RQLD4, *RQLD5, *RQLD6, *RQLD7; #define SET_SHOCK_VARIANCE(___index___) \ for (i = 0; i < ne; i++) \ for (j = i; j < nx; j++) \ RQ_L_host[nx*i + j] = mod->RQ_L[___index___][ne*j + i]; \ cudaMalloc((void**)&RQLD##___index___, nx * nx * sizeof(double)); \ cudaMemcpy(RQLD##___index___, RQ_L_host, nx*ne*sizeof(double), cudaMemcpyHostToDevice); \ RQ_L_devc_lookup[___index___] = RQLD##___index___; SET_SHOCK_VARIANCE(0) SET_SHOCK_VARIANCE(1) SET_SHOCK_VARIANCE(2) SET_SHOCK_VARIANCE(3) SET_SHOCK_VARIANCE(4) SET_SHOCK_VARIANCE(5) SET_SHOCK_VARIANCE(6) SET_SHOCK_VARIANCE(7) free(RQ_L_host); // fill in M.E. cov. mat. using column major (where source is row-major) double* HmatL_host = (double*) malloc (ny * ny * sizeof(double)); for (i = 0; i < ny; i++) for (j = 0; j < ny; j++) HmatL_host[ny*i + j] = mod->HmatL[ny*j + i]; double* HmatL_devc; cudaMalloc((void**)&HmatL_devc, ny * ny * sizeof(double)); cudaMemcpy(HmatL_devc, HmatL_host, ny*ny*sizeof(double), cudaMemcpyHostToDevice); free(HmatL_host); // fill in gx & gxx using column major (where source is row-major) double* g_host = (double*) malloc (ny * sizeof(double)); double* gx_host = (double*) malloc (ny * nx * sizeof(double)); double* gxx_host = (double*) malloc (ny * nx * nx * sizeof(double)); for (i = 0; i < ny; i++) g_host[i] = mod->g_lev_2[i]; for (i = 0; i < nx; i++) for (j = 0; j < ny; j++) gx_host[ny*i + j] = mod->gx_lev_2[nx*j + i]; for (i = 0; i < nx*nx; i++) for (j = 0; j < ny; j++) gxx_host[ny*i + j] = mod->gxx_lev_2[nx*nx*j + i]; double* g_devc; double* gx_devc; double* gxx_devc; cudaMalloc((void**)&g_devc, ny * sizeof(double)); cudaMalloc((void**)&gx_devc, ny * nx * sizeof(double)); cudaMalloc((void**)&gxx_devc, ny * nx * nx * sizeof(double)); cudaMemcpy(g_devc, g_host, ny * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(gx_devc, gx_host, ny * nx * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(gxx_devc, gxx_host, ny * nx * nx * sizeof(double), cudaMemcpyHostToDevice); free(g_host); free(gx_host); free(gxx_host); //----------------------------------- // allocating memory for the function double* X_devc; double* Xp_devc; double* Xp_devc_tmp; cudaMalloc((void**)&X_devc, N*nx*sizeof(double)); cudaMalloc((void**)&Xp_devc, N*nx*sizeof(double)); cudaMalloc((void**)&Xp_devc_tmp, N*nx*sizeof(double)); double* XX_devc; cudaMalloc((void**)&XX_devc, N*nx*nx*sizeof(double)); double* YmMu_devc; double* HYmMu_devc; cudaMalloc((void**)&YmMu_devc, N*ny*sizeof(double)); cudaMalloc((void**)&HYmMu_devc, N*ny*sizeof(double)); char* ms1_devc; char* ms2_devc; char* ms3_devc; char* ms_all_devc; char* ms_all_devc_tmp; cudaMalloc((void**)&ms1_devc, N*sizeof(char)); cudaMalloc((void**)&ms2_devc, N*sizeof(char)); cudaMalloc((void**)&ms3_devc, N*sizeof(char)); cudaMalloc((void**)&ms_all_devc, N*sizeof(char)); cudaMalloc((void**)&ms_all_devc_tmp, N*sizeof(char)); double* data_devc; cudaMalloc((void**)&data_devc, ny*sizeof(double)); //---------------------------------- // containers specific to likelihood double* W_host = (double*) malloc ((N+1) * sizeof(double)); W_host[0] = 0.0; double* W_devc; cudaMalloc((void**)&W_devc, N*sizeof(double)); double LL_host = 0.0; double* U_host = (double*) malloc (N * sizeof(double)); int* N_host = (int*) malloc (N * sizeof(int)); int* N_devc; cudaMalloc((void**)&N_devc, N*sizeof(int)); double d_offset; //------------------ // CUDA Organization int 
nthread = 128; int nblock = (N % nthread == 0) ? N/nthread : (N/nthread) + 1; //------------------------------- // set up random number generator curandGenerator_t gen; curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_XORWOW); double* norms_devc; // for transition double* norms_devc_tmp; // (for masking transition) double* unif_devc; // for states cudaMalloc((void**)&norms_devc, N * ne * sizeof(double)); cudaMalloc((void**)&norms_devc_tmp, N * ne * sizeof(double)); cudaMalloc((void**)&unif_devc, N * sizeof(double)); // seed the GPU P-RNG curandSetPseudoRandomGeneratorSeed(gen, opts->seed); // seed the C P-RNG (for resampling) srand(opts->seed); //------------------ // set up for cuBLAS cublasHandle_t handle; cusolverDnHandle_t s_handle; cublasCreate(&handle); cusolverDnCreate(&s_handle); int* info_devc; cudaMalloc((void**)&info_devc, sizeof(int)); //-------------------------- // set the initial particles // first we set the markov-switching states curandGenerateUniformDouble(gen, unif_devc, N); set_states <<<nblock, nthread>>> (N, ms1_devc, unif_devc, p_s0_ss_1); curandGenerateUniformDouble(gen, unif_devc, N); set_states <<<nblock, nthread>>> (N, ms2_devc, unif_devc, p_s0_ss_2); curandGenerateUniformDouble(gen, unif_devc, N); set_states <<<nblock, nthread>>> (N, ms3_devc, unif_devc, p_s0_ss_3); set_total_states <<<nblock, nthread>>> (N, ms_all_devc, ms1_devc, ms2_devc, ms3_devc); // then the initial state vectors (which we assume are the same for all models) double* X0_host = (double*) malloc (nx * sizeof(double)); for (i = 0; i < nx; i++) X0_host[i] = mod->X0[i]; double* X0_devc; cudaMalloc((void**)&X0_devc, nx * sizeof(double)); cudaMemcpy(X0_devc, X0_host, nx*sizeof(double), cudaMemcpyHostToDevice); /** * This is the same type of thing as the RQ_L, except slightly different size, * and you need to follow it up with the classic iteration over sampling, zeroing * out, and adding. */ double *P0_L_host = (double*) calloc (nx * nx, sizeof(double)); double **P0_L_devc_lookup = (double**) malloc (8 * sizeof(double*)); double *P0LD0, *P0LD1, *P0LD2, *P0LD3, *P0LD4, *P0LD5, *P0LD6, *P0LD7; #define SET_SS_COVARIANCE(___index___) \ for (i = 0; i < nx; i++) \ for (j = i; j < nx; j++) \ P0_L_host[nx*i + j] = mod->P0_L[___index___][nx*j + i]; \ cudaMalloc((void**)&P0LD##___index___, nx * nx * sizeof(double)); \ cudaMemcpy(P0LD##___index___, P0_L_host, nx*nx*sizeof(double), cudaMemcpyHostToDevice); \ P0_L_devc_lookup[___index___] = P0LD##___index___; SET_SS_COVARIANCE(0) SET_SS_COVARIANCE(1) SET_SS_COVARIANCE(2) SET_SS_COVARIANCE(3) SET_SS_COVARIANCE(4) SET_SS_COVARIANCE(5) SET_SS_COVARIANCE(6) SET_SS_COVARIANCE(7) free(P0_L_host); /** * Note that this time we're not adding the shocks to an existing matrix, but propagating * a target matrix with the shocks themselves. 
Basically this just means that for the first * loop, we'll zero out the target, but that for all subsequent iterations, we wish to keep * it; this is why beta starts out as 0.0, and then becomes 1.0; */ curandGenerateNormalDouble(gen, Xp_devc, N*nx, 0.0, 1.0); alpha = 1.0; beta = 0.0; for (i = 0; i < 2; i++) { for (j = 0; j < 2; j++) { for (k = 0; k < 2; k++) { cudaMemcpy(Xp_devc_tmp, Xp_devc, N*nx*sizeof(double), cudaMemcpyDeviceToDevice); zero_out_shocks <<<nblock, nthread>>> (N, nx, Xp_devc_tmp, ms_all_devc, k+2*(j+2*i)); cublasDgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N, nx,N,nx,&alpha, P0_L_devc_lookup[k+2*(j+2*i)],nx, Xp_devc_tmp,nx, &beta, X_devc,nx); beta = 1.0; } } } add_mean_vector <<<nblock, nthread>>> (N, nx, X_devc, X0_devc, 1.0); // set the initial weights as well double W_init = 1.0;// / ((double)N); set_weights <<<nblock, nthread>>> (N, W_devc, W_init); if (verbose == 1) { printf("======================\n"); printf(" Time Log-Likelihood \n"); printf(" ---- -------------- \n\n"); } for (t = 0; t < T; t++) { /** * The first thing we do is sample the states. Since we assume independence of markov * processes, we can simply handle each state one at a time using a Bernoulli random * variable. Subsequently, we set the index of the combination of states, using a row- * major indexing scheme. */ curandGenerateUniformDouble(gen, unif_devc, N); set_states <<<nblock, nthread>>> (N, ms1_devc, unif_devc, p_s0_ss_1); curandGenerateUniformDouble(gen, unif_devc, N); set_states <<<nblock, nthread>>> (N, ms2_devc, unif_devc, p_s0_ss_2); curandGenerateUniformDouble(gen, unif_devc, N); set_states <<<nblock, nthread>>> (N, ms3_devc, unif_devc, p_s0_ss_3); set_total_states <<<nblock, nthread>>> (N, ms_all_devc, ms1_devc, ms2_devc, ms3_devc); //---------------------------------------------------------------------- // sample: x(i) ~ A*x(i) alpha = 1.0; beta = 0.0; cublasDgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N, nx,N,nx,&alpha, hx_devc,nx, X_devc,nx, &beta, Xp_devc,nx); // get standard normal draws (Big matrix multiplication) curandGenerateNormalDouble(gen, norms_devc, N*ne, 0.0, 1.0); /** * This is the part that gets affected by the markov-switching variances, * since the shocks can be of 8 different types. I'm thinking we make a copy * of the shocks, and then we zero out different ones for each of the state * combinations, and then just call the normal multiplication */ alpha = 1.0; beta = 1.0; for (i = 0; i < 2; i++) { for (j = 0; j < 2; j++) { for (k = 0; k < 2; k++) { cudaMemcpy(norms_devc_tmp, norms_devc, N*ne*sizeof(double), cudaMemcpyDeviceToDevice); zero_out_shocks <<<nblock, nthread>>> (N, ne, norms_devc_tmp, ms_all_devc, k+2*(j+2*i)); cublasDgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N, nx,N,ne,&alpha, RQ_L_devc_lookup[k+2*(j+2*i)],nx, norms_devc,ne, &beta, Xp_devc,nx); } } } add_mean_vector <<<nblock, nthread>>> (N, nx, Xp_devc, h_devc, 1.0); //---------------------------------------------------------------------- // calculate weights using f(yt | xt) // obtain second order portions, which means making a huge matrix... 
set_X_kron_X <<<nblock, nthread>>> (N, Xp_devc, XX_devc, nx); alpha = 0.5; beta = 0.0; cublasDgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N, ny,N,nx*nx,&alpha, gxx_devc,ny, XX_devc,nx*nx, &beta, YmMu_devc,ny); alpha = 1.0; beta = 1.0; cublasDgemm(handle,CUBLAS_OP_N,CUBLAS_OP_N, ny,N,nx,&alpha, gx_devc,ny, Xp_devc,nx, &beta, YmMu_devc,ny); add_mean_vector <<<nblock, nthread>>> (N, ny, YmMu_devc, g_devc, 1); cudaMemcpy(data_devc, data+(ny*t), ny*sizeof(double), cudaMemcpyHostToDevice); add_mean_vector <<<nblock, nthread>>> (N, ny, YmMu_devc, data_devc, -1.0); // now that we have the means, we need to make a copy and then do the product cudaMemcpy(HYmMu_devc, YmMu_devc, N*ny*sizeof(double), cudaMemcpyDeviceToDevice); cusolverDnDpotrs(s_handle, CUBLAS_FILL_MODE_LOWER, ny,N, HmatL_devc,ny, HYmMu_devc,ny, info_devc); // painful dot products, update the weights all_dot_products <<<nblock, nthread>>> (N, YmMu_devc, HYmMu_devc, W_devc, y_coeff, ny); //---------------------------------------------------------------------- // work with the weights // compute likelihood (all positive, so use sum(abs()) ) cublasDasum(handle, N, W_devc, 1, &LL_host); LL_host /= ((double)N); // obtain the likelihood if (verbose == 1) { printf(" %04d %14.2f \n", t, log(LL_host)); } *LL_out += log( LL_host ); LL_host = 1.0 / LL_host; // scale weights by the likelihood cublasDscal(handle, N, &LL_host, W_devc, 1); // weights normalized if (resample == 0) { cudaMemcpy(X_devc, Xp_devc, N*nx*sizeof(double), cudaMemcpyDeviceToDevice); } else { // compute ESS cublasDnrm2(handle, N, W_devc, 1, &LL_host); LL_host *= LL_host; if (LL_host <= (N * 32.0)) { cudaMemcpy(X_devc, Xp_devc, N*nx*sizeof(double), cudaMemcpyDeviceToDevice); } else { d_offset = (double)rand() / RAND_MAX; for (i = 0; i < N; i++) U_host[i] = (d_offset + (double)i) / ((double)N); LL_host = 1.0 / ((double)N); cublasDscal(handle, N, &LL_host, W_devc, 1); // weights normalized gpu_cumsum(N, W_devc); cudaMemcpy(W_host+1, W_devc, N*sizeof(double), cudaMemcpyDeviceToHost); j = 0; for (i = 0; i < N; i++) { while (U_host[i] >= W_host[j+1]) j++; N_host[i] = j; } cudaMemcpy(N_devc, N_host, N*sizeof(int), cudaMemcpyHostToDevice); reassign_particles <<<nblock, nthread>>> (N, nx, N_devc, X_devc, Xp_devc); /** * You must also reassign the values for the particles, which we'll do by having * one extra array that gets sampled into, and then gives the info to the different * states. This is basically because we do in-place updating of the markov states, * so if we reassigned all three, then we would need to reassign three times, and * then copy back three times. Idk maybe this would be better. 
*/ reassign_particles <<<nblock, nthread>>> (N, 1, N_devc, ms_all_devc_tmp, ms_all_devc); reassign_states <<<nblock, nthread>>> (N, ms_all_devc_tmp, ms1_devc, ms2_devc, ms3_devc); // lastly, reset the weights set_weights <<<nblock, nthread>>> (N, W_devc, W_init); } } } if (verbose == 1) { printf("======================\n"); } free(W_host); free(U_host); free(N_host); cudaFree(hx_devc); cudaFree(HmatL_devc); cudaFree(RQLD0); cudaFree(RQLD1); cudaFree(RQLD2); cudaFree(RQLD3); cudaFree(RQLD4); cudaFree(RQLD5); cudaFree(RQLD6); cudaFree(RQLD7); cudaFree(P0LD0); cudaFree(P0LD1); cudaFree(P0LD2); cudaFree(P0LD3); cudaFree(P0LD4); cudaFree(P0LD5); cudaFree(P0LD6); cudaFree(P0LD7); cudaFree(g_devc); cudaFree(gx_devc); cudaFree(gxx_devc); cudaFree(X_devc); cudaFree(Xp_devc); cudaFree(Xp_devc_tmp); cudaFree(XX_devc); cudaFree(ms1_devc); cudaFree(ms2_devc); cudaFree(ms3_devc); cudaFree(ms_all_devc); cudaFree(ms_all_devc_tmp); cudaFree(YmMu_devc); cudaFree(HYmMu_devc); cudaFree(data_devc); cudaFree(N_devc); cudaFree(W_devc); cudaFree(norms_devc); return 0; } /*printf("Device Number: %d\n\n", i); printf(" Device name: %s\n", prop.name); printf(" Compute capability: %d.%d\n\n", prop.major, prop.minor); printf(" Total global memory: %lu bytes\n", prop.totalGlobalMem); printf(" Max shared memory per block: %lu bytes\n", prop.sharedMemPerBlock); printf(" Number of doubles: %lu\n", prop.sharedMemPerBlock/8); printf(" Max 32-bit registers per block: %d\n", prop.regsPerBlock); printf(" Warp size: %d\n\n", prop.warpSize); printf(" Max threads / block: %d\n", prop.maxThreadsPerBlock); printf(" Max threads dim: [%d %d %d]\n",prop.maxThreadsDim[0], prop.maxThreadsDim[1], prop.maxThreadsDim[2]); printf(" Number of multiprocessors: %d\n\n", prop.multiProcessorCount);*/ /* This was the matlab version of the systematic resampler: function I = resample(W) % This is a function to resample from the weights W using the % systematic resampling approach. N = length(W); U1 = rand/N; U = U1 + ([1:N]-1)/N; W2 = cumsum(W); W1 = [0; W2(1:end-1)]; Ns = NaN(N,1); for i = 1:N Ns(i) = length(find(U >= W1(i) & U <= W2(i))); end I = NaN(N,1); k = 1; for i = 1:N for j = 1:Ns(i) I(k) = i; k = k + 1; end end end */
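/*
 * Illustrative sketch (not part of the archived file above): the beta = 0.0
 * then beta = 1.0 pattern that the initialization loop above uses to fold one
 * GEMM per state combination into a single output matrix. The first call
 * overwrites C (beta = 0); every later call accumulates into it (beta = 1).
 * The function, matrix names, and sizes are hypothetical; error checking is
 * omitted for brevity.
 */
#include <cublas_v2.h>

static void accumulate_gemms(cublasHandle_t handle, int m, int n, int k,
                             int nterms,
                             double *const *A_list, /* nterms m-by-k device matrices */
                             const double *B,       /* k-by-n device matrix          */
                             double *C)             /* m-by-n device output          */
{
    double alpha = 1.0;
    double beta  = 0.0;                       /* overwrite C on the first term */
    for (int t = 0; t < nterms; t++) {
        cublasDgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                    m, n, k, &alpha,
                    A_list[t], m,             /* column-major, lda = m */
                    B, k,                     /* ldb = k               */
                    &beta, C, m);             /* ldc = m               */
        beta = 1.0;                           /* accumulate from now on */
    }
}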
81348474f01e8f52d57c2ef334f9831b0eed1e75.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ /* * Main entry of dense matrix-matrix multiplication kernel */ #include <stdio.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <malloc.h> #include <vector> #include <parboil.h> #include <iostream> #include "sgemm_kernel.hip" // I/O routines extern bool readColMajorMatrixFile(const char *fn, int &nr_row, int &nr_col, std::vector<float>&v); //extern bool writeColMajorMatrixFile(const char *fn, int, int, std::vector<float>&); extern bool writeColMajorMatrixFile(const char *fn, int, int, float *v, int v_n_elem); extern "C" void computeGold(float *, const float*, const float*, unsigned int, unsigned int, unsigned int); int main (int argc, char *argv[]) { struct pb_Parameters *params; struct pb_TimerSet timers; float *dA, *dB, *dC; size_t A_sz, B_sz, C_sz; int matArow, matAcol; int matBrow, matBcol; std::vector<float> matA, matBT; pb_InitializeTimerSet(&timers); /* Read command line. Expect 3 inputs: A, B and B^T in column-major layout*/ params = pb_ReadParameters(&argc, argv); if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] == NULL) || (params->inpFiles[2] == NULL) || (params->inpFiles[3] != NULL)) { fprintf(stderr, "Expecting three input filenames\n"); exit(-1); } /* Read in data */ pb_SwitchToTimer(&timers, pb_TimerID_IO); // load A readColMajorMatrixFile(params->inpFiles[0], matArow, matAcol, matA); // copy A to device memory A_sz = matArow*matAcol*sizeof(float); // load B^T readColMajorMatrixFile(params->inpFiles[2], matBcol, matBrow, matBT); pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE ); B_sz = matBrow*matBcol*sizeof(float); // allocate space for C C_sz = matArow*matBcol*sizeof(float); // CUDA memory allocation //std::vector<float> matC(matArow*matBcol); float *matC = (float *)calloc(matArow*matBcol, sizeof(float)); hipMalloc((void**)&dA, A_sz); hipMalloc((void**)&dB, B_sz); hipMalloc((void**)&dC, C_sz); // Copy A and B^T into device memory pb_SwitchToTimer( &timers, pb_TimerID_COPY ); hipMemcpy(dA, &matA.front(), A_sz, hipMemcpyHostToDevice); hipMemcpy(dB, &matBT.front(), B_sz, hipMemcpyHostToDevice); pb_SwitchToTimer( &timers, pb_TimerID_KERNEL ); // Use standard sgemm interface regtileSgemm('N', 'T', matArow, matBcol, matAcol, 1.0f, \ dA, matArow, dB, matBcol, 0.0f, dC, matArow); if (params->outFile) { pb_SwitchToTimer( &timers, pb_TimerID_COPY ); //hipMemcpy(&matC.front(), dC, C_sz, hipMemcpyDeviceToHost); hipMemcpy(matC, dC, C_sz, hipMemcpyDeviceToHost); /* Write C to file */ pb_SwitchToTimer(&timers, pb_TimerID_IO); writeColMajorMatrixFile(params->outFile, matArow, matBcol, matC, matArow*matBcol); } pb_SwitchToTimer(&timers, pb_TimerID_NONE); double GPUtime = pb_GetElapsedTime(&(timers.timers[pb_TimerID_KERNEL])); std::cout<< "GFLOPs = " << 2.* matArow * matBcol * matAcol/GPUtime/1e9 << std::endl; pb_PrintTimerSet(&timers); pb_FreeParameters(params); hipFree(dA); hipFree(dB); hipFree(dC); return 0; }
81348474f01e8f52d57c2ef334f9831b0eed1e75.cu
/*************************************************************************** *cr *cr (C) Copyright 2010 The Board of Trustees of the *cr University of Illinois *cr All Rights Reserved *cr ***************************************************************************/ /* * Main entry of dense matrix-matrix multiplication kernel */ #include <stdio.h> #include <math.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #include <malloc.h> #include <vector> #include <parboil.h> #include <iostream> #include "sgemm_kernel.cu" // I/O routines extern bool readColMajorMatrixFile(const char *fn, int &nr_row, int &nr_col, std::vector<float>&v); //extern bool writeColMajorMatrixFile(const char *fn, int, int, std::vector<float>&); extern bool writeColMajorMatrixFile(const char *fn, int, int, float *v, int v_n_elem); extern "C" void computeGold(float *, const float*, const float*, unsigned int, unsigned int, unsigned int); int main (int argc, char *argv[]) { struct pb_Parameters *params; struct pb_TimerSet timers; float *dA, *dB, *dC; size_t A_sz, B_sz, C_sz; int matArow, matAcol; int matBrow, matBcol; std::vector<float> matA, matBT; pb_InitializeTimerSet(&timers); /* Read command line. Expect 3 inputs: A, B and B^T in column-major layout*/ params = pb_ReadParameters(&argc, argv); if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] == NULL) || (params->inpFiles[2] == NULL) || (params->inpFiles[3] != NULL)) { fprintf(stderr, "Expecting three input filenames\n"); exit(-1); } /* Read in data */ pb_SwitchToTimer(&timers, pb_TimerID_IO); // load A readColMajorMatrixFile(params->inpFiles[0], matArow, matAcol, matA); // copy A to device memory A_sz = matArow*matAcol*sizeof(float); // load B^T readColMajorMatrixFile(params->inpFiles[2], matBcol, matBrow, matBT); pb_SwitchToTimer( &timers, pb_TimerID_COMPUTE ); B_sz = matBrow*matBcol*sizeof(float); // allocate space for C C_sz = matArow*matBcol*sizeof(float); // CUDA memory allocation //std::vector<float> matC(matArow*matBcol); float *matC = (float *)calloc(matArow*matBcol, sizeof(float)); cudaMalloc((void**)&dA, A_sz); cudaMalloc((void**)&dB, B_sz); cudaMalloc((void**)&dC, C_sz); // Copy A and B^T into device memory pb_SwitchToTimer( &timers, pb_TimerID_COPY ); cudaMemcpy(dA, &matA.front(), A_sz, cudaMemcpyHostToDevice); cudaMemcpy(dB, &matBT.front(), B_sz, cudaMemcpyHostToDevice); pb_SwitchToTimer( &timers, pb_TimerID_KERNEL ); // Use standard sgemm interface regtileSgemm('N', 'T', matArow, matBcol, matAcol, 1.0f, \ dA, matArow, dB, matBcol, 0.0f, dC, matArow); if (params->outFile) { pb_SwitchToTimer( &timers, pb_TimerID_COPY ); //cudaMemcpy(&matC.front(), dC, C_sz, cudaMemcpyDeviceToHost); cudaMemcpy(matC, dC, C_sz, cudaMemcpyDeviceToHost); /* Write C to file */ pb_SwitchToTimer(&timers, pb_TimerID_IO); writeColMajorMatrixFile(params->outFile, matArow, matBcol, matC, matArow*matBcol); } pb_SwitchToTimer(&timers, pb_TimerID_NONE); double GPUtime = pb_GetElapsedTime(&(timers.timers[pb_TimerID_KERNEL])); std::cout<< "GFLOPs = " << 2.* matArow * matBcol * matAcol/GPUtime/1e9 << std::endl; pb_PrintTimerSet(&timers); pb_FreeParameters(params); cudaFree(dA); cudaFree(dB); cudaFree(dC); return 0; }
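/*
 * Illustrative sketch (not part of the archived file above): a plain CPU
 * reference for the product that regtileSgemm('N', 'T', ...) computes above,
 * C = alpha * A * B^T + beta * C with every matrix stored column-major
 * (A is m-by-k with lda = m, B is n-by-k with ldb = n, C is m-by-n with
 * ldc = m). The function name is hypothetical and may not match the declared
 * computeGold().
 */
static void reference_sgemm_nt(int m, int n, int k, float alpha,
                               const float *A, int lda,
                               const float *B, int ldb,
                               float beta, float *C, int ldc)
{
    for (int j = 0; j < n; j++) {            /* columns of C            */
        for (int i = 0; i < m; i++) {        /* rows of C               */
            float acc = 0.0f;
            for (int p = 0; p < k; p++)      /* A(i,p) * B(j,p)         */
                acc += A[i + p * lda] * B[j + p * ldb];
            C[i + j * ldc] = alpha * acc + beta * C[i + j * ldc];
        }
    }
}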
0460bdd0e038aaedf48089a30ff8a610adb44bf3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <arbor/gpu/gpu_common.hpp>
#include <arbor/gpu/math_cu.hpp>
#include <arbor/gpu/reduce_by_key.hpp>
#include <arbor/mechanism_abi.h>

namespace testing {

#define PPACK_IFACE_BLOCK \
auto _pp_var_width __attribute__((unused)) = params_.width;\
auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\
auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\
auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\
auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\
auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\
auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\
auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\
auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\
auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\
auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\
auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\
auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\
auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\
auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\
auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\
auto* _pp_var_weight __attribute__((unused)) = params_.weight;\
auto& _pp_var_events __attribute__((unused)) = params_.events;\
auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\
auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\
auto* _pp_var_record_z __attribute__((unused)) = params_.state_vars[0];\
auto& _pp_var_ion_ca __attribute__((unused)) = params_.ion_states[0];\
auto* _pp_var_ion_ca_index __attribute__((unused)) = params_.ion_states[0].index;\
//End of IFACEBLOCK

namespace {

using ::arb::gpu::exprelr;
using ::arb::gpu::safeinv;
using ::arb::gpu::min;
using ::arb::gpu::max;

__global__ void init(arb_mechanism_ppack params_) {
    int n_ = params_.width;
    int tid_ = threadIdx.x + blockDim.x*blockIdx.x;
    PPACK_IFACE_BLOCK;
    if (tid_<n_) {
        arb_value_type zca = _pp_var_ion_ca.ionic_charge[0];
        _pp_var_record_z[tid_] = zca;
    }
}

__global__ void multiply(arb_mechanism_ppack params_) {
    PPACK_IFACE_BLOCK;
    auto tid_ = threadIdx.x + blockDim.x*blockIdx.x;
    auto idx_ = blockIdx.y;
    if(tid_<_pp_var_width) {
        _pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_];
    }
}

} // namespace

void mechanism_test_ca_read_valence_gpu_init_(arb_mechanism_ppack* p) {
    auto n = p->width;
    unsigned block_dim = 128;
    unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim);
    hipLaunchKernelGGL(( init), dim3(grid_dim), dim3(block_dim), 0, 0, *p);
    if (!p->multiplicity) return;
    hipLaunchKernelGGL(( multiply), dim3(grid_dim, 1), dim3(block_dim), 0, 0, *p);
}
void mechanism_test_ca_read_valence_gpu_compute_currents_(arb_mechanism_ppack* p) {}
void mechanism_test_ca_read_valence_gpu_advance_state_(arb_mechanism_ppack* p) {}
void mechanism_test_ca_read_valence_gpu_write_ions_(arb_mechanism_ppack* p) {}
void mechanism_test_ca_read_valence_gpu_post_event_(arb_mechanism_ppack* p) {}
void mechanism_test_ca_read_valence_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {}

} // namespace testing
0460bdd0e038aaedf48089a30ff8a610adb44bf3.cu
#include <arbor/gpu/gpu_common.hpp> #include <arbor/gpu/math_cu.hpp> #include <arbor/gpu/reduce_by_key.hpp> #include <arbor/mechanism_abi.h> namespace testing { #define PPACK_IFACE_BLOCK \ auto _pp_var_width __attribute__((unused)) = params_.width;\ auto _pp_var_n_detectors __attribute__((unused)) = params_.n_detectors;\ auto* _pp_var_vec_ci __attribute__((unused)) = params_.vec_ci;\ auto* _pp_var_vec_di __attribute__((unused)) = params_.vec_di;\ auto* _pp_var_vec_t __attribute__((unused)) = params_.vec_t;\ auto* _pp_var_vec_dt __attribute__((unused)) = params_.vec_dt;\ auto* _pp_var_vec_v __attribute__((unused)) = params_.vec_v;\ auto* _pp_var_vec_i __attribute__((unused)) = params_.vec_i;\ auto* _pp_var_vec_g __attribute__((unused)) = params_.vec_g;\ auto* _pp_var_temperature_degC __attribute__((unused)) = params_.temperature_degC;\ auto* _pp_var_diam_um __attribute__((unused)) = params_.diam_um;\ auto* _pp_var_time_since_spike __attribute__((unused)) = params_.time_since_spike;\ auto* _pp_var_node_index __attribute__((unused)) = params_.node_index;\ auto* _pp_var_peer_index __attribute__((unused)) = params_.peer_index;\ auto* _pp_var_multiplicity __attribute__((unused)) = params_.multiplicity;\ auto* _pp_var_state_vars __attribute__((unused)) = params_.state_vars;\ auto* _pp_var_weight __attribute__((unused)) = params_.weight;\ auto& _pp_var_events __attribute__((unused)) = params_.events;\ auto& _pp_var_mechanism_id __attribute__((unused)) = params_.mechanism_id;\ auto& _pp_var_index_constraints __attribute__((unused)) = params_.index_constraints;\ auto* _pp_var_record_z __attribute__((unused)) = params_.state_vars[0];\ auto& _pp_var_ion_ca __attribute__((unused)) = params_.ion_states[0];\ auto* _pp_var_ion_ca_index __attribute__((unused)) = params_.ion_states[0].index;\ //End of IFACEBLOCK namespace { using ::arb::gpu::exprelr; using ::arb::gpu::safeinv; using ::arb::gpu::min; using ::arb::gpu::max; __global__ void init(arb_mechanism_ppack params_) { int n_ = params_.width; int tid_ = threadIdx.x + blockDim.x*blockIdx.x; PPACK_IFACE_BLOCK; if (tid_<n_) { arb_value_type zca = _pp_var_ion_ca.ionic_charge[0]; _pp_var_record_z[tid_] = zca; } } __global__ void multiply(arb_mechanism_ppack params_) { PPACK_IFACE_BLOCK; auto tid_ = threadIdx.x + blockDim.x*blockIdx.x; auto idx_ = blockIdx.y; if(tid_<_pp_var_width) { _pp_var_state_vars[idx_][tid_] *= _pp_var_multiplicity[tid_]; } } } // namespace void mechanism_test_ca_read_valence_gpu_init_(arb_mechanism_ppack* p) { auto n = p->width; unsigned block_dim = 128; unsigned grid_dim = ::arb::gpu::impl::block_count(n, block_dim); init<<<grid_dim, block_dim>>>(*p); if (!p->multiplicity) return; multiply<<<dim3{grid_dim, 1}, block_dim>>>(*p); } void mechanism_test_ca_read_valence_gpu_compute_currents_(arb_mechanism_ppack* p) {} void mechanism_test_ca_read_valence_gpu_advance_state_(arb_mechanism_ppack* p) {} void mechanism_test_ca_read_valence_gpu_write_ions_(arb_mechanism_ppack* p) {} void mechanism_test_ca_read_valence_gpu_post_event_(arb_mechanism_ppack* p) {} void mechanism_test_ca_read_valence_gpu_apply_events_(arb_mechanism_ppack* p, arb_deliverable_event_stream* events) {} } // namespace testing
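/*
 * Illustrative sketch (not part of the archived file above): the grid layout
 * the multiply launch above relies on. blockIdx.x tiles the mechanism width
 * (with a ceil-division block count, the role of ::arb::gpu::impl::block_count),
 * and blockIdx.y selects which state-variable array to scale. The kernel,
 * helper, and variable names below are hypothetical.
 */
__global__ void scale_state_vars(double** state_vars,
                                 const double* multiplicity,
                                 unsigned width)
{
    unsigned tid = threadIdx.x + blockDim.x * blockIdx.x;  /* element index */
    unsigned var = blockIdx.y;                             /* which array   */
    if (tid < width)
        state_vars[var][tid] *= multiplicity[tid];
}

static inline unsigned ceil_div(unsigned n, unsigned block)
{
    return (n + block - 1) / block;          /* ceil(n / block) */
}

/* Launch sketch, assuming n_state_vars device arrays of length `width`:
 *   unsigned block_dim = 128;
 *   dim3 grid(ceil_div(width, block_dim), n_state_vars);
 *   scale_state_vars<<<grid, block_dim>>>(state_vars_devc, multiplicity_devc, width);
 */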
b6ac28dff197f251e2edadd7bad6ae5e8e9422d4.hip
// !!! This is a file automatically generated by hipify!!! /**** * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ****/ #include "mpi.h" #include "mp.h" #include "hip/hip_runtime.h" #include "hip/hip_runtime.h" #include <string.h> #include <stdio.h> #include "assert.h" #include <limits.h> #include <sys/types.h> #include <unistd.h> #include "prof.h" #include "hip/hip_runtime_api.h" #define CUDA_CHECK(stmt) \ do { \ hipError_t result = (stmt); \ if (hipSuccess != result) { \ fprintf(stderr, "[%s:%d] cuda failed with %s \n", \ __FILE__, __LINE__,hipGetErrorString(result));\ exit(-1); \ } \ assert(hipSuccess == result); \ } while (0) #define MP_CHECK(stmt) \ do { \ int result = (stmt); \ if (0 != result) { \ fprintf(stderr, "[%s:%d] mp call failed \n", \ __FILE__, __LINE__); \ MPI_Abort(MPI_COMM_WORLD, -1); \ } \ assert(0 == result); \ } while (0) int enable_debug_prints = 0; #define mp_dbg_msg(FMT, ARGS...) 
do \ { \ if (enable_debug_prints) { \ fprintf(stderr, "[%d] [%d] MP DBG %s() " FMT, getpid(), my_rank, __FUNCTION__ , ## ARGS); \ fflush(stderr); \ } \ } while(0) #define MAX_SIZE 4096 //128*1024 //64*1024 #define ITER_COUNT_SMALL (2*1024) #define ITER_COUNT_LARGE 256 struct prof prof_normal; struct prof prof_async; struct prof prof_mpi; int prof_start = 0; int prof_idx = 0; int comm_size, my_rank, peer; int steps_per_batch = 16, batches_inflight = 4; int enable_async = 1; __device__ int counter; __device__ int clockrate; __global__ void dummy_kernel(double time) { long long int start, stop; double usec; start = clock64(); do { stop = clock64(); usec = ((double)(stop-start)*1000)/((double)clockrate); counter = usec; } while(usec < time); } int use_gpu_buffers=0; /*application and pack buffers*/ void *buf, *sbuf_d, *rbuf_d; int req_max_inflight = 0, rreq_max_inflight = 0, prepost_depth = 0; hipStream_t stream; size_t buf_size; int gpu_id = -1; int wait_key = 0; /*mp specific objects*/ mp_request_t *sreq; mp_request_t *rreq; MPI_Request * sreq_mpi; MPI_Request * rreq_mpi; mp_reg_t sreg, rreg; double time_start, time_stop; double sr_exchange (MPI_Comm comm, int size, int iter_count, int validate, double kernel_time, int use_async, struct prof *prof) { int i, j; double latency; double time_start, time_stop; int req_idx = 0, rreq_idx = 0, complete_req_idx = 0, complete_rreq_idx = 0; int req_inflight = 0, rreq_inflight = 0; mp_dbg_msg("size=%d iter_count=%d kernel_time=%f use_async=%d\n", size, iter_count, kernel_time, use_async); if (validate) { mp_dbg_msg("initializing the buffer \n"); if(use_gpu_buffers == 0) { memset(sbuf_d, (size + 1)%CHAR_MAX, buf_size); memset(rbuf_d, 0, buf_size); } else { CUDA_CHECK(hipMemset(sbuf_d, (size + 1)%CHAR_MAX, buf_size)); CUDA_CHECK(hipMemset(rbuf_d, 0, buf_size)); } CUDA_CHECK(hipDeviceSynchronize()); } time_start = MPI_Wtime(); for (j=0; j<prepost_depth; j++) { mp_dbg_msg("posted recv request: %d \n", rreq_idx); MP_CHECK(mp_irecv ((void *)((uintptr_t)rbuf_d + size*j), size, peer, &rreg, &rreq[rreq_idx])); rreq_idx = (rreq_idx + 1)%rreq_max_inflight; rreq_inflight++; } uint32_t wait_flag; if (use_async && wait_key && (1 == my_rank)) { fprintf(stdout, "[%d] waiting enabled, inserting a wait32_on_stream\n", my_rank); fflush(stdout); ACCESS_ONCE(wait_flag) = 0; MP_CHECK(mp_wait32_on_stream(&wait_flag, 1, MP_WAIT_GEQ, stream)); } prof_idx = 0; for (j = 0; j < iter_count; j++) { mp_dbg_msg("iteration :%d \n", j); if (!my_rank) { if (prof) PROF(prof, prof_idx++); req_idx = j%rreq_max_inflight; if (!use_async) { MP_CHECK(mp_wait(&rreq[req_idx])); } else { MP_CHECK(mp_wait_on_stream(&rreq[req_idx], stream)); } if (prof) PROF(prof, prof_idx++); if (kernel_time > 0) { hipLaunchKernelGGL(( dummy_kernel) , dim3(1), dim3(1), 0, stream, kernel_time); if (!use_async) { CUDA_CHECK(hipStreamSynchronize(stream)); } } if (prof) PROF(prof, prof_idx++); req_idx = j%req_max_inflight; if (!use_async) { MP_CHECK(mp_isend ((void *)((uintptr_t)sbuf_d + size*j), size, peer, &sreg, &sreq[req_idx])); } else { MP_CHECK(mp_isend_on_stream ((void *)((uintptr_t)sbuf_d + size*j), size, peer, &sreg, &sreq[req_idx], stream)); mp_dbg_msg("posted send request: %d \n", req_idx); } } else { req_idx = j%req_max_inflight; if (!use_async) { MP_CHECK(mp_isend ((void *)((uintptr_t)sbuf_d + size*j), size, peer, &sreg, &sreq[req_idx])); } else { MP_CHECK(mp_isend_on_stream ((void *)((uintptr_t)sbuf_d + size*j), size, peer, &sreg, &sreq[req_idx], stream)); } mp_dbg_msg("posted send request: %d\n", 
req_idx); req_idx = j%rreq_max_inflight; if (!use_async) { MP_CHECK(mp_wait(&rreq[req_idx])); } else { MP_CHECK(mp_wait_on_stream(&rreq[req_idx], stream)); } if (kernel_time > 0) { hipLaunchKernelGGL(( dummy_kernel) , dim3(1), dim3(1), 0, stream, kernel_time); if (!use_async) { CUDA_CHECK(hipStreamSynchronize(stream)); } } } req_inflight++; mp_dbg_msg("requests inflight: %d \n", req_inflight); if (!my_rank && prof) PROF(prof, prof_idx++); if ((j + prepost_depth) < iter_count) { mp_dbg_msg("posted recv request: %d\n", rreq_idx); int buf_idx = (j + prepost_depth); MP_CHECK(mp_irecv ((void *)((uintptr_t)rbuf_d + size*buf_idx), size, peer, &rreg, &rreq[rreq_idx])); rreq_idx = (rreq_idx + 1)%rreq_max_inflight; rreq_inflight++; } if (!my_rank && prof) PROF(prof, prof_idx++); if (use_async && wait_key && (1 == my_rank)) { fprintf(stdout, "[%d] sleeping 15s\n", my_rank); sleep(15); ACCESS_ONCE(wait_flag) = 1; // disabling wait_key for subsequent calls wait_key = 0; fprintf(stdout, "[%d] sleeping 20us to let previous batches to run\n", my_rank); usleep(20); fprintf(stdout, "[%d] resuming...\n", my_rank); fflush(stdout); } /*synchronize on oldest batch*/ if (req_inflight == req_max_inflight) { if (use_async) { for (i=0; i<steps_per_batch; i++) { mp_dbg_msg("waiting on recv request: %d\n", complete_rreq_idx); MP_CHECK(mp_wait(&rreq[complete_rreq_idx])); mp_dbg_msg("completed recv request: %d\n", complete_rreq_idx); complete_rreq_idx = (complete_rreq_idx + 1)%rreq_max_inflight; rreq_inflight--; } mp_dbg_msg("after waiting on recv, rreq_inflight: %d \n", rreq_inflight); } for (i=0; i<steps_per_batch; i++) { mp_dbg_msg("waiting on send request: %d \n", complete_req_idx); MP_CHECK(mp_wait(&sreq[complete_req_idx])); mp_dbg_msg("completed send request: %d \n", complete_req_idx); complete_req_idx = (complete_req_idx + 1)%req_max_inflight; req_inflight--; } mp_dbg_msg("after waiting on send, req_inflight: %d \n", req_inflight); } if (j == (iter_count - 1)) { /*ideally, there should be validation here*/ if (use_async) { while (rreq_inflight > 0) { mp_wait(&rreq[complete_rreq_idx]); mp_dbg_msg("completed recv request: %d \n", complete_rreq_idx); complete_rreq_idx = (complete_rreq_idx + 1)%rreq_max_inflight; rreq_inflight--; } } while (req_inflight > 0) { mp_wait(&sreq[complete_req_idx]); mp_dbg_msg("completed send request: %d \n", complete_req_idx); complete_req_idx = (complete_req_idx + 1)%req_max_inflight; req_inflight--; } } if (!my_rank && prof) { PROF(prof, prof_idx++); prof_update(prof); prof_idx = 0; } } // TODO: move validate after timing if (validate) { if(use_gpu_buffers == 0) memcpy(buf, rbuf_d, buf_size); else CUDA_CHECK(hipMemcpy((void *)((uintptr_t)buf), (void *)((uintptr_t)rbuf_d), buf_size, hipMemcpyDefault)); //CUDA_CHECK(hipDeviceSynchronize()); char *value = (char *)((uintptr_t)buf); for (i=0; i<buf_size; i++) { if (value[i] != (size + 1)%CHAR_MAX) { mp_dbg_msg("MPI validation check failed index: %d expected: %d actual: %d \n", i, (size + 1)%CHAR_MAX, value[i]); exit(-1); } } } MPI_Barrier(comm); time_stop = MPI_Wtime(); latency = (((time_stop - time_start)*1e6)/(iter_count*2)); CUDA_CHECK(hipDeviceSynchronize()); return latency; } double sr_exchange_MPI (MPI_Comm comm, int size, int iter_count, int validate, double kernel_time, int use_async, struct prof *prof) { int i, j; double latency; double time_start, time_stop; int req_idx = 0, rreq_idx = 0, complete_req_idx = 0, complete_rreq_idx = 0; int req_inflight = 0, rreq_inflight = 0; use_async=0; //always! 
mp_dbg_msg("size=%d iter_count=%d kernel_time=%f use_async=%d\n", size, iter_count, kernel_time, use_async); if (validate) { mp_dbg_msg("initializing the buffer \n"); if(use_gpu_buffers == 0) { memset(sbuf_d, (size + 1)%CHAR_MAX, buf_size); memset(rbuf_d, 0, buf_size); } else { CUDA_CHECK(hipMemset(sbuf_d, (size + 1)%CHAR_MAX, buf_size)); CUDA_CHECK(hipMemset(rbuf_d, 0, buf_size)); } CUDA_CHECK(hipDeviceSynchronize()); } time_start = MPI_Wtime(); for (j=0; j<prepost_depth; j++) { mp_dbg_msg("posted recv request: %d size=%d, j=%d\n", rreq_idx, size, j); MPI_Irecv((void *)((uintptr_t)rbuf_d + size*j), size, MPI_CHAR, peer, my_rank, comm, &rreq_mpi[rreq_idx]); rreq_idx = (rreq_idx + 1)%rreq_max_inflight; rreq_inflight++; } prof_idx = 0; for (j = 0; j < iter_count; j++) { mp_dbg_msg("iteration :%d \n", j); if (!my_rank) { if (prof) PROF(prof, prof_idx++); req_idx = j%rreq_max_inflight; MPI_Wait(&rreq_mpi[req_idx], NULL); if (prof) PROF(prof, prof_idx++); if (kernel_time > 0) { hipLaunchKernelGGL(( dummy_kernel) , dim3(1), dim3(1), 0, stream, kernel_time); CUDA_CHECK(hipStreamSynchronize(stream)); } if (prof) PROF(prof, prof_idx++); req_idx = j%req_max_inflight; MPI_Isend((void *)((uintptr_t)sbuf_d + size*j), size, MPI_CHAR, peer, peer, comm, &sreq_mpi[req_idx]); } else { req_idx = j%req_max_inflight; MPI_Isend((void *)((uintptr_t)sbuf_d + size*j), size, MPI_CHAR, peer, peer, comm, &sreq_mpi[req_idx]); mp_dbg_msg("posted send request: %d\n", req_idx); req_idx = j%rreq_max_inflight; MPI_Wait(&rreq_mpi[req_idx], NULL); if (kernel_time > 0) { hipLaunchKernelGGL(( dummy_kernel) , dim3(1), dim3(1), 0, stream, kernel_time); CUDA_CHECK(hipStreamSynchronize(stream)); } } req_inflight++; mp_dbg_msg("requests inflight: %d \n", req_inflight); if (my_rank == 0 && prof) PROF(prof, prof_idx++); if ((j + prepost_depth) < iter_count) { mp_dbg_msg("posted recv request: %d\n", rreq_idx); int buf_idx = (j + prepost_depth); MPI_Irecv((void *)((uintptr_t)rbuf_d + size*buf_idx), size, MPI_CHAR, peer, my_rank, comm, &rreq_mpi[rreq_idx]); rreq_idx = (rreq_idx + 1)%rreq_max_inflight; rreq_inflight++; } if (my_rank == 0 && prof) PROF(prof, prof_idx++); /*synchronize on oldest batch*/ if (req_inflight == req_max_inflight) { for (i=0; i<steps_per_batch; i++) { mp_dbg_msg("waiting on send request: %d \n", complete_req_idx); MPI_Wait(&sreq_mpi[complete_req_idx], NULL); mp_dbg_msg("completed send request: %d \n", complete_req_idx); complete_req_idx = (complete_req_idx + 1)%req_max_inflight; req_inflight--; } mp_dbg_msg("after waiting on send, req_inflight: %d \n", req_inflight); } if (j == (iter_count - 1)) { /*ideally, there should be validation here*/ while (req_inflight > 0) { MPI_Wait(&sreq_mpi[complete_req_idx], NULL); mp_dbg_msg("completed send request: %d \n", complete_req_idx); complete_req_idx = (complete_req_idx + 1)%req_max_inflight; req_inflight--; } } if (my_rank == 0 && prof) { PROF(prof, prof_idx++); prof_update(prof); prof_idx = 0; } } // TODO: move validate after timing if (validate) { if(use_gpu_buffers == 0) memcpy(buf, rbuf_d, buf_size); else CUDA_CHECK(hipMemcpy((void *)((uintptr_t)buf), (void *)((uintptr_t)rbuf_d), buf_size, hipMemcpyDefault)); //CUDA_CHECK(hipDeviceSynchronize()); char *value = (char *)((uintptr_t)buf); for (i=0; i<buf_size; i++) { if (value[i] != (size + 1)%CHAR_MAX) { mp_dbg_msg("MPI validation check failed index: %d expected: %d actual: %d \n", i, (size + 1)%CHAR_MAX, value[i]); exit(-1); } } } MPI_Barrier(comm); time_stop = MPI_Wtime(); latency = (((time_stop - 
time_start)*1e6)/(iter_count*2)); CUDA_CHECK(hipDeviceSynchronize()); return latency; } int main (int argc, char *argv[]) { int iter_count = 0, size = 0, dev_count = 0, local_rank = 0, dev_id = 0; int kernel_time = 20; int comm_comp_ratio = 0; int validate = 0; int max_size = MAX_SIZE; int i,j; char *value = getenv("ENABLE_VALIDATION"); if (value != NULL) { validate = atoi(value); } value = getenv("ENABLE_DEBUG_MSG"); if (value != NULL) { enable_debug_prints = atoi(value); } value = getenv("KERNEL_TIME"); if (value != NULL) { kernel_time = atoi(value); } value = getenv("COMM_COMP_RATIO"); if (value != NULL) { comm_comp_ratio = atoi(value); } size = 1; value = getenv("SIZE"); if (value != NULL) { size = atoi(value); } value = getenv("MAX_SIZE"); if (value != NULL) { max_size = atoi(value); } int event_async = 0; value = getenv("MP_EVENT_ASYNC"); if (value != NULL) { event_async = atoi(value); } value = getenv("USE_GPU_BUFFERS"); if (value != NULL) { use_gpu_buffers = atoi(value); } printf("use_gpu_buffers=%d\n", use_gpu_buffers); while(1) { int c; c = getopt(argc, argv, "d:W:"); if (c == -1) break; switch(c) { case 'd': gpu_id = strtol(optarg, NULL, 0); break; case 'W': wait_key = strtol(optarg, NULL, 0); break; default: printf("ERROR: invalid option\n"); exit(EXIT_FAILURE); } } MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &comm_size); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); if (comm_size != 2) { fprintf(stderr, "this test requires exactly two processes \n"); exit(-1); } CUDA_CHECK(hipGetDeviceCount(&dev_count)); if (dev_count <= 0) { fprintf(stderr, "no CUDA devices found \n"); exit(-1); } if (gpu_id >= 0) { local_rank = gpu_id; } else if (getenv("USE_GPU")) { local_rank = atoi(getenv("USE_GPU")); } else if (getenv("MV2_COMM_WORLD_LOCAL_RANK") != NULL) { local_rank = atoi(getenv("MV2_COMM_WORLD_LOCAL_RANK")); } else if (getenv("OMPI_COMM_WORLD_LOCAL_RANK") != NULL) { local_rank = atoi(getenv("OMPI_COMM_WORLD_LOCAL_RANK")); } dev_id = local_rank%dev_count; fprintf(stdout, "[%d] local_rank: %d dev_count: %d using GPU device: %d\n", my_rank, local_rank, dev_count, dev_id); fprintf(stdout, "[%d] validate=%d event_async=%d, gpu buffers: %d\n", my_rank, validate, event_async, use_gpu_buffers); CUDA_CHECK(hipSetDevice(dev_id)); CUDA_CHECK(hipFree(0)); hipDeviceProp_t prop; CUDA_CHECK(hipGetDeviceProperties(&prop, dev_id)); CUDA_CHECK(hipMemcpyToSymbol(clockrate, (void *)&prop.clockRate, sizeof(int), 0, hipMemcpyHostToDevice)); fprintf(stdout, "[%d] GPU name=%s\n", my_rank, prop.name); peer = !my_rank; MP_CHECK(mp_init (MPI_COMM_WORLD, &peer, 1, MP_INIT_DEFAULT, dev_id)); iter_count = ITER_COUNT_SMALL; if (my_rank == 0) { fprintf(stdout, "steps_per_batch: %d batches_inflight: %d \n", steps_per_batch, batches_inflight); fprintf(stdout, "WARNING: dumping half round-trip latency!!!\n"); } prepost_depth = (steps_per_batch < iter_count) ? 
steps_per_batch : iter_count; req_max_inflight = steps_per_batch*batches_inflight; rreq_max_inflight = (steps_per_batch*batches_inflight + prepost_depth); sreq = (mp_request_t *) malloc(req_max_inflight*sizeof(mp_request_t)); rreq = (mp_request_t *) malloc(rreq_max_inflight*sizeof(mp_request_t)); sreq_mpi = (MPI_Request *) malloc(req_max_inflight*sizeof(MPI_Request)); rreq_mpi = (MPI_Request *) malloc(rreq_max_inflight*sizeof(MPI_Request)); CUDA_CHECK(hipStreamCreateWithFlags(&stream, hipStreamNonBlocking)); if (my_rank == 0) fprintf(stdout, "%10s\t %10s\t %10s\t %10s %10s\t %10s %10s\t %10s\n", "Size", "KernelTime", "No-async", "No-async+Kern", "Async", "Async+Kern", "MPI", "MPI+Kern"); if (size != 1) size = max_size = size; for (; size<=max_size; size*=2) { double latency; const char *tags = "kernel|send|recv|prepost|wait|"; if (size > 1024) { iter_count = ITER_COUNT_LARGE; } buf_size = size*iter_count; buf = malloc (buf_size); memset(buf, 0, buf_size); if(use_gpu_buffers == 0) { CUDA_CHECK(hipHostMalloc((void **)&sbuf_d, buf_size)); memset(sbuf_d, 0, buf_size); CUDA_CHECK(hipHostMalloc((void **)&rbuf_d, buf_size)); memset(rbuf_d, 0, buf_size); } else { CUDA_CHECK(hipMalloc((void **)&sbuf_d, buf_size)); CUDA_CHECK(hipMemset(sbuf_d, 0, buf_size)); CUDA_CHECK(hipMalloc((void **)&rbuf_d, buf_size)); CUDA_CHECK(hipMemset(rbuf_d, 0, buf_size)); } MP_CHECK(mp_register(sbuf_d, buf_size, &sreg)); MP_CHECK(mp_register(rbuf_d, buf_size, &rreg)); if (my_rank == 0) { if (prof_init(&prof_normal, 1000, 1000, "1us", 100, 1, tags)) { fprintf(stderr, "error in prof_init init.\n"); exit(-1); } if (prof_init(&prof_async, 1000, 1000, "1us", 100, 1, tags)) { fprintf(stderr, "error in prof_init init.\n"); exit(-1); } if (prof_init(&prof_mpi, 1000, 1000, "1us", 100, 1, tags)) { fprintf(stderr, "error in prof_init init.\n"); exit(-1); } prof_start = 1; } if (my_rank == 0) fprintf(stdout, "%10d", size); /*warmup*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 1/*use_async*/, NULL/*prof*/); latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 1/*kernel_time*/, 1/*use_async*/, NULL/*prof*/); latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 0/*use_async*/, NULL/*prof*/); latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 1/*kernel_time*/, 0/*use_async*/, NULL/*prof*/); latency = sr_exchange_MPI(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 0/*use_async*/, NULL/*prof*/); latency = sr_exchange_MPI(MPI_COMM_WORLD, size, iter_count, validate, 1/*kernel_time*/, 0/*use_async*/, NULL/*prof*/); /*Normal*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 0/*use_async*/, NULL/*prof*/); kernel_time = (comm_comp_ratio > 0) ? 
comm_comp_ratio*latency : kernel_time; if (my_rank == 0) fprintf(stdout, "\t %10d", kernel_time); if (my_rank == 0) fprintf(stdout, "\t %8.2lf", latency); hipProfilerStart(); /*Normal + Kernel*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, kernel_time, 0/*use_async*/, &prof_normal/*prof*/); if (my_rank == 0) fprintf(stdout, "\t %8.2lf", latency); hipProfilerStop(); /*Async*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 1/*use_async*/, NULL/*prof*/); if (my_rank == 0) fprintf(stdout, "\t %8.2lf", latency); hipProfilerStart(); /*Async + Kernel*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, kernel_time, 1/*use_async*/, &prof_async/*prof*/); if (my_rank == 0) fprintf(stdout, "\t %8.2lf", latency); hipProfilerStop(); latency = sr_exchange_MPI(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 0/*use_async*/, NULL/*prof*/); if (my_rank == 0) fprintf(stdout, "\t %8.2lf", latency); hipProfilerStop(); latency = sr_exchange_MPI(MPI_COMM_WORLD, size, iter_count, validate, kernel_time, 0/*use_async*/, &prof_mpi/*prof*/); if (my_rank == 0) fprintf(stdout, "\t %8.2lf \n", latency); hipProfilerStop(); if (my_rank == 0 && validate) fprintf(stdout, "SendRecv test passed validation with message size: %d \n", size); if (my_rank == 0) { prof_dump(&prof_normal); prof_dump(&prof_async); prof_dump(&prof_mpi); } mp_deregister(&sreg); mp_deregister(&rreg); if(use_gpu_buffers == 0) { CUDA_CHECK(hipHostFree(sbuf_d)); CUDA_CHECK(hipHostFree(rbuf_d)); } else { CUDA_CHECK(hipFree(sbuf_d)); CUDA_CHECK(hipFree(rbuf_d)); } free(buf); } CUDA_CHECK(hipStreamDestroy(stream)); free(sreq); free(rreq); free(sreq_mpi); free(rreq_mpi); mp_finalize (); MPI_Barrier(MPI_COMM_WORLD); MPI_Finalize(); return 0; }
b6ac28dff197f251e2edadd7bad6ae5e8e9422d4.cu
/**** * Copyright (c) 2011-2018, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ****/ #include "mpi.h" #include "mp.h" #include "cuda.h" #include "cuda_runtime.h" #include <string.h> #include <stdio.h> #include "assert.h" #include <limits.h> #include <sys/types.h> #include <unistd.h> #include "prof.h" #include "cuda_profiler_api.h" #define CUDA_CHECK(stmt) \ do { \ cudaError_t result = (stmt); \ if (cudaSuccess != result) { \ fprintf(stderr, "[%s:%d] cuda failed with %s \n", \ __FILE__, __LINE__,cudaGetErrorString(result));\ exit(-1); \ } \ assert(cudaSuccess == result); \ } while (0) #define MP_CHECK(stmt) \ do { \ int result = (stmt); \ if (0 != result) { \ fprintf(stderr, "[%s:%d] mp call failed \n", \ __FILE__, __LINE__); \ MPI_Abort(MPI_COMM_WORLD, -1); \ } \ assert(0 == result); \ } while (0) int enable_debug_prints = 0; #define mp_dbg_msg(FMT, ARGS...) 
do \ { \ if (enable_debug_prints) { \ fprintf(stderr, "[%d] [%d] MP DBG %s() " FMT, getpid(), my_rank, __FUNCTION__ , ## ARGS); \ fflush(stderr); \ } \ } while(0) #define MAX_SIZE 4096 //128*1024 //64*1024 #define ITER_COUNT_SMALL (2*1024) #define ITER_COUNT_LARGE 256 struct prof prof_normal; struct prof prof_async; struct prof prof_mpi; int prof_start = 0; int prof_idx = 0; int comm_size, my_rank, peer; int steps_per_batch = 16, batches_inflight = 4; int enable_async = 1; __device__ int counter; __device__ int clockrate; __global__ void dummy_kernel(double time) { long long int start, stop; double usec; start = clock64(); do { stop = clock64(); usec = ((double)(stop-start)*1000)/((double)clockrate); counter = usec; } while(usec < time); } int use_gpu_buffers=0; /*application and pack buffers*/ void *buf, *sbuf_d, *rbuf_d; int req_max_inflight = 0, rreq_max_inflight = 0, prepost_depth = 0; cudaStream_t stream; size_t buf_size; int gpu_id = -1; int wait_key = 0; /*mp specific objects*/ mp_request_t *sreq; mp_request_t *rreq; MPI_Request * sreq_mpi; MPI_Request * rreq_mpi; mp_reg_t sreg, rreg; double time_start, time_stop; double sr_exchange (MPI_Comm comm, int size, int iter_count, int validate, double kernel_time, int use_async, struct prof *prof) { int i, j; double latency; double time_start, time_stop; int req_idx = 0, rreq_idx = 0, complete_req_idx = 0, complete_rreq_idx = 0; int req_inflight = 0, rreq_inflight = 0; mp_dbg_msg("size=%d iter_count=%d kernel_time=%f use_async=%d\n", size, iter_count, kernel_time, use_async); if (validate) { mp_dbg_msg("initializing the buffer \n"); if(use_gpu_buffers == 0) { memset(sbuf_d, (size + 1)%CHAR_MAX, buf_size); memset(rbuf_d, 0, buf_size); } else { CUDA_CHECK(cudaMemset(sbuf_d, (size + 1)%CHAR_MAX, buf_size)); CUDA_CHECK(cudaMemset(rbuf_d, 0, buf_size)); } CUDA_CHECK(cudaDeviceSynchronize()); } time_start = MPI_Wtime(); for (j=0; j<prepost_depth; j++) { mp_dbg_msg("posted recv request: %d \n", rreq_idx); MP_CHECK(mp_irecv ((void *)((uintptr_t)rbuf_d + size*j), size, peer, &rreg, &rreq[rreq_idx])); rreq_idx = (rreq_idx + 1)%rreq_max_inflight; rreq_inflight++; } uint32_t wait_flag; if (use_async && wait_key && (1 == my_rank)) { fprintf(stdout, "[%d] waiting enabled, inserting a wait32_on_stream\n", my_rank); fflush(stdout); ACCESS_ONCE(wait_flag) = 0; MP_CHECK(mp_wait32_on_stream(&wait_flag, 1, MP_WAIT_GEQ, stream)); } prof_idx = 0; for (j = 0; j < iter_count; j++) { mp_dbg_msg("iteration :%d \n", j); if (!my_rank) { if (prof) PROF(prof, prof_idx++); req_idx = j%rreq_max_inflight; if (!use_async) { MP_CHECK(mp_wait(&rreq[req_idx])); } else { MP_CHECK(mp_wait_on_stream(&rreq[req_idx], stream)); } if (prof) PROF(prof, prof_idx++); if (kernel_time > 0) { dummy_kernel <<<1, 1, 0, stream>>> (kernel_time); if (!use_async) { CUDA_CHECK(cudaStreamSynchronize(stream)); } } if (prof) PROF(prof, prof_idx++); req_idx = j%req_max_inflight; if (!use_async) { MP_CHECK(mp_isend ((void *)((uintptr_t)sbuf_d + size*j), size, peer, &sreg, &sreq[req_idx])); } else { MP_CHECK(mp_isend_on_stream ((void *)((uintptr_t)sbuf_d + size*j), size, peer, &sreg, &sreq[req_idx], stream)); mp_dbg_msg("posted send request: %d \n", req_idx); } } else { req_idx = j%req_max_inflight; if (!use_async) { MP_CHECK(mp_isend ((void *)((uintptr_t)sbuf_d + size*j), size, peer, &sreg, &sreq[req_idx])); } else { MP_CHECK(mp_isend_on_stream ((void *)((uintptr_t)sbuf_d + size*j), size, peer, &sreg, &sreq[req_idx], stream)); } mp_dbg_msg("posted send request: %d\n", req_idx); req_idx = 
j%rreq_max_inflight; if (!use_async) { MP_CHECK(mp_wait(&rreq[req_idx])); } else { MP_CHECK(mp_wait_on_stream(&rreq[req_idx], stream)); } if (kernel_time > 0) { dummy_kernel <<<1, 1, 0, stream>>> (kernel_time); if (!use_async) { CUDA_CHECK(cudaStreamSynchronize(stream)); } } } req_inflight++; mp_dbg_msg("requests inflight: %d \n", req_inflight); if (!my_rank && prof) PROF(prof, prof_idx++); if ((j + prepost_depth) < iter_count) { mp_dbg_msg("posted recv request: %d\n", rreq_idx); int buf_idx = (j + prepost_depth); MP_CHECK(mp_irecv ((void *)((uintptr_t)rbuf_d + size*buf_idx), size, peer, &rreg, &rreq[rreq_idx])); rreq_idx = (rreq_idx + 1)%rreq_max_inflight; rreq_inflight++; } if (!my_rank && prof) PROF(prof, prof_idx++); if (use_async && wait_key && (1 == my_rank)) { fprintf(stdout, "[%d] sleeping 15s\n", my_rank); sleep(15); ACCESS_ONCE(wait_flag) = 1; // disabling wait_key for subsequent calls wait_key = 0; fprintf(stdout, "[%d] sleeping 20us to let previous batches to run\n", my_rank); usleep(20); fprintf(stdout, "[%d] resuming...\n", my_rank); fflush(stdout); } /*synchronize on oldest batch*/ if (req_inflight == req_max_inflight) { if (use_async) { for (i=0; i<steps_per_batch; i++) { mp_dbg_msg("waiting on recv request: %d\n", complete_rreq_idx); MP_CHECK(mp_wait(&rreq[complete_rreq_idx])); mp_dbg_msg("completed recv request: %d\n", complete_rreq_idx); complete_rreq_idx = (complete_rreq_idx + 1)%rreq_max_inflight; rreq_inflight--; } mp_dbg_msg("after waiting on recv, rreq_inflight: %d \n", rreq_inflight); } for (i=0; i<steps_per_batch; i++) { mp_dbg_msg("waiting on send request: %d \n", complete_req_idx); MP_CHECK(mp_wait(&sreq[complete_req_idx])); mp_dbg_msg("completed send request: %d \n", complete_req_idx); complete_req_idx = (complete_req_idx + 1)%req_max_inflight; req_inflight--; } mp_dbg_msg("after waiting on send, req_inflight: %d \n", req_inflight); } if (j == (iter_count - 1)) { /*ideally, there should be validation here*/ if (use_async) { while (rreq_inflight > 0) { mp_wait(&rreq[complete_rreq_idx]); mp_dbg_msg("completed recv request: %d \n", complete_rreq_idx); complete_rreq_idx = (complete_rreq_idx + 1)%rreq_max_inflight; rreq_inflight--; } } while (req_inflight > 0) { mp_wait(&sreq[complete_req_idx]); mp_dbg_msg("completed send request: %d \n", complete_req_idx); complete_req_idx = (complete_req_idx + 1)%req_max_inflight; req_inflight--; } } if (!my_rank && prof) { PROF(prof, prof_idx++); prof_update(prof); prof_idx = 0; } } // TODO: move validate after timing if (validate) { if(use_gpu_buffers == 0) memcpy(buf, rbuf_d, buf_size); else CUDA_CHECK(cudaMemcpy((void *)((uintptr_t)buf), (void *)((uintptr_t)rbuf_d), buf_size, cudaMemcpyDefault)); //CUDA_CHECK(cudaDeviceSynchronize()); char *value = (char *)((uintptr_t)buf); for (i=0; i<buf_size; i++) { if (value[i] != (size + 1)%CHAR_MAX) { mp_dbg_msg("MPI validation check failed index: %d expected: %d actual: %d \n", i, (size + 1)%CHAR_MAX, value[i]); exit(-1); } } } MPI_Barrier(comm); time_stop = MPI_Wtime(); latency = (((time_stop - time_start)*1e6)/(iter_count*2)); CUDA_CHECK(cudaDeviceSynchronize()); return latency; } double sr_exchange_MPI (MPI_Comm comm, int size, int iter_count, int validate, double kernel_time, int use_async, struct prof *prof) { int i, j; double latency; double time_start, time_stop; int req_idx = 0, rreq_idx = 0, complete_req_idx = 0, complete_rreq_idx = 0; int req_inflight = 0, rreq_inflight = 0; use_async=0; //always! 
mp_dbg_msg("size=%d iter_count=%d kernel_time=%f use_async=%d\n", size, iter_count, kernel_time, use_async); if (validate) { mp_dbg_msg("initializing the buffer \n"); if(use_gpu_buffers == 0) { memset(sbuf_d, (size + 1)%CHAR_MAX, buf_size); memset(rbuf_d, 0, buf_size); } else { CUDA_CHECK(cudaMemset(sbuf_d, (size + 1)%CHAR_MAX, buf_size)); CUDA_CHECK(cudaMemset(rbuf_d, 0, buf_size)); } CUDA_CHECK(cudaDeviceSynchronize()); } time_start = MPI_Wtime(); for (j=0; j<prepost_depth; j++) { mp_dbg_msg("posted recv request: %d size=%d, j=%d\n", rreq_idx, size, j); MPI_Irecv((void *)((uintptr_t)rbuf_d + size*j), size, MPI_CHAR, peer, my_rank, comm, &rreq_mpi[rreq_idx]); rreq_idx = (rreq_idx + 1)%rreq_max_inflight; rreq_inflight++; } prof_idx = 0; for (j = 0; j < iter_count; j++) { mp_dbg_msg("iteration :%d \n", j); if (!my_rank) { if (prof) PROF(prof, prof_idx++); req_idx = j%rreq_max_inflight; MPI_Wait(&rreq_mpi[req_idx], NULL); if (prof) PROF(prof, prof_idx++); if (kernel_time > 0) { dummy_kernel <<<1, 1, 0, stream>>> (kernel_time); CUDA_CHECK(cudaStreamSynchronize(stream)); } if (prof) PROF(prof, prof_idx++); req_idx = j%req_max_inflight; MPI_Isend((void *)((uintptr_t)sbuf_d + size*j), size, MPI_CHAR, peer, peer, comm, &sreq_mpi[req_idx]); } else { req_idx = j%req_max_inflight; MPI_Isend((void *)((uintptr_t)sbuf_d + size*j), size, MPI_CHAR, peer, peer, comm, &sreq_mpi[req_idx]); mp_dbg_msg("posted send request: %d\n", req_idx); req_idx = j%rreq_max_inflight; MPI_Wait(&rreq_mpi[req_idx], NULL); if (kernel_time > 0) { dummy_kernel <<<1, 1, 0, stream>>> (kernel_time); CUDA_CHECK(cudaStreamSynchronize(stream)); } } req_inflight++; mp_dbg_msg("requests inflight: %d \n", req_inflight); if (my_rank == 0 && prof) PROF(prof, prof_idx++); if ((j + prepost_depth) < iter_count) { mp_dbg_msg("posted recv request: %d\n", rreq_idx); int buf_idx = (j + prepost_depth); MPI_Irecv((void *)((uintptr_t)rbuf_d + size*buf_idx), size, MPI_CHAR, peer, my_rank, comm, &rreq_mpi[rreq_idx]); rreq_idx = (rreq_idx + 1)%rreq_max_inflight; rreq_inflight++; } if (my_rank == 0 && prof) PROF(prof, prof_idx++); /*synchronize on oldest batch*/ if (req_inflight == req_max_inflight) { for (i=0; i<steps_per_batch; i++) { mp_dbg_msg("waiting on send request: %d \n", complete_req_idx); MPI_Wait(&sreq_mpi[complete_req_idx], NULL); mp_dbg_msg("completed send request: %d \n", complete_req_idx); complete_req_idx = (complete_req_idx + 1)%req_max_inflight; req_inflight--; } mp_dbg_msg("after waiting on send, req_inflight: %d \n", req_inflight); } if (j == (iter_count - 1)) { /*ideally, there should be validation here*/ while (req_inflight > 0) { MPI_Wait(&sreq_mpi[complete_req_idx], NULL); mp_dbg_msg("completed send request: %d \n", complete_req_idx); complete_req_idx = (complete_req_idx + 1)%req_max_inflight; req_inflight--; } } if (my_rank == 0 && prof) { PROF(prof, prof_idx++); prof_update(prof); prof_idx = 0; } } // TODO: move validate after timing if (validate) { if(use_gpu_buffers == 0) memcpy(buf, rbuf_d, buf_size); else CUDA_CHECK(cudaMemcpy((void *)((uintptr_t)buf), (void *)((uintptr_t)rbuf_d), buf_size, cudaMemcpyDefault)); //CUDA_CHECK(cudaDeviceSynchronize()); char *value = (char *)((uintptr_t)buf); for (i=0; i<buf_size; i++) { if (value[i] != (size + 1)%CHAR_MAX) { mp_dbg_msg("MPI validation check failed index: %d expected: %d actual: %d \n", i, (size + 1)%CHAR_MAX, value[i]); exit(-1); } } } MPI_Barrier(comm); time_stop = MPI_Wtime(); latency = (((time_stop - time_start)*1e6)/(iter_count*2)); CUDA_CHECK(cudaDeviceSynchronize()); 
return latency; } int main (int argc, char *argv[]) { int iter_count = 0, size = 0, dev_count = 0, local_rank = 0, dev_id = 0; int kernel_time = 20; int comm_comp_ratio = 0; int validate = 0; int max_size = MAX_SIZE; int i,j; char *value = getenv("ENABLE_VALIDATION"); if (value != NULL) { validate = atoi(value); } value = getenv("ENABLE_DEBUG_MSG"); if (value != NULL) { enable_debug_prints = atoi(value); } value = getenv("KERNEL_TIME"); if (value != NULL) { kernel_time = atoi(value); } value = getenv("COMM_COMP_RATIO"); if (value != NULL) { comm_comp_ratio = atoi(value); } size = 1; value = getenv("SIZE"); if (value != NULL) { size = atoi(value); } value = getenv("MAX_SIZE"); if (value != NULL) { max_size = atoi(value); } int event_async = 0; value = getenv("MP_EVENT_ASYNC"); if (value != NULL) { event_async = atoi(value); } value = getenv("USE_GPU_BUFFERS"); if (value != NULL) { use_gpu_buffers = atoi(value); } printf("use_gpu_buffers=%d\n", use_gpu_buffers); while(1) { int c; c = getopt(argc, argv, "d:W:"); if (c == -1) break; switch(c) { case 'd': gpu_id = strtol(optarg, NULL, 0); break; case 'W': wait_key = strtol(optarg, NULL, 0); break; default: printf("ERROR: invalid option\n"); exit(EXIT_FAILURE); } } MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &comm_size); MPI_Comm_rank(MPI_COMM_WORLD, &my_rank); if (comm_size != 2) { fprintf(stderr, "this test requires exactly two processes \n"); exit(-1); } CUDA_CHECK(cudaGetDeviceCount(&dev_count)); if (dev_count <= 0) { fprintf(stderr, "no CUDA devices found \n"); exit(-1); } if (gpu_id >= 0) { local_rank = gpu_id; } else if (getenv("USE_GPU")) { local_rank = atoi(getenv("USE_GPU")); } else if (getenv("MV2_COMM_WORLD_LOCAL_RANK") != NULL) { local_rank = atoi(getenv("MV2_COMM_WORLD_LOCAL_RANK")); } else if (getenv("OMPI_COMM_WORLD_LOCAL_RANK") != NULL) { local_rank = atoi(getenv("OMPI_COMM_WORLD_LOCAL_RANK")); } dev_id = local_rank%dev_count; fprintf(stdout, "[%d] local_rank: %d dev_count: %d using GPU device: %d\n", my_rank, local_rank, dev_count, dev_id); fprintf(stdout, "[%d] validate=%d event_async=%d, gpu buffers: %d\n", my_rank, validate, event_async, use_gpu_buffers); CUDA_CHECK(cudaSetDevice(dev_id)); CUDA_CHECK(cudaFree(0)); cudaDeviceProp prop; CUDA_CHECK(cudaGetDeviceProperties(&prop, dev_id)); CUDA_CHECK(cudaMemcpyToSymbol(clockrate, (void *)&prop.clockRate, sizeof(int), 0, cudaMemcpyHostToDevice)); fprintf(stdout, "[%d] GPU name=%s\n", my_rank, prop.name); peer = !my_rank; MP_CHECK(mp_init (MPI_COMM_WORLD, &peer, 1, MP_INIT_DEFAULT, dev_id)); iter_count = ITER_COUNT_SMALL; if (my_rank == 0) { fprintf(stdout, "steps_per_batch: %d batches_inflight: %d \n", steps_per_batch, batches_inflight); fprintf(stdout, "WARNING: dumping half round-trip latency!!!\n"); } prepost_depth = (steps_per_batch < iter_count) ? 
steps_per_batch : iter_count; req_max_inflight = steps_per_batch*batches_inflight; rreq_max_inflight = (steps_per_batch*batches_inflight + prepost_depth); sreq = (mp_request_t *) malloc(req_max_inflight*sizeof(mp_request_t)); rreq = (mp_request_t *) malloc(rreq_max_inflight*sizeof(mp_request_t)); sreq_mpi = (MPI_Request *) malloc(req_max_inflight*sizeof(MPI_Request)); rreq_mpi = (MPI_Request *) malloc(rreq_max_inflight*sizeof(MPI_Request)); CUDA_CHECK(cudaStreamCreateWithFlags(&stream, cudaStreamNonBlocking)); if (my_rank == 0) fprintf(stdout, "%10s\t %10s\t %10s\t %10s %10s\t %10s %10s\t %10s\n", "Size", "KernelTime", "No-async", "No-async+Kern", "Async", "Async+Kern", "MPI", "MPI+Kern"); if (size != 1) size = max_size = size; for (; size<=max_size; size*=2) { double latency; const char *tags = "kernel|send|recv|prepost|wait|"; if (size > 1024) { iter_count = ITER_COUNT_LARGE; } buf_size = size*iter_count; buf = malloc (buf_size); memset(buf, 0, buf_size); if(use_gpu_buffers == 0) { CUDA_CHECK(cudaMallocHost((void **)&sbuf_d, buf_size)); memset(sbuf_d, 0, buf_size); CUDA_CHECK(cudaMallocHost((void **)&rbuf_d, buf_size)); memset(rbuf_d, 0, buf_size); } else { CUDA_CHECK(cudaMalloc((void **)&sbuf_d, buf_size)); CUDA_CHECK(cudaMemset(sbuf_d, 0, buf_size)); CUDA_CHECK(cudaMalloc((void **)&rbuf_d, buf_size)); CUDA_CHECK(cudaMemset(rbuf_d, 0, buf_size)); } MP_CHECK(mp_register(sbuf_d, buf_size, &sreg)); MP_CHECK(mp_register(rbuf_d, buf_size, &rreg)); if (my_rank == 0) { if (prof_init(&prof_normal, 1000, 1000, "1us", 100, 1, tags)) { fprintf(stderr, "error in prof_init init.\n"); exit(-1); } if (prof_init(&prof_async, 1000, 1000, "1us", 100, 1, tags)) { fprintf(stderr, "error in prof_init init.\n"); exit(-1); } if (prof_init(&prof_mpi, 1000, 1000, "1us", 100, 1, tags)) { fprintf(stderr, "error in prof_init init.\n"); exit(-1); } prof_start = 1; } if (my_rank == 0) fprintf(stdout, "%10d", size); /*warmup*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 1/*use_async*/, NULL/*prof*/); latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 1/*kernel_time*/, 1/*use_async*/, NULL/*prof*/); latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 0/*use_async*/, NULL/*prof*/); latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 1/*kernel_time*/, 0/*use_async*/, NULL/*prof*/); latency = sr_exchange_MPI(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 0/*use_async*/, NULL/*prof*/); latency = sr_exchange_MPI(MPI_COMM_WORLD, size, iter_count, validate, 1/*kernel_time*/, 0/*use_async*/, NULL/*prof*/); /*Normal*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 0/*use_async*/, NULL/*prof*/); kernel_time = (comm_comp_ratio > 0) ? 
comm_comp_ratio*latency : kernel_time; if (my_rank == 0) fprintf(stdout, "\t %10d", kernel_time); if (my_rank == 0) fprintf(stdout, "\t %8.2lf", latency); cudaProfilerStart(); /*Normal + Kernel*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, kernel_time, 0/*use_async*/, &prof_normal/*prof*/); if (my_rank == 0) fprintf(stdout, "\t %8.2lf", latency); cudaProfilerStop(); /*Async*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 1/*use_async*/, NULL/*prof*/); if (my_rank == 0) fprintf(stdout, "\t %8.2lf", latency); cudaProfilerStart(); /*Async + Kernel*/ latency = sr_exchange(MPI_COMM_WORLD, size, iter_count, validate, kernel_time, 1/*use_async*/, &prof_async/*prof*/); if (my_rank == 0) fprintf(stdout, "\t %8.2lf", latency); cudaProfilerStop(); latency = sr_exchange_MPI(MPI_COMM_WORLD, size, iter_count, validate, 0/*kernel_time*/, 0/*use_async*/, NULL/*prof*/); if (my_rank == 0) fprintf(stdout, "\t %8.2lf", latency); cudaProfilerStop(); latency = sr_exchange_MPI(MPI_COMM_WORLD, size, iter_count, validate, kernel_time, 0/*use_async*/, &prof_mpi/*prof*/); if (my_rank == 0) fprintf(stdout, "\t %8.2lf \n", latency); cudaProfilerStop(); if (my_rank == 0 && validate) fprintf(stdout, "SendRecv test passed validation with message size: %d \n", size); if (my_rank == 0) { prof_dump(&prof_normal); prof_dump(&prof_async); prof_dump(&prof_mpi); } mp_deregister(&sreg); mp_deregister(&rreg); if(use_gpu_buffers == 0) { CUDA_CHECK(cudaFreeHost(sbuf_d)); CUDA_CHECK(cudaFreeHost(rbuf_d)); } else { CUDA_CHECK(cudaFree(sbuf_d)); CUDA_CHECK(cudaFree(rbuf_d)); } free(buf); } CUDA_CHECK(cudaStreamDestroy(stream)); free(sreq); free(rreq); free(sreq_mpi); free(rreq_mpi); mp_finalize (); MPI_Barrier(MPI_COMM_WORLD); MPI_Finalize(); return 0; }
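/*
 * A minimal, self-contained sketch of the clock64() busy-wait technique that
 * dummy_kernel in the benchmark above relies on: the SM clock rate
 * (cudaDeviceProp::clockRate, in kHz) is copied into a __device__ symbol, and the
 * kernel spins until the requested number of microseconds has elapsed
 * (microseconds = ticks * 1000 / clockRate, the same formula used above).
 * The names d_clockrate_khz, d_sink and spin_kernel are invented for this sketch;
 * the measured time is approximate and depends on the GPU.
 */
#include <cstdio>
#include <cuda_runtime.h>

__device__ int d_clockrate_khz;  // SM clock in kHz, filled from cudaDeviceProp::clockRate
__device__ int d_sink;           // written inside the loop so it is not optimized away

__global__ void spin_kernel(double usec_target)
{
    long long start = clock64();
    double usec = 0.0;
    do {
        long long now = clock64();
        // ticks / clockRate(kHz) = milliseconds; * 1000 gives microseconds
        usec = (double)(now - start) * 1000.0 / (double)d_clockrate_khz;
        d_sink = (int)usec;
    } while (usec < usec_target);
}

int main()
{
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, 0);
    cudaMemcpyToSymbol(d_clockrate_khz, &prop.clockRate, sizeof(int));

    cudaEvent_t t0, t1;
    cudaEventCreate(&t0);
    cudaEventCreate(&t1);
    cudaEventRecord(t0);
    spin_kernel<<<1, 1>>>(50.0);          // ask for roughly 50 us of GPU busy time
    cudaEventRecord(t1);
    cudaEventSynchronize(t1);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, t0, t1);
    printf("requested ~50 us, measured %.1f us\n", ms * 1000.0f);

    cudaEventDestroy(t0);
    cudaEventDestroy(t1);
    return 0;
}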
7566e4adfbaea02a6a52a159d5349beb3827c0b9.hip
// !!! This is a file automatically generated by hipify!!! /** * @file dct_lee_cuda_kernel.cu * @author Yibo Lin * @date Oct 2018 */ //#include <stdexcept> //#include <algorithm> #include <cassert> #include <stdio.h> #include <math.h> #include <float.h> #include "hip/hip_runtime.h" // #include "dct_lee_cuda.h" #include "dct_lee_cuda_kernel.h" DREAMPLACE_BEGIN_NAMESPACE namespace lee { constexpr double PI = 3.14159265358979323846; /// Return true if a number is power of 2 template <typename T> inline bool isPowerOf2(T val) { return val && (val & (val - 1)) == 0; } template <typename T> inline void swap(T& x, T& y) { T tmp = x; x = y; y = tmp; } /// Precompute cosine values needed for N-point dct /// @param cos size N - 1 buffer on GPU, contains the result after function call /// @param N the length of target dct, must be power of 2 template <typename TValue> void precompute_dct_cos(TValue *cos, int N) { // The input length must be power of 2 if (! isPowerOf2<int>(N)) { printf("Input length is not power of 2.\n"); assert(0); } // create the array on host TValue* cos_host = new TValue [N]; int offset = 0; int halfLen = N / 2; while (halfLen) { TValue phaseStep = 0.5 * PI / halfLen; TValue phase = 0.5 * phaseStep; for (int i = 0; i < halfLen; ++i) { cos_host[offset + i] = 0.5 / std::cos(phase); phase += phaseStep; } offset += halfLen; halfLen /= 2; } // copy to GPU hipMemcpy(cos, cos_host, N*sizeof(TValue), hipMemcpyHostToDevice); delete [] cos_host; } /// Precompute cosine values needed for N-point idct /// @param cos size N - 1 buffer on GPU, contains the result after function call /// @param N the length of target idct, must be power of 2 template <typename TValue> void precompute_idct_cos(TValue *cos, int N) { // The input length must be power of 2 if (! isPowerOf2<int>(N)) { printf("Input length is not power of 2.\n"); assert(0); } // create the array on host TValue* cos_host = new TValue [N]; int offset = 0; int halfLen = 1; while(halfLen < N) { TValue phaseStep = 0.5 * PI / halfLen; TValue phase = 0.5 * phaseStep; for (int i = 0; i < halfLen; ++i) { cos_host[offset + i] = 0.5 / std::cos(phase); phase += phaseStep; } offset += halfLen; halfLen *= 2; } // copy to GPU hipMemcpy(cos, cos_host, N*sizeof(TValue), hipMemcpyHostToDevice); delete [] cos_host; } /// The implementation of fast Discrete Cosine Transform (DCT) algorithm and its inverse (IDCT) are Lee's algorithms /// Algorithm reference: A New Algorithm to Compute the Discrete Cosine Transform, by Byeong Gi Lee, 1984 /// /// Lee's algorithm has a recursive structure in nature. /// Here is a sample recursive implementation: https://www.nayuki.io/page/fast-discrete-cosine-transform-algorithms /// /// My implementation here is iterative, which is more efficient than the recursive version. 
/// Here is a sample iterative implementation: https://www.codeproject.com/Articles/151043/Iterative-Fast-1D-Forvard-DCT /// Compute y[k] = sum_n=0..N-1 (x[n] * cos((n + 0.5) * k * PI / N)), for k = 0..N-1 /// /// @param vec length M * N sequence to be transformed in last dimension /// @param out length M * N helping buffer, which is also the output /// @param buf length M * N helping buffer /// @param cos length N - 1, stores cosine values precomputed by function 'precompute_dct_cos' /// @param M length of dimension 0 of vec /// @param N length of dimension 1 of vec, must be power of 2 template <typename TValue> void dct(const TValue *vec, TValue *out, TValue* buf, const TValue *cos, int M, int N) { int block_count = 2048; int thread_count = 512; // The input length must be power of 2 if (! isPowerOf2<int>(N)) { printf("Input length is not power of 2.\n"); assert(0); } // Pointers point to the beginning indices of two adjacent iterations TValue *curr = buf; TValue *next = out; // 'temp' used to store date of two adjacent iterations // Copy 'vec' to the first N element in 'temp' hipMemcpy(curr, vec, M*N*sizeof(TValue), hipMemcpyDeviceToDevice); // Current bufferfly length and half length int len = N; int halfLen = len / 2; // Iteratively bi-partition sequences into sub-sequences int cosOffset = 0; while (halfLen) { hipLaunchKernelGGL(( computeDctForward), dim3(block_count), dim3(thread_count), 0, 0, curr, next, cos, M, N, len, halfLen, cosOffset); swap(curr, next); cosOffset += halfLen; len = halfLen; halfLen /= 2; } // Bottom-up form the final DCT solution // Note that the case len = 2 will do nothing, so we start from len = 4 len = 4; halfLen = 2; while (halfLen < N) { hipLaunchKernelGGL(( computeDctBackward), dim3(block_count), dim3(thread_count), 0, 0, curr, next, M, N, len, halfLen); swap(curr, next); halfLen = len; len *= 2; } // Populate the final results into 'out' if (curr != out) { hipMemcpy(out, curr, M*N*sizeof(TValue), hipMemcpyDeviceToDevice); } } /// Compute y[k] = 0.5 * x[0] + sum_n=1..N-1 (x[n] * cos(n * (k + 0.5) * PI / N)), for k = 0..N-1 /// @param vec length M * N sequence to be transformed /// @param out length M * N helping buffer, which is also the output /// @param buf length M * N helping buffer /// @param cos length N - 1, stores cosine values precomputed by function 'precompute_idct_cos' /// @param M length of dimension 0 of vec /// @param N length of dimension 1 of vec, must be power of 2 template <typename TValue> void idct(const TValue *vec, TValue *out, TValue *buf, const TValue *cos, int M, int N) { int block_count = 32; int thread_count = 1024; // The input length must be power of 2 if (! 
isPowerOf2<int>(N)) { printf("Input length is not power of 2.\n"); assert(0); } // Pointers point to the beginning indices of two adjacent iterations TValue *curr = buf; TValue *next = out; // This array is used to store date of two adjacent iterations // Copy 'vec' to the first N element in 'temp' hipMemcpy(curr, vec, M*N*sizeof(TValue), hipMemcpyDeviceToDevice); hipLaunchKernelGGL(( computeIdctScale0), dim3(block_count), dim3(thread_count), 0, 0, curr, M, N); // Current bufferfly length and half length int len = N; int halfLen = len / 2; // Iteratively bi-partition sequences into sub-sequences while (halfLen) { hipLaunchKernelGGL(( computeIdctForward), dim3(block_count), dim3(thread_count), 0, 0, curr, next, M, N, len, halfLen); swap(curr, next); len = halfLen; halfLen /= 2; } // Bottom-up form the final IDCT solution len = 2; halfLen = 1; int cosOffset = 0; while(halfLen < N) { hipLaunchKernelGGL(( ComputeIdctBackward), dim3(block_count), dim3(thread_count), 0, 0, curr, next, cos, M, N, len, halfLen, cosOffset); swap(curr, next); cosOffset += halfLen; halfLen = len; len *= 2; } // Populate the final results into 'out' if (curr != out) { hipMemcpy(out, curr, M*N*sizeof(TValue), hipMemcpyDeviceToDevice); } } } // End of namespace lee #define REGISTER_DCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(type) \ template void lee::precompute_dct_cos<type>(\ type* cos, \ int N \ ); REGISTER_DCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(float); REGISTER_DCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(double); #define REGISTER_IDCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(type) \ template void lee::precompute_idct_cos<type>(\ type* cos, \ int N \ ); REGISTER_IDCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(float); REGISTER_IDCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(double); #define REGISTER_DCT_KERNEL_LAUNCHER(type) \ template void lee::dct<type>(\ const type* vec, \ type* curr, \ type* next, \ const type* cos, \ int M, \ int N \ ); REGISTER_DCT_KERNEL_LAUNCHER(float); REGISTER_DCT_KERNEL_LAUNCHER(double); #define REGISTER_IDCT_KERNEL_LAUNCHER(type) \ template void lee::idct<type>(\ const type* vec, \ type* curr, \ type* next, \ const type* cos, \ int M, \ int N \ ); REGISTER_IDCT_KERNEL_LAUNCHER(float); REGISTER_IDCT_KERNEL_LAUNCHER(double); DREAMPLACE_END_NAMESPACE
7566e4adfbaea02a6a52a159d5349beb3827c0b9.cu
/** * @file dct_lee_cuda_kernel.cu * @author Yibo Lin * @date Oct 2018 */ //#include <stdexcept> //#include <algorithm> #include <cassert> #include <stdio.h> #include <math.h> #include <float.h> #include "cuda_runtime.h" // #include "dct_lee_cuda.h" #include "dct_lee_cuda_kernel.h" DREAMPLACE_BEGIN_NAMESPACE namespace lee { constexpr double PI = 3.14159265358979323846; /// Return true if a number is power of 2 template <typename T> inline bool isPowerOf2(T val) { return val && (val & (val - 1)) == 0; } template <typename T> inline void swap(T& x, T& y) { T tmp = x; x = y; y = tmp; } /// Precompute cosine values needed for N-point dct /// @param cos size N - 1 buffer on GPU, contains the result after function call /// @param N the length of target dct, must be power of 2 template <typename TValue> void precompute_dct_cos(TValue *cos, int N) { // The input length must be power of 2 if (! isPowerOf2<int>(N)) { printf("Input length is not power of 2.\n"); assert(0); } // create the array on host TValue* cos_host = new TValue [N]; int offset = 0; int halfLen = N / 2; while (halfLen) { TValue phaseStep = 0.5 * PI / halfLen; TValue phase = 0.5 * phaseStep; for (int i = 0; i < halfLen; ++i) { cos_host[offset + i] = 0.5 / std::cos(phase); phase += phaseStep; } offset += halfLen; halfLen /= 2; } // copy to GPU cudaMemcpy(cos, cos_host, N*sizeof(TValue), cudaMemcpyHostToDevice); delete [] cos_host; } /// Precompute cosine values needed for N-point idct /// @param cos size N - 1 buffer on GPU, contains the result after function call /// @param N the length of target idct, must be power of 2 template <typename TValue> void precompute_idct_cos(TValue *cos, int N) { // The input length must be power of 2 if (! isPowerOf2<int>(N)) { printf("Input length is not power of 2.\n"); assert(0); } // create the array on host TValue* cos_host = new TValue [N]; int offset = 0; int halfLen = 1; while(halfLen < N) { TValue phaseStep = 0.5 * PI / halfLen; TValue phase = 0.5 * phaseStep; for (int i = 0; i < halfLen; ++i) { cos_host[offset + i] = 0.5 / std::cos(phase); phase += phaseStep; } offset += halfLen; halfLen *= 2; } // copy to GPU cudaMemcpy(cos, cos_host, N*sizeof(TValue), cudaMemcpyHostToDevice); delete [] cos_host; } /// The implementation of fast Discrete Cosine Transform (DCT) algorithm and its inverse (IDCT) are Lee's algorithms /// Algorithm reference: A New Algorithm to Compute the Discrete Cosine Transform, by Byeong Gi Lee, 1984 /// /// Lee's algorithm has a recursive structure in nature. /// Here is a sample recursive implementation: https://www.nayuki.io/page/fast-discrete-cosine-transform-algorithms /// /// My implementation here is iterative, which is more efficient than the recursive version. /// Here is a sample iterative implementation: https://www.codeproject.com/Articles/151043/Iterative-Fast-1D-Forvard-DCT /// Compute y[k] = sum_n=0..N-1 (x[n] * cos((n + 0.5) * k * PI / N)), for k = 0..N-1 /// /// @param vec length M * N sequence to be transformed in last dimension /// @param out length M * N helping buffer, which is also the output /// @param buf length M * N helping buffer /// @param cos length N - 1, stores cosine values precomputed by function 'precompute_dct_cos' /// @param M length of dimension 0 of vec /// @param N length of dimension 1 of vec, must be power of 2 template <typename TValue> void dct(const TValue *vec, TValue *out, TValue* buf, const TValue *cos, int M, int N) { int block_count = 2048; int thread_count = 512; // The input length must be power of 2 if (! 
isPowerOf2<int>(N)) { printf("Input length is not power of 2.\n"); assert(0); } // Pointers point to the beginning indices of two adjacent iterations TValue *curr = buf; TValue *next = out; // 'temp' used to store date of two adjacent iterations // Copy 'vec' to the first N element in 'temp' cudaMemcpy(curr, vec, M*N*sizeof(TValue), cudaMemcpyDeviceToDevice); // Current bufferfly length and half length int len = N; int halfLen = len / 2; // Iteratively bi-partition sequences into sub-sequences int cosOffset = 0; while (halfLen) { computeDctForward<<<block_count, thread_count>>>(curr, next, cos, M, N, len, halfLen, cosOffset); swap(curr, next); cosOffset += halfLen; len = halfLen; halfLen /= 2; } // Bottom-up form the final DCT solution // Note that the case len = 2 will do nothing, so we start from len = 4 len = 4; halfLen = 2; while (halfLen < N) { computeDctBackward<<<block_count, thread_count>>>(curr, next, M, N, len, halfLen); swap(curr, next); halfLen = len; len *= 2; } // Populate the final results into 'out' if (curr != out) { cudaMemcpy(out, curr, M*N*sizeof(TValue), cudaMemcpyDeviceToDevice); } } /// Compute y[k] = 0.5 * x[0] + sum_n=1..N-1 (x[n] * cos(n * (k + 0.5) * PI / N)), for k = 0..N-1 /// @param vec length M * N sequence to be transformed /// @param out length M * N helping buffer, which is also the output /// @param buf length M * N helping buffer /// @param cos length N - 1, stores cosine values precomputed by function 'precompute_idct_cos' /// @param M length of dimension 0 of vec /// @param N length of dimension 1 of vec, must be power of 2 template <typename TValue> void idct(const TValue *vec, TValue *out, TValue *buf, const TValue *cos, int M, int N) { int block_count = 32; int thread_count = 1024; // The input length must be power of 2 if (! 
isPowerOf2<int>(N)) { printf("Input length is not power of 2.\n"); assert(0); } // Pointers point to the beginning indices of two adjacent iterations TValue *curr = buf; TValue *next = out; // This array is used to store date of two adjacent iterations // Copy 'vec' to the first N element in 'temp' cudaMemcpy(curr, vec, M*N*sizeof(TValue), cudaMemcpyDeviceToDevice); computeIdctScale0<<<block_count, thread_count>>>(curr, M, N); // Current bufferfly length and half length int len = N; int halfLen = len / 2; // Iteratively bi-partition sequences into sub-sequences while (halfLen) { computeIdctForward<<<block_count, thread_count>>>(curr, next, M, N, len, halfLen); swap(curr, next); len = halfLen; halfLen /= 2; } // Bottom-up form the final IDCT solution len = 2; halfLen = 1; int cosOffset = 0; while(halfLen < N) { ComputeIdctBackward<<<block_count, thread_count>>>(curr, next, cos, M, N, len, halfLen, cosOffset); swap(curr, next); cosOffset += halfLen; halfLen = len; len *= 2; } // Populate the final results into 'out' if (curr != out) { cudaMemcpy(out, curr, M*N*sizeof(TValue), cudaMemcpyDeviceToDevice); } } } // End of namespace lee #define REGISTER_DCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(type) \ template void lee::precompute_dct_cos<type>(\ type* cos, \ int N \ ); REGISTER_DCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(float); REGISTER_DCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(double); #define REGISTER_IDCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(type) \ template void lee::precompute_idct_cos<type>(\ type* cos, \ int N \ ); REGISTER_IDCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(float); REGISTER_IDCT_PRECOMPUTE_COS_KERNEL_LAUNCHER(double); #define REGISTER_DCT_KERNEL_LAUNCHER(type) \ template void lee::dct<type>(\ const type* vec, \ type* curr, \ type* next, \ const type* cos, \ int M, \ int N \ ); REGISTER_DCT_KERNEL_LAUNCHER(float); REGISTER_DCT_KERNEL_LAUNCHER(double); #define REGISTER_IDCT_KERNEL_LAUNCHER(type) \ template void lee::idct<type>(\ const type* vec, \ type* curr, \ type* next, \ const type* cos, \ int M, \ int N \ ); REGISTER_IDCT_KERNEL_LAUNCHER(float); REGISTER_IDCT_KERNEL_LAUNCHER(double); DREAMPLACE_END_NAMESPACE
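/*
 * A brute-force reference for the DCT definition quoted in the comments above,
 * y[k] = sum_{n=0..N-1} x[n] * cos((n + 0.5) * k * PI / N), applied row-wise to an
 * M x N array. This is only a minimal O(N^2) sketch that one could diff (within
 * floating-point tolerance) against lee::dct for small power-of-two N; the kernel
 * name naive_dct_rows and the test sizes are invented for this sketch.
 */
#include <cstdio>
#include <cmath>
#include <cuda_runtime.h>

__global__ void naive_dct_rows(const float *in, float *out, int M, int N)
{
    int row = blockIdx.y;                            // one block row per input row
    int k   = blockIdx.x * blockDim.x + threadIdx.x; // output frequency index
    if (row >= M || k >= N) return;

    const double PI = 3.14159265358979323846;
    double acc = 0.0;
    for (int n = 0; n < N; ++n)
        acc += (double)in[row * N + n] * cos((n + 0.5) * k * PI / N);
    out[row * N + k] = (float)acc;
}

int main()
{
    const int M = 2, N = 8;                          // tiny sizes, N is a power of 2
    float h_in[M * N], h_out[M * N];
    for (int i = 0; i < M * N; ++i) h_in[i] = (float)(i % 5);

    float *d_in, *d_out;
    cudaMalloc(&d_in,  M * N * sizeof(float));
    cudaMalloc(&d_out, M * N * sizeof(float));
    cudaMemcpy(d_in, h_in, M * N * sizeof(float), cudaMemcpyHostToDevice);

    dim3 block(32, 1), grid((N + 31) / 32, M);
    naive_dct_rows<<<grid, block>>>(d_in, d_out, M, N);
    cudaMemcpy(h_out, d_out, M * N * sizeof(float), cudaMemcpyDeviceToHost);

    for (int k = 0; k < N; ++k) printf("%8.3f ", h_out[k]);  // first transformed row
    printf("\n");

    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}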
b087c4959c2999ef5fd2c6d76d74dace035daf0b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. #include "utils.h" #include <stdio.h> #include <device_launch_parameters.h> __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset // Get pixel position in original matrix (image) int row = blockIdx.y * blockDim.y + threadIdx.y; int column = blockIdx.x * blockDim.x + threadIdx.x; if (row < numRows && column < numCols){ // As Image is a 1D array we access r-th stride (row) // via "row * numCols" and then add the column to get equivalent // column in row or stride. uchar4 rgb_pixel = rgbaImage[row * numCols + column]; unsigned char gray_pixel = .299f * rgb_pixel.x + .587f * rgb_pixel.y + .114f * rgb_pixel.z; // We assign to correct column in correct stride on 1D gray image array. greyImage[row * numCols + column] = gray_pixel; } } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched // const dim3 blockSize(1, 1, 1); //TODO // const dim3 gridSize( 1, 1, 1); //TODO unsigned int block_h = 31; unsigned int block_w = 31; const dim3 blockSize(block_w, block_h, 1); unsigned int n_blocks_h = numRows/block_h; unsigned int n_blocks_w = numCols/block_w; const dim3 gridSize( n_blocks_w+1, n_blocks_h+1, 1); hipLaunchKernelGGL(( rgba_to_greyscale), dim3(gridSize), dim3(blockSize), 0, 0, d_rgbaImage, d_greyImage, numRows, numCols); hipDeviceSynchronize(); checkCudaErrors(hipGetLastError()); }
b087c4959c2999ef5fd2c6d76d74dace035daf0b.cu
// Homework 1 // Color to Greyscale Conversion //A common way to represent color images is known as RGBA - the color //is specified by how much Red, Grean and Blue is in it. //The 'A' stands for Alpha and is used for transparency, it will be //ignored in this homework. //Each channel Red, Blue, Green and Alpha is represented by one byte. //Since we are using one byte for each color there are 256 different //possible values for each color. This means we use 4 bytes per pixel. //Greyscale images are represented by a single intensity value per pixel //which is one byte in size. //To convert an image from color to grayscale one simple method is to //set the intensity to the average of the RGB channels. But we will //use a more sophisticated method that takes into account how the eye //perceives color and weights the channels unequally. //The eye responds most strongly to green followed by red and then blue. //The NTSC (National Television System Committee) recommends the following //formula for color to greyscale conversion: //I = .299f * R + .587f * G + .114f * B //Notice the trailing f's on the numbers which indicate that they are //single precision floating point constants and not double precision //constants. //You should fill in the kernel as well as set the block and grid sizes //so that the entire image is processed. #include "utils.h" #include <stdio.h> #include <device_launch_parameters.h> __global__ void rgba_to_greyscale(const uchar4* const rgbaImage, unsigned char* const greyImage, int numRows, int numCols) { //TODO //Fill in the kernel to convert from color to greyscale //the mapping from components of a uchar4 to RGBA is: // .x -> R ; .y -> G ; .z -> B ; .w -> A // //The output (greyImage) at each pixel should be the result of //applying the formula: output = .299f * R + .587f * G + .114f * B; //Note: We will be ignoring the alpha channel for this conversion //First create a mapping from the 2D block and grid locations //to an absolute 2D location in the image, then use that to //calculate a 1D offset // Get pixel position in original matrix (image) int row = blockIdx.y * blockDim.y + threadIdx.y; int column = blockIdx.x * blockDim.x + threadIdx.x; if (row < numRows && column < numCols){ // As Image is a 1D array we access r-th stride (row) // via "row * numCols" and then add the column to get equivalent // column in row or stride. uchar4 rgb_pixel = rgbaImage[row * numCols + column]; unsigned char gray_pixel = .299f * rgb_pixel.x + .587f * rgb_pixel.y + .114f * rgb_pixel.z; // We assign to correct column in correct stride on 1D gray image array. greyImage[row * numCols + column] = gray_pixel; } } void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage, unsigned char* const d_greyImage, size_t numRows, size_t numCols) { //You must fill in the correct sizes for the blockSize and gridSize //currently only one block with one thread is being launched // const dim3 blockSize(1, 1, 1); //TODO // const dim3 gridSize( 1, 1, 1); //TODO unsigned int block_h = 31; unsigned int block_w = 31; const dim3 blockSize(block_w, block_h, 1); unsigned int n_blocks_h = numRows/block_h; unsigned int n_blocks_w = numCols/block_w; const dim3 gridSize( n_blocks_w+1, n_blocks_h+1, 1); rgba_to_greyscale<<<gridSize, blockSize>>>(d_rgbaImage, d_greyImage, numRows, numCols); cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError()); }
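/*
 * A small sketch of an alternative launch configuration for the greyscale kernel
 * above. The homework code uses 31x31 blocks with numCols/31 + 1 and numRows/31 + 1
 * blocks, which always covers the image but is not a multiple of the 32-thread warp
 * size and can add an extra block even when the size divides evenly; 32x32 blocks
 * with ceiling division are the more common choice, and the in-kernel bounds check
 * (row < numRows && column < numCols) keeps either variant correct. The helper name
 * div_up and the 640x480 example size are invented for this sketch.
 */
#include <cstdio>
#include <cuda_runtime.h>

// Ceiling division: smallest number of blocks that covers n elements.
static inline unsigned int div_up(unsigned int n, unsigned int block)
{
    return (n + block - 1) / block;
}

int main()
{
    const unsigned int numRows = 480, numCols = 640;   // example image size
    const dim3 blockSize(32, 32, 1);
    const dim3 gridSize(div_up(numCols, blockSize.x), div_up(numRows, blockSize.y), 1);

    printf("grid %u x %u of %u x %u threads covers %u x %u pixels\n",
           gridSize.x, gridSize.y, blockSize.x, blockSize.y,
           gridSize.x * blockSize.x, gridSize.y * blockSize.y);

    // NTSC luma for one example pixel (R=200, G=100, B=50), using the formula above.
    float grey = .299f * 200 + .587f * 100 + .114f * 50;
    printf("example pixel -> grey %.1f\n", grey);
    return 0;
}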
0f52e456de410cec16d3423b32eb3f9b80267f51.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* FLUIDS v.3 - SPH Fluid Simulator for CPU and GPU Copyright (C) 2012-2013. Rama Hoetzlein, http://fluids3.com Attribute-ZLib license (* See additional part 4) This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. 4. Any published work based on this code must include public acknowledgement of the origin. This includes following when applicable: - Journal/Paper publications. Credited by reference to work in text & citation. - Public presentations. Credited in at least one slide. - Distributed Games/Apps. Credited as single line in game or app credit page. Retaining this additional license term is required in derivative works. Acknowledgement may be provided as: Publication version: 2012-2013, Hoetzlein, Rama C. Fluids v.3 - A Large-Scale, Open Source Fluid Simulator. Published online at: http://fluids3.com Single line (slides or app credits): GPU Fluids: Rama C. Hoetzlein (Fluids v3 2013) Notes on Clause 4: The intent of this clause is public attribution for this contribution, not code use restriction. Both commerical and open source projects may redistribute and reuse without code release. However, clause #1 of ZLib indicates that "you must not claim that you wrote the original software". Clause #4 makes this more specific by requiring public acknowledgement to be extended to derivative licenses. */ // for syntax highlight #include "device_launch_parameters.h" #define CUDA_KERNEL #include "fluid_system_kern.cuh" #include "cutil_math.h" #include "radixsort.cu" // Build in RadixSort __constant__ FluidParams simData; __constant__ uint gridActive; __global__ void insertParticles(bufList buf, int pnum) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if (i >= pnum) return; register float3 gridMin = simData.gridMin; register float3 gridDelta = simData.gridDelta; register int3 gridRes = simData.gridRes; register int3 gridScan = simData.gridScanMax; register float poff = simData.psmoothradius / simData.psimscale; register int gs; register float3 gcf; register int3 gc; gcf = (buf.mpos[i] - gridMin) * gridDelta; // M: the relative position in simulation gc = make_int3(int(gcf.x), int(gcf.y), int(gcf.z)); gs = (gc.y * gridRes.z + gc.z)*gridRes.x + gc.x; // M: turn 3D to 1D if (gc.x >= 1 && gc.x <= gridScan.x && gc.y >= 1 && gc.y <= gridScan.y && gc.z >= 1 && gc.z <= gridScan.z) { // M: for all the particles in the simulation domain buf.mgcell[i] = gs; // Grid cell insert. M: insert the grid cell index into the property of this particle buf.mgndx[i] = atomicAdd(&buf.mgridcnt[gs], 1); // Grid counts. M: 1. record the number of particles in this grid cell 2. 
record the rank of the particle in the cell // M: useless code //gcf = (-make_float3(poff,poff,poff) + buf.mpos[i] - gridMin) * gridDelta; //gc = make_int3( int(gcf.x), int(gcf.y), int(gcf.z) ); //gs = ( gc.y * gridRes.z + gc.z)*gridRes.x + gc.x; } else { buf.mgcell[i] = GRID_UNDEF; // M: assert the particle "grid undifined" which are out of the domain } } // it seems that this part did not work // the mutex variable //__device__ int g_mutex = 0; // M: default 0, seems it doesn't matter a lot // GPU simple synchronization function /*__device__ void __gpu_sync(int goalVal) { __threadfence (); // only thread 0 is used for synchronization if (threadIdx.x == 0) atomicAdd(&g_mutex, 1); // only when all blocks add 1 to g_mutex will // g_mutex equal to goalVal while(g_mutex < goalVal) { // infinite loop until g_mutx = goalVal } if ( blockIdx.x == 0 && threadIdx.x == 0 ) g_mutex = 0; __syncthreads(); }*/ // countingSortInPlace -- GPU_SYNC DOES NOT WORK /*uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) { __gpu_sync ( 2 ); return; } register float3 ipos, ivel, iveleval, iforce; register float ipress, idens; register int icell, indx, iclr; icell = buf.mgcell [ i ]; indx = buf.mgndx [ i ]; int sort_ndx = buf.mgridoff[ icell ] + indx; // global_ndx = grid_cell_offet + particle_offset if ( icell == GRID_UNDEF ) { __gpu_sync ( 2 ); return; } ipos = buf.mpos [ i ]; ivel = buf.mvel [ i ]; iveleval = buf.mveleval [ i ]; iforce = buf.mforce [ i ]; ipress = buf.mpress [ i ]; idens = buf.mdensity [ i ]; iclr = buf.mclr [ i ]; __gpu_sync ( 2 ) ; //threadfence(); // make sure every thread in all blocks has their data buf.mpos [ sort_ndx ] = ipos; buf.mvel [ sort_ndx ] = ivel; buf.mveleval [ sort_ndx ] = iveleval; buf.mforce [ sort_ndx ] = iforce; buf.mpress [ sort_ndx ] = ipress; buf.mdensity [ sort_ndx ] = idens; buf.mclr [ sort_ndx ] = iclr;*/ // Counting Sort - Index __global__ void countingSortIndex(bufList buf, int pnum) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if (i >= pnum) return; uint icell = buf.mgcell[i]; // M: figure out which grid cell the particle exactly in uint indx = buf.mgndx[i]; // M: figure out which the particle is in its cell int sort_ndx = buf.mgridoff[icell] + indx; // global_ndx = grid_cell_offet + particle_offset if (icell != GRID_UNDEF) { buf.mgrid[sort_ndx] = i; // index sort, grid refers to original particle order } } // Counting Sort - Full (deep copy) __global__ void countingSortFull(bufList buf, int pnum) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if (i >= pnum) return; // Copy particle from original, unsorted buffer (msortbuf), // into sorted memory location on device (mpos/mvel) uint icell = *(uint*)(buf.msortbuf + pnum*BUF_GCELL + i * sizeof(uint)); uint indx = *(uint*)(buf.msortbuf + pnum*BUF_GNDX + i * sizeof(uint)); if (icell != GRID_UNDEF) { // Determine the sort_ndx, location of the particle after sort int sort_ndx = buf.mgridoff[icell] + indx; // global_ndx = grid_cell_offet + particle_offset // Find the original particle data, offset into unsorted buffer (msortbuf) char* bpos = buf.msortbuf + i * sizeof(float3); // Transfer data to sort location buf.mgrid[sort_ndx] = sort_ndx; // full sort, grid indexing becomes identity buf.mpos[sort_ndx] = *(float3*)(bpos); buf.mvel[sort_ndx] = *(float3*)(bpos + pnum*BUF_VEL); buf.mveleval[sort_ndx] = *(float3*)(bpos + pnum*BUF_VELEVAL); buf.mforce[sort_ndx] = *(float3*)(bpos + pnum*BUF_FORCE); buf.mpress[sort_ndx] = 
*(float*)(buf.msortbuf + pnum*BUF_PRESS + i * sizeof(float)); buf.mdensity[sort_ndx] = *(float*)(buf.msortbuf + pnum*BUF_DENS + i * sizeof(float)); buf.mclr[sort_ndx] = *(uint*)(buf.msortbuf + pnum*BUF_CLR + i * sizeof(uint)); // ((uint) 255)<<24; -- dark matter buf.mgcell[sort_ndx] = icell; buf.mgndx[sort_ndx] = indx; } } // ***** UNUSED CODE (not working) ****** /*__global__ void countActiveCells ( bufList buf, int pnum ) //T: useless { if ( threadIdx.x == 0 ) { // use only one processor //gridActive = -1; int last_ndx = buf.mgridoff [ simData.gridTotal-1 ] + buf.mgridcnt[ simData.gridTotal-1 ] - 1; int last_p = buf.mgrid[ last_ndx ]; int last_cell = buf.mgcell[ last_p ]; int first_p = buf.mgrid[ 0 ]; int first_cell = buf.mgcell[ first_p ] ; int cell, cnt = 0, curr = 0; cell = first_cell; while ( cell < last_cell ) { buf.mgridactive[ cnt ] = cell; // add cell to active list cnt++; curr += buf.mgridcnt[cell]; // advance to next active cell // id = buf.mgrid[curr]; // get particle id -- when unsorted only cell = buf.mgcell [ curr ]; // get cell we are in -- use id when unsorted } // gridActive = cnt; } __syncthreads(); }*/ __device__ float contributePressure(int i, float3 p, int cell, bufList buf) { float3 dist; float dsq; float c; float sum; register float d2 = simData.psimscale * simData.psimscale; register float r2 = simData.r2 / d2; sum = 0.0; if (buf.mgridcnt[cell] == 0) return 0.0; int cfirst = buf.mgridoff[cell]; int clast = cfirst + buf.mgridcnt[cell]; for (int cndx = cfirst; cndx < clast; ++cndx) { dist = p - buf.mpos[buf.mgrid[cndx]]; dsq = (dist.x * dist.x + dist.y * dist.y + dist.z * dist.z); if (dsq < r2) { c = (r2 - dsq) * d2; sum += c * c * c; } } return sum; } __global__ void computePressure(bufList buf, int pnum) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if (i >= pnum) return; // Get search cell int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1; uint gc = buf.mgcell[i]; if (gc == GRID_UNDEF) return; // particle out-of-range gc -= nadj; // Sum Pressures float3 pos = buf.mpos[i]; float sum = 0.0; for (int c = 0; c < simData.gridAdjCnt; c++) { sum += contributePressure(i, pos, gc + simData.gridAdj[c], buf); } __syncthreads(); // Compute Density & Pressure sum = sum * simData.pmass * simData.poly6kern; if (sum == 0.0) sum = 1.0; //buf.mpress[ i ] = ( sum - simData.prest_dens ) * simData.pintstiff; //T: less accurate pressure solver buf.mpress[i] = (pow(sum / simData.prest_dens, 7) - 1) * simData.pintstiff; //T: more accurate pressure solver buf.mdensity[i] = 1.0f / sum; } /*__global__ void computeQuery ( bufList buf, int pnum ) //T: useless { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; // Get search cell int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1; uint gc = buf.mgcell[ i ]; if ( gc == GRID_UNDEF ) return; // particle out-of-range gc -= nadj; // Sum Pressures float3 pos = buf.mpos[ i ]; float sum = 0.0; for (int c=0; c < simData.gridAdjCnt; c++) { sum += 1.0; } __syncthreads(); }*/ /*FindNeighbors int cid = blockIdx.x * blockSize.x + blockIdx.y; // cluster id int pid = threadIdx.x; // 0 to 85 (max particles per cell) __shared__ Particle clist[ 85 ]; __shared__ Particle plist[ 85*8 ]; if ( pid < clusterCnt[cid] ) clist [ pid ] = particles [ clusterNdx[cid] + pid ]; for ( gid = 0; gid < 8; gid++ ) { if ( pid < gridCnt[ cid + group[gid] ] ) plist [ cid*CELL_CNT + pid ] = particles [ sortNdx[ cid + group[gid] ] + pid ]; } __syncthreads(); for ( int j = 0; j < cellcnt; 
j++ ) { dst = plist[ pid ] - plist[ j ]; if ( dst < R2 ) { ... } }*/ /*grid block <gx, gy, gz> <1, 32, 64> 256, 256, 256 total: */ #define LOCAL_PMAX 896 #define NUM_CELL 27 #define LAST_CELL 26 #define CENTER_CELL 13 __global__ void computePressureGroup(bufList buf, int pnum) { __shared__ float3 cpos[LOCAL_PMAX]; __shared__ int ncnt[NUM_CELL]; __shared__ int ngridoff[NUM_CELL]; __shared__ int noff[NUM_CELL]; int bid = __mul24(blockIdx.y, gridDim.x) + blockIdx.x; if (bid > gridActive) return; // block must be in a valid grid uint cell = buf.mgridactive[bid]; // get grid cell (from blockID 1:1) register int i = -1; register float3 ipos; uint ndx = threadIdx.x; if (ndx < buf.mgridcnt[cell]) { i = buf.mgridoff[cell] + ndx; // particle id to process ipos = buf.mpos[i]; } int gid = threadIdx.x; register float d2 = simData.psimscale * simData.psimscale; register float r2 = simData.r2 / d2; register float3 dist; register float c, dsq, sum; int neighbor; // copy neighbor cell counts to shared mem if (gid < NUM_CELL) { int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1; neighbor = cell - nadj + simData.gridAdj[gid]; // neighbor cell id ncnt[gid] = buf.mgridcnt[neighbor]; ngridoff[gid] = buf.mgridoff[neighbor]; } __syncthreads(); if (gid == 0) { // compute neighbor local ndx (as prefix sum) int nsum = 0; for (int z = 0; z < NUM_CELL; z++) { // 27-step prefix sum noff[z] = nsum; nsum += ncnt[z]; } } __syncthreads(); // copy particles into shared memory if (gid < NUM_CELL) { for (int j = 0; j < ncnt[gid]; j++) { neighbor = buf.mgrid[ngridoff[gid] + j]; // neighbor particle id ndx = noff[gid] + j; cpos[ndx] = buf.mpos[neighbor]; } } __syncthreads(); // compute pressure for current particle if (i == -1) return; int jnum = noff[LAST_CELL] + ncnt[LAST_CELL]; sum = 0.0; for (int j = 0; j < jnum; j++) { dist = ipos - cpos[j]; dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); if (dsq > 0.0 && dsq < r2) { c = (r2 - dsq)*d2; sum += c * c * c; } } __syncthreads(); // put result into global mem sum = sum * simData.pmass * simData.poly6kern; if (sum == 0.0) sum = 1.0; buf.mpress[i] = (sum - simData.prest_dens) * simData.pintstiff; buf.mdensity[i] = 1.0f / sum; } __device__ float3 contributeForce(int i, float3 ipos, float3 iveleval, float ipress, float idens, int cell, bufList buf) { float dsq, c; float pterm; float3 dist; int j; if (buf.mgridcnt[cell] == 0) return make_float3(0, 0, 0); float3 force = make_float3(0, 0, 0); for (int cndx = buf.mgridoff[cell]; cndx < buf.mgridoff[cell] + buf.mgridcnt[cell]; cndx++) { j = buf.mgrid[cndx]; dist = (ipos - buf.mpos[j]); // dist in cm dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); if (dsq < simData.rd2 && dsq > 0) { dsq = sqrt(dsq * simData.d2); c = (simData.psmoothradius - dsq); // M: c:= (h - r) pterm = simData.psimscale * -0.5f * c * simData.spikykern * (ipress + buf.mpress[j]) / dsq; force += (pterm * dist + simData.vterm * (buf.mveleval[j] - iveleval)) * c * idens * (buf.mdensity[j]); } } return force; } __global__ void computeForce(bufList buf, int pnum) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if (i >= pnum) return; // Get search cell uint gc = buf.mgcell[i]; if (gc == GRID_UNDEF) return; // particle out-of-range gc -= (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1; // Sum Pressures register float3 force; force = make_float3(0, 0, 0); for (int c = 0; c < simData.gridAdjCnt; c++) { force += contributeForce(i, buf.mpos[i], buf.mveleval[i], buf.mpress[i], buf.mdensity[i], gc + simData.gridAdj[c], buf); } 
buf.mforce[i] = force; } /*__global__ void computeForceNbr ( char* bufPnts, int* bufGrid, int numPnt ) { uint ndx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( ndx >= numPnt ) return; char* ioffs = bufPnts + __mul24(ndx, simData.stride ); float3 ipos = *(float3*) (ioffs + OFFSET_POS); float3 ivelval = *(float3*) (ioffs + OFFSET_VELEVAL); float press = *(float*) (ioffs + OFFSET_PRESS); float dens = *(float*) (ioffs + OFFSET_DENS); int icnt = *(int*) (ioffs + OFFSET_NBRCNT); char* joffs; float3 jpos, jveleval; float3 dist, force; float c, ndistj, pterm, dterm, vterm; vterm = simData.lapkern * simData.visc; force = make_float3(0,0,0); for (int nbr=0; nbr < icnt; nbr++) { // base 1, n[0] = count ndistj = bufNdist[ndx][nbr]; joffs = bufPnts + __mul24(bufNeighbor[ndx][nbr], simData.stride); jpos = *(float3*) (joffs + OFFSET_POS); jveleval = *(float3*) (joffs + OFFSET_VELEVAL); c = ( simData.smooth_rad - ndistj ); dist.x = ( ipos.x - jpos.x ); // dist in cm dist.y = ( ipos.y - jpos.y ); dist.z = ( ipos.z - jpos.z ); pterm = simData.sim_scale * -0.5f * c * simData.spikykern * ( press + *(float*)(joffs+OFFSET_PRESS) ) / ndistj; dterm = c * dens * *(float*)(joffs+OFFSET_DENS); force.x += ( pterm * dist.x + vterm * ( jveleval.x - ivelval.x )) * dterm; force.y += ( pterm * dist.y + vterm * ( jveleval.y - ivelval.y )) * dterm; force.z += ( pterm * dist.z + vterm * ( jveleval.z - ivelval.z )) * dterm; } *(float3*) ( ioffs + OFFSET_FORCE ) = force; }*/ __global__ void advanceParticles(float time, float dt, float ss, bufList buf, int numPnts) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if (i >= numPnts) return; if (buf.mgcell[i] == GRID_UNDEF) { buf.mpos[i] = make_float3(-1000, -1000, -1000); buf.mvel[i] = make_float3(0, 0, 0); return; } // Get particle vars register float3 accel, norm; register float diff, adj, speed; register float3 pos = buf.mpos[i]; register float3 veval = buf.mveleval[i]; // Leapfrog integration accel = buf.mforce[i]; accel *= simData.pmass; // Boundaries // Y-axis diff = simData.pradius - (pos.y - (simData.pboundmin.y + (pos.x - simData.pboundmin.x)*simData.pground_slope)) * ss; if (diff > EPSILON) { norm = make_float3(-simData.pground_slope, 1.0 - simData.pground_slope, 0); adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval); norm *= adj; accel += norm; } diff = simData.pradius - (simData.pboundmax.y - pos.y)*ss; if (diff > EPSILON) { norm = make_float3(0, -1, 0); adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval); norm *= adj; accel += norm; } // X-axis diff = simData.pradius - (pos.x - (simData.pboundmin.x + (sin(time*simData.pforce_freq) + 1)*0.5 * simData.pforce_min))*ss; if (diff > EPSILON) { norm = make_float3(1, 0, 0); adj = (simData.pforce_min + 1) * simData.pextstiff * diff - simData.pdamp * dot(norm, veval); norm *= adj; accel += norm; } diff = simData.pradius - ((simData.pboundmax.x - (sin(time*simData.pforce_freq) + 1)*0.5*simData.pforce_max) - pos.x)*ss; if (diff > EPSILON) { norm = make_float3(-1, 0, 0); adj = (simData.pforce_max + 1) * simData.pextstiff * diff - simData.pdamp * dot(norm, veval); norm *= adj; accel += norm; } // Z-axis diff = simData.pradius - (pos.z - simData.pboundmin.z) * ss; if (diff > EPSILON) { norm = make_float3(0, 0, 1); adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval); norm *= adj; accel += norm; } diff = simData.pradius - (simData.pboundmax.z - pos.z)*ss; if (diff > EPSILON) { norm = make_float3(0, 0, -1); adj = simData.pextstiff * diff - 
simData.pdamp * dot(norm, veval); norm *= adj; accel += norm; } // Gravity accel += simData.pgravity; // Accel Limit speed = accel.x*accel.x + accel.y*accel.y + accel.z*accel.z; if (speed > simData.AL2) { accel *= simData.AL / sqrt(speed); } // Velocity Limit float3 vel = buf.mvel[i]; speed = vel.x*vel.x + vel.y*vel.y + vel.z*vel.z; if (speed > simData.VL2) { speed = simData.VL2; vel *= simData.VL / sqrt(speed); } // Ocean colors if (speed > simData.VL2*0.2) { adj = simData.VL2*0.2; buf.mclr[i] += ((buf.mclr[i] & 0xFF) < 0xFD) ? +0x00000002 : 0; // decrement R by one buf.mclr[i] += (((buf.mclr[i] >> 8) & 0xFF) < 0xFD) ? +0x00000200 : 0; // decrement G by one buf.mclr[i] += (((buf.mclr[i] >> 16) & 0xFF) < 0xFD) ? +0x00020000 : 0; // decrement G by one } if (speed < 0.03) { int v = int(speed / .01) + 1; buf.mclr[i] += ((buf.mclr[i] & 0xFF) > 0x80) ? -0x00000001 * v : 0; // decrement R by one buf.mclr[i] += (((buf.mclr[i] >> 8) & 0xFF) > 0x80) ? -0x00000100 * v : 0; // decrement G by one } //-- surface particle density //buf.mclr[i] = buf.mclr[i] & 0x00FFFFFF; //if ( buf.mdensity[i] > 0.0014 ) buf.mclr[i] += 0xAA000000; // Leap-frog Integration float3 vnext = accel*dt + vel; // v(t+1/2) = v(t-1/2) + a(t) dt buf.mveleval[i] = (vel + vnext) * 0.5; // v(t+1) = [v(t-1/2) + v(t+1/2)] * 0.5 buf.mvel[i] = vnext; buf.mpos[i] += vnext * (dt / ss); // p(t+1) = p(t) + v(t+1/2) dt } void updateSimParams(FluidParams* cpufp) { hipError_t status; #ifdef CUDA_42 // Only for CUDA 4.x or earlier. Depricated in CUDA 5.0+ // Original worked even if symbol was declared __device__ status = hipMemcpyToSymbol("simData", cpufp, sizeof(FluidParams)); #else // CUDA 5.x+. Only works if symbol is declared __constant__ status = hipMemcpyToSymbol(simData, cpufp, sizeof(FluidParams)); #endif /*app_printf ( "SIM PARAMETERS:\n" ); app_printf ( " CPU: %p\n", cpufp ); app_printf ( " GPU: %p\n", &simData ); */ } __global__ void insertFineParticles(bufList buf, int pnum) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (i >= pnum) { return; } register int nadj = (1 * simData.gridRes.z + 1) * simData.gridRes.x + 1; // Center of neighbor cells register uint gc = buf.mgcell[i]; // Grid index of central coarse particle if (gc == GRID_UNDEF) { return; } gc -= nadj; register float3 pos = buf.mpos[i]; // Central coarse particle position register float3 gridDelta = simData.gridDelta; register float sqlamdac = gridDelta.x * gridDelta.x + gridDelta.y * gridDelta.y + gridDelta.z * gridDelta.z; pos -= gridDelta; // First sampling position of central coarse particle register int sampleNum = 0; // Sample number in one demension register float sampleDelta = 2.0 / sampleNum; // Sample distance interval for (int i = 0; i <= sampleNum; ++i) { for (int j = 0; j <= sampleNum; ++j) { for (int k = 0; k <= sampleNum; ++k) { bool isFineParticle = true; float3 finePos = pos + make_float3( i * sampleDelta * gridDelta.x, j * sampleDelta * gridDelta.y, k * sampleDelta * gridDelta.z); // Try sample for (int c = 0; isFineParticle && c < simData.gridAdjCnt; c++) { // For all neighbor grid uint neighbor_gc = gc + simData.gridAdj[c]; // Current neighbor grid position int cfirst = buf.mgridoff[neighbor_gc]; // First neighbor coarse particle in this grid int clast = cfirst + buf.mgridcnt[neighbor_gc]; // Last neighbor coarse particle in this grid float3 dist; // 3D distance between fine particle and neighbor coarse particle float dsq; // distance^2 for (int cndx = cfirst; cndx < clast; ++cndx) { // Sample sampleNum^3 points uniformly around central 
coarse particle dist = finePos - buf.mpos[buf.mgrid[cndx]]; dsq = (dist.x * dist.x + dist.y * dist.y + dist.z * dist.z); if (dsq < sqlamdac) { // Dont sample at this point (lambda_c = grid size = gridDelta) isFineParticle = false; break; } } } if (isFineParticle) { uint index = atomicAdd(&(*buf.sfgoodnum), 1); //int index = *(buf.sfgoodnum)++; if (index > simData.sfnum) { return; } *(buf.sfexist + index) = 1; *(buf.sfpos + index) = finePos; *(buf.sfvel + index) = make_float3(0, 0, 0); *(buf.sfveleval + index) = make_float3(0, 0, 0); *(buf.sfforce + index) = make_float3(0, 0, 0); *(buf.sfpress + index) = 0; *(buf.sfdensity + index) = 0; } } } } }
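The counting-sort kernels earlier in this file (countingSortIndex / countingSortFull) only produce a valid permutation if buf.mgridoff already holds the exclusive prefix sum of buf.mgridcnt, which is what the comment "global_ndx = grid_cell_offset + particle_offset" relies on. That scan is not part of this file, so the sketch below only illustrates the invariant on host-side mirrors of the two arrays; the names computeGridOffsets, h_gridcnt and h_gridoff are assumptions, and the real project presumably performs the scan on the GPU.

// Sketch only: the exclusive prefix sum that countingSortFull's indexing assumes.
// h_gridcnt[c] = number of particles binned into cell c by insertParticles
// h_gridoff[c] = index of the first sorted slot reserved for cell c
void computeGridOffsets(const int* h_gridcnt, int* h_gridoff, int gridTotal)
{
    int sum = 0;
    for (int c = 0; c < gridTotal; ++c) {
        h_gridoff[c] = sum;      // cells are laid out back to back
        sum += h_gridcnt[c];
    }
    // afterwards: sort_ndx = h_gridoff[cell] + rank-within-cell is a unique slot,
    // and sum equals the number of in-range particles
}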
0f52e456de410cec16d3423b32eb3f9b80267f51.cu
/* FLUIDS v.3 - SPH Fluid Simulator for CPU and GPU Copyright (C) 2012-2013. Rama Hoetzlein, http://fluids3.com Attribute-ZLib license (* See additional part 4) This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. 4. Any published work based on this code must include public acknowledgement of the origin. This includes following when applicable: - Journal/Paper publications. Credited by reference to work in text & citation. - Public presentations. Credited in at least one slide. - Distributed Games/Apps. Credited as single line in game or app credit page. Retaining this additional license term is required in derivative works. Acknowledgement may be provided as: Publication version: 2012-2013, Hoetzlein, Rama C. Fluids v.3 - A Large-Scale, Open Source Fluid Simulator. Published online at: http://fluids3.com Single line (slides or app credits): GPU Fluids: Rama C. Hoetzlein (Fluids v3 2013) Notes on Clause 4: The intent of this clause is public attribution for this contribution, not code use restriction. Both commerical and open source projects may redistribute and reuse without code release. However, clause #1 of ZLib indicates that "you must not claim that you wrote the original software". Clause #4 makes this more specific by requiring public acknowledgement to be extended to derivative licenses. */ // for syntax highlight #include "device_launch_parameters.h" #define CUDA_KERNEL #include "fluid_system_kern.cuh" #include "cutil_math.h" #include "radixsort.cu" // Build in RadixSort __constant__ FluidParams simData; __constant__ uint gridActive; __global__ void insertParticles(bufList buf, int pnum) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if (i >= pnum) return; register float3 gridMin = simData.gridMin; register float3 gridDelta = simData.gridDelta; register int3 gridRes = simData.gridRes; register int3 gridScan = simData.gridScanMax; register float poff = simData.psmoothradius / simData.psimscale; register int gs; register float3 gcf; register int3 gc; gcf = (buf.mpos[i] - gridMin) * gridDelta; // M: the relative position in simulation gc = make_int3(int(gcf.x), int(gcf.y), int(gcf.z)); gs = (gc.y * gridRes.z + gc.z)*gridRes.x + gc.x; // M: turn 3D to 1D if (gc.x >= 1 && gc.x <= gridScan.x && gc.y >= 1 && gc.y <= gridScan.y && gc.z >= 1 && gc.z <= gridScan.z) { // M: for all the particles in the simulation domain buf.mgcell[i] = gs; // Grid cell insert. M: insert the grid cell index into the property of this particle buf.mgndx[i] = atomicAdd(&buf.mgridcnt[gs], 1); // Grid counts. M: 1. record the number of particles in this grid cell 2. 
record the rank of the particle in the cell // M: useless code //gcf = (-make_float3(poff,poff,poff) + buf.mpos[i] - gridMin) * gridDelta; //gc = make_int3( int(gcf.x), int(gcf.y), int(gcf.z) ); //gs = ( gc.y * gridRes.z + gc.z)*gridRes.x + gc.x; } else { buf.mgcell[i] = GRID_UNDEF; // M: assert the particle "grid undifined" which are out of the domain } } // it seems that this part did not work // the mutex variable //__device__ int g_mutex = 0; // M: default 0, seems it doesn't matter a lot // GPU simple synchronization function /*__device__ void __gpu_sync(int goalVal) { __threadfence (); // only thread 0 is used for synchronization if (threadIdx.x == 0) atomicAdd(&g_mutex, 1); // only when all blocks add 1 to g_mutex will // g_mutex equal to goalVal while(g_mutex < goalVal) { // infinite loop until g_mutx = goalVal } if ( blockIdx.x == 0 && threadIdx.x == 0 ) g_mutex = 0; __syncthreads(); }*/ // countingSortInPlace -- GPU_SYNC DOES NOT WORK /*uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) { __gpu_sync ( 2 ); return; } register float3 ipos, ivel, iveleval, iforce; register float ipress, idens; register int icell, indx, iclr; icell = buf.mgcell [ i ]; indx = buf.mgndx [ i ]; int sort_ndx = buf.mgridoff[ icell ] + indx; // global_ndx = grid_cell_offet + particle_offset if ( icell == GRID_UNDEF ) { __gpu_sync ( 2 ); return; } ipos = buf.mpos [ i ]; ivel = buf.mvel [ i ]; iveleval = buf.mveleval [ i ]; iforce = buf.mforce [ i ]; ipress = buf.mpress [ i ]; idens = buf.mdensity [ i ]; iclr = buf.mclr [ i ]; __gpu_sync ( 2 ) ; //threadfence(); // make sure every thread in all blocks has their data buf.mpos [ sort_ndx ] = ipos; buf.mvel [ sort_ndx ] = ivel; buf.mveleval [ sort_ndx ] = iveleval; buf.mforce [ sort_ndx ] = iforce; buf.mpress [ sort_ndx ] = ipress; buf.mdensity [ sort_ndx ] = idens; buf.mclr [ sort_ndx ] = iclr;*/ // Counting Sort - Index __global__ void countingSortIndex(bufList buf, int pnum) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if (i >= pnum) return; uint icell = buf.mgcell[i]; // M: figure out which grid cell the particle exactly in uint indx = buf.mgndx[i]; // M: figure out which the particle is in its cell int sort_ndx = buf.mgridoff[icell] + indx; // global_ndx = grid_cell_offet + particle_offset if (icell != GRID_UNDEF) { buf.mgrid[sort_ndx] = i; // index sort, grid refers to original particle order } } // Counting Sort - Full (deep copy) __global__ void countingSortFull(bufList buf, int pnum) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if (i >= pnum) return; // Copy particle from original, unsorted buffer (msortbuf), // into sorted memory location on device (mpos/mvel) uint icell = *(uint*)(buf.msortbuf + pnum*BUF_GCELL + i * sizeof(uint)); uint indx = *(uint*)(buf.msortbuf + pnum*BUF_GNDX + i * sizeof(uint)); if (icell != GRID_UNDEF) { // Determine the sort_ndx, location of the particle after sort int sort_ndx = buf.mgridoff[icell] + indx; // global_ndx = grid_cell_offet + particle_offset // Find the original particle data, offset into unsorted buffer (msortbuf) char* bpos = buf.msortbuf + i * sizeof(float3); // Transfer data to sort location buf.mgrid[sort_ndx] = sort_ndx; // full sort, grid indexing becomes identity buf.mpos[sort_ndx] = *(float3*)(bpos); buf.mvel[sort_ndx] = *(float3*)(bpos + pnum*BUF_VEL); buf.mveleval[sort_ndx] = *(float3*)(bpos + pnum*BUF_VELEVAL); buf.mforce[sort_ndx] = *(float3*)(bpos + pnum*BUF_FORCE); buf.mpress[sort_ndx] = 
*(float*)(buf.msortbuf + pnum*BUF_PRESS + i * sizeof(float)); buf.mdensity[sort_ndx] = *(float*)(buf.msortbuf + pnum*BUF_DENS + i * sizeof(float)); buf.mclr[sort_ndx] = *(uint*)(buf.msortbuf + pnum*BUF_CLR + i * sizeof(uint)); // ((uint) 255)<<24; -- dark matter buf.mgcell[sort_ndx] = icell; buf.mgndx[sort_ndx] = indx; } } // ***** UNUSED CODE (not working) ****** /*__global__ void countActiveCells ( bufList buf, int pnum ) //T: useless { if ( threadIdx.x == 0 ) { // use only one processor //gridActive = -1; int last_ndx = buf.mgridoff [ simData.gridTotal-1 ] + buf.mgridcnt[ simData.gridTotal-1 ] - 1; int last_p = buf.mgrid[ last_ndx ]; int last_cell = buf.mgcell[ last_p ]; int first_p = buf.mgrid[ 0 ]; int first_cell = buf.mgcell[ first_p ] ; int cell, cnt = 0, curr = 0; cell = first_cell; while ( cell < last_cell ) { buf.mgridactive[ cnt ] = cell; // add cell to active list cnt++; curr += buf.mgridcnt[cell]; // advance to next active cell // id = buf.mgrid[curr]; // get particle id -- when unsorted only cell = buf.mgcell [ curr ]; // get cell we are in -- use id when unsorted } // gridActive = cnt; } __syncthreads(); }*/ __device__ float contributePressure(int i, float3 p, int cell, bufList buf) { float3 dist; float dsq; float c; float sum; register float d2 = simData.psimscale * simData.psimscale; register float r2 = simData.r2 / d2; sum = 0.0; if (buf.mgridcnt[cell] == 0) return 0.0; int cfirst = buf.mgridoff[cell]; int clast = cfirst + buf.mgridcnt[cell]; for (int cndx = cfirst; cndx < clast; ++cndx) { dist = p - buf.mpos[buf.mgrid[cndx]]; dsq = (dist.x * dist.x + dist.y * dist.y + dist.z * dist.z); if (dsq < r2) { c = (r2 - dsq) * d2; sum += c * c * c; } } return sum; } __global__ void computePressure(bufList buf, int pnum) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if (i >= pnum) return; // Get search cell int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1; uint gc = buf.mgcell[i]; if (gc == GRID_UNDEF) return; // particle out-of-range gc -= nadj; // Sum Pressures float3 pos = buf.mpos[i]; float sum = 0.0; for (int c = 0; c < simData.gridAdjCnt; c++) { sum += contributePressure(i, pos, gc + simData.gridAdj[c], buf); } __syncthreads(); // Compute Density & Pressure sum = sum * simData.pmass * simData.poly6kern; if (sum == 0.0) sum = 1.0; //buf.mpress[ i ] = ( sum - simData.prest_dens ) * simData.pintstiff; //T: less accurate pressure solver buf.mpress[i] = (pow(sum / simData.prest_dens, 7) - 1) * simData.pintstiff; //T: more accurate pressure solver buf.mdensity[i] = 1.0f / sum; } /*__global__ void computeQuery ( bufList buf, int pnum ) //T: useless { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; // Get search cell int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1; uint gc = buf.mgcell[ i ]; if ( gc == GRID_UNDEF ) return; // particle out-of-range gc -= nadj; // Sum Pressures float3 pos = buf.mpos[ i ]; float sum = 0.0; for (int c=0; c < simData.gridAdjCnt; c++) { sum += 1.0; } __syncthreads(); }*/ /*FindNeighbors int cid = blockIdx.x * blockSize.x + blockIdx.y; // cluster id int pid = threadIdx.x; // 0 to 85 (max particles per cell) __shared__ Particle clist[ 85 ]; __shared__ Particle plist[ 85*8 ]; if ( pid < clusterCnt[cid] ) clist [ pid ] = particles [ clusterNdx[cid] + pid ]; for ( gid = 0; gid < 8; gid++ ) { if ( pid < gridCnt[ cid + group[gid] ] ) plist [ cid*CELL_CNT + pid ] = particles [ sortNdx[ cid + group[gid] ] + pid ]; } __syncthreads(); for ( int j = 0; j < cellcnt; 
j++ ) { dst = plist[ pid ] - plist[ j ]; if ( dst < R2 ) { ... } }*/ /*grid block <gx, gy, gz> <1, 32, 64> 256, 256, 256 total: */ #define LOCAL_PMAX 896 #define NUM_CELL 27 #define LAST_CELL 26 #define CENTER_CELL 13 __global__ void computePressureGroup(bufList buf, int pnum) { __shared__ float3 cpos[LOCAL_PMAX]; __shared__ int ncnt[NUM_CELL]; __shared__ int ngridoff[NUM_CELL]; __shared__ int noff[NUM_CELL]; int bid = __mul24(blockIdx.y, gridDim.x) + blockIdx.x; if (bid > gridActive) return; // block must be in a valid grid uint cell = buf.mgridactive[bid]; // get grid cell (from blockID 1:1) register int i = -1; register float3 ipos; uint ndx = threadIdx.x; if (ndx < buf.mgridcnt[cell]) { i = buf.mgridoff[cell] + ndx; // particle id to process ipos = buf.mpos[i]; } int gid = threadIdx.x; register float d2 = simData.psimscale * simData.psimscale; register float r2 = simData.r2 / d2; register float3 dist; register float c, dsq, sum; int neighbor; // copy neighbor cell counts to shared mem if (gid < NUM_CELL) { int nadj = (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1; neighbor = cell - nadj + simData.gridAdj[gid]; // neighbor cell id ncnt[gid] = buf.mgridcnt[neighbor]; ngridoff[gid] = buf.mgridoff[neighbor]; } __syncthreads(); if (gid == 0) { // compute neighbor local ndx (as prefix sum) int nsum = 0; for (int z = 0; z < NUM_CELL; z++) { // 27-step prefix sum noff[z] = nsum; nsum += ncnt[z]; } } __syncthreads(); // copy particles into shared memory if (gid < NUM_CELL) { for (int j = 0; j < ncnt[gid]; j++) { neighbor = buf.mgrid[ngridoff[gid] + j]; // neighbor particle id ndx = noff[gid] + j; cpos[ndx] = buf.mpos[neighbor]; } } __syncthreads(); // compute pressure for current particle if (i == -1) return; int jnum = noff[LAST_CELL] + ncnt[LAST_CELL]; sum = 0.0; for (int j = 0; j < jnum; j++) { dist = ipos - cpos[j]; dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); if (dsq > 0.0 && dsq < r2) { c = (r2 - dsq)*d2; sum += c * c * c; } } __syncthreads(); // put result into global mem sum = sum * simData.pmass * simData.poly6kern; if (sum == 0.0) sum = 1.0; buf.mpress[i] = (sum - simData.prest_dens) * simData.pintstiff; buf.mdensity[i] = 1.0f / sum; } __device__ float3 contributeForce(int i, float3 ipos, float3 iveleval, float ipress, float idens, int cell, bufList buf) { float dsq, c; float pterm; float3 dist; int j; if (buf.mgridcnt[cell] == 0) return make_float3(0, 0, 0); float3 force = make_float3(0, 0, 0); for (int cndx = buf.mgridoff[cell]; cndx < buf.mgridoff[cell] + buf.mgridcnt[cell]; cndx++) { j = buf.mgrid[cndx]; dist = (ipos - buf.mpos[j]); // dist in cm dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); if (dsq < simData.rd2 && dsq > 0) { dsq = sqrt(dsq * simData.d2); c = (simData.psmoothradius - dsq); // M: c:= (h - r) pterm = simData.psimscale * -0.5f * c * simData.spikykern * (ipress + buf.mpress[j]) / dsq; force += (pterm * dist + simData.vterm * (buf.mveleval[j] - iveleval)) * c * idens * (buf.mdensity[j]); } } return force; } __global__ void computeForce(bufList buf, int pnum) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if (i >= pnum) return; // Get search cell uint gc = buf.mgcell[i]; if (gc == GRID_UNDEF) return; // particle out-of-range gc -= (1 * simData.gridRes.z + 1)*simData.gridRes.x + 1; // Sum Pressures register float3 force; force = make_float3(0, 0, 0); for (int c = 0; c < simData.gridAdjCnt; c++) { force += contributeForce(i, buf.mpos[i], buf.mveleval[i], buf.mpress[i], buf.mdensity[i], gc + simData.gridAdj[c], buf); } 
buf.mforce[i] = force; } /*__global__ void computeForceNbr ( char* bufPnts, int* bufGrid, int numPnt ) { uint ndx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( ndx >= numPnt ) return; char* ioffs = bufPnts + __mul24(ndx, simData.stride ); float3 ipos = *(float3*) (ioffs + OFFSET_POS); float3 ivelval = *(float3*) (ioffs + OFFSET_VELEVAL); float press = *(float*) (ioffs + OFFSET_PRESS); float dens = *(float*) (ioffs + OFFSET_DENS); int icnt = *(int*) (ioffs + OFFSET_NBRCNT); char* joffs; float3 jpos, jveleval; float3 dist, force; float c, ndistj, pterm, dterm, vterm; vterm = simData.lapkern * simData.visc; force = make_float3(0,0,0); for (int nbr=0; nbr < icnt; nbr++) { // base 1, n[0] = count ndistj = bufNdist[ndx][nbr]; joffs = bufPnts + __mul24(bufNeighbor[ndx][nbr], simData.stride); jpos = *(float3*) (joffs + OFFSET_POS); jveleval = *(float3*) (joffs + OFFSET_VELEVAL); c = ( simData.smooth_rad - ndistj ); dist.x = ( ipos.x - jpos.x ); // dist in cm dist.y = ( ipos.y - jpos.y ); dist.z = ( ipos.z - jpos.z ); pterm = simData.sim_scale * -0.5f * c * simData.spikykern * ( press + *(float*)(joffs+OFFSET_PRESS) ) / ndistj; dterm = c * dens * *(float*)(joffs+OFFSET_DENS); force.x += ( pterm * dist.x + vterm * ( jveleval.x - ivelval.x )) * dterm; force.y += ( pterm * dist.y + vterm * ( jveleval.y - ivelval.y )) * dterm; force.z += ( pterm * dist.z + vterm * ( jveleval.z - ivelval.z )) * dterm; } *(float3*) ( ioffs + OFFSET_FORCE ) = force; }*/ __global__ void advanceParticles(float time, float dt, float ss, bufList buf, int numPnts) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if (i >= numPnts) return; if (buf.mgcell[i] == GRID_UNDEF) { buf.mpos[i] = make_float3(-1000, -1000, -1000); buf.mvel[i] = make_float3(0, 0, 0); return; } // Get particle vars register float3 accel, norm; register float diff, adj, speed; register float3 pos = buf.mpos[i]; register float3 veval = buf.mveleval[i]; // Leapfrog integration accel = buf.mforce[i]; accel *= simData.pmass; // Boundaries // Y-axis diff = simData.pradius - (pos.y - (simData.pboundmin.y + (pos.x - simData.pboundmin.x)*simData.pground_slope)) * ss; if (diff > EPSILON) { norm = make_float3(-simData.pground_slope, 1.0 - simData.pground_slope, 0); adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval); norm *= adj; accel += norm; } diff = simData.pradius - (simData.pboundmax.y - pos.y)*ss; if (diff > EPSILON) { norm = make_float3(0, -1, 0); adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval); norm *= adj; accel += norm; } // X-axis diff = simData.pradius - (pos.x - (simData.pboundmin.x + (sin(time*simData.pforce_freq) + 1)*0.5 * simData.pforce_min))*ss; if (diff > EPSILON) { norm = make_float3(1, 0, 0); adj = (simData.pforce_min + 1) * simData.pextstiff * diff - simData.pdamp * dot(norm, veval); norm *= adj; accel += norm; } diff = simData.pradius - ((simData.pboundmax.x - (sin(time*simData.pforce_freq) + 1)*0.5*simData.pforce_max) - pos.x)*ss; if (diff > EPSILON) { norm = make_float3(-1, 0, 0); adj = (simData.pforce_max + 1) * simData.pextstiff * diff - simData.pdamp * dot(norm, veval); norm *= adj; accel += norm; } // Z-axis diff = simData.pradius - (pos.z - simData.pboundmin.z) * ss; if (diff > EPSILON) { norm = make_float3(0, 0, 1); adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval); norm *= adj; accel += norm; } diff = simData.pradius - (simData.pboundmax.z - pos.z)*ss; if (diff > EPSILON) { norm = make_float3(0, 0, -1); adj = simData.pextstiff * diff - 
simData.pdamp * dot(norm, veval); norm *= adj; accel += norm; } // Gravity accel += simData.pgravity; // Accel Limit speed = accel.x*accel.x + accel.y*accel.y + accel.z*accel.z; if (speed > simData.AL2) { accel *= simData.AL / sqrt(speed); } // Velocity Limit float3 vel = buf.mvel[i]; speed = vel.x*vel.x + vel.y*vel.y + vel.z*vel.z; if (speed > simData.VL2) { speed = simData.VL2; vel *= simData.VL / sqrt(speed); } // Ocean colors if (speed > simData.VL2*0.2) { adj = simData.VL2*0.2; buf.mclr[i] += ((buf.mclr[i] & 0xFF) < 0xFD) ? +0x00000002 : 0; // decrement R by one buf.mclr[i] += (((buf.mclr[i] >> 8) & 0xFF) < 0xFD) ? +0x00000200 : 0; // decrement G by one buf.mclr[i] += (((buf.mclr[i] >> 16) & 0xFF) < 0xFD) ? +0x00020000 : 0; // decrement G by one } if (speed < 0.03) { int v = int(speed / .01) + 1; buf.mclr[i] += ((buf.mclr[i] & 0xFF) > 0x80) ? -0x00000001 * v : 0; // decrement R by one buf.mclr[i] += (((buf.mclr[i] >> 8) & 0xFF) > 0x80) ? -0x00000100 * v : 0; // decrement G by one } //-- surface particle density //buf.mclr[i] = buf.mclr[i] & 0x00FFFFFF; //if ( buf.mdensity[i] > 0.0014 ) buf.mclr[i] += 0xAA000000; // Leap-frog Integration float3 vnext = accel*dt + vel; // v(t+1/2) = v(t-1/2) + a(t) dt buf.mveleval[i] = (vel + vnext) * 0.5; // v(t+1) = [v(t-1/2) + v(t+1/2)] * 0.5 buf.mvel[i] = vnext; buf.mpos[i] += vnext * (dt / ss); // p(t+1) = p(t) + v(t+1/2) dt } void updateSimParams(FluidParams* cpufp) { cudaError_t status; #ifdef CUDA_42 // Only for CUDA 4.x or earlier. Depricated in CUDA 5.0+ // Original worked even if symbol was declared __device__ status = cudaMemcpyToSymbol("simData", cpufp, sizeof(FluidParams)); #else // CUDA 5.x+. Only works if symbol is declared __constant__ status = cudaMemcpyToSymbol(simData, cpufp, sizeof(FluidParams)); #endif /*app_printf ( "SIM PARAMETERS:\n" ); app_printf ( " CPU: %p\n", cpufp ); app_printf ( " GPU: %p\n", &simData ); */ } __global__ void insertFineParticles(bufList buf, int pnum) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; if (i >= pnum) { return; } register int nadj = (1 * simData.gridRes.z + 1) * simData.gridRes.x + 1; // Center of neighbor cells register uint gc = buf.mgcell[i]; // Grid index of central coarse particle if (gc == GRID_UNDEF) { return; } gc -= nadj; register float3 pos = buf.mpos[i]; // Central coarse particle position register float3 gridDelta = simData.gridDelta; register float sqlamdac = gridDelta.x * gridDelta.x + gridDelta.y * gridDelta.y + gridDelta.z * gridDelta.z; pos -= gridDelta; // First sampling position of central coarse particle register int sampleNum = 0; // Sample number in one demension register float sampleDelta = 2.0 / sampleNum; // Sample distance interval for (int i = 0; i <= sampleNum; ++i) { for (int j = 0; j <= sampleNum; ++j) { for (int k = 0; k <= sampleNum; ++k) { bool isFineParticle = true; float3 finePos = pos + make_float3( i * sampleDelta * gridDelta.x, j * sampleDelta * gridDelta.y, k * sampleDelta * gridDelta.z); // Try sample for (int c = 0; isFineParticle && c < simData.gridAdjCnt; c++) { // For all neighbor grid uint neighbor_gc = gc + simData.gridAdj[c]; // Current neighbor grid position int cfirst = buf.mgridoff[neighbor_gc]; // First neighbor coarse particle in this grid int clast = cfirst + buf.mgridcnt[neighbor_gc]; // Last neighbor coarse particle in this grid float3 dist; // 3D distance between fine particle and neighbor coarse particle float dsq; // distance^2 for (int cndx = cfirst; cndx < clast; ++cndx) { // Sample sampleNum^3 points uniformly around central 
coarse particle dist = finePos - buf.mpos[buf.mgrid[cndx]]; dsq = (dist.x * dist.x + dist.y * dist.y + dist.z * dist.z); if (dsq < sqlamdac) { // Dont sample at this point (lambda_c = grid size = gridDelta) isFineParticle = false; break; } } } if (isFineParticle) { uint index = atomicAdd(&(*buf.sfgoodnum), 1); //int index = *(buf.sfgoodnum)++; if (index > simData.sfnum) { return; } *(buf.sfexist + index) = 1; *(buf.sfpos + index) = finePos; *(buf.sfvel + index) = make_float3(0, 0, 0); *(buf.sfveleval + index) = make_float3(0, 0, 0); *(buf.sfforce + index) = make_float3(0, 0, 0); *(buf.sfpress + index) = 0; *(buf.sfdensity + index) = 0; } } } } }
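advanceParticles (identical in the .hip and .cu copies of this kernel file) integrates with the staggered leapfrog scheme spelled out in its closing comments. The fragment below is a distilled 1-D version of just that update, with the boundary forces, gravity and speed limits stripped out; the struct and function names are illustrative only and do not come from the original source.

// Illustrative 1-D restatement of the leapfrog update in advanceParticles.
struct Leapfrog1D {
    float pos;       // p(t)
    float vel_half;  // v(t-1/2), what the kernel keeps in buf.mvel
    float vel_eval;  // the averaged velocity the kernel keeps in buf.mveleval
};

void leapfrogStep(Leapfrog1D& s, float accel, float dt, float ss)
{
    float vnext = s.vel_half + accel * dt;      // v(t+1/2) = v(t-1/2) + a(t) dt
    s.vel_eval  = 0.5f * (s.vel_half + vnext);  // v(t+1) = [v(t-1/2) + v(t+1/2)] * 0.5
    s.vel_half  = vnext;
    s.pos      += vnext * (dt / ss);            // p(t+1) = p(t) + v(t+1/2) dt, ss = simscale
}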
7a2c6d87f96e99bb79273e9a7b06ba8e57059ed3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <math.h>

// random quick sort algorithm
__device__ int partition_GPU(unsigned char* window, int start, int end)
{
    int i = start;
    for(int j = start; j < end; j++)
        if(window[j] < window[end])
        {
            unsigned char temp = window[i];
            window[i] = window[j];
            window[j] = temp;
            i++;
        }
    unsigned char temp = window[i];
    window[i] = window[end];
    window[end] = temp;
    return i;
}

__device__ void randQuickSort(unsigned char * window, int start, int end, int idx)
{
    if(start < end)
    {
        // initialization of generating a random number.
        // cannot figure it out how to generate a random number in cuda
        // use the midpoint of start and end instead
        /*
        hiprandState_t state;
        hiprand_init(1234, idx, 0, &state);
        // random number
        float myrandf = hiprand_uniform(&state);
        myrandf *= (end - start);
        int p = (int)myrandf;
        */
        int p = (end - start)/2 + start;
        unsigned char temp = window[p];
        window[p] = window[end];
        window[end] = temp;
        int q = partition_GPU(window, start, end);
        randQuickSort(window, start, q - 1, idx);
        randQuickSort(window, q + 1, end, idx);
    }
}

/**
 * CUDA Kernel Device code
 */
__global__ void medianFilterGPU(const unsigned char *d_inputData, unsigned char *d_outputData,
                                const int filterSize, const int img_height, const int img_width)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    int edgex = (filterSize / 2);
    int edgey = (filterSize / 2);

    if(y >= edgey && y < img_height - edgey && x >= edgex && x < img_width - edgex)
    {
        int i = 0;
        unsigned char * window = new unsigned char[filterSize * filterSize];
        for(int fy = 0; fy < filterSize; fy++)
            for(int fx = 0; fx < filterSize; fx++)
            {
                window[i] = d_inputData[(y + fy - edgey) * img_width + (x + fx - edgex)];
                i++;
            }
        randQuickSort(window, 0, filterSize * filterSize - 1, x);
        d_outputData[y * img_width + x] = window[filterSize * filterSize / 2];
        delete[] window;
    }
    else
        // copy the pixels on the corners and sides
        d_outputData[y * img_width + x] = d_inputData[y * img_width + x];
}
7a2c6d87f96e99bb79273e9a7b06ba8e57059ed3.cu
#include <curand.h>
#include <curand_kernel.h>
#include <math.h>

// random quick sort algorithm
__device__ int partition_GPU(unsigned char* window, int start, int end)
{
    int i = start;
    for(int j = start; j < end; j++)
        if(window[j] < window[end])
        {
            unsigned char temp = window[i];
            window[i] = window[j];
            window[j] = temp;
            i++;
        }
    unsigned char temp = window[i];
    window[i] = window[end];
    window[end] = temp;
    return i;
}

__device__ void randQuickSort(unsigned char * window, int start, int end, int idx)
{
    if(start < end)
    {
        // initialization of generating a random number.
        // cannot figure it out how to generate a random number in cuda
        // use the midpoint of start and end instead
        /*
        curandState state;
        curand_init(1234, idx, 0, &state);
        // random number
        float myrandf = curand_uniform(&state);
        myrandf *= (end - start);
        int p = (int)myrandf;
        */
        int p = (end - start)/2 + start;
        unsigned char temp = window[p];
        window[p] = window[end];
        window[end] = temp;
        int q = partition_GPU(window, start, end);
        randQuickSort(window, start, q - 1, idx);
        randQuickSort(window, q + 1, end, idx);
    }
}

/**
 * CUDA Kernel Device code
 */
__global__ void medianFilterGPU(const unsigned char *d_inputData, unsigned char *d_outputData,
                                const int filterSize, const int img_height, const int img_width)
{
    int x = blockDim.x * blockIdx.x + threadIdx.x;
    int y = blockDim.y * blockIdx.y + threadIdx.y;
    int edgex = (filterSize / 2);
    int edgey = (filterSize / 2);

    if(y >= edgey && y < img_height - edgey && x >= edgex && x < img_width - edgex)
    {
        int i = 0;
        unsigned char * window = new unsigned char[filterSize * filterSize];
        for(int fy = 0; fy < filterSize; fy++)
            for(int fx = 0; fx < filterSize; fx++)
            {
                window[i] = d_inputData[(y + fy - edgey) * img_width + (x + fx - edgex)];
                i++;
            }
        randQuickSort(window, 0, filterSize * filterSize - 1, x);
        d_outputData[y * img_width + x] = window[filterSize * filterSize / 2];
        delete[] window;
    }
    else
        // copy the pixels on the corners and sides
        d_outputData[y * img_width + x] = d_inputData[y * img_width + x];
}
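Neither copy of this median-filter kernel ships with host code, so the launch below is only a sketch of one plausible configuration; runMedianFilter, the 16x16 block shape and the 64 MB heap size are all assumptions. It makes two things explicit: the per-thread new[]/delete[] inside the kernel allocate from the device heap, which can be enlarged with cudaDeviceSetLimit, and the kernel's border-copy branch has no out-of-bounds guard, so the grid here is assumed to cover the image exactly (dimensions divisible by the block size).

#include <cuda_runtime.h>

// Host-side launch sketch (assumed, not part of the original file).
void runMedianFilter(const unsigned char* d_in, unsigned char* d_out,
                     int filterSize, int img_height, int img_width)
{
    // window[] is allocated with `new` per thread inside the kernel, which draws
    // from the device heap; raise the limit if filterSize or the image is large.
    cudaDeviceSetLimit(cudaLimitMallocHeapSize, 64u << 20);   // 64 MB (assumed size)

    // Assumes img_width % 16 == 0 and img_height % 16 == 0, because the kernel
    // writes d_outputData[y * img_width + x] even on its border-copy branch.
    dim3 threads(16, 16);
    dim3 blocks(img_width / 16, img_height / 16);
    medianFilterGPU<<<blocks, threads>>>(d_in, d_out, filterSize, img_height, img_width);
    cudaDeviceSynchronize();
}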
d7c7a7ec77023064e7c2db39d2621f726a0620d8.hip
// !!! This is a file automatically generated by hipify!!! // Copyright 2019 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "statespace_testfixture.h" #include <rocblas.h> #include <custatevec.h> #include "gtest/gtest.h" #include "../lib/simulator_custatevec.h" #include "../lib/statespace_custatevec.h" namespace qsim { template <class T> class StateSpaceCuStateVecTest : public testing::Test {}; using fp_impl = ::testing::Types<float, double>; TYPED_TEST_SUITE(StateSpaceCuStateVecTest, fp_impl); template <typename fp_type> struct Factory { using Simulator = qsim::SimulatorCuStateVec<fp_type>; using StateSpace = typename Simulator::StateSpace; Factory() { ErrorCheck(hipblasCreate(&cublas_handle)); ErrorCheck(custatevecCreate(&custatevec_handle)); } ~Factory() { ErrorCheck(hipblasDestroy(cublas_handle)); ErrorCheck(custatevecDestroy(custatevec_handle)); } StateSpace CreateStateSpace() const { return StateSpace(cublas_handle, custatevec_handle); } Simulator CreateSimulator() const { return Simulator(cublas_handle, custatevec_handle); } hipblasHandle_t cublas_handle; custatevecHandle_t custatevec_handle; }; TYPED_TEST(StateSpaceCuStateVecTest, Add) { TestAdd(qsim::Factory<TypeParam>()); } TYPED_TEST(StateSpaceCuStateVecTest, NormSmall) { TestNormSmall(qsim::Factory<TypeParam>()); } TYPED_TEST(StateSpaceCuStateVecTest, NormAndInnerProductSmall) { TestNormAndInnerProductSmall(qsim::Factory<TypeParam>()); } TYPED_TEST(StateSpaceCuStateVecTest, NormAndInnerProduct) { TestNormAndInnerProduct(qsim::Factory<TypeParam>()); } TYPED_TEST(StateSpaceCuStateVecTest, SamplingSmall) { TestSamplingSmall(qsim::Factory<TypeParam>()); } TYPED_TEST(StateSpaceCuStateVecTest, SamplingCrossEntropyDifference) { TestSamplingCrossEntropyDifference(qsim::Factory<TypeParam>()); } TYPED_TEST(StateSpaceCuStateVecTest, Ordering) { TestOrdering(qsim::Factory<TypeParam>()); } TEST(StateSpaceCuStateVecTest, MeasurementSmall) { TestMeasurementSmall(qsim::Factory<float>(), true); } TYPED_TEST(StateSpaceCuStateVecTest, MeasurementLarge) { // This test fails. // TestMeasurementLarge(qsim::Factory<TypeParam>()); } TYPED_TEST(StateSpaceCuStateVecTest, Collapse) { TestCollapse(qsim::Factory<TypeParam>()); } TEST(StateSpaceCuStateVecTest, InvalidStateSize) { TestInvalidStateSize(qsim::Factory<float>()); } TYPED_TEST(StateSpaceCuStateVecTest, BulkSetAmpl) { // Not implemented. // TestBulkSetAmplitude(qsim::Factory<TypeParam>()); } TYPED_TEST(StateSpaceCuStateVecTest, BulkSetAmplExclusion) { // Not implemented. // TestBulkSetAmplitudeExclusion(qsim::Factory<TypeParam>()); } TYPED_TEST(StateSpaceCuStateVecTest, BulkSetAmplDefault) { // Not implemented. // TestBulkSetAmplitudeDefault(factory); } } // namespace qsim int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
d7c7a7ec77023064e7c2db39d2621f726a0620d8.cu
// Copyright 2019 Google LLC. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "statespace_testfixture.h" #include <cublas_v2.h> #include <custatevec.h> #include "gtest/gtest.h" #include "../lib/simulator_custatevec.h" #include "../lib/statespace_custatevec.h" namespace qsim { template <class T> class StateSpaceCuStateVecTest : public testing::Test {}; using fp_impl = ::testing::Types<float, double>; TYPED_TEST_SUITE(StateSpaceCuStateVecTest, fp_impl); template <typename fp_type> struct Factory { using Simulator = qsim::SimulatorCuStateVec<fp_type>; using StateSpace = typename Simulator::StateSpace; Factory() { ErrorCheck(cublasCreate(&cublas_handle)); ErrorCheck(custatevecCreate(&custatevec_handle)); } ~Factory() { ErrorCheck(cublasDestroy(cublas_handle)); ErrorCheck(custatevecDestroy(custatevec_handle)); } StateSpace CreateStateSpace() const { return StateSpace(cublas_handle, custatevec_handle); } Simulator CreateSimulator() const { return Simulator(cublas_handle, custatevec_handle); } cublasHandle_t cublas_handle; custatevecHandle_t custatevec_handle; }; TYPED_TEST(StateSpaceCuStateVecTest, Add) { TestAdd(qsim::Factory<TypeParam>()); } TYPED_TEST(StateSpaceCuStateVecTest, NormSmall) { TestNormSmall(qsim::Factory<TypeParam>()); } TYPED_TEST(StateSpaceCuStateVecTest, NormAndInnerProductSmall) { TestNormAndInnerProductSmall(qsim::Factory<TypeParam>()); } TYPED_TEST(StateSpaceCuStateVecTest, NormAndInnerProduct) { TestNormAndInnerProduct(qsim::Factory<TypeParam>()); } TYPED_TEST(StateSpaceCuStateVecTest, SamplingSmall) { TestSamplingSmall(qsim::Factory<TypeParam>()); } TYPED_TEST(StateSpaceCuStateVecTest, SamplingCrossEntropyDifference) { TestSamplingCrossEntropyDifference(qsim::Factory<TypeParam>()); } TYPED_TEST(StateSpaceCuStateVecTest, Ordering) { TestOrdering(qsim::Factory<TypeParam>()); } TEST(StateSpaceCuStateVecTest, MeasurementSmall) { TestMeasurementSmall(qsim::Factory<float>(), true); } TYPED_TEST(StateSpaceCuStateVecTest, MeasurementLarge) { // This test fails. // TestMeasurementLarge(qsim::Factory<TypeParam>()); } TYPED_TEST(StateSpaceCuStateVecTest, Collapse) { TestCollapse(qsim::Factory<TypeParam>()); } TEST(StateSpaceCuStateVecTest, InvalidStateSize) { TestInvalidStateSize(qsim::Factory<float>()); } TYPED_TEST(StateSpaceCuStateVecTest, BulkSetAmpl) { // Not implemented. // TestBulkSetAmplitude(qsim::Factory<TypeParam>()); } TYPED_TEST(StateSpaceCuStateVecTest, BulkSetAmplExclusion) { // Not implemented. // TestBulkSetAmplitudeExclusion(qsim::Factory<TypeParam>()); } TYPED_TEST(StateSpaceCuStateVecTest, BulkSetAmplDefault) { // Not implemented. // TestBulkSetAmplitudeDefault(factory); } } // namespace qsim int main(int argc, char** argv) { ::testing::InitGoogleTest(&argc, argv); return RUN_ALL_TESTS(); }
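Every test in this pair of files goes through the file-local Factory type, so the ownership rule it encodes is worth stating once: the cuBLAS and cuStateVec handles exist exactly as long as the Factory, and the StateSpace/Simulator it creates are built on top of those handles. The sketch below shows that usage outside the GTest fixtures; it assumes the same includes and a Factory defined as above, and the surrounding function name is made up.

// Usage sketch (assumes the Factory defined in the test above).
void runWithCuStateVec()
{
    qsim::Factory<float> factory;                   // ctor: cublasCreate + custatevecCreate
    auto state_space = factory.CreateStateSpace();  // borrows factory's handles
    auto simulator   = factory.CreateSimulator();   // borrows factory's handles
    // ... apply gates with `simulator`, sample/measure with `state_space` ...
}   // ~Factory(): cublasDestroy + custatevecDestroy -- keep it alive while these are in use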
3b85e1a0e5fd984f62090ed12f7b423cec456ec9.hip
// !!! This is a file automatically generated by hipify!!! #include <cassert> #include <cuda_device_runtime_api.h> #include <hip/driver_types.h> #include <iostream> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <mma.h> #include <hip/hip_fp16.h> #include "../util.h" #define N 128 #define M 768 #define K 3072 #define KBLOCK 2 #define CUDA_ENFORCE(x) \ do { \ auto ec = x; \ if (ec != hipSuccess) { \ std::cout << hipGetErrorName(ec) << std::endl; \ throw; \ } \ } while(false) using namespace nvcuda; __global__ void splitk(half * __restrict__ a, half * __restrict__ b, float * __restrict__ c) { int x = blockIdx.y; int y = blockIdx.x; __shared__ float spad[KBLOCK * 16 * 16]; wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> a_frag; wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::row_major> b_frag; wmma::fragment<wmma::accumulator, 16, 16, 16, float, void> c_frag; wmma::fill_fragment(c_frag, 0.0f); for (int k_inner = 0; k_inner < (K / KBLOCK); k_inner += 16) { int k = threadIdx.y * (K / KBLOCK) + k_inner; wmma::load_matrix_sync(a_frag, a + (x * 16) * K + k, K); wmma::load_matrix_sync(b_frag, b + k * M + y * 16, M); wmma::mma_sync(c_frag, a_frag, b_frag, c_frag); } wmma::store_matrix_sync(spad + 16 * 16 * threadIdx.y, c_frag, 16, wmma::mem_row_major); __syncthreads(); int workidx = 32 * threadIdx.y + threadIdx.x; int workload = (16 * 16) / (32 * KBLOCK); for (int i = 0; i < workload; ++i) { #pragma UNROLL for (int j = 1; j < KBLOCK; ++j) { spad[workidx * workload + i] += spad[j * 16 * 16 + workidx * workload + i]; } int xx = (workidx * workload + i) % 16; int yy = (workidx * workload + i) / 16; c[((x * 16) + xx) * M + (y * 16) + yy] = spad[workidx * workload + i]; } } __global__ void shared_mem(half * __restrict__ a, half * __restrict__ b, float * __restrict__ c) { int x = blockIdx.y; int y = blockIdx.x; __shared__ float spad[KBLOCK * 2 * 2 * 16 * 16]; __shared__ half aa[KBLOCK * 2 * 16 * 16]; __shared__ half bb[KBLOCK * 2 * 16 * 16]; half la[256 * KBLOCK / 32]; half lb[256 * KBLOCK / 32]; wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> a_frag[2]; wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::row_major> b_frag[2]; wmma::fragment<wmma::accumulator, 16, 16, 16, float, void> c_frag[2][2]; #pragma unroll for (int i = 0; i < 2; ++i) { #pragma unroll for (int j = 0; j < 2; ++j) { wmma::fill_fragment(c_frag[i][j], 0.0f); } } for (int k_inner = -16; k_inner < (K / KBLOCK); k_inner += 16) { if (k_inner + 16 < k_inner < (K / KBLOCK)) { int i = threadIdx.x / 16; int xx = threadIdx.x % 16; int k = threadIdx.y * (K / KBLOCK) + (k_inner + 16); // a[((x * 2 * 16) + (i * 16)):16][k:16]; // b[k:16][(y * 2 * 16 + i * 16):16]; #pragma unroll for (int yy = 0; yy < 16; yy += 8) { *reinterpret_cast<int4*>(&la[yy]) = *reinterpret_cast<int4*>(&a[(((x * 2 * 16) + (i * 16)) + xx) * K + (k + yy)]); *reinterpret_cast<int4*>(&lb[yy]) = *reinterpret_cast<int4*>(&b[(k + xx) * M + ((y * 2 * 16 + i * 16) + yy)]); //la[yy] = a[(((x * 2 * 16) + (i * 16)) + xx) * K + (k + yy)]; //lb[yy] = b[(k + xx) * M + ((y * 2 * 16 + i * 16) + yy)]; } } if (k_inner >= 0) { #pragma unroll for (int i = 0; i < 2; ++i) { int k = threadIdx.y; wmma::load_matrix_sync(a_frag[i], aa + k * 2 * 16 * 16 + i * 16 * 16, 16); wmma::load_matrix_sync(b_frag[i], bb + k * 2 * 16 * 16 + i * 16 * 16, 16); } #pragma unroll for (int i = 0; i < 2; ++i) { #pragma unroll for (int j = 0; j < 2; ++j) { wmma::mma_sync(c_frag[i][j], a_frag[i], b_frag[j], c_frag[i][j]); } } } if (k_inner + 16 < (K / KBLOCK)) 
{ __syncthreads(); // __shared__ half aa[KBLOCK][2][16][16]; int k = threadIdx.y; int i = threadIdx.x / 16; int xx = threadIdx.x % 16; #pragma unroll for (int yy = 0; yy < 16; yy += 8) { // aa[threadIdx.y][i][xx][yy] = la[i][xx][yy] *reinterpret_cast<int4*>(&aa[k * 16 * 16 * 2 + i * 16 * 16 + xx * 16 + yy]) = *reinterpret_cast<int4*>(&la[yy]); *reinterpret_cast<int4*>(&bb[k * 16 * 16 * 2 + i * 16 * 16 + xx * 16 + yy]) = *reinterpret_cast<int4*>(&lb[yy]); //aa[k * 16 * 16 * 2 + i * 16 * 16 + xx * 16 + yy] = la[yy]; //bb[k * 16 * 16 * 2 + i * 16 * 16 + xx * 16 + yy] = lb[yy]; } __syncthreads(); } } #pragma unroll for (int i = 0; i < 2; ++i) { #pragma unroll for (int j = 0; j < 2; ++j) { wmma::store_matrix_sync(spad + 2 * 2 * 16 * 16 * threadIdx.y + (i * 2 + j) * 256, c_frag[i][j], 16, wmma::mem_row_major); } } __syncthreads(); int i = threadIdx.y; int j = threadIdx.x / 16; int xx = threadIdx.x % 16; for (int yy = 0; yy < 16; yy += 4) { float4 acc = *reinterpret_cast<float4*>(&spad[(i * 2 + j) * 256 + xx * 16 + yy]); for (int k = 1; k < KBLOCK; ++k) { float4 delta = *reinterpret_cast<float4*>(&spad[k * 2 * 2 * 256 + (i * 2 + j) * 256 + xx * 16 + yy]); acc.w += delta.w; acc.x += delta.x; acc.y += delta.y; acc.z += delta.z; } *reinterpret_cast<float4*>(&c[(x * 32 + (i * 16 + xx)) * M + (y * 32 + (j * 16 + yy))]) = acc; } } half a[N * K], b[M * K]; float c[N * M], ref[N * M]; template<typename T> void print(int n, int m, const T* a) { for (int i = 0; i < n; ++i) { for (int j = 0; j < m; ++j) { if (j) std::cout << " "; std::cout << a[i * m + j]; } std::cout << std::endl; } std::cout << std::endl; } template<> void print(int n, int m, const half* a) { for (int i = 0; i < n; ++i) { for (int j = 0; j < m; ++j) { if (j) std::cout << " "; std::cout << __half2float(a[i * m + j]); } std::cout << std::endl; } std::cout << std::endl; } void compare(int n, float *c, float *ref) { for (int i = 0; i < n; ++i) { if (fabs(c[i] - ref[i]) / ref[i] > 1e-3) { std::cout << i << "\n" << c[i] << ", expect: " << ref[i] << " " << fabs(c[i] - ref[i]) / ref[i] << std::endl; throw; } } } int main() { //hipDeviceProp_t prop; //assert(hipSuccess == hipGetDeviceProperties(&prop, 0)); //std::cout << "Warp size is: " << prop.warpSize << std::endl; for (int i = 0; i < N * K; ++i) a[i] = __float2half((float)(rand() % 100) / 100.); for (int i = 0; i < K * M; ++i) b[i] = __float2half((float)(rand() % 100) / 100.); for (int i = 0; i < N; ++i) for (int j = 0; j < M; ++j) { ref[i * M + j] = 0.0; for (int ko = 0; ko < KBLOCK; ++ko) { float sub = 0.0; for (int ki = 0; ki < K / KBLOCK; ki += 16) { float sum = 0; for (int kii = 0; kii < 16; ++kii) { int k = ko * (K / KBLOCK) + ki + kii; sum += __half2float(a[i * K + k]) * __half2float(b[k * M + j]); } sub += sum; } ref[i * M + j] += sub; } } half *dev_a, *dev_b; hipMalloc(&dev_a, N * K * sizeof(half)); hipMalloc(&dev_b, M * K * sizeof(half)); hipMemcpy(dev_a, a, sizeof a, hipMemcpyHostToDevice); hipMemcpy(dev_b, b, sizeof b, hipMemcpyHostToDevice); std::cout.precision(5); //{ // memset(c, 0, sizeof(c)); // float *dev_c; // hipMalloc(&dev_c, N * M * KBLOCK * sizeof(float)); // hipMemcpy(dev_c, c, sizeof c, hipMemcpyHostToDevice); // dim3 threads(32, KBLOCK, 1); // dim3 blocks(M / 16, N / 16); // splitk<<<blocks, threads>>>(dev_a, dev_b, dev_c); // hipDeviceSynchronize(); // begin_roi(); // splitk<<<blocks, threads>>>(dev_a, dev_b, dev_c); // hipDeviceSynchronize(); // float elps = end_roi(); // std::cout << "time elps: " << elps << std::endl; // hipMemcpy(c, dev_c, sizeof c, 
hipMemcpyDeviceToHost); // compare(N * M, c, ref); // hipFree(dev_c); //} { memset(c, 0, sizeof(c)); float *dev_c; hipMalloc(&dev_c, N * M * KBLOCK * sizeof(float)); hipMemcpy(dev_c, c, sizeof c, hipMemcpyHostToDevice); dim3 threads(32, KBLOCK, 1); dim3 blocks(M / 32, N / 32); hipLaunchKernelGGL(( shared_mem), dim3(blocks), dim3(threads), 0, 0, dev_a, dev_b, dev_c); CUDA_ENFORCE(hipDeviceSynchronize()); begin_roi(); hipLaunchKernelGGL(( shared_mem), dim3(blocks), dim3(threads), 0, 0, dev_a, dev_b, dev_c); CUDA_ENFORCE(hipDeviceSynchronize()); float elps = end_roi(); std::cout << "time elps: " << elps << std::endl; std::cout << (N * M * K) / elps / 1000. << std::endl; hipMemcpy(c, dev_c, sizeof c, hipMemcpyDeviceToHost); compare(N * M, c, ref); hipFree(dev_c); } //print(N, M, a); //print(N, M, b); //print(N, K, c); //print(N, M, ref); return 0; }
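For the shared_mem kernel in this file, the static shared memory per block is easy to tally with KBLOCK = 2: spad is KBLOCK*2*2*16*16 = 2048 floats (8 KB), and aa and bb are each KBLOCK*2*16*16 = 1024 halves (2 KB), i.e. 12 KB per block in total, comfortably below the 48 KB of statically declarable shared memory per block.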
3b85e1a0e5fd984f62090ed12f7b423cec456ec9.cu
#include <cassert>
#include <cuda_device_runtime_api.h>
#include <driver_types.h>
#include <iostream>
#include <cuda.h>
#include <cuda_runtime.h>
#include <mma.h>
#include <cuda_fp16.h>
#include "../util.h"

#define N 128
#define M 768
#define K 3072
#define KBLOCK 2

#define CUDA_ENFORCE(x) \
  do { \
    auto ec = x; \
    if (ec != cudaSuccess) { \
      std::cout << cudaGetErrorName(ec) << std::endl; \
      throw; \
    } \
  } while(false)

using namespace nvcuda;

// Split-K GEMM: each of the KBLOCK warps accumulates a partial product over K/KBLOCK,
// the partial 16x16 tiles are staged in shared memory and reduced before the write-back to c.
__global__ void splitk(half * __restrict__ a, half * __restrict__ b, float * __restrict__ c) {
  int x = blockIdx.y;
  int y = blockIdx.x;
  __shared__ float spad[KBLOCK * 16 * 16];
  wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> a_frag;
  wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::row_major> b_frag;
  wmma::fragment<wmma::accumulator, 16, 16, 16, float, void> c_frag;
  wmma::fill_fragment(c_frag, 0.0f);
  for (int k_inner = 0; k_inner < (K / KBLOCK); k_inner += 16) {
    int k = threadIdx.y * (K / KBLOCK) + k_inner;
    wmma::load_matrix_sync(a_frag, a + (x * 16) * K + k, K);
    wmma::load_matrix_sync(b_frag, b + k * M + y * 16, M);
    wmma::mma_sync(c_frag, a_frag, b_frag, c_frag);
  }
  wmma::store_matrix_sync(spad + 16 * 16 * threadIdx.y, c_frag, 16, wmma::mem_row_major);
  __syncthreads();
  int workidx = 32 * threadIdx.y + threadIdx.x;
  int workload = (16 * 16) / (32 * KBLOCK);
  for (int i = 0; i < workload; ++i) {
#pragma unroll
    for (int j = 1; j < KBLOCK; ++j) {
      spad[workidx * workload + i] += spad[j * 16 * 16 + workidx * workload + i];
    }
    int xx = (workidx * workload + i) % 16;
    int yy = (workidx * workload + i) / 16;
    c[((x * 16) + xx) * M + (y * 16) + yy] = spad[workidx * workload + i];
  }
}

// Same split-K scheme, but each block produces a 32x32 tile of c (2x2 WMMA tiles) and
// pipelines the A/B tiles through registers (la/lb) into shared memory (aa/bb).
__global__ void shared_mem(half * __restrict__ a, half * __restrict__ b, float * __restrict__ c) {
  int x = blockIdx.y;
  int y = blockIdx.x;
  __shared__ float spad[KBLOCK * 2 * 2 * 16 * 16];
  __shared__ half aa[KBLOCK * 2 * 16 * 16];
  __shared__ half bb[KBLOCK * 2 * 16 * 16];
  half la[256 * KBLOCK / 32];
  half lb[256 * KBLOCK / 32];
  wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> a_frag[2];
  wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::row_major> b_frag[2];
  wmma::fragment<wmma::accumulator, 16, 16, 16, float, void> c_frag[2][2];
#pragma unroll
  for (int i = 0; i < 2; ++i) {
#pragma unroll
    for (int j = 0; j < 2; ++j) {
      wmma::fill_fragment(c_frag[i][j], 0.0f);
    }
  }
  for (int k_inner = -16; k_inner < (K / KBLOCK); k_inner += 16) {
    if (k_inner + 16 < (K / KBLOCK)) { // prefetch guard: only load the next K-tile if one exists
      int i = threadIdx.x / 16;
      int xx = threadIdx.x % 16;
      int k = threadIdx.y * (K / KBLOCK) + (k_inner + 16);
      // a[((x * 2 * 16) + (i * 16)):16][k:16];
      // b[k:16][(y * 2 * 16 + i * 16):16];
#pragma unroll
      for (int yy = 0; yy < 16; yy += 8) {
        *reinterpret_cast<int4*>(&la[yy]) = *reinterpret_cast<int4*>(&a[(((x * 2 * 16) + (i * 16)) + xx) * K + (k + yy)]);
        *reinterpret_cast<int4*>(&lb[yy]) = *reinterpret_cast<int4*>(&b[(k + xx) * M + ((y * 2 * 16 + i * 16) + yy)]);
        //la[yy] = a[(((x * 2 * 16) + (i * 16)) + xx) * K + (k + yy)];
        //lb[yy] = b[(k + xx) * M + ((y * 2 * 16 + i * 16) + yy)];
      }
    }
    if (k_inner >= 0) {
#pragma unroll
      for (int i = 0; i < 2; ++i) {
        int k = threadIdx.y;
        wmma::load_matrix_sync(a_frag[i], aa + k * 2 * 16 * 16 + i * 16 * 16, 16);
        wmma::load_matrix_sync(b_frag[i], bb + k * 2 * 16 * 16 + i * 16 * 16, 16);
      }
#pragma unroll
      for (int i = 0; i < 2; ++i) {
#pragma unroll
        for (int j = 0; j < 2; ++j) {
          wmma::mma_sync(c_frag[i][j], a_frag[i], b_frag[j], c_frag[i][j]);
        }
      }
    }
    if (k_inner + 16 < (K / KBLOCK)) {
      __syncthreads();
      // __shared__ half aa[KBLOCK][2][16][16];
      int k = threadIdx.y;
      int i = threadIdx.x / 16;
      int xx = threadIdx.x % 16;
#pragma unroll
      for (int yy = 0; yy < 16; yy += 8) {
        // aa[threadIdx.y][i][xx][yy] = la[i][xx][yy]
        *reinterpret_cast<int4*>(&aa[k * 16 * 16 * 2 + i * 16 * 16 + xx * 16 + yy]) = *reinterpret_cast<int4*>(&la[yy]);
        *reinterpret_cast<int4*>(&bb[k * 16 * 16 * 2 + i * 16 * 16 + xx * 16 + yy]) = *reinterpret_cast<int4*>(&lb[yy]);
        //aa[k * 16 * 16 * 2 + i * 16 * 16 + xx * 16 + yy] = la[yy];
        //bb[k * 16 * 16 * 2 + i * 16 * 16 + xx * 16 + yy] = lb[yy];
      }
      __syncthreads();
    }
  }
#pragma unroll
  for (int i = 0; i < 2; ++i) {
#pragma unroll
    for (int j = 0; j < 2; ++j) {
      wmma::store_matrix_sync(spad + 2 * 2 * 16 * 16 * threadIdx.y + (i * 2 + j) * 256, c_frag[i][j], 16, wmma::mem_row_major);
    }
  }
  __syncthreads();
  int i = threadIdx.y;
  int j = threadIdx.x / 16;
  int xx = threadIdx.x % 16;
  for (int yy = 0; yy < 16; yy += 4) {
    float4 acc = *reinterpret_cast<float4*>(&spad[(i * 2 + j) * 256 + xx * 16 + yy]);
    for (int k = 1; k < KBLOCK; ++k) {
      float4 delta = *reinterpret_cast<float4*>(&spad[k * 2 * 2 * 256 + (i * 2 + j) * 256 + xx * 16 + yy]);
      acc.w += delta.w;
      acc.x += delta.x;
      acc.y += delta.y;
      acc.z += delta.z;
    }
    *reinterpret_cast<float4*>(&c[(x * 32 + (i * 16 + xx)) * M + (y * 32 + (j * 16 + yy))]) = acc;
  }
}

half a[N * K], b[M * K];
float c[N * M], ref[N * M];

template<typename T>
void print(int n, int m, const T* a) {
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < m; ++j) {
      if (j) std::cout << " ";
      std::cout << a[i * m + j];
    }
    std::cout << std::endl;
  }
  std::cout << std::endl;
}

template<>
void print(int n, int m, const half* a) {
  for (int i = 0; i < n; ++i) {
    for (int j = 0; j < m; ++j) {
      if (j) std::cout << " ";
      std::cout << __half2float(a[i * m + j]);
    }
    std::cout << std::endl;
  }
  std::cout << std::endl;
}

void compare(int n, float *c, float *ref) {
  for (int i = 0; i < n; ++i) {
    if (fabs(c[i] - ref[i]) / ref[i] > 1e-3) {
      std::cout << i << "\n" << c[i] << ", expect: " << ref[i] << " " << fabs(c[i] - ref[i]) / ref[i] << std::endl;
      throw;
    }
  }
}

int main() {
  //cudaDeviceProp prop;
  //assert(cudaSuccess == cudaGetDeviceProperties(&prop, 0));
  //std::cout << "Warp size is: " << prop.warpSize << std::endl;
  for (int i = 0; i < N * K; ++i)
    a[i] = __float2half((float)(rand() % 100) / 100.);
  for (int i = 0; i < K * M; ++i)
    b[i] = __float2half((float)(rand() % 100) / 100.);
  // CPU reference, accumulated in the same split-K order as the kernels.
  for (int i = 0; i < N; ++i)
    for (int j = 0; j < M; ++j) {
      ref[i * M + j] = 0.0;
      for (int ko = 0; ko < KBLOCK; ++ko) {
        float sub = 0.0;
        for (int ki = 0; ki < K / KBLOCK; ki += 16) {
          float sum = 0;
          for (int kii = 0; kii < 16; ++kii) {
            int k = ko * (K / KBLOCK) + ki + kii;
            sum += __half2float(a[i * K + k]) * __half2float(b[k * M + j]);
          }
          sub += sum;
        }
        ref[i * M + j] += sub;
      }
    }
  half *dev_a, *dev_b;
  cudaMalloc(&dev_a, N * K * sizeof(half));
  cudaMalloc(&dev_b, M * K * sizeof(half));
  cudaMemcpy(dev_a, a, sizeof a, cudaMemcpyHostToDevice);
  cudaMemcpy(dev_b, b, sizeof b, cudaMemcpyHostToDevice);
  std::cout.precision(5);
  //{
  //  memset(c, 0, sizeof(c));
  //  float *dev_c;
  //  cudaMalloc(&dev_c, N * M * KBLOCK * sizeof(float));
  //  cudaMemcpy(dev_c, c, sizeof c, cudaMemcpyHostToDevice);
  //  dim3 threads(32, KBLOCK, 1);
  //  dim3 blocks(M / 16, N / 16);
  //  splitk<<<blocks, threads>>>(dev_a, dev_b, dev_c);
  //  cudaDeviceSynchronize();
  //  begin_roi();
  //  splitk<<<blocks, threads>>>(dev_a, dev_b, dev_c);
  //  cudaDeviceSynchronize();
  //  float elps = end_roi();
  //  std::cout << "time elps: " << elps << std::endl;
  //  cudaMemcpy(c, dev_c, sizeof c, cudaMemcpyDeviceToHost);
  //  compare(N * M, c, ref);
  //  cudaFree(dev_c);
  //}
  {
    memset(c, 0, sizeof(c));
    float *dev_c;
    cudaMalloc(&dev_c, N * M * KBLOCK * sizeof(float));
    cudaMemcpy(dev_c, c, sizeof c, cudaMemcpyHostToDevice);
    dim3 threads(32, KBLOCK, 1);
    dim3 blocks(M / 32, N / 32);
    shared_mem<<<blocks, threads>>>(dev_a, dev_b, dev_c);
    CUDA_ENFORCE(cudaDeviceSynchronize());
    begin_roi();
    shared_mem<<<blocks, threads>>>(dev_a, dev_b, dev_c);
    CUDA_ENFORCE(cudaDeviceSynchronize());
    float elps = end_roi();
    std::cout << "time elps: " << elps << std::endl;
    std::cout << (N * M * K) / elps / 1000. << std::endl;
    cudaMemcpy(c, dev_c, sizeof c, cudaMemcpyDeviceToHost);
    compare(N * M, c, ref);
    cudaFree(dev_c);
  }
  //print(N, M, a);
  //print(N, M, b);
  //print(N, K, c);
  //print(N, M, ref);
  return 0;
}
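// The two kernels above assume familiarity with the WMMA fragment API. As an illustrative
// baseline only (not part of the original benchmark), the sketch below is a minimal
// single-warp WMMA GEMM for the same row-major half inputs and float output: one warp per
// 16x16 tile of c, no split-K and no shared-memory staging. The name wmma_naive and the
// launch shown in the trailing comment are assumptions, not code from the original file.
__global__ void wmma_naive(const half * __restrict__ a, const half * __restrict__ b,
                           float * __restrict__ c, int m, int k_dim) {
  // One warp (32 threads) per block; blockIdx picks which 16x16 tile of c this warp owns.
  int tile_row = blockIdx.y * 16;  // row offset into c (and a)
  int tile_col = blockIdx.x * 16;  // column offset into c (and b)
  wmma::fragment<wmma::matrix_a, 16, 16, 16, half, wmma::row_major> a_frag;
  wmma::fragment<wmma::matrix_b, 16, 16, 16, half, wmma::row_major> b_frag;
  wmma::fragment<wmma::accumulator, 16, 16, 16, float> acc;
  wmma::fill_fragment(acc, 0.0f);
  for (int k = 0; k < k_dim; k += 16) {
    // Leading dimensions are the full row lengths: k_dim for a, m for b.
    wmma::load_matrix_sync(a_frag, a + tile_row * k_dim + k, k_dim);
    wmma::load_matrix_sync(b_frag, b + k * m + tile_col, m);
    wmma::mma_sync(acc, a_frag, b_frag, acc);
  }
  wmma::store_matrix_sync(c + tile_row * m + tile_col, acc, m, wmma::mem_row_major);
}
// Possible launch for the same problem size:
//   wmma_naive<<<dim3(M / 16, N / 16), 32>>>(dev_a, dev_b, dev_c, M, K);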
823f5bef0ba9c28b17d51767d876a8715a301c0a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<sys/mman.h> #include<assert.h> #include<iostream> #include<string.h> #include "../common.h" #include "types.h" //#include<unordered_map> //#include<cuda.h> __device__ int CTALB = 0; //the lower bound of CTA id you want to profile __device__ int CTAUB = 99999; //the upper bound of CTA id you want to profile __device__ int CONSTANCE = 128; __device__ int aliveCTA = 0; //__device__ std::unordered_map< std::string, long> blockmap; //__device__ std::vector<int> testt; //this DOESN'T work //"dynamic initialization doesn't work for __device__ __device__ bool VERBOSE=false; __device__ bool CALLPATHVERBOSE=false; extern "C" { //so that no mangling for function names __device__ void takeString(void* , int); __device__ void RetKernel(void*); __device__ void passBasicBlock(int, int, int, int, void*); __device__ void print5(void*, int, int, int, int, void*); __device__ void print4(void*); __device__ void callFunc(void* , void* , int , int, void*); __device__ int getContextID(void*); __device__ void* InitKernel(void*); __device__ void print1(int); } __device__ unsigned long long ccnntt = 1; //the very first element is reserved for metadata __device__ unsigned long long bbccnntt = 1; //the very first element is reserved for metadata __device__ int* buffer_oN_DeViCe; //should be multiples of 6 //__device__ int* globalCallStack; //__device__ CallSite_t* globalCallStack; //__device__ int* stackHeight; __device__ char funcDic[UNIQUE_FUNC_DEVICE][FUNC_NAME_LEN]; //maintains 100 unique functions and 31 chars for each __device__ int dicHeight = 0; // size of funcDic[][] __device__ CallSite_t contextDic[TOTAL_NUMBER_CONTEXT][CALL_PATH_LEN_DEVICE]; //maintains 100 unique contexts, each has up to 10 function __device__ int cHeight = 0; /* #define MAX_NUM_CTAS 1024 __device__ volatile int arrIn[MAX_NUM_CTAS];//for inter-CTA sync __device__ volatile int arrOut[MAX_NUM_CTAS];//for inter-CTA sync __device__ void __sync_ctas(int goalVal) //, volatile int *arrIn, volatile int *arrOut) { // assuming there the number of threads/CTA is greater than the number of CTAs in the entire grid // otherwise, dead loop // this assumption hurts int nBlockNum = gridDim.x * gridDim.y; int bid = blockIdx.x* gridDim.y + blockIdx.y; int tid = threadIdx.x * blockDim.y + threadIdx.y; if (threadIdx.x + threadIdx.y ==0) arrIn[bid] = goalVal; if (bid==1) { if ( tid < nBlockNum) { while( arrIn[tid] != goalVal) {} } __syncthreads(); if (tid < nBlockNum) arrOut[tid] = goalVal; } if (tid==0) while (arrOut[bid]!=goalVal) {} __syncthreads(); if ( tid==0 ) printf("d: CTA %d sync-ed\n", bid); } */ /* //this is from a published paper. //but it doesn't work. // probably because only one CTA is allowed on one SM, otherwise errors. 
__device__ void __sync_ctas(int goalVal) { if (threadIdx.x + threadIdx.y ==0) { int id = atomicAdd( (int*)&g_mutex,1); printf("d: CTA (%d, %d) got id=%d, goal is %d\n", blockIdx.x, blockIdx.y, id, goalVal); return; while (g_mutex != goalVal) {}//busy wait } __syncthreads(); } */ __device__ void mystrcpy(char* dst, char* src) { int cnt = 0; while ( src[cnt] != '\0' && cnt < FUNC_NAME_LEN-1) //never exceeds this 30 limit { dst[cnt] = src[cnt]; cnt++; } dst[cnt] = '\0'; return; } __device__ bool mystrcmp(char* dst, char* src) { int cnt = 0; while ( cnt < FUNC_NAME_LEN-1 ) //never exceeds this 30 limit { if ( dst[cnt] == '\0' && src[cnt] == '\0') return true; if (dst[cnt] != src[cnt]) return false; cnt++; } return true; } __device__ int getFuncID(char* func) { if (dicHeight == 0 ) //the very first function { mystrcpy(funcDic[0], func); // printf("src: %s\n", func); // printf("dst: %s\n", funcDic[0]); dicHeight ++; return 0; } // printf("d: height = %d\n", dicHeight); for(int i=0; i < dicHeight; i++) { bool found = mystrcmp( funcDic[i], func ); // printf("d:: compare this pair: %s: \t%s \tVS\t %s\n", found?"yes":"no", funcDic[i], func); if(found) return i; } //return -1;//DEBUG //if you are here, means we have a new func mystrcpy(funcDic[dicHeight], func); dicHeight ++; return dicHeight-1; } __device__ void updateCallStack(int caller, int callee, short sline, short scolm, int bid, int tid, void* p_stackzone) { int offset = bid*blockDim.x*blockDim.y+tid; // CallSite_t* callStack = (CallSite_t*) (&(globalCallStack[offset*CALL_PATH_LEN_DEVICE])); // int &height = stackHeight[offset]; CallSite_t* callStack = (CallSite_t*) p_stackzone; int bytesPerThread = (CALL_PATH_LEN_DEVICE*sizeof(CallSite_t)); int* temp = (int*)( (char*)p_stackzone + bytesPerThread+16); //offset by 16 to be safe, need to be consistent int &height = *temp; // int &h11 = * (int*)( (char*)p_stackzone + bytesPerThread); // if(CALLPATHVERBOSE) // printf( ":::::::: height = %d :::::::::\n", height); // assert(height != 1 && "stack height != 1") ; //return;//DUBUG if (height==0) { // if (CALLPATHVERBOSE) // printf("first ever. 
tid=%d\n", tid); callStack[0].id = caller; callStack[0].sline = sline; callStack[0].scolm = scolm; callStack[1].id = callee; callStack[1].sline = -1; callStack[1].scolm = -1; height=2; return; } int p_caller = callStack[height-2].id; int p_callee = callStack[height-1].id; if ( p_caller == caller && p_callee == callee) { //repeated call // if (CALLPATHVERBOSE) // printf("repeated call\n"); callStack[height-2].sline = sline; callStack[height-2].scolm = scolm; return; } else if ( p_caller == caller && p_callee != callee) { //the same parent called a different function, simply update the callee // if (CALLPATHVERBOSE) // printf("same caller different callee\n"); callStack[height-1].id = callee; callStack[height-2].sline = sline; callStack[height-2].scolm = scolm; return; } else if ( p_callee == caller) { // a typical call path // if (CALLPATHVERBOSE) // printf("call sequence\n"); callStack[height-1].sline = sline; callStack[height-1].scolm = scolm; callStack[height].id = callee; callStack[height].sline = -1; callStack[height].scolm = -1; height++; return; } // return;//DUBUG // if (CALLPATHVERBOSE) // printf("the caller exists deeply in the stack\n"); // the caller exists deeply in the stack for (int i=height-1; i>=0; i--) { if ( callStack[i].id == caller) { height = i+1; callStack[i].id = callee; callStack[i].sline = -1; callStack[i].scolm = -1; callStack[i].sline = sline; callStack[i].scolm = scolm; return; } } // the caller exists deeply in the stack // assert( (0==-1) && "!! undefined things happeened here\n"); } /* __device__ void printCallStack(int bid, int tid) { int offset = bid*blockDim.x*blockDim.y+tid; CallSite_t* callStack = (CallSite_t*) (&(globalCallStack[offset*CALL_PATH_LEN_DEVICE])); int height = stackHeight[offset]; printf(" d::: current call stack height: %d @ bid = %d, tid = %d = (%d,%d,%d,%d)\n", height, bid, tid, threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y); if (height<1) return; for (int i=0; i<height; i++) printf(" %d: call site: %d, (%d, %d)\n", i, callStack[i].id, callStack[i].sline, callStack[i].scolm ); } */ __device__ void* InitKernel(void* ptrhead) { //TODO: if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs return NULL; int tid = threadIdx.x + threadIdx.y *blockDim.x; int bid = blockIdx.x + blockIdx.y * gridDim.x; int global_tid = tid + bid*blockDim.x*blockDim.y; int num_cta = gridDim.x*gridDim.y; int num_thread = blockDim.x*blockDim.y; __shared__ char* handler; //this pointer is for maintaing stack/callpath __syncthreads(); int bytesPerThread = sizeof(CallSite_t)*CALL_PATH_LEN_DEVICE + 32;// I put 32 just to be safe if ( tid ==0 ) { handler = (char*) malloc( blockDim.x*blockDim.y*bytesPerThread); assert( handler!=NULL); // printf(" CTA \t%d\tgrabs memroy\t%p\n", bid, handler); int rank = atomicAdd( &aliveCTA, 1); printf(" CTA\t%d\tonline, total alive\t%d\n", bid, rank); if (rank==0) { // if (tid%32==0) { // buffer_oN_DeViCe = (int*)ptrhead; printf("\nd: InitKernel...\n"); printf("d: buffer pointer: %p\n", buffer_oN_DeViCe); printf("d: size of kernel grid: %d, %d\t%d, %d\n", gridDim.x, gridDim.y, blockDim.x, blockDim.y); } } if (rank == 1) buffer_oN_DeViCe = (int*)ptrhead; } __syncthreads(); void* stackzone = (void*)( handler + bytesPerThread*tid ); // if (tid==1) // { // stackHeight = (int*) ptr3; // globalCallStack = (CallSite_t*)ptr2; // buffer_oN_DeViCe = (int*)ptrhead; //} // if (tid ==0) // printf("d: DEBUG: here1 from CTA %d\n", bid); return stackzone; // if (threadIdx.x + 
threadIdx.y + blockIdx.x + blockIdx.y == 0) // vunlunteer to do the initialization /* done2 = atomicAdd(&done2, 1); if ( stackHeight==NULL && done2 == 2) { printf("I will 2nd malloc() %ld by (%d, %d)\n", numthreads*sizeof(int) , bid, tid); stackHeight = (int*)malloc( sizeof(int) * numthreads); printf(" 2nd malloc() done by (%d, %d)\n", bid, tid); } else{ holdon( bid*10000); } __syncthreads(); holdon(bid*10000); // done1 = atomicAdd(&done1, 1); // if ( globalCallStack==NULL && tid==0) // { // printf("I will malloc() %ld by (%d, %d)\n", numthreads*sizeof(CallSite_t*)* UNIQUE_FUNC_DEVICE ,bid, tid); // globalCallStack = (CallSite_t**) malloc(sizeof(CallSite_t*) * numthreads); // // } if ( globalCallStack[global_tid] ==NULL) { //DEBUG: there are still repeated allocation from the SAME thread globalCallStack[global_tid] = (CallSite_t*) malloc(UNIQUE_FUNC_DEVICE* sizeof(CallSite_t) ); printf("I do it by myself %ld @ %p by (%d, %d)=%d\n", sizeof(CallSite_t*)* UNIQUE_FUNC_DEVICE, globalCallStack[global_tid], bid, tid, global_tid); } __syncthreads(); stackHeight[global_tid] = 0; __syncthreads(); printf("__ back from InitKernel: %d, %d\n", bid, tid); */ //Du: July 10 /* else { //wait and see int cnt = 0; while ( globalCallStack==NULL ) cnt++; while ( stackHeight==NULL) cnt++; } */ /* int mask = __ballot(1); int leader = __ffs(mask)-1; if( globalCallStack==NULL && leader == threadIdx.x%32) { long numthreads = gridDim.x*gridDim.y*blockDim.x*blockDim.y*32; printf("I will malloc() %ld by (%d, %d)\n", numthreads*sizeof(CallSite_t*)* UNIQUE_FUNC_DEVICE , threadIdx.x, threadIdx.y); globalCallStack = (CallSite_t**) malloc(sizeof(CallSite_t*) * numthreads); for( int i = 0; i<numthreads; i++) globalCallStack[i] = (CallSite_t*) malloc(UNIQUE_FUNC_DEVICE* sizeof(CallSite_t) ); stackHeight = (int*)malloc( sizeof(int) * numthreads); for (int i=0; i<numthreads; i++) stackHeight[i] = 0; } */ } __device__ void callFunc(void* er, void* ee, int sline, int scolm, void* p_stackzone) { // if (threadIdx.x != 0 || blockIdx.x != 0 || threadIdx.y != 0 || blockIdx.y != 0) return; //DEBUG // printf("d::%d\n", sline ); // printf("d::%s\n", (char*)er ); // if (CALLPATHVERBOSE) // printf("d:::: >>>>\n"); int id1 = getFuncID( (char*)er ); int id2 = getFuncID( (char*)ee ); // if (CALLPATHVERBOSE) // { // printf("d:::: ID: %d :%s\n", id1, (char*)er ); // printf("d:::: ID: %d :%s\n", id2, (char*)ee ); // } int tid = threadIdx.y * blockDim.x + threadIdx.x; int bid = blockIdx.x + blockIdx.y * gridDim.x; int global_tid = bid * (blockDim.x * blockDim.y) + tid; updateCallStack(id1, id2, (short) sline, (short) scolm, bid, tid, p_stackzone); // printCallStack(global_tid); // if (CALLPATHVERBOSE) // printf("d:::: <<<<\n"); } /* __device__ void takeString(void* p, int action) { if (threadIdx.x != 0 || blockIdx.x != 0 || threadIdx.y != 0 || blockIdx.y != 0) return; if (VERBOSE) { if (action==1) printf("d: caller: %s\n",(char*)p); else if (action==2) printf("d: callee: %s\n",(char*)p); else if (action==3) printf("d: return: %s\n",(char*)p); else printf("d: undefined: %s\n",(char*)p); } return; } */ __device__ void cxtprint(int id) { if (id<0) return; printf("d::: requested context id: %d out of %d\n", id, cHeight); for (int i = 0; i< CALL_PATH_LEN_DEVICE && contextDic[id][i].id != -1 ; i++) { printf("d::::::: current context [%d][%d]: %d, %d, %d\n", id, i, contextDic[id][i].id, contextDic[id][i].sline, contextDic[id][i].scolm) ; } return; } __device__ void cxtcpy( CallSite_t* dst, CallSite_t* src , int height) //context copy { int i; for( i=0; 
i< height; i++) dst[i] = src[i]; // assert(i<CALL_PATH_LEN_DEVICE && "code: e56: call stack too deep"); dst[i].id = -1; //to mark the ending of one context return; } __device__ bool cxtcmp( CallSite_t* dst, CallSite_t* src, int height) { for( int i=0; i< height; i++) if ( dst[i].id == src[i].id ) // && dst[i].id == src[i].id && continue; else return false; return true; } __device__ int getContextID(void* p_stackzone) { //shared by all treahds, there are races //you can manually to take care of serialization? // if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y != 0 ) return -2; //DEBUG int bid = blockIdx.x + blockIdx.y * gridDim.x; int tid = threadIdx.y * blockDim.x + threadIdx.x; // int offset = bid*blockDim.x*blockDim.y+tid; // CallSite_t* callStack = (CallSite_t*) (&(globalCallStack[offset*CALL_PATH_LEN_DEVICE])); // int &height = stackHeight[offset]; CallSite_t* callStack = (CallSite_t*) p_stackzone; int bytesPerThread = (CALL_PATH_LEN_DEVICE*sizeof(CallSite_t)); int* temp = (int*)( (char*)p_stackzone + bytesPerThread+16); //offset by 8 to be safe, need to be consistent int &height = *temp; if ( height ==0) //it is possible that call stack is still empty return -1; if (cHeight==0)// the first ever context in the dic { // if (CALLPATHVERBOSE) // printf("d::: the very first context in dic, depth=%d\n", height); cxtcpy(contextDic[0], callStack, height ); cHeight=1; return 0; } // something already exists // if (CALLPATHVERBOSE) // { // printf("d::: going to match existing items in context dic\n"); // printf("d::: number of existing contexts: %d\n", cHeight); // } int i; for (i = 0; i<cHeight; i++) { if ( cxtcmp( contextDic[i], callStack, height ) ) //yes, found { // if (CALLPATHVERBOSE) // printf("d::: matched, returning %d, depth=%d\n",i, height); return i; } } // if (CALLPATHVERBOSE) // printf("d::: not found, value of i: %d\n", i); // assert (i< TOTAL_NUMBER_CONTEXT && "code:e34: Not enough space for Context Dic, index i"); // printCallStack(); cxtcpy(contextDic[i], callStack, height ); cHeight = i+1; // assert (cHeight < TOTAL_NUMBER_CONTEXT && "code:e41: Not enough space for Context Dic, cHeight"); // if (CALLPATHVERBOSE) // printf("d::: inserted new one: id = %d, depth=%d\n", i, height); return i; } __device__ void passBasicBlock(int tmp /*pointer to block name*/, int action, int sline, int scolm, void* p_stackzone) { if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs return; int map = __ballot(1); int numActive = __popc(map); if ( buffer_oN_DeViCe == NULL) return; if (numActive==32) { //then choose one thread to write numbers int tid = threadIdx.x + threadIdx.y *blockDim.x; if (tid%32==0) { //do the writing // printf("I will write for my warp tid=(%d, %d)\n", threadIdx.x, threadIdx.y); int bid = atomicAdd(&bbccnntt, 1); unsigned long long key=0; BBlog_t* bblog = (BBlog_t*) buffer_oN_DeViCe; bblog[bid].key = key; bblog[bid].tidx = (short)threadIdx.x; bblog[bid].tidy = (short)threadIdx.y; bblog[bid].bidx = (short)blockIdx.x; bblog[bid].bidy = (short)blockIdx.y; bblog[bid].sline = sline; bblog[bid].scolm = scolm; bblog[bid].cid = getContextID(p_stackzone); } } else { //every thread needs to write //printf("I will write for my self tid=(%d, %d)\n", threadIdx.x, threadIdx.y); int bid = atomicAdd(&bbccnntt, 1); unsigned long long key=0; BBlog_t* bblog = (BBlog_t*) buffer_oN_DeViCe; bblog[bid].key = key; bblog[bid].tidx = (short)threadIdx.x; bblog[bid].tidy = (short)threadIdx.y; bblog[bid].bidx = 
(short)blockIdx.x; bblog[bid].bidy = (short)blockIdx.y; bblog[bid].sline = sline; bblog[bid].scolm = scolm; bblog[bid].cid = getContextID(p_stackzone); } return; } /* __device__ void passBasicBlock(int tmp , int action, int sline, int scolm, void* p_stackzone) //__device__ void passBasicBlock(void* p , int action, int sline, int scolm, void* p_stackzone) { if ( buffer_oN_DeViCe == NULL) return; assert ( (bbccnntt < BUFFERSIZE/24 - 128) && "code: e317: too many entries to the buffer" ); //DO NOT COMMENT OUT if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs return; // if (threadIdx.x + blockIdx.x + threadIdx.y + blockIdx.y == 0) // getFuncID( (char*)p); //DEBUG // printf("d: basic block: %s \ttid: (%d, %d)\n", str, threadIdx.x, threadIdx.y) ; int bid = atomicAdd(&bbccnntt, 1); if (bid > BUFFERSIZE/sizeof(BBlog_t) - 128) //overflow protection return; // for(int i=0; *(str+i) != 0; i++) // { // printf("%c", *(str+i) ); // } // printf("\n"); unsigned long long key=0; int cnt = 0; long long factor = 1; char* str = (char*)p; for(int i=0; *(str+i) != 0; i++) { int ascii = (int)(*(str+i)) ; if (ascii<48 || ascii > 123) continue; key += ascii*factor; factor *= CONSTANCE; // printf("%d\t", (int)(*(str+i)) ); // printf("key of %s is \t %llu\n", str, key); } // printf("key of %s is \t %llu\n", str, key); // printf("\n"); BBlog_t* bblog = (BBlog_t*) buffer_oN_DeViCe; // bblog[bid].key = key; bblog[bid].tidx = (short)threadIdx.x; bblog[bid].tidy = (short)threadIdx.y; bblog[bid].bidx = (short)blockIdx.x; bblog[bid].bidy = (short)blockIdx.y; bblog[bid].sline = sline; bblog[bid].scolm = scolm; // bblog[bid].cid = getContextID(p_stackzone); //printf("d:: context ID: %d\n", bblog[bid].cid); // if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0 ) // printf("d:: context ID= %d\n", bblog[bid].cid); return; } */ __device__ void storeLines(void* p, short size/*bytes*/, short line, short colmn, short op /*load or store*/, void* p_stackzone) { if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs return; int map = __ballot(1); int numActive = __popc(map); if ( ccnntt > (int)(((long)BUFFERSIZE)/24) - 128*100) return; //DEBUG assert ( (ccnntt < BUFFERSIZE/24 - 128) && "code: e31: too many entries to the buffer"); //DO NOT COMMENT OUT int bid = atomicAdd(&ccnntt, 1); //d_trace[bid].bidx = blockIdx.x; //d_trace[bid].tidx = threadIdx.x; //d_trace[bid].ea = p; //d_trace[bid].bytes = size; //printf(" d : bid = %d from (%d,%d) (%d,%d) \n", bid, blockIdx.x, threadIdx.x, blockIdx.y, threadIdx.y); if (buffer_oN_DeViCe==NULL) return; if( true) { int tid = threadIdx.x + threadIdx.y *blockDim.x; if ( tid%32==0 || true) { short* buffer_oN_DeViCe_short = (short*) buffer_oN_DeViCe; long* buffer_oN_DeViCe_long = (long*) buffer_oN_DeViCe; buffer_oN_DeViCe_short[bid*12+0] = (short)blockIdx.x; buffer_oN_DeViCe_short[bid*12+1] = (short)blockIdx.y; buffer_oN_DeViCe_short[bid*12+2] = (short)threadIdx.x; buffer_oN_DeViCe_short[bid*12+3] = (short)threadIdx.y; buffer_oN_DeViCe_long[bid*3+1] = (long)p; buffer_oN_DeViCe_short[bid*12+8] = size; buffer_oN_DeViCe_short[bid*12+9] = line; buffer_oN_DeViCe_short[bid*12+10] = colmn; buffer_oN_DeViCe_short[bid*12+11] = op; getContextID(p_stackzone); } } } /* __device__ void dumpLines(void) { if (threadIdx.x != 0 || blockIdx.x != 0 || threadIdx.y != 0 || blockIdx.y != 0) return; int ii; for(ii=1; ii< ccnntt; ii=ii+6) { // printf("d: %d Bytes at %p by 
(%d, %d)\n", buffer_oN_DeViCe[ii*6+4], buffer_oN_DeViCe[ii*6+2], buffer_oN_DeViCe[ii*6], buffer_oN_DeViCe[ii*6+1] ); } // printf("\n" ); // const char* ss = "this is the end"; // void *ps = ss; // takeString(ps); //Or, this also works. // char s[200] = "this is the end"; void *ps = &(s); takeString(s); // printf("try mmap\n" ); // void* ptr = mmap(NULL, 1000, PROT_READ | PROT_WRITE| PROT_EXEC, MAP_SHARED, -1, 0); // printf("%p\n",ptr ); } */ __device__ void print1(int a) { if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0) printf("d: print1: %d\n", a); return; if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0 && VERBOSE) { if (a==1) printf("d: load by CTA (%d,%d)\n", blockIdx.x, blockIdx.y); else if (a==2) printf("d: store by CTA (%d,%d)\n", blockIdx.x, blockIdx.y); else printf("d: !!! undefined !!! \n" ); } } /* __device__ void print2() { if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0 && VERBOSE) printf("d: store by CTA (%d,%d)\n", blockIdx.x, blockIdx.y); } */ __device__ void print3(int line, int col) { return; if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0 && VERBOSE) printf("d: source line: %d\t column: %d by CTA (%d,%d)\n", line, col, blockIdx.x, blockIdx.y); } __device__ void print4(void* p) { //if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0 && VERBOSE) printf("d: print4: %p\n", p); } __device__ void print5(void* p, int bits, int sline, int scolm, int op, void* p_stackzone) { // if ( (blockIdx.x + blockIdx.y* gridDim.x) * (blockDim.x * blockDim.y) >= 32*128) // no more than 128 warps // return; // printf("d: ea: %p by (%d,%d) (%d,%d), CTA id = %d\n",p, blockIdx.x, threadIdx.x, blockIdx.y, threadIdx.y , (blockIdx.x + blockIdx.y* gridDim.x)); if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs return; storeLines(p, (short)(bits/8), (short)sline, (short) scolm, (short)op, p_stackzone); // printf("d: ea: %p by (%d,%d) (%d,%d), CTA id = %d\n",p, blockIdx.x, threadIdx.x, blockIdx.y, threadIdx.y , (blockIdx.x + blockIdx.y* gridDim.x)); // printf("d: ea: %p by (%d,%d) (%d,%d)\n",p, blockIdx.x, threadIdx.x, blockIdx.y, threadIdx.y ); } //// __device__ void RetKernel(void* p_stackzone) { if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs return; int bid = blockIdx.x + blockIdx.y * gridDim.x; int tid = threadIdx.x + threadIdx.y *blockDim.x; __syncthreads(); //IMPORTANT to sync here int rank = -1; if ( tid == 0) { // printf(" CTA\t%d\treleases:\t%p\n", bid, stackzone); // atomicAdd( &alive, -1); if (p_stackzone!=NULL) { free(p_stackzone); rank = atomicAdd( &aliveCTA, -1); printf("CTA\t%d\texits, total remains\t%d\n", bid, rank); } else printf("d:: p_stack is hacked!!\n"); } __syncthreads(); if (threadIdx.x + threadIdx.y == 0 && rank ==1 ) { printf("d: in RetKernel...\n"); // for (int kk=0; kk< cHeight; kk++) // cxtprint( kk ); if (true) { //memory short* buffer_oN_DeViCe_short = (short*) buffer_oN_DeViCe; buffer_oN_DeViCe_short[0+0] = blockDim.x; // Be consistent with print.cpp, dumpTrace() buffer_oN_DeViCe_short[0+1] = blockDim.y; buffer_oN_DeViCe_short[0+2] = gridDim.x; buffer_oN_DeViCe_short[0+3] = gridDim.y; printf("d: Kernel Returns: collected [ %llu ] memory entries. \n" , ccnntt); printf("d: Kernel Returns: collected [ %llu ] memory entries. 
\n" , bbccnntt); long* buffer_oN_DeViCe_long = (long*) buffer_oN_DeViCe; buffer_oN_DeViCe_long[0+1] = ccnntt; } else { //branch BBlog_t* bbbuffer_oN_DeViCe_short = (BBlog_t*) buffer_oN_DeViCe; bbbuffer_oN_DeViCe_short[0].bidx = blockDim.x; // Be consistent with print.cpp, dumpTrace() bbbuffer_oN_DeViCe_short[0].bidy = blockDim.y; bbbuffer_oN_DeViCe_short[0].tidx = gridDim.x; bbbuffer_oN_DeViCe_short[0].tidy = gridDim.y; bbbuffer_oN_DeViCe_short[0].key = bbccnntt; bbbuffer_oN_DeViCe_short[0].sline = 0; bbbuffer_oN_DeViCe_short[0].scolm = 0; printf("d: Kernel Returns: collected [ %llu ] BB logs. \n" , bbccnntt); printf("d: Kernel Returns: collected [ %llu ] BB logs. \n" , ccnntt); } unsigned long offset1 = ((UNIQUE_FUNC_DEVICE* FUNC_NAME_LEN*sizeof(char))/1024+1)*1024; unsigned long offset2 = ((TOTAL_NUMBER_CONTEXT * CALL_PATH_LEN_DEVICE* sizeof(CallSite_t))/1024+1)*1024 + offset1; printf("size of function dic: %d %d %lu -> %lu , rounded to %lu\n", UNIQUE_FUNC_DEVICE, FUNC_NAME_LEN, sizeof(char), UNIQUE_FUNC_DEVICE*FUNC_NAME_LEN*sizeof(char), offset1 ); printf("size of context dic: %d %d %lu -> %lu , rounded to %lu\n", TOTAL_NUMBER_CONTEXT, CALL_PATH_LEN_DEVICE, sizeof(CallSite_t), TOTAL_NUMBER_CONTEXT* CALL_PATH_LEN_DEVICE* sizeof(CallSite_t) , offset2); //function dic is the last, //context dic is second to last void* ptr; ptr = (void*)( buffer_oN_DeViCe + (BUFFERSIZE - offset1)/sizeof(int)) ; //operate on a int*, not a void* memcpy( ptr, funcDic, UNIQUE_FUNC_DEVICE *FUNC_NAME_LEN*sizeof(char) ); ptr = (void*)(buffer_oN_DeViCe + (BUFFERSIZE - offset2)/sizeof(int)) ; //operate on a int*, not a void* memcpy( ptr, contextDic, TOTAL_NUMBER_CONTEXT * CALL_PATH_LEN_DEVICE*sizeof(CallSite_t) ); /* BBlog_t* tmpbb = (BBlog_t*) buffer_oN_DeViCe; for (int i=1; i<bbccnntt; i++) { printf(" %d\t", tmpbb[i].bidx); printf(" %d\t", tmpbb[i].bidy); printf(" %d\t", tmpbb[i].tidx); printf(" %d\t", tmpbb[i].tidy); printf(" %llu\t", tmpbb[i].key); printf(" %d\t", tmpbb[i].sline); printf(" %d\t", tmpbb[i].scolm); printf("\n"); } */ /* Entry_t* tp = (Entry_t*) buffer_oN_DeViCe; int i; for (i=1; i<ccnntt; i++) { printf(" d: bid (%d,%d) \ttid (%d,%d) \t%p\t%d,%d\t%d\n", tp[i].bidx, tp[i].bidy, tp[i].tidx, tp[i].tidy, tp[i].ea, tp[i].sline, tp[i].scolm, tp[i].op); } */ ccnntt = 1; //reset, prepares for next kernel call bbccnntt = 1; //reset, prepares for next kernel call }//end of if }
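// Usage sketch (my assumption, not part of the original file): these hooks are normally
// injected into application kernels by a compiler pass, but a hand-instrumented kernel
// would call them roughly as below. The host side is expected to hand InitKernel() a
// device buffer of BUFFERSIZE bytes (BUFFERSIZE comes from ../common.h, not shown here);
// the source line/column numbers are placeholders.
__global__ void example_instrumented_kernel(int* data, void* tracebuf)
{
    void* stackzone = InitKernel(tracebuf); // per-CTA scratch for the call stack
    if (stackzone == NULL) return; // whole CTA is outside the profiled [CTALB, CTAUB] range
    callFunc((void*)"example_instrumented_kernel", (void*)"body", 10, 1, stackzone);
    passBasicBlock(0, 1, 12, 1, stackzone); // one BBlog_t record per executed basic block
    int tid = threadIdx.x + threadIdx.y * blockDim.x
            + (blockIdx.x + blockIdx.y * gridDim.x) * blockDim.x * blockDim.y;
    data[tid] += 1;
    print5(&data[tid], 32, 13, 5, 2, stackzone); // log this 32-bit store (line 13, column 5)
    RetKernel(stackzone); // flush metadata/dictionaries and free the per-CTA scratch
}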
823f5bef0ba9c28b17d51767d876a8715a301c0a.cu
#include<sys/mman.h> #include<assert.h> #include<iostream> #include<string.h> #include "../common.h" #include "types.h" //#include<unordered_map> //#include<cuda.h> __device__ int CTALB = 0; //the lower bound of CTA id you want to profile __device__ int CTAUB = 99999; //the upper bound of CTA id you want to profile __device__ int CONSTANCE = 128; __device__ int aliveCTA = 0; //__device__ std::unordered_map< std::string, long> blockmap; //__device__ std::vector<int> testt; //this DOESN'T work //"dynamic initialization doesn't work for __device__ __device__ bool VERBOSE=false; __device__ bool CALLPATHVERBOSE=false; extern "C" { //so that no mangling for function names __device__ void takeString(void* , int); __device__ void RetKernel(void*); __device__ void passBasicBlock(int, int, int, int, void*); __device__ void print5(void*, int, int, int, int, void*); __device__ void print4(void*); __device__ void callFunc(void* , void* , int , int, void*); __device__ int getContextID(void*); __device__ void* InitKernel(void*); __device__ void print1(int); } __device__ unsigned long long ccnntt = 1; //the very first element is reserved for metadata __device__ unsigned long long bbccnntt = 1; //the very first element is reserved for metadata __device__ int* buffer_oN_DeViCe; //should be multiples of 6 //__device__ int* globalCallStack; //__device__ CallSite_t* globalCallStack; //__device__ int* stackHeight; __device__ char funcDic[UNIQUE_FUNC_DEVICE][FUNC_NAME_LEN]; //maintains 100 unique functions and 31 chars for each __device__ int dicHeight = 0; // size of funcDic[][] __device__ CallSite_t contextDic[TOTAL_NUMBER_CONTEXT][CALL_PATH_LEN_DEVICE]; //maintains 100 unique contexts, each has up to 10 function __device__ int cHeight = 0; /* #define MAX_NUM_CTAS 1024 __device__ volatile int arrIn[MAX_NUM_CTAS];//for inter-CTA sync __device__ volatile int arrOut[MAX_NUM_CTAS];//for inter-CTA sync __device__ void __sync_ctas(int goalVal) //, volatile int *arrIn, volatile int *arrOut) { // assuming there the number of threads/CTA is greater than the number of CTAs in the entire grid // otherwise, dead loop // this assumption hurts int nBlockNum = gridDim.x * gridDim.y; int bid = blockIdx.x* gridDim.y + blockIdx.y; int tid = threadIdx.x * blockDim.y + threadIdx.y; if (threadIdx.x + threadIdx.y ==0) arrIn[bid] = goalVal; if (bid==1) { if ( tid < nBlockNum) { while( arrIn[tid] != goalVal) {} } __syncthreads(); if (tid < nBlockNum) arrOut[tid] = goalVal; } if (tid==0) while (arrOut[bid]!=goalVal) {} __syncthreads(); if ( tid==0 ) printf("d: CTA %d sync-ed\n", bid); } */ /* //this is from a published paper. //but it doesn't work. // probably because only one CTA is allowed on one SM, otherwise errors. 
__device__ void __sync_ctas(int goalVal) { if (threadIdx.x + threadIdx.y ==0) { int id = atomicAdd( (int*)&g_mutex,1); printf("d: CTA (%d, %d) got id=%d, goal is %d\n", blockIdx.x, blockIdx.y, id, goalVal); return; while (g_mutex != goalVal) {}//busy wait } __syncthreads(); } */ __device__ void mystrcpy(char* dst, char* src) { int cnt = 0; while ( src[cnt] != '\0' && cnt < FUNC_NAME_LEN-1) //never exceeds this 30 limit { dst[cnt] = src[cnt]; cnt++; } dst[cnt] = '\0'; return; } __device__ bool mystrcmp(char* dst, char* src) { int cnt = 0; while ( cnt < FUNC_NAME_LEN-1 ) //never exceeds this 30 limit { if ( dst[cnt] == '\0' && src[cnt] == '\0') return true; if (dst[cnt] != src[cnt]) return false; cnt++; } return true; } __device__ int getFuncID(char* func) { if (dicHeight == 0 ) //the very first function { mystrcpy(funcDic[0], func); // printf("src: %s\n", func); // printf("dst: %s\n", funcDic[0]); dicHeight ++; return 0; } // printf("d: height = %d\n", dicHeight); for(int i=0; i < dicHeight; i++) { bool found = mystrcmp( funcDic[i], func ); // printf("d:: compare this pair: %s: \t%s \tVS\t %s\n", found?"yes":"no", funcDic[i], func); if(found) return i; } //return -1;//DEBUG //if you are here, means we have a new func mystrcpy(funcDic[dicHeight], func); dicHeight ++; return dicHeight-1; } __device__ void updateCallStack(int caller, int callee, short sline, short scolm, int bid, int tid, void* p_stackzone) { int offset = bid*blockDim.x*blockDim.y+tid; // CallSite_t* callStack = (CallSite_t*) (&(globalCallStack[offset*CALL_PATH_LEN_DEVICE])); // int &height = stackHeight[offset]; CallSite_t* callStack = (CallSite_t*) p_stackzone; int bytesPerThread = (CALL_PATH_LEN_DEVICE*sizeof(CallSite_t)); int* temp = (int*)( (char*)p_stackzone + bytesPerThread+16); //offset by 16 to be safe, need to be consistent int &height = *temp; // int &h11 = * (int*)( (char*)p_stackzone + bytesPerThread); // if(CALLPATHVERBOSE) // printf( ":::::::: height = %d :::::::::\n", height); // assert(height != 1 && "stack height != 1") ; //return;//DUBUG if (height==0) { // if (CALLPATHVERBOSE) // printf("first ever. 
tid=%d\n", tid); callStack[0].id = caller; callStack[0].sline = sline; callStack[0].scolm = scolm; callStack[1].id = callee; callStack[1].sline = -1; callStack[1].scolm = -1; height=2; return; } int p_caller = callStack[height-2].id; int p_callee = callStack[height-1].id; if ( p_caller == caller && p_callee == callee) { //repeated call // if (CALLPATHVERBOSE) // printf("repeated call\n"); callStack[height-2].sline = sline; callStack[height-2].scolm = scolm; return; } else if ( p_caller == caller && p_callee != callee) { //the same parent called a different function, simply update the callee // if (CALLPATHVERBOSE) // printf("same caller different callee\n"); callStack[height-1].id = callee; callStack[height-2].sline = sline; callStack[height-2].scolm = scolm; return; } else if ( p_callee == caller) { // a typical call path // if (CALLPATHVERBOSE) // printf("call sequence\n"); callStack[height-1].sline = sline; callStack[height-1].scolm = scolm; callStack[height].id = callee; callStack[height].sline = -1; callStack[height].scolm = -1; height++; return; } // return;//DUBUG // if (CALLPATHVERBOSE) // printf("the caller exists deeply in the stack\n"); // the caller exists deeply in the stack for (int i=height-1; i>=0; i--) { if ( callStack[i].id == caller) { height = i+1; callStack[i].id = callee; callStack[i].sline = -1; callStack[i].scolm = -1; callStack[i].sline = sline; callStack[i].scolm = scolm; return; } } // the caller exists deeply in the stack // assert( (0==-1) && "!! undefined things happeened here\n"); } /* __device__ void printCallStack(int bid, int tid) { int offset = bid*blockDim.x*blockDim.y+tid; CallSite_t* callStack = (CallSite_t*) (&(globalCallStack[offset*CALL_PATH_LEN_DEVICE])); int height = stackHeight[offset]; printf(" d::: current call stack height: %d @ bid = %d, tid = %d = (%d,%d,%d,%d)\n", height, bid, tid, threadIdx.x, threadIdx.y, blockIdx.x, blockIdx.y); if (height<1) return; for (int i=0; i<height; i++) printf(" %d: call site: %d, (%d, %d)\n", i, callStack[i].id, callStack[i].sline, callStack[i].scolm ); } */ __device__ void* InitKernel(void* ptrhead) { //TODO: if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs return NULL; int tid = threadIdx.x + threadIdx.y *blockDim.x; int bid = blockIdx.x + blockIdx.y * gridDim.x; int global_tid = tid + bid*blockDim.x*blockDim.y; int num_cta = gridDim.x*gridDim.y; int num_thread = blockDim.x*blockDim.y; __shared__ char* handler; //this pointer is for maintaing stack/callpath __syncthreads(); int bytesPerThread = sizeof(CallSite_t)*CALL_PATH_LEN_DEVICE + 32;// I put 32 just to be safe if ( tid ==0 ) { handler = (char*) malloc( blockDim.x*blockDim.y*bytesPerThread); assert( handler!=NULL); // printf(" CTA \t%d\tgrabs memroy\t%p\n", bid, handler); int rank = atomicAdd( &aliveCTA, 1); printf(" CTA\t%d\tonline, total alive\t%d\n", bid, rank); if (rank==0) { // if (tid%32==0) { // buffer_oN_DeViCe = (int*)ptrhead; printf("\nd: InitKernel...\n"); printf("d: buffer pointer: %p\n", buffer_oN_DeViCe); printf("d: size of kernel grid: %d, %d\t%d, %d\n", gridDim.x, gridDim.y, blockDim.x, blockDim.y); } } if (rank == 1) buffer_oN_DeViCe = (int*)ptrhead; } __syncthreads(); void* stackzone = (void*)( handler + bytesPerThread*tid ); // if (tid==1) // { // stackHeight = (int*) ptr3; // globalCallStack = (CallSite_t*)ptr2; // buffer_oN_DeViCe = (int*)ptrhead; //} // if (tid ==0) // printf("d: DEBUG: here1 from CTA %d\n", bid); return stackzone; // if (threadIdx.x + 
threadIdx.y + blockIdx.x + blockIdx.y == 0) // vunlunteer to do the initialization /* done2 = atomicAdd(&done2, 1); if ( stackHeight==NULL && done2 == 2) { printf("I will 2nd malloc() %ld by (%d, %d)\n", numthreads*sizeof(int) , bid, tid); stackHeight = (int*)malloc( sizeof(int) * numthreads); printf(" 2nd malloc() done by (%d, %d)\n", bid, tid); } else{ holdon( bid*10000); } __syncthreads(); holdon(bid*10000); // done1 = atomicAdd(&done1, 1); // if ( globalCallStack==NULL && tid==0) // { // printf("I will malloc() %ld by (%d, %d)\n", numthreads*sizeof(CallSite_t*)* UNIQUE_FUNC_DEVICE ,bid, tid); // globalCallStack = (CallSite_t**) malloc(sizeof(CallSite_t*) * numthreads); // // } if ( globalCallStack[global_tid] ==NULL) { //DEBUG: there are still repeated allocation from the SAME thread globalCallStack[global_tid] = (CallSite_t*) malloc(UNIQUE_FUNC_DEVICE* sizeof(CallSite_t) ); printf("I do it by myself %ld @ %p by (%d, %d)=%d\n", sizeof(CallSite_t*)* UNIQUE_FUNC_DEVICE, globalCallStack[global_tid], bid, tid, global_tid); } __syncthreads(); stackHeight[global_tid] = 0; __syncthreads(); printf("__ back from InitKernel: %d, %d\n", bid, tid); */ //Du: July 10 /* else { //wait and see int cnt = 0; while ( globalCallStack==NULL ) cnt++; while ( stackHeight==NULL) cnt++; } */ /* int mask = __ballot(1); int leader = __ffs(mask)-1; if( globalCallStack==NULL && leader == threadIdx.x%32) { long numthreads = gridDim.x*gridDim.y*blockDim.x*blockDim.y*32; printf("I will malloc() %ld by (%d, %d)\n", numthreads*sizeof(CallSite_t*)* UNIQUE_FUNC_DEVICE , threadIdx.x, threadIdx.y); globalCallStack = (CallSite_t**) malloc(sizeof(CallSite_t*) * numthreads); for( int i = 0; i<numthreads; i++) globalCallStack[i] = (CallSite_t*) malloc(UNIQUE_FUNC_DEVICE* sizeof(CallSite_t) ); stackHeight = (int*)malloc( sizeof(int) * numthreads); for (int i=0; i<numthreads; i++) stackHeight[i] = 0; } */ } __device__ void callFunc(void* er, void* ee, int sline, int scolm, void* p_stackzone) { // if (threadIdx.x != 0 || blockIdx.x != 0 || threadIdx.y != 0 || blockIdx.y != 0) return; //DEBUG // printf("d::%d\n", sline ); // printf("d::%s\n", (char*)er ); // if (CALLPATHVERBOSE) // printf("d:::: >>>>\n"); int id1 = getFuncID( (char*)er ); int id2 = getFuncID( (char*)ee ); // if (CALLPATHVERBOSE) // { // printf("d:::: ID: %d :%s\n", id1, (char*)er ); // printf("d:::: ID: %d :%s\n", id2, (char*)ee ); // } int tid = threadIdx.y * blockDim.x + threadIdx.x; int bid = blockIdx.x + blockIdx.y * gridDim.x; int global_tid = bid * (blockDim.x * blockDim.y) + tid; updateCallStack(id1, id2, (short) sline, (short) scolm, bid, tid, p_stackzone); // printCallStack(global_tid); // if (CALLPATHVERBOSE) // printf("d:::: <<<<\n"); } /* __device__ void takeString(void* p, int action) { if (threadIdx.x != 0 || blockIdx.x != 0 || threadIdx.y != 0 || blockIdx.y != 0) return; if (VERBOSE) { if (action==1) printf("d: caller: %s\n",(char*)p); else if (action==2) printf("d: callee: %s\n",(char*)p); else if (action==3) printf("d: return: %s\n",(char*)p); else printf("d: undefined: %s\n",(char*)p); } return; } */ __device__ void cxtprint(int id) { if (id<0) return; printf("d::: requested context id: %d out of %d\n", id, cHeight); for (int i = 0; i< CALL_PATH_LEN_DEVICE && contextDic[id][i].id != -1 ; i++) { printf("d::::::: current context [%d][%d]: %d, %d, %d\n", id, i, contextDic[id][i].id, contextDic[id][i].sline, contextDic[id][i].scolm) ; } return; } __device__ void cxtcpy( CallSite_t* dst, CallSite_t* src , int height) //context copy { int i; for( i=0; 
i< height; i++) dst[i] = src[i]; // assert(i<CALL_PATH_LEN_DEVICE && "code: e56: call stack too deep"); dst[i].id = -1; //to mark the ending of one context return; } __device__ bool cxtcmp( CallSite_t* dst, CallSite_t* src, int height) { for( int i=0; i< height; i++) if ( dst[i].id == src[i].id ) // && dst[i].id == src[i].id && continue; else return false; return true; } __device__ int getContextID(void* p_stackzone) { //shared by all treahds, there are races //you can manually to take care of serialization? // if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y != 0 ) return -2; //DEBUG int bid = blockIdx.x + blockIdx.y * gridDim.x; int tid = threadIdx.y * blockDim.x + threadIdx.x; // int offset = bid*blockDim.x*blockDim.y+tid; // CallSite_t* callStack = (CallSite_t*) (&(globalCallStack[offset*CALL_PATH_LEN_DEVICE])); // int &height = stackHeight[offset]; CallSite_t* callStack = (CallSite_t*) p_stackzone; int bytesPerThread = (CALL_PATH_LEN_DEVICE*sizeof(CallSite_t)); int* temp = (int*)( (char*)p_stackzone + bytesPerThread+16); //offset by 8 to be safe, need to be consistent int &height = *temp; if ( height ==0) //it is possible that call stack is still empty return -1; if (cHeight==0)// the first ever context in the dic { // if (CALLPATHVERBOSE) // printf("d::: the very first context in dic, depth=%d\n", height); cxtcpy(contextDic[0], callStack, height ); cHeight=1; return 0; } // something already exists // if (CALLPATHVERBOSE) // { // printf("d::: going to match existing items in context dic\n"); // printf("d::: number of existing contexts: %d\n", cHeight); // } int i; for (i = 0; i<cHeight; i++) { if ( cxtcmp( contextDic[i], callStack, height ) ) //yes, found { // if (CALLPATHVERBOSE) // printf("d::: matched, returning %d, depth=%d\n",i, height); return i; } } // if (CALLPATHVERBOSE) // printf("d::: not found, value of i: %d\n", i); // assert (i< TOTAL_NUMBER_CONTEXT && "code:e34: Not enough space for Context Dic, index i"); // printCallStack(); cxtcpy(contextDic[i], callStack, height ); cHeight = i+1; // assert (cHeight < TOTAL_NUMBER_CONTEXT && "code:e41: Not enough space for Context Dic, cHeight"); // if (CALLPATHVERBOSE) // printf("d::: inserted new one: id = %d, depth=%d\n", i, height); return i; } __device__ void passBasicBlock(int tmp /*pointer to block name*/, int action, int sline, int scolm, void* p_stackzone) { if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs return; int map = __ballot(1); int numActive = __popc(map); if ( buffer_oN_DeViCe == NULL) return; if (numActive==32) { //then choose one thread to write numbers int tid = threadIdx.x + threadIdx.y *blockDim.x; if (tid%32==0) { //do the writing // printf("I will write for my warp tid=(%d, %d)\n", threadIdx.x, threadIdx.y); int bid = atomicAdd(&bbccnntt, 1); unsigned long long key=0; BBlog_t* bblog = (BBlog_t*) buffer_oN_DeViCe; bblog[bid].key = key; bblog[bid].tidx = (short)threadIdx.x; bblog[bid].tidy = (short)threadIdx.y; bblog[bid].bidx = (short)blockIdx.x; bblog[bid].bidy = (short)blockIdx.y; bblog[bid].sline = sline; bblog[bid].scolm = scolm; bblog[bid].cid = getContextID(p_stackzone); } } else { //every thread needs to write //printf("I will write for my self tid=(%d, %d)\n", threadIdx.x, threadIdx.y); int bid = atomicAdd(&bbccnntt, 1); unsigned long long key=0; BBlog_t* bblog = (BBlog_t*) buffer_oN_DeViCe; bblog[bid].key = key; bblog[bid].tidx = (short)threadIdx.x; bblog[bid].tidy = (short)threadIdx.y; bblog[bid].bidx = 
(short)blockIdx.x; bblog[bid].bidy = (short)blockIdx.y; bblog[bid].sline = sline; bblog[bid].scolm = scolm; bblog[bid].cid = getContextID(p_stackzone); } return; } /* __device__ void passBasicBlock(int tmp , int action, int sline, int scolm, void* p_stackzone) //__device__ void passBasicBlock(void* p , int action, int sline, int scolm, void* p_stackzone) { if ( buffer_oN_DeViCe == NULL) return; assert ( (bbccnntt < BUFFERSIZE/24 - 128) && "code: e317: too many entries to the buffer" ); //DO NOT COMMENT OUT if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs return; // if (threadIdx.x + blockIdx.x + threadIdx.y + blockIdx.y == 0) // getFuncID( (char*)p); //DEBUG // printf("d: basic block: %s \ttid: (%d, %d)\n", str, threadIdx.x, threadIdx.y) ; int bid = atomicAdd(&bbccnntt, 1); if (bid > BUFFERSIZE/sizeof(BBlog_t) - 128) //overflow protection return; // for(int i=0; *(str+i) != 0; i++) // { // printf("%c", *(str+i) ); // } // printf("\n"); unsigned long long key=0; int cnt = 0; long long factor = 1; char* str = (char*)p; for(int i=0; *(str+i) != 0; i++) { int ascii = (int)(*(str+i)) ; if (ascii<48 || ascii > 123) continue; key += ascii*factor; factor *= CONSTANCE; // printf("%d\t", (int)(*(str+i)) ); // printf("key of %s is \t %llu\n", str, key); } // printf("key of %s is \t %llu\n", str, key); // printf("\n"); BBlog_t* bblog = (BBlog_t*) buffer_oN_DeViCe; // bblog[bid].key = key; bblog[bid].tidx = (short)threadIdx.x; bblog[bid].tidy = (short)threadIdx.y; bblog[bid].bidx = (short)blockIdx.x; bblog[bid].bidy = (short)blockIdx.y; bblog[bid].sline = sline; bblog[bid].scolm = scolm; // bblog[bid].cid = getContextID(p_stackzone); //printf("d:: context ID: %d\n", bblog[bid].cid); // if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0 ) // printf("d:: context ID= %d\n", bblog[bid].cid); return; } */ __device__ void storeLines(void* p, short size/*bytes*/, short line, short colmn, short op /*load or store*/, void* p_stackzone) { if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs return; int map = __ballot(1); int numActive = __popc(map); if ( ccnntt > (int)(((long)BUFFERSIZE)/24) - 128*100) return; //DEBUG assert ( (ccnntt < BUFFERSIZE/24 - 128) && "code: e31: too many entries to the buffer"); //DO NOT COMMENT OUT int bid = atomicAdd(&ccnntt, 1); //d_trace[bid].bidx = blockIdx.x; //d_trace[bid].tidx = threadIdx.x; //d_trace[bid].ea = p; //d_trace[bid].bytes = size; //printf(" d : bid = %d from (%d,%d) (%d,%d) \n", bid, blockIdx.x, threadIdx.x, blockIdx.y, threadIdx.y); if (buffer_oN_DeViCe==NULL) return; if( true) { int tid = threadIdx.x + threadIdx.y *blockDim.x; if ( tid%32==0 || true) { short* buffer_oN_DeViCe_short = (short*) buffer_oN_DeViCe; long* buffer_oN_DeViCe_long = (long*) buffer_oN_DeViCe; buffer_oN_DeViCe_short[bid*12+0] = (short)blockIdx.x; buffer_oN_DeViCe_short[bid*12+1] = (short)blockIdx.y; buffer_oN_DeViCe_short[bid*12+2] = (short)threadIdx.x; buffer_oN_DeViCe_short[bid*12+3] = (short)threadIdx.y; buffer_oN_DeViCe_long[bid*3+1] = (long)p; buffer_oN_DeViCe_short[bid*12+8] = size; buffer_oN_DeViCe_short[bid*12+9] = line; buffer_oN_DeViCe_short[bid*12+10] = colmn; buffer_oN_DeViCe_short[bid*12+11] = op; getContextID(p_stackzone); } } } /* __device__ void dumpLines(void) { if (threadIdx.x != 0 || blockIdx.x != 0 || threadIdx.y != 0 || blockIdx.y != 0) return; int ii; for(ii=1; ii< ccnntt; ii=ii+6) { // printf("d: %d Bytes at %p by 
(%d, %d)\n", buffer_oN_DeViCe[ii*6+4], buffer_oN_DeViCe[ii*6+2], buffer_oN_DeViCe[ii*6], buffer_oN_DeViCe[ii*6+1] ); } // printf("\n" ); // const char* ss = "this is the end"; // void *ps = ss; // takeString(ps); //Or, this also works. // char s[200] = "this is the end"; void *ps = &(s); takeString(s); // printf("try mmap\n" ); // void* ptr = mmap(NULL, 1000, PROT_READ | PROT_WRITE| PROT_EXEC, MAP_SHARED, -1, 0); // printf("%p\n",ptr ); } */ __device__ void print1(int a) { if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0) printf("d: print1: %d\n", a); return; if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0 && VERBOSE) { if (a==1) printf("d: load by CTA (%d,%d)\n", blockIdx.x, blockIdx.y); else if (a==2) printf("d: store by CTA (%d,%d)\n", blockIdx.x, blockIdx.y); else printf("d: !!! undefined !!! \n" ); } } /* __device__ void print2() { if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0 && VERBOSE) printf("d: store by CTA (%d,%d)\n", blockIdx.x, blockIdx.y); } */ __device__ void print3(int line, int col) { return; if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0 && VERBOSE) printf("d: source line: %d\t column: %d by CTA (%d,%d)\n", line, col, blockIdx.x, blockIdx.y); } __device__ void print4(void* p) { //if (threadIdx.x + threadIdx.y + blockIdx.x + blockIdx.y == 0 && VERBOSE) printf("d: print4: %p\n", p); } __device__ void print5(void* p, int bits, int sline, int scolm, int op, void* p_stackzone) { // if ( (blockIdx.x + blockIdx.y* gridDim.x) * (blockDim.x * blockDim.y) >= 32*128) // no more than 128 warps // return; // printf("d: ea: %p by (%d,%d) (%d,%d), CTA id = %d\n",p, blockIdx.x, threadIdx.x, blockIdx.y, threadIdx.y , (blockIdx.x + blockIdx.y* gridDim.x)); if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs return; storeLines(p, (short)(bits/8), (short)sline, (short) scolm, (short)op, p_stackzone); // printf("d: ea: %p by (%d,%d) (%d,%d), CTA id = %d\n",p, blockIdx.x, threadIdx.x, blockIdx.y, threadIdx.y , (blockIdx.x + blockIdx.y* gridDim.x)); // printf("d: ea: %p by (%d,%d) (%d,%d)\n",p, blockIdx.x, threadIdx.x, blockIdx.y, threadIdx.y ); } //// __device__ void RetKernel(void* p_stackzone) { if ( (blockIdx.x + blockIdx.y*gridDim.x) < CTALB || (blockIdx.x + blockIdx.y*gridDim.x) > CTAUB) // you only need a few CTAs return; int bid = blockIdx.x + blockIdx.y * gridDim.x; int tid = threadIdx.x + threadIdx.y *blockDim.x; __syncthreads(); //IMPORTANT to sync here int rank = -1; if ( tid == 0) { // printf(" CTA\t%d\treleases:\t%p\n", bid, stackzone); // atomicAdd( &alive, -1); if (p_stackzone!=NULL) { free(p_stackzone); rank = atomicAdd( &aliveCTA, -1); printf("CTA\t%d\texits, total remains\t%d\n", bid, rank); } else printf("d:: p_stack is hacked!!\n"); } __syncthreads(); if (threadIdx.x + threadIdx.y == 0 && rank ==1 ) { printf("d: in RetKernel...\n"); // for (int kk=0; kk< cHeight; kk++) // cxtprint( kk ); if (true) { //memory short* buffer_oN_DeViCe_short = (short*) buffer_oN_DeViCe; buffer_oN_DeViCe_short[0+0] = blockDim.x; // Be consistent with print.cpp, dumpTrace() buffer_oN_DeViCe_short[0+1] = blockDim.y; buffer_oN_DeViCe_short[0+2] = gridDim.x; buffer_oN_DeViCe_short[0+3] = gridDim.y; printf("d: Kernel Returns: collected [ %llu ] memory entries. \n" , ccnntt); printf("d: Kernel Returns: collected [ %llu ] memory entries. 
\n" , bbccnntt); long* buffer_oN_DeViCe_long = (long*) buffer_oN_DeViCe; buffer_oN_DeViCe_long[0+1] = ccnntt; } else { //branch BBlog_t* bbbuffer_oN_DeViCe_short = (BBlog_t*) buffer_oN_DeViCe; bbbuffer_oN_DeViCe_short[0].bidx = blockDim.x; // Be consistent with print.cpp, dumpTrace() bbbuffer_oN_DeViCe_short[0].bidy = blockDim.y; bbbuffer_oN_DeViCe_short[0].tidx = gridDim.x; bbbuffer_oN_DeViCe_short[0].tidy = gridDim.y; bbbuffer_oN_DeViCe_short[0].key = bbccnntt; bbbuffer_oN_DeViCe_short[0].sline = 0; bbbuffer_oN_DeViCe_short[0].scolm = 0; printf("d: Kernel Returns: collected [ %llu ] BB logs. \n" , bbccnntt); printf("d: Kernel Returns: collected [ %llu ] BB logs. \n" , ccnntt); } unsigned long offset1 = ((UNIQUE_FUNC_DEVICE* FUNC_NAME_LEN*sizeof(char))/1024+1)*1024; unsigned long offset2 = ((TOTAL_NUMBER_CONTEXT * CALL_PATH_LEN_DEVICE* sizeof(CallSite_t))/1024+1)*1024 + offset1; printf("size of function dic: %d %d %lu -> %lu , rounded to %lu\n", UNIQUE_FUNC_DEVICE, FUNC_NAME_LEN, sizeof(char), UNIQUE_FUNC_DEVICE*FUNC_NAME_LEN*sizeof(char), offset1 ); printf("size of context dic: %d %d %lu -> %lu , rounded to %lu\n", TOTAL_NUMBER_CONTEXT, CALL_PATH_LEN_DEVICE, sizeof(CallSite_t), TOTAL_NUMBER_CONTEXT* CALL_PATH_LEN_DEVICE* sizeof(CallSite_t) , offset2); //function dic is the last, //context dic is second to last void* ptr; ptr = (void*)( buffer_oN_DeViCe + (BUFFERSIZE - offset1)/sizeof(int)) ; //operate on a int*, not a void* memcpy( ptr, funcDic, UNIQUE_FUNC_DEVICE *FUNC_NAME_LEN*sizeof(char) ); ptr = (void*)(buffer_oN_DeViCe + (BUFFERSIZE - offset2)/sizeof(int)) ; //operate on a int*, not a void* memcpy( ptr, contextDic, TOTAL_NUMBER_CONTEXT * CALL_PATH_LEN_DEVICE*sizeof(CallSite_t) ); /* BBlog_t* tmpbb = (BBlog_t*) buffer_oN_DeViCe; for (int i=1; i<bbccnntt; i++) { printf(" %d\t", tmpbb[i].bidx); printf(" %d\t", tmpbb[i].bidy); printf(" %d\t", tmpbb[i].tidx); printf(" %d\t", tmpbb[i].tidy); printf(" %llu\t", tmpbb[i].key); printf(" %d\t", tmpbb[i].sline); printf(" %d\t", tmpbb[i].scolm); printf("\n"); } */ /* Entry_t* tp = (Entry_t*) buffer_oN_DeViCe; int i; for (i=1; i<ccnntt; i++) { printf(" d: bid (%d,%d) \ttid (%d,%d) \t%p\t%d,%d\t%d\n", tp[i].bidx, tp[i].bidy, tp[i].tidx, tp[i].tidy, tp[i].ea, tp[i].sline, tp[i].scolm, tp[i].op); } */ ccnntt = 1; //reset, prepares for next kernel call bbccnntt = 1; //reset, prepares for next kernel call }//end of if }
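// Host-side decode sketch (an assumption about intended use, mirroring the 24-byte record
// layout that storeLines() writes above; the real project presumably does this in print.cpp
// with Entry_t from types.h, neither of which is shown here, so TraceRecord below is a
// stand-in). Record 0 carries the launch shape and the entry count, records 1..count-1
// carry one memory access each.
#include <cstdio>
struct TraceRecord {              // 24 bytes = 12 shorts = 3 longs, matching buffer_oN_DeViCe
    short bidx, bidy, tidx, tidy; // CTA and thread coordinates (short slots 0..3)
    long  ea;                     // effective address (long slot 1, i.e. bytes 8..15)
    short bytes, sline, scolm, op;// access size, source line/column, load/store flag (short slots 8..11)
};
void dumpMemTrace(const void* host_copy_of_buffer) // pass a host copy obtained via cudaMemcpy
{
    const TraceRecord* rec = (const TraceRecord*)host_copy_of_buffer;
    const long* as_long = (const long*)host_copy_of_buffer;
    long count = as_long[1];      // RetKernel() stores ccnntt into long slot 1 of record 0
    printf("grid %d x %d, block %d x %d, %ld entries\n",
           rec[0].tidx, rec[0].tidy, rec[0].bidx, rec[0].bidy, count);
    for (long i = 1; i < count; ++i)
        printf("cta(%d,%d) thr(%d,%d) addr=%p %dB line %d:%d op %d\n",
               rec[i].bidx, rec[i].bidy, rec[i].tidx, rec[i].tidy,
               (void*)rec[i].ea, rec[i].bytes, rec[i].sline, rec[i].scolm, rec[i].op);
}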
5f46323f099c69452a1c44802e1f9619ba1d953b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* FLUIDS v.3 - SPH Fluid Simulator for CPU and GPU Copyright (C) 2012. Rama Hoetzlein, http://fluids3.com Fluids-ZLib license (* see part 1 below) This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. Acknowledgement of the original author is required if you publish this in a paper, or use it in a product. (See fluids3.com for details) 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #define CUDA_KERNEL #include "fluid_system_kern.cuh" #include "cutil_math.h" #include "radixsort.cu" // Build in RadixSort __device__ FluidParams simData; __device__ uint gridActive; __global__ void insertParticles ( bufList buf, int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; register float3 gridMin = simData.gridMin; register float3 gridDelta = simData.gridDelta; register int3 gridRes = simData.gridRes; register int3 gridScan = simData.gridScanMax; register float poff = simData.psmoothradius / simData.psimscale; register int gs; register float3 gcf; register int3 gc; gcf = (buf.mpos[i] - gridMin) * gridDelta; gc = make_int3( int(gcf.x), int(gcf.y), int(gcf.z) ); gs = (gc.y * gridRes.z + gc.z)*gridRes.x + gc.x; if ( gc.x >= 1 && gc.x <= gridScan.x && gc.y >= 1 && gc.y <= gridScan.y && gc.z >= 1 && gc.z <= gridScan.z ) { buf.mgcell[i] = gs; // Grid cell insert. buf.mgndx[i] = atomicAdd ( &buf.mgridcnt[ gs ], 1 ); // Grid counts. gcf = (-make_float3(poff,poff,poff) + buf.mpos[i] - gridMin) * gridDelta; gc = make_int3( int(gcf.x), int(gcf.y), int(gcf.z) ); gs = ( gc.y * gridRes.z + gc.z)*gridRes.x + gc.x; //buf.mcluster[i] = gs; -- make sure it is allocated! } else { buf.mgcell[i] = GRID_UNDEF; //buf.mcluster[i] = GRID_UNDEF; -- make sure it is allocated! 
} } // the mutex variable __device__ int g_mutex = 0; // GPU simple synchronization function __device__ void __gpu_sync(int goalVal) { __threadfence (); // only thread 0 is used for synchronization if (threadIdx.x == 0) atomicAdd(&g_mutex, 1); // only when all blocks add 1 to g_mutex will // g_mutex equal to goalVal while(g_mutex < goalVal) { // infinite loop until g_mutx = goalVal } if ( blockIdx.x == 0 && threadIdx.x == 0 ) g_mutex = 0; __syncthreads(); } // countingSortInPlace -- GPU_SYNC DOES NOT WORK /*uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) { __gpu_sync ( 2 ); return; } register float3 ipos, ivel, iveleval, iforce; register float ipress, idens; register int icell, indx, iclr; icell = buf.mgcell [ i ]; indx = buf.mgndx [ i ]; int sort_ndx = buf.mgridoff[ icell ] + indx; // global_ndx = grid_cell_offet + particle_offset if ( icell == GRID_UNDEF ) { __gpu_sync ( 2 ); return; } ipos = buf.mpos [ i ]; ivel = buf.mvel [ i ]; iveleval = buf.mveleval [ i ]; iforce = buf.mforce [ i ]; ipress = buf.mpress [ i ]; idens = buf.mdensity [ i ]; iclr = buf.mclr [ i ]; __gpu_sync ( 2 ) ; //threadfence(); // make sure every thread in all blocks has their data buf.mpos [ sort_ndx ] = ipos; buf.mvel [ sort_ndx ] = ivel; buf.mveleval [ sort_ndx ] = iveleval; buf.mforce [ sort_ndx ] = iforce; buf.mpress [ sort_ndx ] = ipress; buf.mdensity [ sort_ndx ] = idens; buf.mclr [ sort_ndx ] = iclr;*/ // Counting Sort - Index __global__ void countingSortIndex ( bufList buf, int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; uint icell = buf.mgcell[i]; uint indx = buf.mgndx[i]; int sort_ndx = buf.mgridoff[ icell ] + indx; // global_ndx = grid_cell_offet + particle_offset if ( icell != GRID_UNDEF ) { buf.mgrid[ sort_ndx ] = i; // index sort, grid refers to original particle order } } // Counting Sort - Full (deep copy) __global__ void countingSortFull ( bufList buf, int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; uint icell = *(uint*) (buf.msortbuf + pnum*BUF_GCELL + i*sizeof(uint) ); uint indx = *(uint*) (buf.msortbuf + pnum*BUF_GNDX + i*sizeof(uint) ); int sort_ndx = buf.mgridoff[ icell ] + indx; // global_ndx = grid_cell_offet + particle_offset if ( icell != GRID_UNDEF ) { buf.mgrid[ sort_ndx ] = sort_ndx; // full sort, grid indexing becomes identity char* bpos = buf.msortbuf + i*sizeof(float3); buf.mpos[ sort_ndx ] = *(float3*) (bpos); buf.mvel[ sort_ndx ] = *(float3*) (bpos + pnum*BUF_VEL ); buf.mveleval[ sort_ndx ] = *(float3*) (bpos + pnum*BUF_VELEVAL ); buf.mforce[ sort_ndx ] = *(float3*) (bpos + pnum*BUF_FORCE ); buf.mpress[ sort_ndx ] = *(float*) (buf.msortbuf + pnum*BUF_PRESS + i*sizeof(float) ); buf.mdensity[ sort_ndx ] = *(float*) (buf.msortbuf + pnum*BUF_DENS + i*sizeof(float) ); buf.mclr[ sort_ndx ] = *(uint*) (buf.msortbuf + pnum*BUF_CLR+ i*sizeof(uint) ); // ((uint) 255)<<24; -- dark matter buf.mgcell[ sort_ndx ] = icell; buf.mgndx[ sort_ndx ] = indx; } } __global__ void countActiveCells ( bufList buf, int pnum ) { if ( threadIdx.x == 0 ) { // use only one processor gridActive = -1; int last_ndx = buf.mgridoff [ simData.gridTotal-1 ] + buf.mgridcnt[ simData.gridTotal-1 ] - 1; int last_p = buf.mgrid[ last_ndx ]; int last_cell = buf.mgcell[ last_p ]; int first_p = buf.mgrid[ 0 ]; int first_cell = buf.mgcell[ first_p ] ; int id, cell, cnt = 0, curr = 0; cell = first_cell; while ( cell < last_cell ) { buf.mgridactive[ cnt ] = cell; 
// add cell to active list cnt++; curr += buf.mgridcnt[cell]; // advance to next active cell // id = buf.mgrid[curr]; // get particle id -- when unsorted only cell = buf.mgcell [ curr ]; // get cell we are in -- use id when unsorted } gridActive = cnt; } __syncthreads(); } __device__ float contributePressure ( int i, float3 p, int cell, bufList buf ) { float3 dist; float dsq, c, sum; register float d2 = simData.psimscale * simData.psimscale; register float r2 = simData.r2/d2; int j; sum = 0.0; if ( buf.mgridcnt[cell] == 0 ) return 0.0; int cfirst = buf.mgridoff[ cell ]; int clast = cfirst + buf.mgridcnt[ cell ]; for ( int cndx = cfirst; cndx < clast; cndx++ ) { dist = p - buf.mpos[ buf.mgrid[cndx] ]; dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); if ( dsq < r2 && dsq > 0.0) { c = (r2 - dsq)*d2; sum += c * c * c; } } return sum; } __global__ void computePressure ( bufList buf, int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; // Get search cell int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1; uint gc = buf.mgcell[ i ]; if ( gc == GRID_UNDEF ) return; // particle out-of-range gc -= nadj; // Sum Pressures float3 pos = buf.mpos[ i ]; float sum = 0.0; for (int c=0; c < simData.gridAdjCnt; c++) { sum += contributePressure ( i, pos, gc + simData.gridAdj[c], buf ); __syncthreads(); } // Compute Density & Pressure sum = sum * simData.pmass * simData.poly6kern; if ( sum == 0.0 ) sum = 1.0; buf.mpress[ i ] = ( sum - simData.prest_dens ) * simData.pintstiff; buf.mdensity[ i ] = 1.0f / sum; } /*FindNeighbors int cid = blockIdx.x * blockSize.x + blockIdx.y; // cluster id int pid = threadIdx.x; // 0 to 85 (max particles per cell) __shared__ Particle clist[ 85 ]; __shared__ Particle plist[ 85*8 ]; if ( pid < clusterCnt[cid] ) clist [ pid ] = particles [ clusterNdx[cid] + pid ]; for ( gid = 0; gid < 8; gid++ ) { if ( pid < gridCnt[ cid + group[gid] ] ) plist [ cid*CELL_CNT + pid ] = particles [ sortNdx[ cid + group[gid] ] + pid ]; } __syncthreads(); for ( int j = 0; j < cellcnt; j++ ) { dst = plist[ pid ] - plist[ j ]; if ( dst < R2 ) { ... 
} }*/ /*grid block <gx, gy, gz> <1, 32, 64> 256, 256, 256 total: */ #define LOCAL_PMAX 896 #define NUM_CELL 27 #define LAST_CELL 26 #define CENTER_CELL 13 __global__ void computePressureGroup ( bufList buf, int pnum ) { __shared__ float3 cpos[ LOCAL_PMAX ]; __shared__ int ncnt[ NUM_CELL ]; __shared__ int ngridoff[ NUM_CELL ]; __shared__ int noff[ NUM_CELL ]; int bid = __mul24( blockIdx.y, gridDim.x ) + blockIdx.x; if ( bid > gridActive ) return; // block must be in a valid grid uint cell = buf.mgridactive [ bid ]; // get grid cell (from blockID 1:1) register int i = -1; register float3 ipos; uint ndx = threadIdx.x; if ( ndx < buf.mgridcnt[cell] ) { i = buf.mgridoff[cell] + ndx; // particle id to process ipos = buf.mpos[ i ]; } int gid = threadIdx.x; register float d2 = simData.psimscale * simData.psimscale; register float r2 = simData.r2 / d2; register float3 dist; register float c, dsq, sum; int neighbor; // copy neighbor cell counts to shared mem if ( gid < NUM_CELL ) { int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1; neighbor = cell - nadj + simData.gridAdj[gid]; // neighbor cell id ncnt[gid] = buf.mgridcnt [ neighbor ]; ngridoff[gid] = buf.mgridoff [ neighbor ]; } __syncthreads (); if ( gid == 0 ) { // compute neighbor local ndx (as prefix sum) int nsum = 0; for (int z=0; z < NUM_CELL; z++) { // 27-step prefix sum noff[z] = nsum; nsum += ncnt[z]; } } __syncthreads (); // copy particles into shared memory if ( gid < NUM_CELL ) { for (int j=0; j < ncnt[gid]; j++ ) { neighbor = buf.mgrid [ ngridoff[gid] + j ]; // neighbor particle id ndx = noff[ gid ] + j; cpos[ ndx ] = buf.mpos [ neighbor ]; } } __syncthreads (); // compute pressure for current particle if ( i == -1 ) return; int jnum = noff[LAST_CELL] + ncnt[LAST_CELL]; sum = 0.0; for (int j = 0; j < jnum; j++) { dist = ipos - cpos[ j ]; dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); if ( dsq > 0.0 && dsq < r2 ) { c = (r2 - dsq)*d2; sum += c * c * c; } } __syncthreads (); // put result into global mem sum = sum * simData.pmass * simData.poly6kern; if ( sum == 0.0 ) sum = 1.0; buf.mpress[ i ] = ( sum - simData.prest_dens ) * simData.pintstiff; buf.mdensity[ i ] = 1.0f / sum; } __device__ float3 contributeForce ( int i, float3 ipos, float3 iveleval, float ipress, float idens, int cell, bufList buf ) { float dsq, c, sum; register float d2 = simData.psimscale * simData.psimscale; register float r2 = simData.r2/d2; float3 dist, force; float pterm, dterm, vterm; int j; if ( buf.mgridcnt[cell] == 0 ) return make_float3(0,0,0); int cfirst = buf.mgridoff[ cell ]; int clast = cfirst + buf.mgridcnt[ cell ]; force = make_float3(0,0,0); vterm = simData.lapkern * simData.pvisc; for ( int cndx = cfirst; cndx < clast; cndx++ ) { j = buf.mgrid[ cndx ]; dist = ( ipos - buf.mpos[ j ] ); // dist in cm dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); if ( dsq < r2 && dsq > 0) { dsq = sqrt(dsq*d2); c = ( simData.psmoothradius - dsq ); pterm = simData.psimscale * -0.5f * c * simData.spikykern * ( ipress + buf.mpress[ j ] ) / dsq; dterm = c * idens * (buf.mdensity[ j ] ); force += ( pterm * dist + vterm * ( buf.mveleval[ j ] - iveleval )) * dterm; } } return force; } __global__ void computeForce ( bufList buf, int pnum) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; // Get search cell int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1; uint gc = buf.mgcell[ i ]; if ( gc == GRID_UNDEF ) return; // particle out-of-range gc -= nadj; // Sum Pressures register float3 ipos = 
buf.mpos[ i ]; register float3 iveleval = buf.mveleval[ i ]; register float ipress = buf.mpress[ i ]; register float idens = buf.mdensity[ i ]; register float d2 = simData.psimscale * simData.psimscale; register float r2 = simData.r2/d2; register float srad = simData.psmoothradius; register float vterm = simData.lapkern * simData.pvisc; register float spiky = simData.spikykern; register float ss = simData.psimscale; register float dsq, pterm, dterm; register float3 dist, force; register int j, cell, cfirst, clast;; force = make_float3(0,0,0); for (int c=0; c < simData.gridAdjCnt; c++) { force += contributeForce ( i, ipos, iveleval, ipress, idens, gc + simData.gridAdj[c], buf ); /*cell = gc + simData.gridAdj[c]; cfirst = buf.mgridoff[ cell ]; clast = cfirst + buf.mgridcnt[ cell ]; for ( int cndx = cfirst; cndx < clast; cndx++ ) { dist = ( ipos - buf.mpos_sort[cndx] ); // dist in cm dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); if ( dsq < r2 && dsq > 0) { j = buf.mgrid[ cndx ]; dsq = sqrt(dsq*d2); c = ( srad - dsq ); pterm = ss * -0.5f * c * spiky * ( ipress + buf.mpress[j] ) / dsq; dterm = c * idens * (buf.mdensity[j] ); //force += ( pterm * dist + vterm * ( buf.mveleval[j] - iveleval )) * dterm; } } __syncthreads ();*/ } buf.mforce[ i ] = force; } /*__global__ void computeForceNbr ( char* bufPnts, int* bufGrid, int numPnt ) { uint ndx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( ndx >= numPnt ) return; char* ioffs = bufPnts + __mul24(ndx, simData.stride ); float3 ipos = *(float3*) (ioffs + OFFSET_POS); float3 ivelval = *(float3*) (ioffs + OFFSET_VELEVAL); float press = *(float*) (ioffs + OFFSET_PRESS); float dens = *(float*) (ioffs + OFFSET_DENS); int icnt = *(int*) (ioffs + OFFSET_NBRCNT); char* joffs; float3 jpos, jveleval; float3 dist, force; float c, ndistj, pterm, dterm, vterm; vterm = simData.lapkern * simData.visc; force = make_float3(0,0,0); for (int nbr=0; nbr < icnt; nbr++) { // base 1, n[0] = count ndistj = bufNdist[ndx][nbr]; joffs = bufPnts + __mul24(bufNeighbor[ndx][nbr], simData.stride); jpos = *(float3*) (joffs + OFFSET_POS); jveleval = *(float3*) (joffs + OFFSET_VELEVAL); c = ( simData.smooth_rad - ndistj ); dist.x = ( ipos.x - jpos.x ); // dist in cm dist.y = ( ipos.y - jpos.y ); dist.z = ( ipos.z - jpos.z ); pterm = simData.sim_scale * -0.5f * c * simData.spikykern * ( press + *(float*)(joffs+OFFSET_PRESS) ) / ndistj; dterm = c * dens * *(float*)(joffs+OFFSET_DENS); force.x += ( pterm * dist.x + vterm * ( jveleval.x - ivelval.x )) * dterm; force.y += ( pterm * dist.y + vterm * ( jveleval.y - ivelval.y )) * dterm; force.z += ( pterm * dist.z + vterm * ( jveleval.z - ivelval.z )) * dterm; } *(float3*) ( ioffs + OFFSET_FORCE ) = force; }*/ __global__ void advanceParticles ( float time, float dt, float ss, bufList buf, int numPnts ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= numPnts ) return; if ( buf.mgcell[i] == GRID_UNDEF ) { buf.mpos[i] = make_float3(-1000,-1000,-1000); buf.mvel[i] = make_float3(0,0,0); return; } // Get particle vars register float3 accel, norm; register float diff, adj, speed; register float3 pos = buf.mpos[i]; register float3 veval = buf.mveleval[i]; // Leapfrog integration accel = buf.mforce[i]; accel *= simData.pmass; // Boundaries // Y-axis diff = simData.pradius - (pos.y - (simData.pboundmin.y + (pos.x-simData.pboundmin.x)*simData.pground_slope )) * ss; if ( diff > EPSILON ) { norm = make_float3( -simData.pground_slope, 1.0 - simData.pground_slope, 0); adj = 
simData.pextstiff * diff - simData.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } diff = simData.pradius - ( simData.pboundmax.y - pos.y )*ss; if ( diff > EPSILON ) { norm = make_float3(0, -1, 0); adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } // X-axis diff = simData.pradius - (pos.x - (simData.pboundmin.x + (sin(time*simData.pforce_freq)+1)*0.5 * simData.pforce_min))*ss; if ( diff > EPSILON ) { norm = make_float3( 1, 0, 0); adj = (simData.pforce_min+1) * simData.pextstiff * diff - simData.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } diff = simData.pradius - ( (simData.pboundmax.x - (sin(time*simData.pforce_freq)+1)*0.5*simData.pforce_max) - pos.x)*ss; if ( diff > EPSILON ) { norm = make_float3(-1, 0, 0); adj = (simData.pforce_max+1) * simData.pextstiff * diff - simData.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } // Z-axis diff = simData.pradius - (pos.z - simData.pboundmin.z ) * ss; if ( diff > EPSILON ) { norm = make_float3( 0, 0, 1 ); adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } diff = simData.pradius - ( simData.pboundmax.z - pos.z )*ss; if ( diff > EPSILON ) { norm = make_float3( 0, 0, -1 ); adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } // Gravity accel += simData.pgravity; // Accel Limit speed = accel.x*accel.x + accel.y*accel.y + accel.z*accel.z; if ( speed > simData.AL2 ) { accel *= simData.AL / sqrt(speed); } // Velocity Limit float3 vel = buf.mvel[i]; speed = vel.x*vel.x + vel.y*vel.y + vel.z*vel.z; if ( speed > simData.VL2 ) { speed = simData.VL2; vel *= simData.VL / sqrt(speed); } // Ocean colors if ( speed > simData.VL2*0.2) { adj = simData.VL2*0.2; buf.mclr[i] += (( buf.mclr[i] & 0xFF) < 0xFD ) ? +0x00000002 : 0; // decrement R by one buf.mclr[i] += (( (buf.mclr[i]>>8) & 0xFF) < 0xFD ) ? +0x00000200 : 0; // decrement G by one buf.mclr[i] += (( (buf.mclr[i]>>16) & 0xFF) < 0xFD ) ? +0x00020000 : 0; // decrement G by one } if ( speed < 0.03 ) { int v = int(speed/.01)+1; buf.mclr[i] += (( buf.mclr[i] & 0xFF) > 0x80 ) ? -0x00000001 * v : 0; // decrement R by one buf.mclr[i] += (( (buf.mclr[i]>>8) & 0xFF) > 0x80 ) ? -0x00000100 * v : 0; // decrement G by one } //-- surface particle density //buf.mclr[i] = buf.mclr[i] & 0x00FFFFFF; //if ( buf.mdensity[i] > 0.0014 ) buf.mclr[i] += 0xAA000000; // Leap-frog Integration float3 vnext = accel*dt + vel; // v(t+1/2) = v(t-1/2) + a(t) dt buf.mveleval[i] = (vel + vnext) * 0.5; // v(t+1) = [v(t-1/2) + v(t+1/2)] * 0.5 buf.mvel[i] = vnext; buf.mpos[i] += vnext * (dt/ss); // p(t+1) = p(t) + v(t+1/2) dt }
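// --- Added note (not part of the original FLUIDS v.3 kernel file) ------------
// advanceParticles() above integrates with a leapfrog scheme. The sketch below
// is a minimal host-side restatement of that update for a single particle, so
// the rule can be checked on the CPU. Vec3, State, axpy and leapfrog_step are
// hypothetical names introduced here; the boundary forces, gravity and the
// accel/velocity limits applied by the kernel are deliberately left out.
//   v(t+1/2) = v(t-1/2) + a(t)*dt
//   veleval  = 0.5*( v(t-1/2) + v(t+1/2) )      // velocity at the integer step
//   p(t+1)   = p(t) + v(t+1/2)*dt/ss            // ss: simulation-to-world scale
struct Vec3 { float x, y, z; };

static Vec3 axpy ( float a, Vec3 u, Vec3 v )     // returns a*u + v
{
	Vec3 r;
	r.x = a*u.x + v.x;  r.y = a*u.y + v.y;  r.z = a*u.z + v.z;
	return r;
}

struct State { Vec3 pos, vel, veleval; };

static void leapfrog_step ( State& s, Vec3 accel, float dt, float ss )
{
	Vec3 vnext = axpy ( dt, accel, s.vel );      // v(t+1/2) = v(t-1/2) + a*dt
	s.veleval  = axpy ( 1.0f, s.vel, vnext );    // v(t-1/2) + v(t+1/2) ...
	s.veleval.x *= 0.5f;  s.veleval.y *= 0.5f;  s.veleval.z *= 0.5f;   // ... halved
	s.vel = vnext;
	s.pos = axpy ( dt/ss, vnext, s.pos );        // p(t+1) = p(t) + v(t+1/2)*dt/ss
}
// ------------------------------------------------------------------------------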
5f46323f099c69452a1c44802e1f9619ba1d953b.cu
/* FLUIDS v.3 - SPH Fluid Simulator for CPU and GPU Copyright (C) 2012. Rama Hoetzlein, http://fluids3.com Fluids-ZLib license (* see part 1 below) This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. Acknowledgement of the original author is required if you publish this in a paper, or use it in a product. (See fluids3.com for details) 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ #define CUDA_KERNEL #include "fluid_system_kern.cuh" #include "cutil_math.h" #include "radixsort.cu" // Build in RadixSort __device__ FluidParams simData; __device__ uint gridActive; __global__ void insertParticles ( bufList buf, int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; register float3 gridMin = simData.gridMin; register float3 gridDelta = simData.gridDelta; register int3 gridRes = simData.gridRes; register int3 gridScan = simData.gridScanMax; register float poff = simData.psmoothradius / simData.psimscale; register int gs; register float3 gcf; register int3 gc; gcf = (buf.mpos[i] - gridMin) * gridDelta; gc = make_int3( int(gcf.x), int(gcf.y), int(gcf.z) ); gs = (gc.y * gridRes.z + gc.z)*gridRes.x + gc.x; if ( gc.x >= 1 && gc.x <= gridScan.x && gc.y >= 1 && gc.y <= gridScan.y && gc.z >= 1 && gc.z <= gridScan.z ) { buf.mgcell[i] = gs; // Grid cell insert. buf.mgndx[i] = atomicAdd ( &buf.mgridcnt[ gs ], 1 ); // Grid counts. gcf = (-make_float3(poff,poff,poff) + buf.mpos[i] - gridMin) * gridDelta; gc = make_int3( int(gcf.x), int(gcf.y), int(gcf.z) ); gs = ( gc.y * gridRes.z + gc.z)*gridRes.x + gc.x; //buf.mcluster[i] = gs; -- make sure it is allocated! } else { buf.mgcell[i] = GRID_UNDEF; //buf.mcluster[i] = GRID_UNDEF; -- make sure it is allocated! 
} } // the mutex variable __device__ int g_mutex = 0; // GPU simple synchronization function __device__ void __gpu_sync(int goalVal) { __threadfence (); // only thread 0 is used for synchronization if (threadIdx.x == 0) atomicAdd(&g_mutex, 1); // only when all blocks add 1 to g_mutex will // g_mutex equal to goalVal while(g_mutex < goalVal) { // infinite loop until g_mutx = goalVal } if ( blockIdx.x == 0 && threadIdx.x == 0 ) g_mutex = 0; __syncthreads(); } // countingSortInPlace -- GPU_SYNC DOES NOT WORK /*uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) { __gpu_sync ( 2 ); return; } register float3 ipos, ivel, iveleval, iforce; register float ipress, idens; register int icell, indx, iclr; icell = buf.mgcell [ i ]; indx = buf.mgndx [ i ]; int sort_ndx = buf.mgridoff[ icell ] + indx; // global_ndx = grid_cell_offet + particle_offset if ( icell == GRID_UNDEF ) { __gpu_sync ( 2 ); return; } ipos = buf.mpos [ i ]; ivel = buf.mvel [ i ]; iveleval = buf.mveleval [ i ]; iforce = buf.mforce [ i ]; ipress = buf.mpress [ i ]; idens = buf.mdensity [ i ]; iclr = buf.mclr [ i ]; __gpu_sync ( 2 ) ; //threadfence(); // make sure every thread in all blocks has their data buf.mpos [ sort_ndx ] = ipos; buf.mvel [ sort_ndx ] = ivel; buf.mveleval [ sort_ndx ] = iveleval; buf.mforce [ sort_ndx ] = iforce; buf.mpress [ sort_ndx ] = ipress; buf.mdensity [ sort_ndx ] = idens; buf.mclr [ sort_ndx ] = iclr;*/ // Counting Sort - Index __global__ void countingSortIndex ( bufList buf, int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; uint icell = buf.mgcell[i]; uint indx = buf.mgndx[i]; int sort_ndx = buf.mgridoff[ icell ] + indx; // global_ndx = grid_cell_offet + particle_offset if ( icell != GRID_UNDEF ) { buf.mgrid[ sort_ndx ] = i; // index sort, grid refers to original particle order } } // Counting Sort - Full (deep copy) __global__ void countingSortFull ( bufList buf, int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; uint icell = *(uint*) (buf.msortbuf + pnum*BUF_GCELL + i*sizeof(uint) ); uint indx = *(uint*) (buf.msortbuf + pnum*BUF_GNDX + i*sizeof(uint) ); int sort_ndx = buf.mgridoff[ icell ] + indx; // global_ndx = grid_cell_offet + particle_offset if ( icell != GRID_UNDEF ) { buf.mgrid[ sort_ndx ] = sort_ndx; // full sort, grid indexing becomes identity char* bpos = buf.msortbuf + i*sizeof(float3); buf.mpos[ sort_ndx ] = *(float3*) (bpos); buf.mvel[ sort_ndx ] = *(float3*) (bpos + pnum*BUF_VEL ); buf.mveleval[ sort_ndx ] = *(float3*) (bpos + pnum*BUF_VELEVAL ); buf.mforce[ sort_ndx ] = *(float3*) (bpos + pnum*BUF_FORCE ); buf.mpress[ sort_ndx ] = *(float*) (buf.msortbuf + pnum*BUF_PRESS + i*sizeof(float) ); buf.mdensity[ sort_ndx ] = *(float*) (buf.msortbuf + pnum*BUF_DENS + i*sizeof(float) ); buf.mclr[ sort_ndx ] = *(uint*) (buf.msortbuf + pnum*BUF_CLR+ i*sizeof(uint) ); // ((uint) 255)<<24; -- dark matter buf.mgcell[ sort_ndx ] = icell; buf.mgndx[ sort_ndx ] = indx; } } __global__ void countActiveCells ( bufList buf, int pnum ) { if ( threadIdx.x == 0 ) { // use only one processor gridActive = -1; int last_ndx = buf.mgridoff [ simData.gridTotal-1 ] + buf.mgridcnt[ simData.gridTotal-1 ] - 1; int last_p = buf.mgrid[ last_ndx ]; int last_cell = buf.mgcell[ last_p ]; int first_p = buf.mgrid[ 0 ]; int first_cell = buf.mgcell[ first_p ] ; int id, cell, cnt = 0, curr = 0; cell = first_cell; while ( cell < last_cell ) { buf.mgridactive[ cnt ] = cell; 
// add cell to active list cnt++; curr += buf.mgridcnt[cell]; // advance to next active cell // id = buf.mgrid[curr]; // get particle id -- when unsorted only cell = buf.mgcell [ curr ]; // get cell we are in -- use id when unsorted } gridActive = cnt; } __syncthreads(); } __device__ float contributePressure ( int i, float3 p, int cell, bufList buf ) { float3 dist; float dsq, c, sum; register float d2 = simData.psimscale * simData.psimscale; register float r2 = simData.r2/d2; int j; sum = 0.0; if ( buf.mgridcnt[cell] == 0 ) return 0.0; int cfirst = buf.mgridoff[ cell ]; int clast = cfirst + buf.mgridcnt[ cell ]; for ( int cndx = cfirst; cndx < clast; cndx++ ) { dist = p - buf.mpos[ buf.mgrid[cndx] ]; dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); if ( dsq < r2 && dsq > 0.0) { c = (r2 - dsq)*d2; sum += c * c * c; } } return sum; } __global__ void computePressure ( bufList buf, int pnum ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; // Get search cell int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1; uint gc = buf.mgcell[ i ]; if ( gc == GRID_UNDEF ) return; // particle out-of-range gc -= nadj; // Sum Pressures float3 pos = buf.mpos[ i ]; float sum = 0.0; for (int c=0; c < simData.gridAdjCnt; c++) { sum += contributePressure ( i, pos, gc + simData.gridAdj[c], buf ); __syncthreads(); } // Compute Density & Pressure sum = sum * simData.pmass * simData.poly6kern; if ( sum == 0.0 ) sum = 1.0; buf.mpress[ i ] = ( sum - simData.prest_dens ) * simData.pintstiff; buf.mdensity[ i ] = 1.0f / sum; } /*FindNeighbors int cid = blockIdx.x * blockSize.x + blockIdx.y; // cluster id int pid = threadIdx.x; // 0 to 85 (max particles per cell) __shared__ Particle clist[ 85 ]; __shared__ Particle plist[ 85*8 ]; if ( pid < clusterCnt[cid] ) clist [ pid ] = particles [ clusterNdx[cid] + pid ]; for ( gid = 0; gid < 8; gid++ ) { if ( pid < gridCnt[ cid + group[gid] ] ) plist [ cid*CELL_CNT + pid ] = particles [ sortNdx[ cid + group[gid] ] + pid ]; } __syncthreads(); for ( int j = 0; j < cellcnt; j++ ) { dst = plist[ pid ] - plist[ j ]; if ( dst < R2 ) { ... 
} }*/ /*grid block <gx, gy, gz> <1, 32, 64> 256, 256, 256 total: */ #define LOCAL_PMAX 896 #define NUM_CELL 27 #define LAST_CELL 26 #define CENTER_CELL 13 __global__ void computePressureGroup ( bufList buf, int pnum ) { __shared__ float3 cpos[ LOCAL_PMAX ]; __shared__ int ncnt[ NUM_CELL ]; __shared__ int ngridoff[ NUM_CELL ]; __shared__ int noff[ NUM_CELL ]; int bid = __mul24( blockIdx.y, gridDim.x ) + blockIdx.x; if ( bid > gridActive ) return; // block must be in a valid grid uint cell = buf.mgridactive [ bid ]; // get grid cell (from blockID 1:1) register int i = -1; register float3 ipos; uint ndx = threadIdx.x; if ( ndx < buf.mgridcnt[cell] ) { i = buf.mgridoff[cell] + ndx; // particle id to process ipos = buf.mpos[ i ]; } int gid = threadIdx.x; register float d2 = simData.psimscale * simData.psimscale; register float r2 = simData.r2 / d2; register float3 dist; register float c, dsq, sum; int neighbor; // copy neighbor cell counts to shared mem if ( gid < NUM_CELL ) { int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1; neighbor = cell - nadj + simData.gridAdj[gid]; // neighbor cell id ncnt[gid] = buf.mgridcnt [ neighbor ]; ngridoff[gid] = buf.mgridoff [ neighbor ]; } __syncthreads (); if ( gid == 0 ) { // compute neighbor local ndx (as prefix sum) int nsum = 0; for (int z=0; z < NUM_CELL; z++) { // 27-step prefix sum noff[z] = nsum; nsum += ncnt[z]; } } __syncthreads (); // copy particles into shared memory if ( gid < NUM_CELL ) { for (int j=0; j < ncnt[gid]; j++ ) { neighbor = buf.mgrid [ ngridoff[gid] + j ]; // neighbor particle id ndx = noff[ gid ] + j; cpos[ ndx ] = buf.mpos [ neighbor ]; } } __syncthreads (); // compute pressure for current particle if ( i == -1 ) return; int jnum = noff[LAST_CELL] + ncnt[LAST_CELL]; sum = 0.0; for (int j = 0; j < jnum; j++) { dist = ipos - cpos[ j ]; dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); if ( dsq > 0.0 && dsq < r2 ) { c = (r2 - dsq)*d2; sum += c * c * c; } } __syncthreads (); // put result into global mem sum = sum * simData.pmass * simData.poly6kern; if ( sum == 0.0 ) sum = 1.0; buf.mpress[ i ] = ( sum - simData.prest_dens ) * simData.pintstiff; buf.mdensity[ i ] = 1.0f / sum; } __device__ float3 contributeForce ( int i, float3 ipos, float3 iveleval, float ipress, float idens, int cell, bufList buf ) { float dsq, c, sum; register float d2 = simData.psimscale * simData.psimscale; register float r2 = simData.r2/d2; float3 dist, force; float pterm, dterm, vterm; int j; if ( buf.mgridcnt[cell] == 0 ) return make_float3(0,0,0); int cfirst = buf.mgridoff[ cell ]; int clast = cfirst + buf.mgridcnt[ cell ]; force = make_float3(0,0,0); vterm = simData.lapkern * simData.pvisc; for ( int cndx = cfirst; cndx < clast; cndx++ ) { j = buf.mgrid[ cndx ]; dist = ( ipos - buf.mpos[ j ] ); // dist in cm dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); if ( dsq < r2 && dsq > 0) { dsq = sqrt(dsq*d2); c = ( simData.psmoothradius - dsq ); pterm = simData.psimscale * -0.5f * c * simData.spikykern * ( ipress + buf.mpress[ j ] ) / dsq; dterm = c * idens * (buf.mdensity[ j ] ); force += ( pterm * dist + vterm * ( buf.mveleval[ j ] - iveleval )) * dterm; } } return force; } __global__ void computeForce ( bufList buf, int pnum) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= pnum ) return; // Get search cell int nadj = (1*simData.gridRes.z + 1)*simData.gridRes.x + 1; uint gc = buf.mgcell[ i ]; if ( gc == GRID_UNDEF ) return; // particle out-of-range gc -= nadj; // Sum Pressures register float3 ipos = 
buf.mpos[ i ]; register float3 iveleval = buf.mveleval[ i ]; register float ipress = buf.mpress[ i ]; register float idens = buf.mdensity[ i ]; register float d2 = simData.psimscale * simData.psimscale; register float r2 = simData.r2/d2; register float srad = simData.psmoothradius; register float vterm = simData.lapkern * simData.pvisc; register float spiky = simData.spikykern; register float ss = simData.psimscale; register float dsq, pterm, dterm; register float3 dist, force; register int j, cell, cfirst, clast;; force = make_float3(0,0,0); for (int c=0; c < simData.gridAdjCnt; c++) { force += contributeForce ( i, ipos, iveleval, ipress, idens, gc + simData.gridAdj[c], buf ); /*cell = gc + simData.gridAdj[c]; cfirst = buf.mgridoff[ cell ]; clast = cfirst + buf.mgridcnt[ cell ]; for ( int cndx = cfirst; cndx < clast; cndx++ ) { dist = ( ipos - buf.mpos_sort[cndx] ); // dist in cm dsq = (dist.x*dist.x + dist.y*dist.y + dist.z*dist.z); if ( dsq < r2 && dsq > 0) { j = buf.mgrid[ cndx ]; dsq = sqrt(dsq*d2); c = ( srad - dsq ); pterm = ss * -0.5f * c * spiky * ( ipress + buf.mpress[j] ) / dsq; dterm = c * idens * (buf.mdensity[j] ); //force += ( pterm * dist + vterm * ( buf.mveleval[j] - iveleval )) * dterm; } } __syncthreads ();*/ } buf.mforce[ i ] = force; } /*__global__ void computeForceNbr ( char* bufPnts, int* bufGrid, int numPnt ) { uint ndx = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( ndx >= numPnt ) return; char* ioffs = bufPnts + __mul24(ndx, simData.stride ); float3 ipos = *(float3*) (ioffs + OFFSET_POS); float3 ivelval = *(float3*) (ioffs + OFFSET_VELEVAL); float press = *(float*) (ioffs + OFFSET_PRESS); float dens = *(float*) (ioffs + OFFSET_DENS); int icnt = *(int*) (ioffs + OFFSET_NBRCNT); char* joffs; float3 jpos, jveleval; float3 dist, force; float c, ndistj, pterm, dterm, vterm; vterm = simData.lapkern * simData.visc; force = make_float3(0,0,0); for (int nbr=0; nbr < icnt; nbr++) { // base 1, n[0] = count ndistj = bufNdist[ndx][nbr]; joffs = bufPnts + __mul24(bufNeighbor[ndx][nbr], simData.stride); jpos = *(float3*) (joffs + OFFSET_POS); jveleval = *(float3*) (joffs + OFFSET_VELEVAL); c = ( simData.smooth_rad - ndistj ); dist.x = ( ipos.x - jpos.x ); // dist in cm dist.y = ( ipos.y - jpos.y ); dist.z = ( ipos.z - jpos.z ); pterm = simData.sim_scale * -0.5f * c * simData.spikykern * ( press + *(float*)(joffs+OFFSET_PRESS) ) / ndistj; dterm = c * dens * *(float*)(joffs+OFFSET_DENS); force.x += ( pterm * dist.x + vterm * ( jveleval.x - ivelval.x )) * dterm; force.y += ( pterm * dist.y + vterm * ( jveleval.y - ivelval.y )) * dterm; force.z += ( pterm * dist.z + vterm * ( jveleval.z - ivelval.z )) * dterm; } *(float3*) ( ioffs + OFFSET_FORCE ) = force; }*/ __global__ void advanceParticles ( float time, float dt, float ss, bufList buf, int numPnts ) { uint i = __mul24(blockIdx.x, blockDim.x) + threadIdx.x; // particle index if ( i >= numPnts ) return; if ( buf.mgcell[i] == GRID_UNDEF ) { buf.mpos[i] = make_float3(-1000,-1000,-1000); buf.mvel[i] = make_float3(0,0,0); return; } // Get particle vars register float3 accel, norm; register float diff, adj, speed; register float3 pos = buf.mpos[i]; register float3 veval = buf.mveleval[i]; // Leapfrog integration accel = buf.mforce[i]; accel *= simData.pmass; // Boundaries // Y-axis diff = simData.pradius - (pos.y - (simData.pboundmin.y + (pos.x-simData.pboundmin.x)*simData.pground_slope )) * ss; if ( diff > EPSILON ) { norm = make_float3( -simData.pground_slope, 1.0 - simData.pground_slope, 0); adj = 
simData.pextstiff * diff - simData.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } diff = simData.pradius - ( simData.pboundmax.y - pos.y )*ss; if ( diff > EPSILON ) { norm = make_float3(0, -1, 0); adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } // X-axis diff = simData.pradius - (pos.x - (simData.pboundmin.x + (sin(time*simData.pforce_freq)+1)*0.5 * simData.pforce_min))*ss; if ( diff > EPSILON ) { norm = make_float3( 1, 0, 0); adj = (simData.pforce_min+1) * simData.pextstiff * diff - simData.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } diff = simData.pradius - ( (simData.pboundmax.x - (sin(time*simData.pforce_freq)+1)*0.5*simData.pforce_max) - pos.x)*ss; if ( diff > EPSILON ) { norm = make_float3(-1, 0, 0); adj = (simData.pforce_max+1) * simData.pextstiff * diff - simData.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } // Z-axis diff = simData.pradius - (pos.z - simData.pboundmin.z ) * ss; if ( diff > EPSILON ) { norm = make_float3( 0, 0, 1 ); adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } diff = simData.pradius - ( simData.pboundmax.z - pos.z )*ss; if ( diff > EPSILON ) { norm = make_float3( 0, 0, -1 ); adj = simData.pextstiff * diff - simData.pdamp * dot(norm, veval ); norm *= adj; accel += norm; } // Gravity accel += simData.pgravity; // Accel Limit speed = accel.x*accel.x + accel.y*accel.y + accel.z*accel.z; if ( speed > simData.AL2 ) { accel *= simData.AL / sqrt(speed); } // Velocity Limit float3 vel = buf.mvel[i]; speed = vel.x*vel.x + vel.y*vel.y + vel.z*vel.z; if ( speed > simData.VL2 ) { speed = simData.VL2; vel *= simData.VL / sqrt(speed); } // Ocean colors if ( speed > simData.VL2*0.2) { adj = simData.VL2*0.2; buf.mclr[i] += (( buf.mclr[i] & 0xFF) < 0xFD ) ? +0x00000002 : 0; // decrement R by one buf.mclr[i] += (( (buf.mclr[i]>>8) & 0xFF) < 0xFD ) ? +0x00000200 : 0; // decrement G by one buf.mclr[i] += (( (buf.mclr[i]>>16) & 0xFF) < 0xFD ) ? +0x00020000 : 0; // decrement G by one } if ( speed < 0.03 ) { int v = int(speed/.01)+1; buf.mclr[i] += (( buf.mclr[i] & 0xFF) > 0x80 ) ? -0x00000001 * v : 0; // decrement R by one buf.mclr[i] += (( (buf.mclr[i]>>8) & 0xFF) > 0x80 ) ? -0x00000100 * v : 0; // decrement G by one } //-- surface particle density //buf.mclr[i] = buf.mclr[i] & 0x00FFFFFF; //if ( buf.mdensity[i] > 0.0014 ) buf.mclr[i] += 0xAA000000; // Leap-frog Integration float3 vnext = accel*dt + vel; // v(t+1/2) = v(t-1/2) + a(t) dt buf.mveleval[i] = (vel + vnext) * 0.5; // v(t+1) = [v(t-1/2) + v(t+1/2)] * 0.5 buf.mvel[i] = vnext; buf.mpos[i] += vnext * (dt/ss); // p(t+1) = p(t) + v(t+1/2) dt }
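// --- Added note (not part of the original file) -------------------------------
// Self-contained CUDA sketch of the counting-sort idea used by insertParticles
// and countingSortIndex above: (1) a per-cell histogram built with atomicAdd,
// where the returned old count doubles as the particle's rank inside its cell
// (mgndx), (2) an exclusive prefix sum over the counts to get cell offsets
// (mgridoff; the real code computes this with its own GPU scan), and (3) a
// scatter into cell-grouped order (mgrid). Kernel and variable names below are
// hypothetical, and the order of particles inside one cell depends on atomic
// scheduling, just as in the original.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void histogramCells ( const int* cellOf, int* cellCnt, int* rankInCell, int pnum )
{
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	if ( i >= pnum ) return;
	rankInCell[i] = atomicAdd ( &cellCnt[ cellOf[i] ], 1 );   // old count = rank in cell
}

__global__ void scatterByCell ( const int* cellOf, const int* cellOff, const int* rankInCell, int* sortedNdx, int pnum )
{
	int i = blockIdx.x*blockDim.x + threadIdx.x;
	if ( i >= pnum ) return;
	sortedNdx[ cellOff[ cellOf[i] ] + rankInCell[i] ] = i;    // global slot = cell offset + rank
}

int main ()
{
	const int pnum = 8, ncell = 4;
	int h_cell[pnum] = { 2, 0, 1, 2, 0, 3, 1, 2 };            // toy particle->cell assignment
	int *d_cell, *d_cnt, *d_rank, *d_off, *d_sorted;
	cudaMalloc ( (void**)&d_cell,   pnum*sizeof(int) );
	cudaMalloc ( (void**)&d_cnt,    ncell*sizeof(int) );
	cudaMalloc ( (void**)&d_rank,   pnum*sizeof(int) );
	cudaMalloc ( (void**)&d_off,    ncell*sizeof(int) );
	cudaMalloc ( (void**)&d_sorted, pnum*sizeof(int) );
	cudaMemcpy ( d_cell, h_cell, pnum*sizeof(int), cudaMemcpyHostToDevice );
	cudaMemset ( d_cnt, 0, ncell*sizeof(int) );

	histogramCells<<< 1, 64 >>> ( d_cell, d_cnt, d_rank, pnum );

	int h_cnt[ncell], h_off[ncell], sum = 0;                  // exclusive scan on the host
	cudaMemcpy ( h_cnt, d_cnt, ncell*sizeof(int), cudaMemcpyDeviceToHost );
	for ( int c = 0; c < ncell; c++ ) { h_off[c] = sum; sum += h_cnt[c]; }
	cudaMemcpy ( d_off, h_off, ncell*sizeof(int), cudaMemcpyHostToDevice );

	scatterByCell<<< 1, 64 >>> ( d_cell, d_off, d_rank, d_sorted, pnum );

	int h_sorted[pnum];
	cudaMemcpy ( h_sorted, d_sorted, pnum*sizeof(int), cudaMemcpyDeviceToHost );
	for ( int i = 0; i < pnum; i++ ) printf ( "%d ", h_sorted[i] );   // particle ids grouped by cell
	printf ( "\n" );
	cudaFree(d_cell); cudaFree(d_cnt); cudaFree(d_rank); cudaFree(d_off); cudaFree(d_sorted);
	return 0;
}
// ------------------------------------------------------------------------------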
0fa572080b97cb8c02a7166e1bafa662dc74d995.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include "slenet_params.h" #define INSIZE 28 #define INFO_BYTE_SIZE 4 #define INITIAL_WEIGHT_VALUE -1.0f #define INITIAL_FC_WEIGHT_VALUE 1.0f #define IMAGE_WIDTH 28 #define IMAGE_HEIGHT 28 #define CONV_FILTER 5 #define SS_FILTER 4 #define FEATURES 6 #define NEURONS 10 #define CONV_OUTPUT 24 #define SS_OUTPUT 6 #define FC_OUTPUT 10 //kernel function that fill mnist_data structure->data with normalized pixel values __global__ void fillArr(unsigned char pixels[INSIZE][INSIZE], double data[INSIZE][INSIZE]){ // TO DO const int i = threadIdx.x + blockIdx.x * blockDim.x; const int j = threadIdx.y + blockIdx.y * blockDim.y; if(i<INSIZE && j<INSIZE) data[i][j] = pixels[i][j]/255.0; } //kernel function that changes the values >0 to 1 and double type to integer type __global__ void showArr(double ddata[INSIZE][INSIZE], int dshow[INSIZE][INSIZE]){ // TO DO const int i = threadIdx.x + blockIdx.x * blockDim.x; const int j = threadIdx.y + blockIdx.y * blockDim.y; if(i<INSIZE && j<INSIZE){ if(ddata[i][j]>0) dshow[i][j] = 1; else dshow[i][j] = 0; } } //mnist data structure typedef struct mnist_data{ double data[INSIZE][INSIZE]; unsigned int label; }mnist_data; //structure for images header information typedef struct images_info{ char magic_num_images[INFO_BYTE_SIZE]; char amount_images[INFO_BYTE_SIZE]; char rows[INFO_BYTE_SIZE]; char columns[INFO_BYTE_SIZE]; }images_info; //structure for labels header information typedef struct labels_info{ char magic_num_labels[INFO_BYTE_SIZE]; char amount_labels[INFO_BYTE_SIZE]; }labels_info; //Hexadecimal to integer static unsigned int mnist_bin_to_int(char *tmp){ int val = (tmp[0] << 24 | tmp[1] << 16 | tmp[2] << 8 | tmp[3] ); return val; } static int mnist_load(const char *image_filename, const char *label_filename, mnist_data **data_set,unsigned int *count){ images_info i_info; labels_info l_info; //opening the files FILE *images = fopen(image_filename,"rb"); FILE *labels = fopen(label_filename,"rb"); if(images==NULL||labels==NULL){ return -1; } //read header info fread(&i_info,sizeof(images_info),1,images); fread(&l_info,sizeof(labels_info),1,labels); //check and print header info int magic_num_images_as_int = mnist_bin_to_int(i_info.magic_num_images); if(magic_num_images_as_int != 2051){ printf("Problems with 'image magic number'. It is equal to %d, but should be 2051.",magic_num_images_as_int); return -1; } else{ printf("image magic number = %d (should be 2051)\n", magic_num_images_as_int); } int magic_num_labels_as_int = mnist_bin_to_int(l_info.magic_num_labels); if(magic_num_labels_as_int != 2049){ printf("Problems with 'label magic number'. It is equal to %d, but should be 2049.",magic_num_labels_as_int); return -1; } else{ printf("label magic number = %d (should be 2049)\n", magic_num_labels_as_int); } int amount_images_as_int = mnist_bin_to_int(i_info.amount_images); if(amount_images_as_int != 10000){ printf("Problems with 'image total number'. It is equal to %d, but should be 10000.",amount_images_as_int); return -1; } else{ printf("image total number = %d (should be 10000)\n", amount_images_as_int); } int amount_labels_as_int = mnist_bin_to_int(l_info.amount_labels); if(amount_labels_as_int != 10000){ printf("Problems with 'label total number'. 
It is equal to %d, but should be 10000.",amount_labels_as_int); return -1; } else{ printf("label total number = %d (should be 10000)\n", amount_labels_as_int); } int rows_as_int = mnist_bin_to_int(i_info.rows); int columns_as_int = mnist_bin_to_int(i_info.columns); if((rows_as_int != 28)||(columns_as_int!=28)){ printf("Problems with dimensions of images. Dimensions of images are not compitable with 28x28."); return -1; } else{ printf("rows = %d, cols = %d (both should be 28)\n", rows_as_int,columns_as_int); } unsigned char pixels[INSIZE][INSIZE]; char label; for(int k = 0;k<10000;k++){ //read current necessary data point fread(pixels,sizeof(pixels),1,images); fread(&label,sizeof(char),1,labels); //fill mnist_data struct -> data array with double values of pixels using cuda unsigned char (*dpixels)[INSIZE]; double (*ddata)[INSIZE]; hipMalloc((void**)&dpixels, INSIZE*INSIZE*sizeof(char)); hipMalloc((void**)&ddata, INSIZE*INSIZE*sizeof(double)); hipMemcpy(dpixels, pixels, INSIZE*INSIZE*sizeof(unsigned char), hipMemcpyHostToDevice); dim3 blocks(1,1); dim3 threads(INSIZE,INSIZE); hipLaunchKernelGGL(( fillArr), dim3(blocks), dim3(threads), 0, 0, dpixels,ddata); hipMemcpy((*data_set+*count)->data, ddata, INSIZE*INSIZE*sizeof(double), hipMemcpyDeviceToHost); hipFree(dpixels); hipFree(ddata); //assign mnist_data struct -> label with label (*data_set+*count)->label = (int)label; //increment count *count+=1; } //close files fclose(images); fclose(labels); return 0; } //Convolution layer. Filtering. __global__ void conv_filtering(float d_data[28][28], float d_weight[6][5][5], float d_pre_output[6][24][24]){ const int local_row = threadIdx.y; const int local_column = threadIdx.z; const int feature = threadIdx.x; const int global_row = blockIdx.x+threadIdx.y; const int global_column = blockIdx.y+threadIdx.z; const int output_row = blockIdx.x; const int output_column = blockIdx.y; __shared__ float temp[FEATURES][CONV_FILTER][CONV_FILTER]; __shared__ float pre_sum[FEATURES][CONV_FILTER]; temp[feature][local_row][local_column] = d_data[global_row][global_column]*d_weight[feature][local_row][local_column]; __syncthreads(); if(local_column==0){ float temp_sum = 0.0f; for(int i =0; i< CONV_FILTER;i++){ temp_sum+=temp[feature][local_row][i]; } pre_sum[feature][local_row] = temp_sum; __syncthreads(); if(local_row==0){ float sum = 0.0f; for(int i =0; i< CONV_FILTER;i++){ sum+=pre_sum[feature][i]; } d_pre_output[feature][output_row][output_column] = sum; } } } //Convolution layer. Biasing. __global__ void conv_biasing(float d_pre_output[6][24][24], float d_bias[6]){ const int x = blockIdx.x*blockDim.x+threadIdx.x; const int y = blockIdx.y*blockDim.y+threadIdx.y; const int feature = blockIdx.z; d_pre_output[feature][x][y] += d_bias[feature]; } //Convolution layer. Sigmoid. __global__ void conv_sigmoid(float d_pre_output[6][24][24], float d_output[6][24][24]){ const int x = blockIdx.x*blockDim.x+threadIdx.x; const int y = blockIdx.y*blockDim.y+threadIdx.y; const int feature = blockIdx.z; d_output[feature][x][y] = 1/(1+expf((-1)*d_pre_output[feature][x][y])); } //SubSampling layer. Filtering. 
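// Added note: ss_filtering below is launched (in forward_pass) with a 6x6 grid
// and (FEATURES, SS_FILTER, SS_FILTER) = (6,4,4) threads per block. Each block
// therefore owns one non-overlapping 4x4 window (stride 4) of the 24x24
// convolution output for all six feature maps at once (threadIdx.x picks the
// feature). The per-element products with the shared 4x4 subsampling weight are
// staged in shared memory and reduced by the (y,z)==(0,0) thread, shrinking
// each 24x24 map down to 6x6.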
__global__ void ss_filtering(float d_conv_output[6][24][24], float d_weight[4][4], float d_pre_output[6][6][6]){ const int local_row = threadIdx.y; const int local_column = threadIdx.z; const int feature = threadIdx.x; const int global_row = blockIdx.x*blockDim.y+threadIdx.y; const int global_column = blockIdx.y*blockDim.z+threadIdx.z; const int output_row = blockIdx.x; const int output_column = blockIdx.y; __shared__ float temp[FEATURES][SS_FILTER][SS_FILTER]; temp[feature][local_row][local_column] = d_conv_output[feature][global_row][global_column]*d_weight[local_row][local_column]; __syncthreads(); if(local_row==0 && local_column==0){ float sum = 0.0f; for(int i = 0; i<SS_FILTER; i++){ for(int j =0; j<SS_FILTER; j++){ sum+=temp[feature][i][j]; } } d_pre_output[feature][output_row][output_column] = sum; } } //SubSampling layer. Biasing. __global__ void ss_biasing(float d_pre_output[6][6][6], float d_bias[1]){ const int x = threadIdx.x; const int y = threadIdx.y; const int feature = blockIdx.x; d_pre_output[feature][x][y] += d_bias[0]; } //SubSampling layer. Sigmoid. __global__ void ss_sigmoid(float d_pre_output[6][6][6], float d_output[6][6][6]){ const int x = threadIdx.x; const int y = threadIdx.y; const int feature = blockIdx.x; d_output[feature][x][y] = 1/(1+expf((-1)*d_pre_output[feature][x][y])); } __global__ void fc_linear(float d_ss_output[6][6][6], float d_weight[10][6][6][6],float d_pre_output[10]){ const int neuron = blockIdx.x; const int depth = blockIdx.y*blockDim.x+threadIdx.x; const int local_depth = threadIdx.x; const int row = threadIdx.y; const int column = threadIdx.z; __shared__ float temp[3][6][6]; __shared__ float temp_sums[3][6]; __shared__ float pre_sums[3]; temp[local_depth][row][column] = d_ss_output[depth][row][column]*d_weight[neuron][depth][row][column]; __syncthreads(); if(column==0){ float temp_sum = 0.0f; for(int i = 0; i<6;i++){ temp_sum+=temp[local_depth][row][i]; } temp_sums[local_depth][row] = temp_sum; if(row==0){ float pre_sum = 0.0f; for(int i = 0; i<6;i++){ pre_sum+=temp_sums[local_depth][i]; } pre_sums[local_depth] = pre_sum; if(local_depth==0){ float sum = 0.0f; for(int i = 0; i<3;i++){ sum+=pre_sums[i]; } atomicAdd(&d_pre_output[neuron],sum); } } } } //Fully-connected layer.Biasing. __global__ void fc_biasing(float d_pre_output[10], float d_bias[10]){ const int idx = threadIdx.x; d_pre_output[idx] += d_bias[idx]; } //Fully-connected layer.Sigmoid. 
__global__ void fc_sigmoid(float d_pre_output[10], float d_output[10]){ const int idx = threadIdx.x; d_output[idx] = 1/(1+expf((-1)*d_pre_output[idx])); } class Conv{ public: int filter_size, features_num, output_dim; float *weight, *bias,*pre_output, *output; Conv(int filter_size, int features_num, int output); void reset(); ~Conv(); }; Conv::Conv(int filter_size, int features_num, int output_dim){ //Assigning attributes this->filter_size = filter_size; this->features_num = features_num; this->output_dim = output_dim; //CUDA memory allocation hipMalloc((void **)&weight, features_num*filter_size*filter_size*sizeof(float)); hipMemcpy(weight, c1_weight, features_num*filter_size*filter_size*sizeof(float), hipMemcpyHostToDevice); hipMalloc((void **)&bias, features_num*sizeof(float)); hipMemcpy(bias, c1_bias, features_num*sizeof(float), hipMemcpyHostToDevice); hipMalloc((void **)&pre_output, features_num*output_dim*output_dim*sizeof(float)); hipMalloc((void **)&output, features_num*output_dim*output_dim*sizeof(float)); } void Conv::reset(){ hipMemset(pre_output,0x00, features_num*output_dim*output_dim*sizeof(float)); hipMemset(output,0x00,features_num*output_dim*output_dim*sizeof(float)); } Conv::~Conv(){ //CUDA memory deallocation hipFree(weight); hipFree(bias); hipFree(pre_output); hipFree(output); } class SS{ public: int filter_size, features_num, output_dim; float *weight, *bias,*pre_output, *output; SS(int filter_size, int features_num, int output); void reset(); ~SS(); }; SS::SS(int filter_size, int features_num, int output_dim){ //Assigning attributes this->filter_size = filter_size; this->features_num = features_num; this->output_dim = output_dim; //CUDA memory allocation hipMalloc((void **)&weight, filter_size*filter_size*sizeof(float)); hipMemcpy(weight, s2_weight, filter_size*filter_size*sizeof(float), hipMemcpyHostToDevice); hipMalloc((void **)&bias, filter_size*filter_size*sizeof(float)); hipMemcpy(bias, s2_bias, sizeof(float), hipMemcpyHostToDevice); hipMalloc((void **)&pre_output, features_num*output_dim*output_dim*sizeof(float)); hipMalloc((void **)&output, features_num*output_dim*output_dim*sizeof(float)); } void SS::reset(){ hipMemset(pre_output,0x00, features_num*output_dim*output_dim*sizeof(float)); hipMemset(output,0x00,features_num*output_dim*output_dim*sizeof(float)); } SS::~SS(){ //CUDA memory deallocation hipFree(weight); hipFree(bias); hipFree(pre_output); hipFree(output); } class FC{ public: int neurons, output_dim; float *weight, *bias,*pre_output, *output; FC(int neurons, int output); void reset(); ~FC(); }; FC::FC(int neurons, int output_dim){ //Assigning attributes this->neurons = neurons; this->output_dim = output_dim; //CUDA memory allocation hipMalloc((void **)&weight, neurons*FEATURES*SS_OUTPUT*SS_OUTPUT*sizeof(float)); hipMemcpy(weight, f3_weight, neurons*FEATURES*SS_OUTPUT*SS_OUTPUT*sizeof(float), hipMemcpyHostToDevice); hipMalloc((void **)&bias, neurons*sizeof(float)); hipMemcpy(bias, f3_bias, neurons*sizeof(float), hipMemcpyHostToDevice); hipMalloc((void **)&pre_output, output_dim*sizeof(float)); hipMalloc((void **)&output, output_dim*sizeof(float)); } void FC::reset(){ hipMemset(pre_output,0x00, output_dim*sizeof(float)); hipMemset(output,0x00,output_dim*sizeof(float)); } FC::~FC(){ //CUDA memory deallocation hipFree(weight); hipFree(bias); hipFree(pre_output); hipFree(output); } static Conv conv = Conv(CONV_FILTER, FEATURES, CONV_OUTPUT); static SS ss = SS(SS_FILTER, FEATURES, SS_OUTPUT); static FC fc = FC(NEURONS, FC_OUTPUT); //Forward pass static float 
forward_pass(float data[IMAGE_WIDTH][IMAGE_HEIGHT]){ //unsigned int label, unsigned int *error){ conv.reset(); hipError_t conv_reset_checker = hipGetLastError(); if (conv_reset_checker!=hipSuccess){ printf("CONV reset PROBLEM:: %s", hipGetErrorString(conv_reset_checker)); exit(1); } ss.reset(); hipError_t ss_reset_checker = hipGetLastError(); if (ss_reset_checker!=hipSuccess){ printf("ss reset PROBLEM:: %s", hipGetErrorString(ss_reset_checker)); exit(1); } fc.reset(); hipError_t fc_reset_checker = hipGetLastError(); if (fc_reset_checker!=hipSuccess){ printf("fc reset PROBLEM:: %s", hipGetErrorString(fc_reset_checker)); exit(1); } float (*kernel_data)[IMAGE_HEIGHT]; float time = 0.0f; float ms = 0.0f; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipMalloc((void**)&kernel_data,IMAGE_WIDTH*IMAGE_HEIGHT*sizeof(float)); hipMemcpy(kernel_data, data, IMAGE_WIDTH*IMAGE_HEIGHT*sizeof(float), hipMemcpyHostToDevice); dim3 conv_filter_blocks(CONV_OUTPUT, CONV_OUTPUT); dim3 conv_filter_thread(FEATURES, CONV_FILTER, CONV_FILTER); hipEventRecord(start); hipLaunchKernelGGL(( conv_filtering), dim3(conv_filter_blocks), dim3(conv_filter_thread), 0, 0, kernel_data, (float (*)[CONV_FILTER][CONV_FILTER])conv.weight, (float (*)[CONV_OUTPUT][CONV_OUTPUT])conv.pre_output); hipError_t conv_filter_checker = hipGetLastError(); if (conv_filter_checker!=hipSuccess){ printf("CONV FILTERING PROBLEM:: %s", hipGetErrorString(conv_filter_checker)); exit(1); } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&ms, start, stop); time+=ms; int conv_block_dim = CONV_OUTPUT/3; dim3 conv_bias_blocks(CONV_OUTPUT/conv_block_dim,CONV_OUTPUT/conv_block_dim,FEATURES); dim3 conv_bias_thread(conv_block_dim,conv_block_dim); hipEventRecord(start); hipLaunchKernelGGL(( conv_biasing), dim3(conv_bias_blocks), dim3(conv_bias_thread), 0, 0, (float (*)[CONV_OUTPUT][CONV_OUTPUT])conv.pre_output, conv.bias); hipError_t conv_bias_checker = hipGetLastError(); if (conv_bias_checker!=hipSuccess){ printf("CONV BIASING PROBLEM:: %s", hipGetErrorString(conv_bias_checker)); exit(1); } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&ms, start, stop); time+=ms; dim3 conv_sigmoid_blocks(CONV_OUTPUT/conv_block_dim,CONV_OUTPUT/conv_block_dim,FEATURES); dim3 conv_sigmoid_thread(conv_block_dim,conv_block_dim); hipEventRecord(start); hipLaunchKernelGGL(( conv_sigmoid), dim3(conv_sigmoid_blocks), dim3(conv_sigmoid_thread), 0, 0, (float (*)[CONV_OUTPUT][CONV_OUTPUT])conv.pre_output, (float (*)[CONV_OUTPUT][CONV_OUTPUT])conv.output); hipError_t conv_sigmoid_checker = hipGetLastError(); if (conv_sigmoid_checker!=hipSuccess){ printf("CONV SIGMOID PROBLEM:: %s", hipGetErrorString(conv_sigmoid_checker)); exit(1); } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&ms, start, stop); time+=ms; dim3 ss_filter_blocks(SS_OUTPUT, SS_OUTPUT); dim3 ss_filter_thread(FEATURES, SS_FILTER, SS_FILTER); hipEventRecord(start); hipLaunchKernelGGL(( ss_filtering), dim3(ss_filter_blocks), dim3(ss_filter_thread), 0, 0, (float (*)[CONV_OUTPUT][CONV_OUTPUT])conv.output, (float (*)[SS_FILTER])ss.weight, (float (*)[SS_OUTPUT][SS_OUTPUT])ss.pre_output); hipError_t ss_filter_checker = hipGetLastError(); if (ss_filter_checker!=hipSuccess){ printf("SS FILTERING PROBLEM:: %s", hipGetErrorString(ss_filter_checker)); exit(1); } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&ms, start, stop); time+=ms; dim3 ss_bias_blocks(FEATURES); dim3 ss_bias_thread(SS_OUTPUT,SS_OUTPUT); 
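// Added note: ss_biasing runs with one block per feature map (6 blocks) and a
// 6x6 thread tile per block; every thread adds the single shared subsampling
// bias (s2_bias[0]) to its element of the 6x6x6 pre-activation before sigmoid.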
hipEventRecord(start); hipLaunchKernelGGL(( ss_biasing), dim3(ss_bias_blocks), dim3(ss_bias_thread), 0, 0, (float (*)[SS_OUTPUT][SS_OUTPUT])ss.pre_output, (float (*))ss.bias); hipError_t ss_bias_checker = hipGetLastError(); if (ss_bias_checker!=hipSuccess){ printf("SS BIASING PROBLEM:: %s", hipGetErrorString(ss_bias_checker)); exit(1); } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&ms, start, stop); time+=ms; dim3 ss_sigmoid_blocks(FEATURES); dim3 ss_sigmoid_thread(SS_OUTPUT,SS_OUTPUT); hipEventRecord(start); hipLaunchKernelGGL(( ss_sigmoid), dim3(ss_sigmoid_blocks), dim3(ss_sigmoid_thread), 0, 0, (float (*)[SS_OUTPUT][SS_OUTPUT])ss.pre_output, (float (*)[SS_OUTPUT][SS_OUTPUT])ss.output); hipError_t ss_sigmoid_checker = hipGetLastError(); if (ss_sigmoid_checker!=hipSuccess){ printf("SS SIGMOID PROBLEM:: %s", hipGetErrorString(ss_sigmoid_checker)); exit(1); } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&ms, start, stop); time+=ms; int div = FEATURES/2; dim3 fc_linear_blocks(FC_OUTPUT, FEATURES/div); dim3 fc_linear_thread(div, SS_OUTPUT, SS_OUTPUT); hipEventRecord(start); hipLaunchKernelGGL(( fc_linear), dim3(fc_linear_blocks), dim3(fc_linear_thread), 0, 0, (float (*)[SS_OUTPUT][SS_OUTPUT])ss.output, (float (*)[FEATURES][SS_OUTPUT][SS_OUTPUT])fc.weight, fc.pre_output); hipError_t fc_linear_checker = hipGetLastError(); if (fc_linear_checker!=hipSuccess){ printf("FC LINEAR PROBLEM:: %s", hipGetErrorString(fc_linear_checker)); exit(1); } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&ms, start, stop); time+=ms; dim3 fc_bias_blocks(1); dim3 fc_bias_thread(NEURONS); hipEventRecord(start); hipLaunchKernelGGL(( fc_biasing), dim3(fc_bias_blocks), dim3(fc_bias_thread), 0, 0, fc.pre_output, fc.bias); hipError_t fc_bias_checker = hipGetLastError(); if (fc_bias_checker!=hipSuccess){ printf("FC BIASING PROBLEM:: %s", hipGetErrorString(fc_bias_checker)); exit(1); } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&ms, start, stop); time+=ms; dim3 fc_sigmoid_blocks(1); dim3 fc_sigmoid_thread(NEURONS); hipEventRecord(start); hipLaunchKernelGGL(( fc_sigmoid), dim3(fc_sigmoid_blocks), dim3(fc_sigmoid_thread), 0, 0, fc.pre_output,fc.output); hipError_t fc_sigmoid_checker = hipGetLastError(); if (fc_sigmoid_checker!=hipSuccess){ printf("FC SIGMOID PROBLEM:: %s", hipGetErrorString(fc_sigmoid_checker)); exit(1); } hipEventRecord(stop); hipEventSynchronize(stop); hipEventElapsedTime(&ms, start, stop); time+=ms; hipFree(kernel_data); hipEventDestroy(start); hipEventDestroy(stop); return time; } int main(){ const char *image_filename = "data/t10k-images.idx3-ubyte"; const char *label_filename = "data/t10k-labels.idx1-ubyte"; mnist_data *data_set = (mnist_data *)malloc(sizeof(*data_set)*10000); unsigned int count = 0; if(mnist_load(image_filename,label_filename, &data_set,&count)!=0){ printf("Problems with loading data."); exit(1); } printf("test_cnt = %d (should be 10000)\n\n",count); unsigned int error = 0; float time_taken = 0.0f; for(int k = 0; k<count;k++){ float data[IMAGE_HEIGHT][IMAGE_WIDTH]; for(int i = 0; i< IMAGE_HEIGHT;i++){ for(int j = 0; j< IMAGE_WIDTH;j++){ data[i][j] = data_set[k].data[i][j]; } } time_taken += forward_pass(data); unsigned int max = 0; float res[10]; hipMemcpy(res, fc.output, sizeof(float)*10, hipMemcpyDeviceToHost); for(int j=0; j<10; j++){ if (res[max] < res[j]) max = j; } if(max!=data_set[k].label) error+=1; } printf("Error Rate = %f%% (%d out of 10,000)\n", 
double(error)/double(count)*100.0, error); printf("Accuracy = %.3f%% (%d out of 10,000)\n", 100.0 - double(error)/double(count)*100.0, count - error); printf("Ex time = %f (ms) \n", time_taken); return 0; }
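// --- Added note (not part of the original program) ----------------------------
// Host-side reference for the convolution stage above, useful for cross-checking
// the conv_filtering / conv_biasing / conv_sigmoid kernels on small inputs. It
// restates what those three kernels compute together: a "valid" 5x5 sliding-
// window weighted sum over the 28x28 image (giving 24x24 per feature), plus the
// per-feature bias, followed by the logistic sigmoid. conv_ref is a hypothetical
// helper name introduced here, not part of slenet_params.h.
#include <math.h>

static void conv_ref ( const float in[28][28], const float w[6][5][5], const float b[6], float out[6][24][24] )
{
	for ( int f = 0; f < 6; f++ )
		for ( int r = 0; r < 24; r++ )
			for ( int c = 0; c < 24; c++ ) {
				float s = 0.0f;
				for ( int i = 0; i < 5; i++ )
					for ( int j = 0; j < 5; j++ )
						s += in[r+i][c+j] * w[f][i][j];               // window sum, stride 1
				out[f][r][c] = 1.0f / ( 1.0f + expf( -(s + b[f]) ) ); // bias + sigmoid
			}
}
// To compare: copy conv.output back with hipMemcpy (6*24*24 floats) and check it
// element-wise against out[][][] for the same image and c1_weight / c1_bias.
// ------------------------------------------------------------------------------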
0fa572080b97cb8c02a7166e1bafa662dc74d995.cu
#include <stdio.h> #include "slenet_params.h" #define INSIZE 28 #define INFO_BYTE_SIZE 4 #define INITIAL_WEIGHT_VALUE -1.0f #define INITIAL_FC_WEIGHT_VALUE 1.0f #define IMAGE_WIDTH 28 #define IMAGE_HEIGHT 28 #define CONV_FILTER 5 #define SS_FILTER 4 #define FEATURES 6 #define NEURONS 10 #define CONV_OUTPUT 24 #define SS_OUTPUT 6 #define FC_OUTPUT 10 //kernel function that fill mnist_data structure->data with normalized pixel values __global__ void fillArr(unsigned char pixels[INSIZE][INSIZE], double data[INSIZE][INSIZE]){ // TO DO const int i = threadIdx.x + blockIdx.x * blockDim.x; const int j = threadIdx.y + blockIdx.y * blockDim.y; if(i<INSIZE && j<INSIZE) data[i][j] = pixels[i][j]/255.0; } //kernel function that changes the values >0 to 1 and double type to integer type __global__ void showArr(double ddata[INSIZE][INSIZE], int dshow[INSIZE][INSIZE]){ // TO DO const int i = threadIdx.x + blockIdx.x * blockDim.x; const int j = threadIdx.y + blockIdx.y * blockDim.y; if(i<INSIZE && j<INSIZE){ if(ddata[i][j]>0) dshow[i][j] = 1; else dshow[i][j] = 0; } } //mnist data structure typedef struct mnist_data{ double data[INSIZE][INSIZE]; unsigned int label; }mnist_data; //structure for images header information typedef struct images_info{ char magic_num_images[INFO_BYTE_SIZE]; char amount_images[INFO_BYTE_SIZE]; char rows[INFO_BYTE_SIZE]; char columns[INFO_BYTE_SIZE]; }images_info; //structure for labels header information typedef struct labels_info{ char magic_num_labels[INFO_BYTE_SIZE]; char amount_labels[INFO_BYTE_SIZE]; }labels_info; //Hexadecimal to integer static unsigned int mnist_bin_to_int(char *tmp){ int val = (tmp[0] << 24 | tmp[1] << 16 | tmp[2] << 8 | tmp[3] ); return val; } static int mnist_load(const char *image_filename, const char *label_filename, mnist_data **data_set,unsigned int *count){ images_info i_info; labels_info l_info; //opening the files FILE *images = fopen(image_filename,"rb"); FILE *labels = fopen(label_filename,"rb"); if(images==NULL||labels==NULL){ return -1; } //read header info fread(&i_info,sizeof(images_info),1,images); fread(&l_info,sizeof(labels_info),1,labels); //check and print header info int magic_num_images_as_int = mnist_bin_to_int(i_info.magic_num_images); if(magic_num_images_as_int != 2051){ printf("Problems with 'image magic number'. It is equal to %d, but should be 2051.",magic_num_images_as_int); return -1; } else{ printf("image magic number = %d (should be 2051)\n", magic_num_images_as_int); } int magic_num_labels_as_int = mnist_bin_to_int(l_info.magic_num_labels); if(magic_num_labels_as_int != 2049){ printf("Problems with 'label magic number'. It is equal to %d, but should be 2049.",magic_num_labels_as_int); return -1; } else{ printf("label magic number = %d (should be 2049)\n", magic_num_labels_as_int); } int amount_images_as_int = mnist_bin_to_int(i_info.amount_images); if(amount_images_as_int != 10000){ printf("Problems with 'image total number'. It is equal to %d, but should be 10000.",amount_images_as_int); return -1; } else{ printf("image total number = %d (should be 10000)\n", amount_images_as_int); } int amount_labels_as_int = mnist_bin_to_int(l_info.amount_labels); if(amount_labels_as_int != 10000){ printf("Problems with 'label total number'. 
It is equal to %d, but should be 10000.",amount_labels_as_int); return -1; } else{ printf("label total number = %d (should be 10000)\n", amount_labels_as_int); } int rows_as_int = mnist_bin_to_int(i_info.rows); int columns_as_int = mnist_bin_to_int(i_info.columns); if((rows_as_int != 28)||(columns_as_int!=28)){ printf("Problems with dimensions of images. Dimensions of images are not compitable with 28x28."); return -1; } else{ printf("rows = %d, cols = %d (both should be 28)\n", rows_as_int,columns_as_int); } unsigned char pixels[INSIZE][INSIZE]; char label; for(int k = 0;k<10000;k++){ //read current necessary data point fread(pixels,sizeof(pixels),1,images); fread(&label,sizeof(char),1,labels); //fill mnist_data struct -> data array with double values of pixels using cuda unsigned char (*dpixels)[INSIZE]; double (*ddata)[INSIZE]; cudaMalloc((void**)&dpixels, INSIZE*INSIZE*sizeof(char)); cudaMalloc((void**)&ddata, INSIZE*INSIZE*sizeof(double)); cudaMemcpy(dpixels, pixels, INSIZE*INSIZE*sizeof(unsigned char), cudaMemcpyHostToDevice); dim3 blocks(1,1); dim3 threads(INSIZE,INSIZE); fillArr<<<blocks, threads>>>(dpixels,ddata); cudaMemcpy((*data_set+*count)->data, ddata, INSIZE*INSIZE*sizeof(double), cudaMemcpyDeviceToHost); cudaFree(dpixels); cudaFree(ddata); //assign mnist_data struct -> label with label (*data_set+*count)->label = (int)label; //increment count *count+=1; } //close files fclose(images); fclose(labels); return 0; } //Convolution layer. Filtering. __global__ void conv_filtering(float d_data[28][28], float d_weight[6][5][5], float d_pre_output[6][24][24]){ const int local_row = threadIdx.y; const int local_column = threadIdx.z; const int feature = threadIdx.x; const int global_row = blockIdx.x+threadIdx.y; const int global_column = blockIdx.y+threadIdx.z; const int output_row = blockIdx.x; const int output_column = blockIdx.y; __shared__ float temp[FEATURES][CONV_FILTER][CONV_FILTER]; __shared__ float pre_sum[FEATURES][CONV_FILTER]; temp[feature][local_row][local_column] = d_data[global_row][global_column]*d_weight[feature][local_row][local_column]; __syncthreads(); if(local_column==0){ float temp_sum = 0.0f; for(int i =0; i< CONV_FILTER;i++){ temp_sum+=temp[feature][local_row][i]; } pre_sum[feature][local_row] = temp_sum; __syncthreads(); if(local_row==0){ float sum = 0.0f; for(int i =0; i< CONV_FILTER;i++){ sum+=pre_sum[feature][i]; } d_pre_output[feature][output_row][output_column] = sum; } } } //Convolution layer. Biasing. __global__ void conv_biasing(float d_pre_output[6][24][24], float d_bias[6]){ const int x = blockIdx.x*blockDim.x+threadIdx.x; const int y = blockIdx.y*blockDim.y+threadIdx.y; const int feature = blockIdx.z; d_pre_output[feature][x][y] += d_bias[feature]; } //Convolution layer. Sigmoid. __global__ void conv_sigmoid(float d_pre_output[6][24][24], float d_output[6][24][24]){ const int x = blockIdx.x*blockDim.x+threadIdx.x; const int y = blockIdx.y*blockDim.y+threadIdx.y; const int feature = blockIdx.z; d_output[feature][x][y] = 1/(1+expf((-1)*d_pre_output[feature][x][y])); } //SubSampling layer. Filtering. 
__global__ void ss_filtering(float d_conv_output[6][24][24], float d_weight[4][4], float d_pre_output[6][6][6]){ const int local_row = threadIdx.y; const int local_column = threadIdx.z; const int feature = threadIdx.x; const int global_row = blockIdx.x*blockDim.y+threadIdx.y; const int global_column = blockIdx.y*blockDim.z+threadIdx.z; const int output_row = blockIdx.x; const int output_column = blockIdx.y; __shared__ float temp[FEATURES][SS_FILTER][SS_FILTER]; temp[feature][local_row][local_column] = d_conv_output[feature][global_row][global_column]*d_weight[local_row][local_column]; __syncthreads(); if(local_row==0 && local_column==0){ float sum = 0.0f; for(int i = 0; i<SS_FILTER; i++){ for(int j =0; j<SS_FILTER; j++){ sum+=temp[feature][i][j]; } } d_pre_output[feature][output_row][output_column] = sum; } } //SubSampling layer. Biasing. __global__ void ss_biasing(float d_pre_output[6][6][6], float d_bias[1]){ const int x = threadIdx.x; const int y = threadIdx.y; const int feature = blockIdx.x; d_pre_output[feature][x][y] += d_bias[0]; } //SubSampling layer. Sigmoid. __global__ void ss_sigmoid(float d_pre_output[6][6][6], float d_output[6][6][6]){ const int x = threadIdx.x; const int y = threadIdx.y; const int feature = blockIdx.x; d_output[feature][x][y] = 1/(1+expf((-1)*d_pre_output[feature][x][y])); } __global__ void fc_linear(float d_ss_output[6][6][6], float d_weight[10][6][6][6],float d_pre_output[10]){ const int neuron = blockIdx.x; const int depth = blockIdx.y*blockDim.x+threadIdx.x; const int local_depth = threadIdx.x; const int row = threadIdx.y; const int column = threadIdx.z; __shared__ float temp[3][6][6]; __shared__ float temp_sums[3][6]; __shared__ float pre_sums[3]; temp[local_depth][row][column] = d_ss_output[depth][row][column]*d_weight[neuron][depth][row][column]; __syncthreads(); if(column==0){ float temp_sum = 0.0f; for(int i = 0; i<6;i++){ temp_sum+=temp[local_depth][row][i]; } temp_sums[local_depth][row] = temp_sum; if(row==0){ float pre_sum = 0.0f; for(int i = 0; i<6;i++){ pre_sum+=temp_sums[local_depth][i]; } pre_sums[local_depth] = pre_sum; if(local_depth==0){ float sum = 0.0f; for(int i = 0; i<3;i++){ sum+=pre_sums[i]; } atomicAdd(&d_pre_output[neuron],sum); } } } } //Fully-connected layer.Biasing. __global__ void fc_biasing(float d_pre_output[10], float d_bias[10]){ const int idx = threadIdx.x; d_pre_output[idx] += d_bias[idx]; } //Fully-connected layer.Sigmoid. 
__global__ void fc_sigmoid(float d_pre_output[10], float d_output[10]){ const int idx = threadIdx.x; d_output[idx] = 1/(1+expf((-1)*d_pre_output[idx])); } class Conv{ public: int filter_size, features_num, output_dim; float *weight, *bias,*pre_output, *output; Conv(int filter_size, int features_num, int output); void reset(); ~Conv(); }; Conv::Conv(int filter_size, int features_num, int output_dim){ //Assigning attributes this->filter_size = filter_size; this->features_num = features_num; this->output_dim = output_dim; //CUDA memory allocation cudaMalloc((void **)&weight, features_num*filter_size*filter_size*sizeof(float)); cudaMemcpy(weight, c1_weight, features_num*filter_size*filter_size*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void **)&bias, features_num*sizeof(float)); cudaMemcpy(bias, c1_bias, features_num*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void **)&pre_output, features_num*output_dim*output_dim*sizeof(float)); cudaMalloc((void **)&output, features_num*output_dim*output_dim*sizeof(float)); } void Conv::reset(){ cudaMemset(pre_output,0x00, features_num*output_dim*output_dim*sizeof(float)); cudaMemset(output,0x00,features_num*output_dim*output_dim*sizeof(float)); } Conv::~Conv(){ //CUDA memory deallocation cudaFree(weight); cudaFree(bias); cudaFree(pre_output); cudaFree(output); } class SS{ public: int filter_size, features_num, output_dim; float *weight, *bias,*pre_output, *output; SS(int filter_size, int features_num, int output); void reset(); ~SS(); }; SS::SS(int filter_size, int features_num, int output_dim){ //Assigning attributes this->filter_size = filter_size; this->features_num = features_num; this->output_dim = output_dim; //CUDA memory allocation cudaMalloc((void **)&weight, filter_size*filter_size*sizeof(float)); cudaMemcpy(weight, s2_weight, filter_size*filter_size*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void **)&bias, filter_size*filter_size*sizeof(float)); cudaMemcpy(bias, s2_bias, sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void **)&pre_output, features_num*output_dim*output_dim*sizeof(float)); cudaMalloc((void **)&output, features_num*output_dim*output_dim*sizeof(float)); } void SS::reset(){ cudaMemset(pre_output,0x00, features_num*output_dim*output_dim*sizeof(float)); cudaMemset(output,0x00,features_num*output_dim*output_dim*sizeof(float)); } SS::~SS(){ //CUDA memory deallocation cudaFree(weight); cudaFree(bias); cudaFree(pre_output); cudaFree(output); } class FC{ public: int neurons, output_dim; float *weight, *bias,*pre_output, *output; FC(int neurons, int output); void reset(); ~FC(); }; FC::FC(int neurons, int output_dim){ //Assigning attributes this->neurons = neurons; this->output_dim = output_dim; //CUDA memory allocation cudaMalloc((void **)&weight, neurons*FEATURES*SS_OUTPUT*SS_OUTPUT*sizeof(float)); cudaMemcpy(weight, f3_weight, neurons*FEATURES*SS_OUTPUT*SS_OUTPUT*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void **)&bias, neurons*sizeof(float)); cudaMemcpy(bias, f3_bias, neurons*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc((void **)&pre_output, output_dim*sizeof(float)); cudaMalloc((void **)&output, output_dim*sizeof(float)); } void FC::reset(){ cudaMemset(pre_output,0x00, output_dim*sizeof(float)); cudaMemset(output,0x00,output_dim*sizeof(float)); } FC::~FC(){ //CUDA memory deallocation cudaFree(weight); cudaFree(bias); cudaFree(pre_output); cudaFree(output); } static Conv conv = Conv(CONV_FILTER, FEATURES, CONV_OUTPUT); static SS ss = SS(SS_FILTER, FEATURES, SS_OUTPUT); static FC fc = 
FC(NEURONS, FC_OUTPUT); //Forward pass static float forward_pass(float data[IMAGE_WIDTH][IMAGE_HEIGHT]){ //unsigned int label, unsigned int *error){ conv.reset(); cudaError_t conv_reset_checker = cudaGetLastError(); if (conv_reset_checker!=cudaSuccess){ printf("CONV reset PROBLEM:: %s", cudaGetErrorString(conv_reset_checker)); exit(1); } ss.reset(); cudaError_t ss_reset_checker = cudaGetLastError(); if (ss_reset_checker!=cudaSuccess){ printf("ss reset PROBLEM:: %s", cudaGetErrorString(ss_reset_checker)); exit(1); } fc.reset(); cudaError_t fc_reset_checker = cudaGetLastError(); if (fc_reset_checker!=cudaSuccess){ printf("fc reset PROBLEM:: %s", cudaGetErrorString(fc_reset_checker)); exit(1); } float (*kernel_data)[IMAGE_HEIGHT]; float time = 0.0f; float ms = 0.0f; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaMalloc((void**)&kernel_data,IMAGE_WIDTH*IMAGE_HEIGHT*sizeof(float)); cudaMemcpy(kernel_data, data, IMAGE_WIDTH*IMAGE_HEIGHT*sizeof(float), cudaMemcpyHostToDevice); dim3 conv_filter_blocks(CONV_OUTPUT, CONV_OUTPUT); dim3 conv_filter_thread(FEATURES, CONV_FILTER, CONV_FILTER); cudaEventRecord(start); conv_filtering<<<conv_filter_blocks, conv_filter_thread>>>(kernel_data, (float (*)[CONV_FILTER][CONV_FILTER])conv.weight, (float (*)[CONV_OUTPUT][CONV_OUTPUT])conv.pre_output); cudaError_t conv_filter_checker = cudaGetLastError(); if (conv_filter_checker!=cudaSuccess){ printf("CONV FILTERING PROBLEM:: %s", cudaGetErrorString(conv_filter_checker)); exit(1); } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms, start, stop); time+=ms; int conv_block_dim = CONV_OUTPUT/3; dim3 conv_bias_blocks(CONV_OUTPUT/conv_block_dim,CONV_OUTPUT/conv_block_dim,FEATURES); dim3 conv_bias_thread(conv_block_dim,conv_block_dim); cudaEventRecord(start); conv_biasing<<<conv_bias_blocks, conv_bias_thread>>>((float (*)[CONV_OUTPUT][CONV_OUTPUT])conv.pre_output, conv.bias); cudaError_t conv_bias_checker = cudaGetLastError(); if (conv_bias_checker!=cudaSuccess){ printf("CONV BIASING PROBLEM:: %s", cudaGetErrorString(conv_bias_checker)); exit(1); } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms, start, stop); time+=ms; dim3 conv_sigmoid_blocks(CONV_OUTPUT/conv_block_dim,CONV_OUTPUT/conv_block_dim,FEATURES); dim3 conv_sigmoid_thread(conv_block_dim,conv_block_dim); cudaEventRecord(start); conv_sigmoid<<<conv_sigmoid_blocks, conv_sigmoid_thread>>>((float (*)[CONV_OUTPUT][CONV_OUTPUT])conv.pre_output, (float (*)[CONV_OUTPUT][CONV_OUTPUT])conv.output); cudaError_t conv_sigmoid_checker = cudaGetLastError(); if (conv_sigmoid_checker!=cudaSuccess){ printf("CONV SIGMOID PROBLEM:: %s", cudaGetErrorString(conv_sigmoid_checker)); exit(1); } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms, start, stop); time+=ms; dim3 ss_filter_blocks(SS_OUTPUT, SS_OUTPUT); dim3 ss_filter_thread(FEATURES, SS_FILTER, SS_FILTER); cudaEventRecord(start); ss_filtering<<<ss_filter_blocks, ss_filter_thread>>>((float (*)[CONV_OUTPUT][CONV_OUTPUT])conv.output, (float (*)[SS_FILTER])ss.weight, (float (*)[SS_OUTPUT][SS_OUTPUT])ss.pre_output); cudaError_t ss_filter_checker = cudaGetLastError(); if (ss_filter_checker!=cudaSuccess){ printf("SS FILTERING PROBLEM:: %s", cudaGetErrorString(ss_filter_checker)); exit(1); } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms, start, stop); time+=ms; dim3 ss_bias_blocks(FEATURES); dim3 ss_bias_thread(SS_OUTPUT,SS_OUTPUT); cudaEventRecord(start); ss_biasing<<<ss_bias_blocks, 
ss_bias_thread>>>((float (*)[SS_OUTPUT][SS_OUTPUT])ss.pre_output, (float (*))ss.bias); cudaError_t ss_bias_checker = cudaGetLastError(); if (ss_bias_checker!=cudaSuccess){ printf("SS BIASING PROBLEM:: %s", cudaGetErrorString(ss_bias_checker)); exit(1); } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms, start, stop); time+=ms; dim3 ss_sigmoid_blocks(FEATURES); dim3 ss_sigmoid_thread(SS_OUTPUT,SS_OUTPUT); cudaEventRecord(start); ss_sigmoid<<<ss_sigmoid_blocks, ss_sigmoid_thread>>>((float (*)[SS_OUTPUT][SS_OUTPUT])ss.pre_output, (float (*)[SS_OUTPUT][SS_OUTPUT])ss.output); cudaError_t ss_sigmoid_checker = cudaGetLastError(); if (ss_sigmoid_checker!=cudaSuccess){ printf("SS SIGMOID PROBLEM:: %s", cudaGetErrorString(ss_sigmoid_checker)); exit(1); } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms, start, stop); time+=ms; int div = FEATURES/2; dim3 fc_linear_blocks(FC_OUTPUT, FEATURES/div); dim3 fc_linear_thread(div, SS_OUTPUT, SS_OUTPUT); cudaEventRecord(start); fc_linear<<<fc_linear_blocks, fc_linear_thread>>>((float (*)[SS_OUTPUT][SS_OUTPUT])ss.output, (float (*)[FEATURES][SS_OUTPUT][SS_OUTPUT])fc.weight, fc.pre_output); cudaError_t fc_linear_checker = cudaGetLastError(); if (fc_linear_checker!=cudaSuccess){ printf("FC LINEAR PROBLEM:: %s", cudaGetErrorString(fc_linear_checker)); exit(1); } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms, start, stop); time+=ms; dim3 fc_bias_blocks(1); dim3 fc_bias_thread(NEURONS); cudaEventRecord(start); fc_biasing<<<fc_bias_blocks, fc_bias_thread>>>(fc.pre_output, fc.bias); cudaError_t fc_bias_checker = cudaGetLastError(); if (fc_bias_checker!=cudaSuccess){ printf("FC BIASING PROBLEM:: %s", cudaGetErrorString(fc_bias_checker)); exit(1); } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms, start, stop); time+=ms; dim3 fc_sigmoid_blocks(1); dim3 fc_sigmoid_thread(NEURONS); cudaEventRecord(start); fc_sigmoid<<<fc_sigmoid_blocks, fc_sigmoid_thread>>>(fc.pre_output,fc.output); cudaError_t fc_sigmoid_checker = cudaGetLastError(); if (fc_sigmoid_checker!=cudaSuccess){ printf("FC SIGMOID PROBLEM:: %s", cudaGetErrorString(fc_sigmoid_checker)); exit(1); } cudaEventRecord(stop); cudaEventSynchronize(stop); cudaEventElapsedTime(&ms, start, stop); time+=ms; cudaFree(kernel_data); cudaEventDestroy(start); cudaEventDestroy(stop); return time; } int main(){ const char *image_filename = "data/t10k-images.idx3-ubyte"; const char *label_filename = "data/t10k-labels.idx1-ubyte"; mnist_data *data_set = (mnist_data *)malloc(sizeof(*data_set)*10000); unsigned int count = 0; if(mnist_load(image_filename,label_filename, &data_set,&count)!=0){ printf("Problems with loading data."); exit(1); } printf("test_cnt = %d (should be 10000)\n\n",count); unsigned int error = 0; float time_taken = 0.0f; for(int k = 0; k<count;k++){ float data[IMAGE_HEIGHT][IMAGE_WIDTH]; for(int i = 0; i< IMAGE_HEIGHT;i++){ for(int j = 0; j< IMAGE_WIDTH;j++){ data[i][j] = data_set[k].data[i][j]; } } time_taken += forward_pass(data); unsigned int max = 0; float res[10]; cudaMemcpy(res, fc.output, sizeof(float)*10, cudaMemcpyDeviceToHost); for(int j=0; j<10; j++){ if (res[max] < res[j]) max = j; } if(max!=data_set[k].label) error+=1; } printf("Error Rate = %f%% (%d out of 10,000)\n", double(error)/double(count)*100.0, error); printf("Accuracy = %.3f%% (%d out of 10,000)\n", 100.0 - double(error)/double(count)*100.0, count - error); printf("Ex time = %f (ms) \n", time_taken); return 0; }
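/* Illustrative shape bookkeeping for the network above (a sketch added for clarity; it is
 * not part of the original file and assumes the usual MNIST constants IMAGE_WIDTH =
 * IMAGE_HEIGHT = 28 and CONV_FILTER = 5):
 *   convolution    : 28 - 5 + 1 = 24, so conv.output is FEATURES x 24 x 24 (FEATURES = 6)
 *   subsampling    : 24 / SS_FILTER = 24 / 4 = 6, so ss.output is 6 x 6 x 6
 *   fully connected: 6 * 6 * 6 = 216 inputs per neuron, NEURONS = FC_OUTPUT = 10 outputs
 * Equivalent compile-time checks could be expressed as
 *   static_assert(CONV_OUTPUT == IMAGE_WIDTH - CONV_FILTER + 1, "conv shape");
 *   static_assert(SS_OUTPUT == CONV_OUTPUT / SS_FILTER, "pooling shape");
 */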
7cfe9c444e71fd763d500d181ca8212555352796.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // std::system includes #include <cstdio> #include <vector> // CUDA-C includes #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <helper_cuda.h> #include <helper_string.h> // Semaphore include #include <sys/types.h> #include <unistd.h> // throw error on equality #define ERR_EQ(X,Y) do { if ((X) == (Y)) { \ fprintf(stderr,"Error in %s at %s:%d\n",__func__,__FILE__,__LINE__); \ exit(-1);}} while(0) // throw error on difference #define ERR_NE(X,Y) do { if ((X) != (Y)) { \ fprintf(stderr,"Error in %s at %s:%d\n",__func__,__FILE__,__LINE__); \ exit(-1);}} while(0) #define ROUND_UP(N, BASE) \ (N + BASE - 1) / BASE // copy from source -> destination arrays __device__ void slow_kernel(int *dst, int *src, int n, int delay) { int id = blockDim.x * blockIdx.x + threadIdx.x; for (volatile int i = 0; i < delay; i++); if (id < n) { dst[id] = src[id]; } } // Named kernels for easier profiling __global__ void low_priority(int *dst, int *src, int n, int delay) { slow_kernel(dst, src, n, delay); } __global__ void high_priority(int *dst, int *src, int n, int delay) { slow_kernel(dst, src, n, delay); } // initialize memory void mem_init(int *buf, size_t n) { for (int i = 0; i < n; i++) { buf[i] = i; } } // Forward declarations void run_experiment(const int priority, const int size, const int iterations, const int delay); int main(int argc, char **argv) { hipDeviceProp_t device_prop; int dev_id; fprintf(stderr, "Starting [%s]...\n", argv[0]); // set device dev_id = findCudaDevice(argc, (const char **) argv); checkCudaErrors(hipGetDeviceProperties(&device_prop, dev_id)); if ((device_prop.major << 4) + device_prop.minor < 0x35) { fprintf(stderr, "%s requires Compute Capability of SM 3.5 or higher to run.\nexiting...\n", argv[0]); exit (EXIT_WAIVED); } // command line args const int delay = getCmdLineArgumentInt(argc, (const char **) argv, "delay"); const size_t size = getCmdLineArgumentInt(argc, (const char **) argv, "size"); const int priority = getCmdLineArgumentInt(argc, (const char **) argv, "priority"); const int iterations = getCmdLineArgumentInt(argc, (const char **) argv, "iterations"); // get the range of priorities available // [ greatest_priority, least_priority ] int priority_low; int priority_hi; checkCudaErrors(hipDeviceGetStreamPriorityRange(&priority_low, &priority_hi)); if (size == 0 || iterations == 0) { fprintf(stderr, "Please provide --size=<int> --priority=<int> --iterations=<int> " "and --delay=<int> (optional) flags.\nexting...\n"); exit (EXIT_FAILURE); } else { fprintf(stderr, "Called with arguments size %zu, priority %d, iterations %d, and delay %d\n", size, priority, iterations, delay); } if (priority_hi > priority || priority_low < priority) { fprintf(stderr, "Priority must be within %d and %d.\nexting...\n", priority_hi, priority_low); exit (EXIT_FAILURE); } // Set kernel to run void (*kernel)(int*, int*, int, int) = priority ? 
&high_priority : &low_priority; // Create memory regions #define N_MEMORY_REGIONS 8 size_t n_regions = min(iterations, N_MEMORY_REGIONS); // initialise host data std::vector<int*> h_src(n_regions); for (int i = 0; i < n_regions; i++) { ERR_EQ(h_src[i] = (int *) malloc(sizeof(int) * size), NULL); mem_init(h_src[i], size); } // initialise host-side destination data std::vector<int*> h_dst(n_regions); for (int i = 0; i < n_regions; i++) { ERR_EQ(h_dst[i] = (int *) malloc(sizeof(int) * size), NULL); memset(h_dst[i], 0, sizeof(int) * size); } // copy source data -> device std::vector<int*> d_src(n_regions); for (int i = 0; i < n_regions; i++) { checkCudaErrors(hipMalloc(&d_src[i], sizeof(int) * size)); checkCudaErrors(hipMemcpy(d_src[i], h_src[i], sizeof(int) * size, hipMemcpyHostToDevice)); } // allocate memory for memcopy destination std::vector<int*> d_dst(n_regions); for (int i = 0; i < n_regions; i++) { checkCudaErrors(hipMalloc(&d_dst[i], sizeof(int) * size)); } hipDeviceSynchronize(); // Create stream hipStream_t stream; checkCudaErrors(hipStreamCreateWithPriority(&stream, hipStreamNonBlocking, priority)); // Compute number of threads and blocks int blockSize; int minGridSize; int gridSize; hipOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, kernel); gridSize = (size + blockSize - 1) / blockSize; // launch the kernel iterations times (i is advanced only by the inner loop). // each consecutive launch uses a different memory region hipEvent_t start, end; checkCudaErrors(hipEventCreate(&start)); checkCudaErrors(hipEventCreate(&end)); checkCudaErrors(hipEventRecord(start, stream)); for (int i = 0; i < iterations; ) { for (int j = 0; j < n_regions && i < iterations; j++, i++) { hipLaunchKernelGGL(( kernel), dim3(gridSize), dim3(blockSize), 0, stream, d_dst[j], d_src[j], size, delay); checkCudaErrors(hipStreamSynchronize(stream)); } } checkCudaErrors(hipEventRecord(end, stream)); checkCudaErrors(hipEventSynchronize(end)); for (int i = 0; i < n_regions; i++) { checkCudaErrors(hipMemcpy(h_dst[i], d_dst[i], sizeof(int) * size, hipMemcpyDeviceToHost)); } // check results of the last computation for (int i = 0; i < n_regions; i++) { ERR_NE(memcmp(h_dst[i], h_src[i], sizeof(int) * size), 0); } // Clean up for (int i = 0; i < n_regions; i++) { checkCudaErrors(hipFree(d_src[i])); checkCudaErrors(hipFree(d_dst[i])); free(h_src[i]); free(h_dst[i]); } // Print out average time float ms; checkCudaErrors(hipEventElapsedTime(&ms, start, end)); // size iterations ms average printf("%zu, %d, %f, %f\n", size, iterations, ms, ms / (float) iterations); exit (EXIT_SUCCESS); }
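/* Example invocation (illustrative; the binary name is hypothetical, and the chosen
 * priority value must lie inside the range reported by hipDeviceGetStreamPriorityRange):
 *
 *   ./stream_priority --size=1048576 --iterations=64 --priority=-1 --delay=10000
 *
 * On success the program prints a single CSV line: size, iterations, total ms, and the
 * average ms per kernel launch.
 */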
7cfe9c444e71fd763d500d181ca8212555352796.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // std::system includes #include <cstdio> #include <vector> // CUDA-C includes #include <cuda.h> #include <cuda_runtime.h> #include <cuda_profiler_api.h> #include <helper_cuda.h> #include <helper_string.h> // Semaphore include #include <sys/types.h> #include <unistd.h> // throw error on equality #define ERR_EQ(X,Y) do { if ((X) == (Y)) { \ fprintf(stderr,"Error in %s at %s:%d\n",__func__,__FILE__,__LINE__); \ exit(-1);}} while(0) // throw error on difference #define ERR_NE(X,Y) do { if ((X) != (Y)) { \ fprintf(stderr,"Error in %s at %s:%d\n",__func__,__FILE__,__LINE__); \ exit(-1);}} while(0) #define ROUND_UP(N, BASE) \ (N + BASE - 1) / BASE // copy from source -> destination arrays __device__ void slow_kernel(int *dst, int *src, int n, int delay) { int id = blockDim.x * blockIdx.x + threadIdx.x; for (volatile int i = 0; i < delay; i++); if (id < n) { dst[id] = src[id]; } } // Named kernels for easier profiling __global__ void low_priority(int *dst, int *src, int n, int delay) { slow_kernel(dst, src, n, delay); } __global__ void high_priority(int *dst, int *src, int n, int delay) { slow_kernel(dst, src, n, delay); } // initialize memory void mem_init(int *buf, size_t n) { for (int i = 0; i < n; i++) { buf[i] = i; } } // Forward declarations void run_experiment(const int priority, const int size, const int iterations, const int delay); int main(int argc, char **argv) { cudaDeviceProp device_prop; int dev_id; fprintf(stderr, "Starting [%s]...\n", argv[0]); // set device dev_id = findCudaDevice(argc, (const char **) argv); checkCudaErrors(cudaGetDeviceProperties(&device_prop, dev_id)); if ((device_prop.major << 4) + device_prop.minor < 0x35) { fprintf(stderr, "%s requires Compute Capability of SM 3.5 or higher to run.\nexiting...\n", argv[0]); exit (EXIT_WAIVED); } // command line args const int delay = getCmdLineArgumentInt(argc, (const char **) argv, "delay"); const size_t size = getCmdLineArgumentInt(argc, (const char **) argv, "size"); const int priority = getCmdLineArgumentInt(argc, (const char **) argv, "priority"); const int iterations = getCmdLineArgumentInt(argc, (const char **) argv, "iterations"); // get the range of priorities available // [ greatest_priority, least_priority ] int priority_low; int priority_hi; checkCudaErrors(cudaDeviceGetStreamPriorityRange(&priority_low, &priority_hi)); if (size == 0 || iterations == 0) { fprintf(stderr, "Please provide --size=<int> --priority=<int> --iterations=<int> " "and --delay=<int> (optional) flags.\nexting...\n"); exit (EXIT_FAILURE); } else { fprintf(stderr, "Called with arguments size %zu, priority %d, iterations %d, and delay %d\n", size, priority, iterations, delay); } if (priority_hi > priority || priority_low < priority) { fprintf(stderr, "Priority must be within %d and %d.\nexting...\n", priority_hi, priority_low); exit (EXIT_FAILURE); } // Set kernel to run void (*kernel)(int*, int*, int, int) = priority ? 
&high_priority : &low_priority; // Create memory regions #define N_MEMORY_REGIONS 8 size_t n_regions = min(iterations, N_MEMORY_REGIONS); // initialise host data std::vector<int*> h_src(n_regions); for (int i = 0; i < n_regions; i++) { ERR_EQ(h_src[i] = (int *) malloc(sizeof(int) * size), NULL); mem_init(h_src[i], size); } // initialise host-side destination data std::vector<int*> h_dst(n_regions); for (int i = 0; i < n_regions; i++) { ERR_EQ(h_dst[i] = (int *) malloc(sizeof(int) * size), NULL); memset(h_dst[i], 0, sizeof(int) * size); } // copy source data -> device std::vector<int*> d_src(n_regions); for (int i = 0; i < n_regions; i++) { checkCudaErrors(cudaMalloc(&d_src[i], sizeof(int) * size)); checkCudaErrors(cudaMemcpy(d_src[i], h_src[i], sizeof(int) * size, cudaMemcpyHostToDevice)); } // allocate memory for memcopy destination std::vector<int*> d_dst(n_regions); for (int i = 0; i < n_regions; i++) { checkCudaErrors(cudaMalloc(&d_dst[i], sizeof(int) * size)); } cudaDeviceSynchronize(); // Create stream cudaStream_t stream; checkCudaErrors(cudaStreamCreateWithPriority(&stream, cudaStreamNonBlocking, priority)); // Compute number of threads and blocks int blockSize; int minGridSize; int gridSize; cudaOccupancyMaxPotentialBlockSize(&minGridSize, &blockSize, kernel); gridSize = (size + blockSize - 1) / blockSize; // launch the kernel iterations times (i is advanced only by the inner loop). // each consecutive launch uses a different memory region cudaEvent_t start, end; checkCudaErrors(cudaEventCreate(&start)); checkCudaErrors(cudaEventCreate(&end)); checkCudaErrors(cudaEventRecord(start, stream)); for (int i = 0; i < iterations; ) { for (int j = 0; j < n_regions && i < iterations; j++, i++) { kernel<<<gridSize, blockSize, 0, stream>>>(d_dst[j], d_src[j], size, delay); checkCudaErrors(cudaStreamSynchronize(stream)); } } checkCudaErrors(cudaEventRecord(end, stream)); checkCudaErrors(cudaEventSynchronize(end)); for (int i = 0; i < n_regions; i++) { checkCudaErrors(cudaMemcpy(h_dst[i], d_dst[i], sizeof(int) * size, cudaMemcpyDeviceToHost)); } // check results of the last computation for (int i = 0; i < n_regions; i++) { ERR_NE(memcmp(h_dst[i], h_src[i], sizeof(int) * size), 0); } // Clean up for (int i = 0; i < n_regions; i++) { checkCudaErrors(cudaFree(d_src[i])); checkCudaErrors(cudaFree(d_dst[i])); free(h_src[i]); free(h_dst[i]); } // Print out average time float ms; checkCudaErrors(cudaEventElapsedTime(&ms, start, end)); // size iterations ms average printf("%zu, %d, %f, %f\n", size, iterations, ms, ms / (float) iterations); exit (EXIT_SUCCESS); }
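/* Background note for the priority check above (informational, not part of the original
 * file): cudaDeviceGetStreamPriorityRange(&least, &greatest) returns the numerically
 * largest (lowest-priority) value through its first argument and the numerically smallest
 * (highest-priority) value through its second; lower numbers mean higher scheduling
 * priority, and on many devices the usable range is [0, -1]. Hence "priority_hi" above
 * holds the negative end of the range, which is why the bounds test looks inverted.
 */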
425d7af08de5a06e3cb3a0e054ae460b66db734b.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2005 - 2015 Marc de Kamps // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software // without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF // USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // If you use this software in work leading to a scientific publication, you should include a reference there to // the 'currently valid reference', which can be found at http://miind.sourceforge.net #include <iostream> #include <hip/hip_runtime.h> #include <cstdio> #include <cmath> #include "CudaEuler.cuh" #include "CSRAdapter.cuh" using namespace CudaTwoDLib; const fptype TOLERANCE = 1e-9; #define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true) { if (code != hipSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } void CSRAdapter::FillMatrixMaps(const std::vector<TwoDLib::CSRMatrix>& vecmat) { for(inttype m = 0; m < vecmat.size(); m++) { _nval[m] = vecmat[m].Val().size(); checkCudaErrors(hipMalloc((fptype**)&_val[m],_nval[m]*sizeof(fptype))); // dont't depend on Val() being of fptype std::vector<fptype> vecval; for (fptype val: vecmat[m].Val()) vecval.push_back(val); checkCudaErrors(hipMemcpy(_val[m],&vecval[0],sizeof(fptype)*_nval[m],hipMemcpyHostToDevice)); _nia[m] = vecmat[m].Ia().size(); checkCudaErrors(hipMalloc((inttype**)&_ia[m],_nia[m]*sizeof(inttype))); std::vector<inttype> vecia; for(inttype ia: vecmat[m].Ia()) vecia.push_back(ia); checkCudaErrors(hipMemcpy(_ia[m],&vecia[0],sizeof(inttype)*_nia[m],hipMemcpyHostToDevice)); _nja[m] = vecmat[m].Ja().size(); checkCudaErrors(hipMalloc((inttype**)&_ja[m],_nja[m]*sizeof(inttype))); std::vector<inttype> vecja; for(inttype ja: vecmat[m].Ja()) vecja.push_back(ja); checkCudaErrors(hipMemcpy(_ja[m],&vecja[0],sizeof(inttype)*_nja[m],hipMemcpyHostToDevice)); } } void CSRAdapter::InitializeStaticGridEfficacies(const std::vector<inttype>& vecindex,const std::vector<fptype>& efficacy) { _nr_grid_connections = efficacy.size(); for(inttype m = 0; m < efficacy.size(); m++) { 
checkCudaErrors(hipMalloc((fptype**)&_goes[m],_nr_rows[vecindex[m]]*sizeof(fptype))); checkCudaErrors(hipMalloc((fptype**)&_stays[m],_nr_rows[vecindex[m]]*sizeof(fptype))); checkCudaErrors(hipMalloc((inttype**)&_offset1s[m],_nr_rows[vecindex[m]]*sizeof(inttype))); checkCudaErrors(hipMalloc((inttype**)&_offset2s[m],_nr_rows[vecindex[m]]*sizeof(inttype))); inttype numBlocks = (_nr_rows[vecindex[m]] + _blockSize - 1)/_blockSize; hipLaunchKernelGGL(( CudaCalculateGridEfficacies), dim3(numBlocks),dim3(_blockSize), 0, 0, _nr_rows[vecindex[m]], efficacy[m], _cell_widths[vecindex[m]], _stays[m], _goes[m], _offset1s[m], _offset2s[m]); } } void CSRAdapter::InitializeStaticGridConductanceEfficacies(const std::vector<inttype>& vecindex, const std::vector<fptype>& efficacy, const std::vector<fptype>& rest_vs) { _nr_grid_connections = efficacy.size(); checkCudaErrors(hipMalloc((fptype**)&_cell_vs,_group.getGroup().Vs().size()*sizeof(fptype))); std::vector<fptype> vecval; for (double val: _group.getGroup().Vs()) vecval.push_back((fptype)val); checkCudaErrors(hipMemcpy(_cell_vs,&vecval[0],_group.getGroup().Vs().size()*sizeof(fptype),hipMemcpyHostToDevice)); for(inttype m = 0; m < efficacy.size(); m++) { checkCudaErrors(hipMalloc((fptype**)&_goes[m],_nr_rows[vecindex[m]]*sizeof(fptype))); checkCudaErrors(hipMalloc((fptype**)&_stays[m],_nr_rows[vecindex[m]]*sizeof(fptype))); checkCudaErrors(hipMalloc((inttype**)&_offset1s[m],_nr_rows[vecindex[m]]*sizeof(inttype))); checkCudaErrors(hipMalloc((inttype**)&_offset2s[m],_nr_rows[vecindex[m]]*sizeof(inttype))); inttype numBlocks = (_nr_rows[vecindex[m]] + _blockSize - 1)/_blockSize; hipLaunchKernelGGL(( CudaCalculateGridEfficaciesWithConductance), dim3(numBlocks),dim3(_blockSize), 0, 0, _nr_rows[vecindex[m]], efficacy[m], _cell_widths[vecindex[m]], _cell_vs, rest_vs[m], _stays[m], _goes[m], _offset1s[m], _offset2s[m],_offsets[vecindex[m]]); } } void CSRAdapter::DeleteMatrixMaps() { for(inttype m = 0; m < _nr_m; m++) { hipFree(_val[m]); hipFree(_ia[m]); hipFree(_ja[m]); } } inttype CSRAdapter::NumberIterations(const CudaOde2DSystemAdapter& group, fptype euler_timestep) const { fptype tstep = group._group.MeshObjects()[0].TimeStep(); for ( const auto& mesh: group._group.MeshObjects() ) if (fabs(tstep - mesh.TimeStep()) > TOLERANCE){ std::cerr << "Not all meshes in this group have the same time step. 
" << tstep << " " << mesh.TimeStep() << " " << tstep - mesh.TimeStep() << std::endl; exit(0); } inttype n_steps = static_cast<inttype>(std::round(tstep/euler_timestep)); return n_steps; } void CSRAdapter::InspectMass(inttype i) { std::vector<fptype> hostvec(_group._n); checkCudaErrors(hipMemcpy(&hostvec[0],_group._mass,sizeof(fptype)*_group._n,hipMemcpyDeviceToHost)); } CSRAdapter::CSRAdapter(CudaOde2DSystemAdapter& group, const std::vector<TwoDLib::CSRMatrix>& vecmat, inttype nr_connections, fptype euler_timestep, const std::vector<inttype>& vecmat_indexes,const std::vector<inttype>& grid_transforms): _group(group), _euler_timestep(euler_timestep), _nr_iterations(NumberIterations(group,euler_timestep)), _nr_m(vecmat.size()), _nr_streams(vecmat.size()), _vecmats(vecmat_indexes), _grid_transforms(grid_transforms), _nval(std::vector<inttype>(vecmat.size())), _val(std::vector<fptype*>(vecmat.size())), _nia(std::vector<inttype>(vecmat.size())), _ia(std::vector<inttype*>(vecmat.size())), _nja(std::vector<inttype>(vecmat.size())), _ja(std::vector<inttype*>(vecmat.size())), _offsets(this->Offsets(vecmat)), _nr_rows(this->NrRows(vecmat)), _cell_widths(this->CellWidths(vecmat)), _goes(std::vector<fptype*>(grid_transforms.size())), _stays(std::vector<fptype*>(grid_transforms.size())), _offset1s(std::vector<int*>(grid_transforms.size())), _offset2s(std::vector<int*>(grid_transforms.size())), _blockSize(256), _numBlocks( (_group._n + _blockSize - 1) / _blockSize) { this->FillMatrixMaps(vecmat); this->FillDerivative(); this->CreateStreams(); } CSRAdapter::CSRAdapter(CudaOde2DSystemAdapter& group, const std::vector<TwoDLib::CSRMatrix>& vecmat, fptype euler_timestep): CSRAdapter(group,vecmat,vecmat.size(),euler_timestep, std::vector<inttype>(),std::vector<inttype>()){ for(unsigned int i=0; i<vecmat.size(); i++) _vecmats.push_back(i); } CSRAdapter::~CSRAdapter() { this->DeleteMatrixMaps(); this->DeleteDerivative(); this->DeleteStreams(); } void CSRAdapter::CreateStreams() { _streams = (hipStream_t *)malloc(_nr_streams*sizeof(hipStream_t)); for(int i = 0; i < _nr_streams; i++) hipStreamCreate(&_streams[i]); } void CSRAdapter::DeleteStreams() { free(_streams); } void CSRAdapter::FillDerivative() { checkCudaErrors(hipMalloc((fptype**)&_dydt,_group._n*sizeof(fptype))); } void CSRAdapter::DeleteDerivative() { hipFree(_dydt); } void CSRAdapter::ClearDerivative() { inttype n=_group._n; hipLaunchKernelGGL(( CudaClearDerivative), dim3(_numBlocks),dim3(_blockSize), 0, 0, n,_dydt,_group._mass); } std::vector<inttype> CSRAdapter::NrRows(const std::vector<TwoDLib::CSRMatrix>& vecmat) const { std::vector<inttype> vecret; for (inttype m = 0; m < vecmat.size(); m++) vecret.push_back(vecmat[m].NrRows()); return vecret; } std::vector<fptype> CSRAdapter::CellWidths(const std::vector<TwoDLib::CSRMatrix>& vecmat) const { std::vector<fptype> vecret; for (inttype m = 0; m < _grid_transforms.size(); m++){ vecret.push_back(_group.getGroup().MeshObjects()[_grid_transforms[m]].getCellWidth()); } return vecret; } std::vector<inttype> CSRAdapter::Offsets(const std::vector<TwoDLib::CSRMatrix>& vecmat) const { std::vector<inttype> vecret; for (inttype m = 0; m < vecmat.size(); m++) vecret.push_back(vecmat[m].Offset()); return vecret; } void CSRAdapter::CalculateDerivative(const std::vector<fptype>& vecrates) { for(inttype m : _vecmats) { // be careful to use this block size inttype numBlocks = (_nr_rows[m] + _blockSize - 1)/_blockSize; hipLaunchKernelGGL(( CudaCalculateDerivative), dim3(numBlocks),dim3(_blockSize), 0, 0, 
_nr_rows[m],vecrates[m],_dydt,_group._mass,_val[m],_ia[m],_ja[m],_group._map,_offsets[m]); } } void CSRAdapter::CalculateGridDerivative(const std::vector<inttype>& vecindex, const std::vector<fptype>& vecrates, const std::vector<fptype>& vecstays, const std::vector<fptype>& vecgoes, const std::vector<int>& vecoff1s, const std::vector<int>& vecoff2s) { for(inttype m = 0; m < vecindex.size(); m++) { // be careful to use this block size inttype numBlocks = (_nr_rows[vecindex[m]] + _blockSize - 1)/_blockSize; hipLaunchKernelGGL(( CudaCalculateGridDerivative), dim3(numBlocks),dim3(_blockSize),0,_streams[vecindex[m]], _nr_rows[vecindex[m]],vecrates[m],vecstays[m],vecgoes[m],vecoff1s[m],vecoff2s[m],_dydt,_group._mass,_offsets[m]); } hipDeviceSynchronize(); } void CSRAdapter::CalculateMeshGridDerivative(const std::vector<inttype>& vecindex, const std::vector<fptype>& vecrates, const std::vector<fptype>& vecstays, const std::vector<fptype>& vecgoes, const std::vector<int>& vecoff1s, const std::vector<int>& vecoff2s) { for(inttype m = 0; m < vecstays.size(); m++) { // be careful to use this block size inttype numBlocks = (_nr_rows[vecindex[m]] + _blockSize - 1)/_blockSize; hipLaunchKernelGGL(( CudaCalculateGridDerivative), dim3(numBlocks),dim3(_blockSize),0,_streams[vecindex[m]], _nr_rows[vecindex[m]],vecrates[m],vecstays[m],vecgoes[m],vecoff1s[m],vecoff2s[m],_dydt,_group._mass,_offsets[vecindex[m]]); } for(int n=vecstays.size(); n<vecrates.size(); n++) { inttype mat_index = _grid_transforms.size() + (n - vecstays.size()); // be careful to use this block size inttype numBlocks = (_nr_rows[mat_index] + _blockSize - 1)/_blockSize; hipLaunchKernelGGL(( CudaCalculateDerivative), dim3(numBlocks),dim3(_blockSize),0,_streams[vecindex[n]], _nr_rows[mat_index],vecrates[n],_dydt,_group._mass,_val[mat_index],_ia[mat_index],_ja[mat_index],_group._map,_offsets[mat_index]); } hipDeviceSynchronize(); } void CSRAdapter::CalculateMeshGridDerivativeWithEfficacy(const std::vector<inttype>& vecindex, const std::vector<fptype>& vecrates) { for(inttype m = 0; m < _nr_grid_connections; m++) { // be careful to use this block size inttype numBlocks = (_nr_rows[vecindex[m]] + _blockSize - 1)/_blockSize; hipLaunchKernelGGL(( CudaCalculateGridDerivativeWithEfficacy), dim3(numBlocks),dim3(_blockSize),0,_streams[vecindex[m]], _nr_rows[vecindex[m]], vecrates[m],_stays[m], _goes[m], _offset1s[m], _offset2s[m],_dydt,_group._mass,_offsets[vecindex[m]]); } for(int n=_nr_grid_connections; n<vecrates.size(); n++) { inttype mat_index = _grid_transforms.size() + (n - _nr_grid_connections); // be careful to use this block size inttype numBlocks = (_nr_rows[mat_index] + _blockSize - 1)/_blockSize; hipLaunchKernelGGL(( CudaCalculateDerivative), dim3(numBlocks),dim3(_blockSize),0,_streams[vecindex[n]], _nr_rows[mat_index],vecrates[n],_dydt,_group._mass,_val[mat_index],_ia[mat_index],_ja[mat_index],_group._map,_offsets[mat_index]); } hipDeviceSynchronize(); } void CSRAdapter::SingleTransformStep() { for(inttype m : _grid_transforms) { // be careful to use this block size inttype numBlocks = (_nr_rows[m] + _blockSize - 1)/_blockSize; hipLaunchKernelGGL(( CudaSingleTransformStep), dim3(numBlocks),dim3(_blockSize),0,_streams[m], _nr_rows[m],_dydt,_group._mass,_val[m],_ia[m],_ja[m],_group._map,_offsets[m]); } } void CSRAdapter::AddDerivative() { hipLaunchKernelGGL(( EulerStep), dim3(_numBlocks),dim3(_blockSize), 0, 0, _group._n,_dydt,_group._mass,_euler_timestep); }
void CSRAdapter::AddDerivativeFull() { hipLaunchKernelGGL(( EulerStep), dim3(_numBlocks),dim3(_blockSize), 0, 0, _group._n,_dydt,_group._mass, 1.0); }
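/* Sketch of a typical per-time-step driver loop for this adapter (illustrative only;
 * 'adapter' and 'rates' are hypothetical caller-side objects, and the exact call sequence
 * is dictated by the calling simulation, not by this file). One mesh time step is
 * integrated with a number of forward-Euler sub-steps, e.g. the value returned by
 * NumberIterations():
 *
 *   for (inttype i = 0; i < n_euler_substeps; i++) {
 *       adapter.ClearDerivative();           // zero the _dydt buffer
 *       adapter.CalculateDerivative(rates);  // accumulate per-connection CSR contributions
 *       adapter.AddDerivative();             // EulerStep: advance _mass by _euler_timestep * _dydt
 *   }
 */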
425d7af08de5a06e3cb3a0e054ae460b66db734b.cu
// Copyright (c) 2005 - 2015 Marc de Kamps // All rights reserved. // // Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software // without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY // DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF // USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING // NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // // If you use this software in work leading to a scientific publication, you should include a reference there to // the 'currently valid reference', which can be found at http://miind.sourceforge.net #include <iostream> #include <cuda_runtime.h> #include <cstdio> #include <cmath> #include "CudaEuler.cuh" #include "CSRAdapter.cuh" using namespace CudaTwoDLib; const fptype TOLERANCE = 1e-9; #define checkCudaErrors(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } void CSRAdapter::FillMatrixMaps(const std::vector<TwoDLib::CSRMatrix>& vecmat) { for(inttype m = 0; m < vecmat.size(); m++) { _nval[m] = vecmat[m].Val().size(); checkCudaErrors(cudaMalloc((fptype**)&_val[m],_nval[m]*sizeof(fptype))); // dont't depend on Val() being of fptype std::vector<fptype> vecval; for (fptype val: vecmat[m].Val()) vecval.push_back(val); checkCudaErrors(cudaMemcpy(_val[m],&vecval[0],sizeof(fptype)*_nval[m],cudaMemcpyHostToDevice)); _nia[m] = vecmat[m].Ia().size(); checkCudaErrors(cudaMalloc((inttype**)&_ia[m],_nia[m]*sizeof(inttype))); std::vector<inttype> vecia; for(inttype ia: vecmat[m].Ia()) vecia.push_back(ia); checkCudaErrors(cudaMemcpy(_ia[m],&vecia[0],sizeof(inttype)*_nia[m],cudaMemcpyHostToDevice)); _nja[m] = vecmat[m].Ja().size(); checkCudaErrors(cudaMalloc((inttype**)&_ja[m],_nja[m]*sizeof(inttype))); std::vector<inttype> vecja; for(inttype ja: vecmat[m].Ja()) vecja.push_back(ja); checkCudaErrors(cudaMemcpy(_ja[m],&vecja[0],sizeof(inttype)*_nja[m],cudaMemcpyHostToDevice)); } } void CSRAdapter::InitializeStaticGridEfficacies(const std::vector<inttype>& vecindex,const std::vector<fptype>& efficacy) { _nr_grid_connections = efficacy.size(); for(inttype m = 0; m < efficacy.size(); m++) { checkCudaErrors(cudaMalloc((fptype**)&_goes[m],_nr_rows[vecindex[m]]*sizeof(fptype))); 
checkCudaErrors(cudaMalloc((fptype**)&_stays[m],_nr_rows[vecindex[m]]*sizeof(fptype))); checkCudaErrors(cudaMalloc((inttype**)&_offset1s[m],_nr_rows[vecindex[m]]*sizeof(inttype))); checkCudaErrors(cudaMalloc((inttype**)&_offset2s[m],_nr_rows[vecindex[m]]*sizeof(inttype))); inttype numBlocks = (_nr_rows[vecindex[m]] + _blockSize - 1)/_blockSize; CudaCalculateGridEfficacies<<<numBlocks,_blockSize>>>(_nr_rows[vecindex[m]], efficacy[m], _cell_widths[vecindex[m]], _stays[m], _goes[m], _offset1s[m], _offset2s[m]); } } void CSRAdapter::InitializeStaticGridConductanceEfficacies(const std::vector<inttype>& vecindex, const std::vector<fptype>& efficacy, const std::vector<fptype>& rest_vs) { _nr_grid_connections = efficacy.size(); checkCudaErrors(cudaMalloc((fptype**)&_cell_vs,_group.getGroup().Vs().size()*sizeof(fptype))); std::vector<fptype> vecval; for (double val: _group.getGroup().Vs()) vecval.push_back((fptype)val); checkCudaErrors(cudaMemcpy(_cell_vs,&vecval[0],_group.getGroup().Vs().size()*sizeof(fptype),cudaMemcpyHostToDevice)); for(inttype m = 0; m < efficacy.size(); m++) { checkCudaErrors(cudaMalloc((fptype**)&_goes[m],_nr_rows[vecindex[m]]*sizeof(fptype))); checkCudaErrors(cudaMalloc((fptype**)&_stays[m],_nr_rows[vecindex[m]]*sizeof(fptype))); checkCudaErrors(cudaMalloc((inttype**)&_offset1s[m],_nr_rows[vecindex[m]]*sizeof(inttype))); checkCudaErrors(cudaMalloc((inttype**)&_offset2s[m],_nr_rows[vecindex[m]]*sizeof(inttype))); inttype numBlocks = (_nr_rows[vecindex[m]] + _blockSize - 1)/_blockSize; CudaCalculateGridEfficaciesWithConductance<<<numBlocks,_blockSize>>>(_nr_rows[vecindex[m]], efficacy[m], _cell_widths[vecindex[m]], _cell_vs, rest_vs[m], _stays[m], _goes[m], _offset1s[m], _offset2s[m],_offsets[vecindex[m]]); } } void CSRAdapter::DeleteMatrixMaps() { for(inttype m = 0; m < _nr_m; m++) { cudaFree(_val[m]); cudaFree(_ia[m]); cudaFree(_ja[m]); } } inttype CSRAdapter::NumberIterations(const CudaOde2DSystemAdapter& group, fptype euler_timestep) const { fptype tstep = group._group.MeshObjects()[0].TimeStep(); for ( const auto& mesh: group._group.MeshObjects() ) if (fabs(tstep - mesh.TimeStep()) > TOLERANCE){ std::cerr << "Not all meshes in this group have the same time step. 
" << tstep << " " << mesh.TimeStep() << " " << tstep - mesh.TimeStep() << std::endl; exit(0); } inttype n_steps = static_cast<inttype>(std::round(tstep/euler_timestep)); return n_steps; } void CSRAdapter::InspectMass(inttype i) { std::vector<fptype> hostvec(_group._n); checkCudaErrors(cudaMemcpy(&hostvec[0],_group._mass,sizeof(fptype)*_group._n,cudaMemcpyDeviceToHost)); } CSRAdapter::CSRAdapter(CudaOde2DSystemAdapter& group, const std::vector<TwoDLib::CSRMatrix>& vecmat, inttype nr_connections, fptype euler_timestep, const std::vector<inttype>& vecmat_indexes,const std::vector<inttype>& grid_transforms): _group(group), _euler_timestep(euler_timestep), _nr_iterations(NumberIterations(group,euler_timestep)), _nr_m(vecmat.size()), _nr_streams(vecmat.size()), _vecmats(vecmat_indexes), _grid_transforms(grid_transforms), _nval(std::vector<inttype>(vecmat.size())), _val(std::vector<fptype*>(vecmat.size())), _nia(std::vector<inttype>(vecmat.size())), _ia(std::vector<inttype*>(vecmat.size())), _nja(std::vector<inttype>(vecmat.size())), _ja(std::vector<inttype*>(vecmat.size())), _offsets(this->Offsets(vecmat)), _nr_rows(this->NrRows(vecmat)), _cell_widths(this->CellWidths(vecmat)), _goes(std::vector<fptype*>(grid_transforms.size())), _stays(std::vector<fptype*>(grid_transforms.size())), _offset1s(std::vector<int*>(grid_transforms.size())), _offset2s(std::vector<int*>(grid_transforms.size())), _blockSize(256), _numBlocks( (_group._n + _blockSize - 1) / _blockSize) { this->FillMatrixMaps(vecmat); this->FillDerivative(); this->CreateStreams(); } CSRAdapter::CSRAdapter(CudaOde2DSystemAdapter& group, const std::vector<TwoDLib::CSRMatrix>& vecmat, fptype euler_timestep): CSRAdapter(group,vecmat,vecmat.size(),euler_timestep, std::vector<inttype>(),std::vector<inttype>()){ for(unsigned int i=0; i<vecmat.size(); i++) _vecmats.push_back(i); } CSRAdapter::~CSRAdapter() { this->DeleteMatrixMaps(); this->DeleteDerivative(); this->DeleteStreams(); } void CSRAdapter::CreateStreams() { _streams = (cudaStream_t *)malloc(_nr_streams*sizeof(cudaStream_t)); for(int i = 0; i < _nr_streams; i++) cudaStreamCreate(&_streams[i]); } void CSRAdapter::DeleteStreams() { free(_streams); } void CSRAdapter::FillDerivative() { checkCudaErrors(cudaMalloc((fptype**)&_dydt,_group._n*sizeof(fptype))); } void CSRAdapter::DeleteDerivative() { cudaFree(_dydt); } void CSRAdapter::ClearDerivative() { inttype n=_group._n; CudaClearDerivative<<<_numBlocks,_blockSize>>>(n,_dydt,_group._mass); } std::vector<inttype> CSRAdapter::NrRows(const std::vector<TwoDLib::CSRMatrix>& vecmat) const { std::vector<inttype> vecret; for (inttype m = 0; m < vecmat.size(); m++) vecret.push_back(vecmat[m].NrRows()); return vecret; } std::vector<fptype> CSRAdapter::CellWidths(const std::vector<TwoDLib::CSRMatrix>& vecmat) const { std::vector<fptype> vecret; for (inttype m = 0; m < _grid_transforms.size(); m++){ vecret.push_back(_group.getGroup().MeshObjects()[_grid_transforms[m]].getCellWidth()); } return vecret; } std::vector<inttype> CSRAdapter::Offsets(const std::vector<TwoDLib::CSRMatrix>& vecmat) const { std::vector<inttype> vecret; for (inttype m = 0; m < vecmat.size(); m++) vecret.push_back(vecmat[m].Offset()); return vecret; } void CSRAdapter::CalculateDerivative(const std::vector<fptype>& vecrates) { for(inttype m : _vecmats) { // be careful to use this block size inttype numBlocks = (_nr_rows[m] + _blockSize - 1)/_blockSize; 
CudaCalculateDerivative<<<numBlocks,_blockSize>>>(_nr_rows[m],vecrates[m],_dydt,_group._mass,_val[m],_ia[m],_ja[m],_group._map,_offsets[m]); } } void CSRAdapter::CalculateGridDerivative(const std::vector<inttype>& vecindex, const std::vector<fptype>& vecrates, const std::vector<fptype>& vecstays, const std::vector<fptype>& vecgoes, const std::vector<int>& vecoff1s, const std::vector<int>& vecoff2s) { for(inttype m = 0; m < vecindex.size(); m++) { // be careful to use this block size inttype numBlocks = (_nr_rows[vecindex[m]] + _blockSize - 1)/_blockSize; CudaCalculateGridDerivative<<<numBlocks,_blockSize,0,_streams[vecindex[m]]>>>(_nr_rows[vecindex[m]],vecrates[m],vecstays[m],vecgoes[m],vecoff1s[m],vecoff2s[m],_dydt,_group._mass,_offsets[m]); } cudaDeviceSynchronize(); } void CSRAdapter::CalculateMeshGridDerivative(const std::vector<inttype>& vecindex, const std::vector<fptype>& vecrates, const std::vector<fptype>& vecstays, const std::vector<fptype>& vecgoes, const std::vector<int>& vecoff1s, const std::vector<int>& vecoff2s) { for(inttype m = 0; m < vecstays.size(); m++) { // be careful to use this block size inttype numBlocks = (_nr_rows[vecindex[m]] + _blockSize - 1)/_blockSize; CudaCalculateGridDerivative<<<numBlocks,_blockSize,0,_streams[vecindex[m]]>>>(_nr_rows[vecindex[m]],vecrates[m],vecstays[m],vecgoes[m],vecoff1s[m],vecoff2s[m],_dydt,_group._mass,_offsets[vecindex[m]]); } for(int n=vecstays.size(); n<vecrates.size(); n++) { inttype mat_index = _grid_transforms.size() + (n - vecstays.size()); // be careful to use this block size inttype numBlocks = (_nr_rows[mat_index] + _blockSize - 1)/_blockSize; CudaCalculateDerivative<<<numBlocks,_blockSize,0,_streams[vecindex[n]]>>>(_nr_rows[mat_index],vecrates[n],_dydt,_group._mass,_val[mat_index],_ia[mat_index],_ja[mat_index],_group._map,_offsets[mat_index]); } cudaDeviceSynchronize(); } void CSRAdapter::CalculateMeshGridDerivativeWithEfficacy(const std::vector<inttype>& vecindex, const std::vector<fptype>& vecrates) { for(inttype m = 0; m < _nr_grid_connections; m++) { // be careful to use this block size inttype numBlocks = (_nr_rows[vecindex[m]] + _blockSize - 1)/_blockSize; CudaCalculateGridDerivativeWithEfficacy<<<numBlocks,_blockSize,0,_streams[vecindex[m]]>>>(_nr_rows[vecindex[m]], vecrates[m],_stays[m], _goes[m], _offset1s[m], _offset2s[m],_dydt,_group._mass,_offsets[vecindex[m]]); } for(int n=_nr_grid_connections; n<vecrates.size(); n++) { inttype mat_index = _grid_transforms.size() + (n - _nr_grid_connections); // be careful to use this block size inttype numBlocks = (_nr_rows[mat_index] + _blockSize - 1)/_blockSize; CudaCalculateDerivative<<<numBlocks,_blockSize,0,_streams[vecindex[n]]>>>(_nr_rows[mat_index],vecrates[n],_dydt,_group._mass,_val[mat_index],_ia[mat_index],_ja[mat_index],_group._map,_offsets[mat_index]); } cudaDeviceSynchronize(); } void CSRAdapter::SingleTransformStep() { for(inttype m : _grid_transforms) { // be careful to use this block size inttype numBlocks = (_nr_rows[m] + _blockSize - 1)/_blockSize; CudaSingleTransformStep<<<numBlocks,_blockSize,0,_streams[m]>>>(_nr_rows[m],_dydt,_group._mass,_val[m],_ia[m],_ja[m],_group._map,_offsets[m]); } } void CSRAdapter::AddDerivative() { EulerStep<<<_numBlocks,_blockSize>>>(_group._n,_dydt,_group._mass,_euler_timestep); } void CSRAdapter::AddDerivativeFull() { EulerStep<<<_numBlocks,_blockSize>>>(_group._n,_dydt,_group._mass, 1.0); }
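/* Illustrative arithmetic for the launch configuration used throughout this file (not part
 * of the original): with _blockSize = 256, a matrix with _nr_rows[m] = 1000 rows gives
 * numBlocks = (1000 + 255) / 256 = 4, i.e. 1024 threads in total, so the per-row kernels
 * are expected to bounds-check the 24 surplus thread indices against the row count they
 * receive as their first argument.
 */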
099e610649f2dbc90d47727f8df4e7089ac7e416.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

int main()
{
    int n;
    int i, j, k;
    printf("Please enter the size of the matrix: \n");
    if (scanf("%d", &n) != 1 || n <= 0) {
        printf("Invalid matrix size.\n");
        return 1;
    }

    int *a, *b, *c;
    // Pinned (page-locked) host allocations; nothing is transferred to the GPU in this CPU baseline.
    hipHostMalloc((void**)&a, sizeof(int) * n * n);
    hipHostMalloc((void**)&b, sizeof(int) * n * n);
    hipHostMalloc((void**)&c, sizeof(int) * n * n);

    // Fill both input matrices with random 0/1 entries.
    for (i = 0; i < n; i++){
        for (j = 0; j < n; j++){
            a[i * n + j] = rand() % 2;
            b[i * n + j] = rand() % 2;
        }
    }

    printf("Start calculating...\n");
    clock_t start_time = clock();
    // Naive O(n^3) matrix multiplication on the CPU.
    for (i = 0; i < n; i++){
        for (j = 0; j < n; j++){
            int tmp = 0;
            for (k = 0; k < n; k++)
                tmp += a[i * n + k] * b[k * n + j];
            c[i * n + j] = tmp;
        }
    }
    clock_t end_time = clock();
    printf("Time consumed calculating the %dx%d matrix product on the CPU: %f ms.\n",
           n, n, static_cast<double>(end_time - start_time)/CLOCKS_PER_SEC*1000);

    hipHostFree(a);
    hipHostFree(b);
    hipHostFree(c);
    return 0;
}
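/* Rough cost estimate (informational, not in the original file): the triple loop above
 * performs n*n*n integer multiply-adds, e.g. 1024^3 is about 1.07e9 operations for
 * n = 1024, so the measured time grows cubically with n. hipHostMalloc only provides
 * page-locked host buffers here; no kernel is launched and no data is copied to the GPU.
 */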
099e610649f2dbc90d47727f8df4e7089ac7e416.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <time.h>

int main()
{
    int n;
    int i, j, k;
    printf("Please enter the size of the matrix: \n");
    if (scanf("%d", &n) != 1 || n <= 0) {
        printf("Invalid matrix size.\n");
        return 1;
    }

    int *a, *b, *c;
    // Pinned (page-locked) host allocations; nothing is transferred to the GPU in this CPU baseline.
    cudaMallocHost((void**)&a, sizeof(int) * n * n);
    cudaMallocHost((void**)&b, sizeof(int) * n * n);
    cudaMallocHost((void**)&c, sizeof(int) * n * n);

    // Fill both input matrices with random 0/1 entries.
    for (i = 0; i < n; i++){
        for (j = 0; j < n; j++){
            a[i * n + j] = rand() % 2;
            b[i * n + j] = rand() % 2;
        }
    }

    printf("Start calculating...\n");
    clock_t start_time = clock();
    // Naive O(n^3) matrix multiplication on the CPU.
    for (i = 0; i < n; i++){
        for (j = 0; j < n; j++){
            int tmp = 0;
            for (k = 0; k < n; k++)
                tmp += a[i * n + k] * b[k * n + j];
            c[i * n + j] = tmp;
        }
    }
    clock_t end_time = clock();
    printf("Time consumed calculating the %dx%d matrix product on the CPU: %f ms.\n",
           n, n, static_cast<double>(end_time - start_time)/CLOCKS_PER_SEC*1000);

    cudaFreeHost(a);
    cudaFreeHost(b);
    cudaFreeHost(c);
    return 0;
}
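/* Note (informational, not in the original): clock() measures processor time, not
 * wall-clock time. For this single-threaded loop the two are nearly identical, but a
 * monotonic wall clock (e.g. std::chrono::steady_clock) would be the usual choice when
 * comparing against GPU timings taken with cudaEvent timers.
 */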
882844634b2083db6c4aa7013e9548ba39de1eb8.hip
// !!! This is a file automatically generated by hipify!!! /* Implements the sequential cusp vectors. */ #include <petscconf.h> PETSC_CUDA_EXTERN_C_BEGIN #include <petsc-private/vecimpl.h> /*I "petscvec.h" I*/ #include <../src/vec/vec/impls/dvecimpl.h> PETSC_CUDA_EXTERN_C_END #include <../src/vec/vec/impls/seq/seqcusp/cuspvecimpl.h> #include <hip/hip_runtime.h> #undef __FUNCT__ #define __FUNCT__ "VecCUSPAllocateCheckHost" /* Allocates space for the vector array on the Host if it does not exist. Does NOT change the PetscCUSPFlag for the vector Does NOT zero the CUSP array */ PetscErrorCode VecCUSPAllocateCheckHost(Vec v) { PetscErrorCode ierr; hipError_t err; PetscScalar *array; Vec_Seq *s; PetscInt n = v->map->n; PetscFunctionBegin; s = (Vec_Seq*)v->data; ierr = VecCUSPAllocateCheck(v);CHKERRQ(ierr); if (s->array == 0) { ierr = PetscMalloc(n*sizeof(PetscScalar),&array);CHKERRQ(ierr); ierr = PetscLogObjectMemory((PetscObject)v,n*sizeof(PetscScalar));CHKERRQ(ierr); s->array = array; s->array_allocated = array; err = hipHostRegister(s->array, n*sizeof(PetscScalar),hipHostRegisterMapped);CHKERRCUSP(err); ((Vec_CUSP*)v->spptr)->hostDataRegisteredAsPageLocked = PETSC_TRUE; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPAllocateCheck" /* Allocates space for the vector array on the GPU if it does not exist. Does NOT change the PetscCUSPFlag for the vector Does NOT zero the CUSP array */ PetscErrorCode VecCUSPAllocateCheck(Vec v) { hipError_t err; hipStream_t stream; Vec_Seq *s = (Vec_Seq*)v->data; PetscFunctionBegin; // First allocate memory on the GPU if needed if (!v->spptr) { try { v->spptr = new Vec_CUSP; ((Vec_CUSP*)v->spptr)->GPUarray = new CUSPARRAY; ((Vec_CUSP*)v->spptr)->GPUarray->resize((PetscBLASInt)v->map->n); err = hipStreamCreate(&stream);CHKERRCUSP(err); ((Vec_CUSP*)v->spptr)->stream = stream; ((Vec_CUSP*)v->spptr)->hostDataRegisteredAsPageLocked = PETSC_FALSE; /* If the array is already allocated, one can register it as (page-locked) mapped. 
This can substantially accelerate data transfer across the PCI Express */ if (s->array) { err = hipHostRegister(s->array, v->map->n*sizeof(PetscScalar),hipHostRegisterMapped);CHKERRCUSP(err); ((Vec_CUSP*)v->spptr)->hostDataRegisteredAsPageLocked = PETSC_TRUE; } v->ops->destroy = VecDestroy_SeqCUSP; } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyToGPU" /* Copies a vector from the CPU to the GPU unless we already have an up-to-date copy on the GPU */ PetscErrorCode VecCUSPCopyToGPU(Vec v) { PetscErrorCode ierr; hipError_t err; Vec_CUSP *veccusp; CUSPARRAY *varray; hipStream_t stream; PetscFunctionBegin; ierr = VecCUSPAllocateCheck(v);CHKERRQ(ierr); if (v->valid_GPU_array == PETSC_CUSP_CPU) { ierr = PetscLogEventBegin(VEC_CUSPCopyToGPU,v,0,0,0);CHKERRQ(ierr); try { veccusp=(Vec_CUSP*)v->spptr; varray=veccusp->GPUarray; stream=veccusp->stream; err = hipMemcpyAsync(varray->data().get(), *(PetscScalar**)v->data, v->map->n*sizeof(PetscScalar), hipMemcpyHostToDevice, stream);CHKERRCUSP(err); err = hipStreamSynchronize(stream);CHKERRCUSP(err); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogEventEnd(VEC_CUSPCopyToGPU,v,0,0,0);CHKERRQ(ierr); v->valid_GPU_array = PETSC_CUSP_BOTH; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyToGPUSome" static PetscErrorCode VecCUSPCopyToGPUSome(Vec v, PetscCUSPIndices ci) { CUSPARRAY *varray; PetscErrorCode ierr; hipError_t err; PetscScalar *cpuPtr, *gpuPtr; hipStream_t stream; Vec_Seq *s; PetscFunctionBegin; ierr = VecCUSPAllocateCheck(v);CHKERRQ(ierr); if (v->valid_GPU_array == PETSC_CUSP_CPU) { stream=((Vec_CUSP*)v->spptr)->stream; s = (Vec_Seq*)v->data; ierr = PetscLogEventBegin(VEC_CUSPCopyToGPUSome,v,0,0,0);CHKERRQ(ierr); varray = ((Vec_CUSP*)v->spptr)->GPUarray; gpuPtr = varray->data().get() + ci->recvLowestIndex; cpuPtr = s->array + ci->recvLowestIndex; /* Note : this code copies the smallest contiguous chunk of data containing ALL of the indices */ err = hipMemcpyAsync(gpuPtr, cpuPtr, ci->nr*sizeof(PetscScalar), hipMemcpyHostToDevice, stream);CHKERRCUSP(err); err = hipStreamSynchronize(stream);CHKERRCUSP(err); #if 0 Vec_Seq *s; s = (Vec_Seq*)v->data; CUSPINTARRAYCPU *indicesCPU=&ci->recvIndicesCPU; CUSPINTARRAYGPU *indicesGPU=&ci->recvIndicesGPU; thrust::copy(thrust::make_permutation_iterator(s->array,indicesCPU->begin()), thrust::make_permutation_iterator(s->array,indicesCPU->end()), thrust::make_permutation_iterator(varray->begin(),indicesGPU->begin())); #endif // Set the buffer states v->valid_GPU_array = PETSC_CUSP_BOTH; ierr = PetscLogEventEnd(VEC_CUSPCopyToGPUSome,v,0,0,0);CHKERRQ(ierr); } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyFromGPU" /* VecCUSPCopyFromGPU - Copies a vector from the GPU to the CPU unless we already have an up-to-date copy on the CPU */ PetscErrorCode VecCUSPCopyFromGPU(Vec v) { PetscErrorCode ierr; hipError_t err; Vec_CUSP *veccusp; CUSPARRAY *varray; hipStream_t stream; PetscFunctionBegin; ierr = VecCUSPAllocateCheckHost(v);CHKERRQ(ierr); if (v->valid_GPU_array == PETSC_CUSP_GPU) { ierr = PetscLogEventBegin(VEC_CUSPCopyFromGPU,v,0,0,0);CHKERRQ(ierr); try { veccusp=(Vec_CUSP*)v->spptr; varray=veccusp->GPUarray; stream=veccusp->stream; err = hipMemcpyAsync(*(PetscScalar**)v->data, varray->data().get(), v->map->n*sizeof(PetscScalar), hipMemcpyDeviceToHost, stream);CHKERRCUSP(err); err = 
hipStreamSynchronize(stream);CHKERRCUSP(err); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogEventEnd(VEC_CUSPCopyFromGPU,v,0,0,0);CHKERRQ(ierr); v->valid_GPU_array = PETSC_CUSP_BOTH; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyFromGPUSome" /* Note that this function only copies *some* of the values up from the GPU to CPU, which means that we need recombine the data at some point before using any of the standard functions. We could add another few flag-types to keep track of this, or treat things like VecGetArray VecRestoreArray where you have to always call in pairs */ PetscErrorCode VecCUSPCopyFromGPUSome(Vec v, PetscCUSPIndices ci) { CUSPARRAY *varray; PetscErrorCode ierr; hipError_t err; PetscScalar *cpuPtr, *gpuPtr; hipStream_t stream; Vec_Seq *s; PetscFunctionBegin; ierr = VecCUSPAllocateCheckHost(v);CHKERRQ(ierr); if (v->valid_GPU_array == PETSC_CUSP_GPU) { ierr = PetscLogEventBegin(VEC_CUSPCopyFromGPUSome,v,0,0,0);CHKERRQ(ierr); stream=((Vec_CUSP*)v->spptr)->stream; varray=((Vec_CUSP*)v->spptr)->GPUarray; s = (Vec_Seq*)v->data; gpuPtr = varray->data().get() + ci->sendLowestIndex; cpuPtr = s->array + ci->sendLowestIndex; /* Note : this code copies the smallest contiguous chunk of data containing ALL of the indices */ err = hipMemcpyAsync(cpuPtr, gpuPtr, ci->ns*sizeof(PetscScalar), hipMemcpyDeviceToHost, stream);CHKERRCUSP(err); err = hipStreamSynchronize(stream);CHKERRCUSP(err); #if 0 Vec_Seq *s; s = (Vec_Seq*)v->data; CUSPINTARRAYCPU *indicesCPU=&ci->sendIndicesCPU; CUSPINTARRAYGPU *indicesGPU=&ci->sendIndicesGPU; thrust::copy(thrust::make_permutation_iterator(varray->begin(),indicesGPU->begin()), thrust::make_permutation_iterator(varray->begin(),indicesGPU->end()), thrust::make_permutation_iterator(s->array,indicesCPU->begin())); #endif ierr = VecCUSPRestoreArrayRead(v,&varray);CHKERRQ(ierr); ierr = PetscLogEventEnd(VEC_CUSPCopyFromGPUSome,v,0,0,0);CHKERRQ(ierr); v->valid_GPU_array = PETSC_CUSP_BOTH; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCopy_SeqCUSP_Private" static PetscErrorCode VecCopy_SeqCUSP_Private(Vec xin,Vec yin) { PetscScalar *ya; const PetscScalar *xa; PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPAllocateCheckHost(xin); ierr = VecCUSPAllocateCheckHost(yin); if (xin != yin) { ierr = VecGetArrayRead(xin,&xa);CHKERRQ(ierr); ierr = VecGetArray(yin,&ya);CHKERRQ(ierr); ierr = PetscMemcpy(ya,xa,xin->map->n*sizeof(PetscScalar));CHKERRQ(ierr); ierr = VecRestoreArrayRead(xin,&xa);CHKERRQ(ierr); ierr = VecRestoreArray(yin,&ya);CHKERRQ(ierr); } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecSetRandom_SeqCUSP_Private" static PetscErrorCode VecSetRandom_SeqCUSP_Private(Vec xin,PetscRandom r) { PetscErrorCode ierr; PetscInt n = xin->map->n,i; PetscScalar *xx; PetscFunctionBegin; ierr = VecGetArray(xin,&xx);CHKERRQ(ierr); for (i=0; i<n; i++) {ierr = PetscRandomGetValue(r,&xx[i]);CHKERRQ(ierr);} ierr = VecRestoreArray(xin,&xx);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecDestroy_SeqCUSP_Private" static PetscErrorCode VecDestroy_SeqCUSP_Private(Vec v) { Vec_Seq *vs = (Vec_Seq*)v->data; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscObjectAMSViewOff(v);CHKERRQ(ierr); #if defined(PETSC_USE_LOG) PetscLogObjectState((PetscObject)v,"Length=%D",v->map->n); #endif if (vs->array_allocated) ierr = PetscFree(vs->array_allocated);CHKERRQ(ierr); ierr = PetscFree(vs);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ 
#define __FUNCT__ "VecResetArray_SeqCUSP_Private" static PetscErrorCode VecResetArray_SeqCUSP_Private(Vec vin) { Vec_Seq *v = (Vec_Seq*)vin->data; PetscFunctionBegin; v->array = v->unplacedarray; v->unplacedarray = 0; PetscFunctionReturn(0); } /* these following 3 public versions are necessary because we use CUSP in the regular PETSc code and these need to be called from plain C code. */ #undef __FUNCT__ #define __FUNCT__ "VecCUSPAllocateCheck_Public" PetscErrorCode VecCUSPAllocateCheck_Public(Vec v) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPAllocateCheck(v);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyToGPU_Public" PetscErrorCode VecCUSPCopyToGPU_Public(Vec v) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyToGPU(v);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "PetscCUSPIndicesCreate" /* PetscCUSPIndicesCreate - creates the data structure needed by VecCUSPCopyToGPUSome_Public() Input Parameters: + n - the number of indices - indices - integer list of indices Output Parameter: . ci - the CUSPIndices object suitable to pass to VecCUSPCopyToGPUSome_Public() .seealso: PetscCUSPIndicesDestroy(), VecCUSPCopyToGPUSome_Public() */ PetscErrorCode PetscCUSPIndicesCreate(PetscInt ns,PetscInt *sendIndices,PetscInt nr,PetscInt *recvIndices,PetscCUSPIndices *ci) { PetscCUSPIndices cci; PetscFunctionBegin; cci = new struct _p_PetscCUSPIndices; /* this calculation assumes that the input indices are sorted */ cci->ns = sendIndices[ns-1]-sendIndices[0]+1; cci->sendLowestIndex = sendIndices[0]; cci->nr = recvIndices[nr-1]-recvIndices[0]+1; cci->recvLowestIndex = recvIndices[0]; *ci = cci; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "PetscCUSPIndicesDestroy" /* PetscCUSPIndicesDestroy - destroys the data structure needed by VecCUSPCopyToGPUSome_Public() Input Parameters: . ci - the CUSPIndices object suitable to pass to VecCUSPCopyToGPUSome_Public() .seealso: PetscCUSPIndicesCreate(), VecCUSPCopyToGPUSome_Public() */ PetscErrorCode PetscCUSPIndicesDestroy(PetscCUSPIndices *ci) { PetscFunctionBegin; if (!(*ci)) PetscFunctionReturn(0); try { if (ci) delete *ci; } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } *ci = 0; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyToGPUSome_Public" /* VecCUSPCopyToGPUSome_Public - Copies certain entries down to the GPU from the CPU of a vector Input Parameters: + v - the vector - indices - the requested indices, this should be created with CUSPIndicesCreate() */ PetscErrorCode VecCUSPCopyToGPUSome_Public(Vec v, PetscCUSPIndices ci) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyToGPUSome(v,ci);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyFromGPUSome_Public" /* VecCUSPCopyFromGPUSome_Public - Copies certain entries up to the CPU from the GPU of a vector Input Parameters: + v - the vector - indices - the requested indices, this should be created with CUSPIndicesCreate() */ PetscErrorCode VecCUSPCopyFromGPUSome_Public(Vec v, PetscCUSPIndices ci) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyFromGPUSome(v,ci);CHKERRQ(ierr); PetscFunctionReturn(0); } /*MC VECSEQCUSP - VECSEQCUSP = "seqcusp" - The basic sequential vector, modified to use CUSP Options Database Keys: . 
-vec_type seqcusp - sets the vector type to VECSEQCUSP during a call to VecSetFromOptions() Level: beginner .seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateSeqWithArray(), VECMPI, VecType, VecCreateMPI(), VecCreateSeq() M*/ /* for VecAYPX_SeqCUSP*/ namespace cusp { namespace blas { namespace detail { template <typename T> struct AYPX : public thrust::binary_function<T,T,T> { T alpha; AYPX(T _alpha) : alpha(_alpha) {} __host__ __device__ T operator()(T x, T y) { return alpha * y + x; } }; } template <typename ForwardIterator1, typename ForwardIterator2, typename ScalarType> void aypx(ForwardIterator1 first1,ForwardIterator1 last1,ForwardIterator2 first2,ScalarType alpha) { thrust::transform(first1,last1,first2,first2,detail::AYPX<ScalarType>(alpha)); } template <typename Array1, typename Array2, typename ScalarType> void aypx(const Array1& x, Array2& y, ScalarType alpha) { detail::assert_same_dimensions(x,y); aypx(x.begin(),x.end(),y.begin(),alpha); } } } #undef __FUNCT__ #define __FUNCT__ "VecAYPX_SeqCUSP" PetscErrorCode VecAYPX_SeqCUSP(Vec yin, PetscScalar alpha, Vec xin) { CUSPARRAY *xarray,*yarray; PetscErrorCode ierr; PetscFunctionBegin; if (alpha != 0.0) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::aypx(*xarray,*yarray,alpha); ierr = WaitForGPU();CHKERRCUSP(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogFlops(2.0*yin->map->n);CHKERRQ(ierr); } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecAXPY_SeqCUSP" PetscErrorCode VecAXPY_SeqCUSP(Vec yin,PetscScalar alpha,Vec xin) { CUSPARRAY *xarray,*yarray; PetscErrorCode ierr; PetscFunctionBegin; if (alpha != 0.0) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::axpy(*xarray,*yarray,alpha); ierr = WaitForGPU();CHKERRCUSP(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogFlops(2.0*yin->map->n);CHKERRQ(ierr); } PetscFunctionReturn(0); } struct VecCUSPPointwiseDivide { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t) / thrust::get<2>(t); } }; #undef __FUNCT__ #define __FUNCT__ "VecPointwiseDivide_SeqCUSP" PetscErrorCode VecPointwiseDivide_SeqCUSP(Vec win, Vec xin, Vec yin) { CUSPARRAY *warray=NULL,*xarray=NULL,*yarray=NULL; PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayWrite(win,&warray);CHKERRQ(ierr); try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( warray->begin(), xarray->begin(), yarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( warray->end(), xarray->end(), yarray->end())), VecCUSPPointwiseDivide()); ierr = WaitForGPU();CHKERRCUSP(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(win->map->n);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayWrite(win,&warray);CHKERRQ(ierr); 
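/* The zipped for_each above evaluates w[i] = x[i]/y[i] entirely on the GPU;
   one flop per entry was logged above, matching the CPU implementation. */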
PetscFunctionReturn(0); } struct VecCUSPWAXPY { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t) + thrust::get<2>(t)*thrust::get<3>(t); } }; struct VecCUSPSum { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t) + thrust::get<2>(t); } }; struct VecCUSPDiff { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t) - thrust::get<2>(t); } }; #undef __FUNCT__ #define __FUNCT__ "VecWAXPY_SeqCUSP" PetscErrorCode VecWAXPY_SeqCUSP(Vec win,PetscScalar alpha,Vec xin, Vec yin) { CUSPARRAY *xarray=NULL,*yarray=NULL,*warray=NULL; PetscErrorCode ierr; PetscFunctionBegin; if (alpha == 0.0) { ierr = VecCopy_SeqCUSP(yin,win);CHKERRQ(ierr); } else { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayWrite(win,&warray);CHKERRQ(ierr); if (alpha == 1.0) { try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( warray->begin(), yarray->begin(), xarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( warray->end(), yarray->end(), xarray->end())), VecCUSPSum()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(win->map->n);CHKERRQ(ierr); } else if (alpha == -1.0) { try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( warray->begin(), yarray->begin(), xarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( warray->end(), yarray->end(), xarray->end())), VecCUSPDiff()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(win->map->n);CHKERRQ(ierr); } else { try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( warray->begin(), yarray->begin(), thrust::make_constant_iterator(alpha), xarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( warray->end(), yarray->end(), thrust::make_constant_iterator(alpha), xarray->end())), VecCUSPWAXPY()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(2*win->map->n);CHKERRQ(ierr); } ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayWrite(win,&warray);CHKERRQ(ierr); } PetscFunctionReturn(0); } /* These functions are for the CUSP implementation of MAXPY with the loop unrolled on the CPU */ struct VecCUSPMAXPY4 { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { /*y += a1*x1 +a2*x2 + 13*x3 +a4*x4 */ thrust::get<0>(t) += thrust::get<1>(t)*thrust::get<2>(t)+thrust::get<3>(t)*thrust::get<4>(t)+thrust::get<5>(t)*thrust::get<6>(t)+thrust::get<7>(t)*thrust::get<8>(t); } }; struct VecCUSPMAXPY3 { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { /*y += a1*x1 +a2*x2 + a3*x3 */ thrust::get<0>(t) += thrust::get<1>(t)*thrust::get<2>(t)+thrust::get<3>(t)*thrust::get<4>(t)+thrust::get<5>(t)*thrust::get<6>(t); } }; struct VecCUSPMAXPY2 { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { /*y += a1*x1 +a2*x2*/ thrust::get<0>(t) += thrust::get<1>(t)*thrust::get<2>(t)+thrust::get<3>(t)*thrust::get<4>(t); } }; #undef __FUNCT__ #define __FUNCT__ "VecMAXPY_SeqCUSP" PetscErrorCode VecMAXPY_SeqCUSP(Vec xin, PetscInt nv,const PetscScalar *alpha,Vec *y) { PetscErrorCode ierr; CUSPARRAY *xarray,*yy0,*yy1,*yy2,*yy3; 
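/* The y vectors are consumed four at a time by a fused kernel pass; j_rem = nv & 0x3
   peels off the leftover 1, 2 or 3 vectors first (e.g. nv = 7 does one pass of 3 and
   then one pass of 4), so the main loop below always handles exactly four per iteration. */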
PetscInt n = xin->map->n,j,j_rem; PetscScalar alpha0,alpha1,alpha2,alpha3; PetscFunctionBegin; ierr = PetscLogFlops(nv*2.0*n);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr); switch (j_rem=nv&0x3) { case 3: alpha0 = alpha[0]; alpha1 = alpha[1]; alpha2 = alpha[2]; alpha += 3; ierr = VecCUSPGetArrayRead(y[0],&yy0);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[1],&yy1);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[2],&yy2);CHKERRQ(ierr); try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( xarray->begin(), thrust::make_constant_iterator(alpha0), yy0->begin(), thrust::make_constant_iterator(alpha1), yy1->begin(), thrust::make_constant_iterator(alpha2), yy2->begin())), thrust::make_zip_iterator( thrust::make_tuple( xarray->end(), thrust::make_constant_iterator(alpha0), yy0->end(), thrust::make_constant_iterator(alpha1), yy1->end(), thrust::make_constant_iterator(alpha2), yy2->end())), VecCUSPMAXPY3()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(y[0],&yy0);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(y[1],&yy1);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(y[2],&yy2);CHKERRQ(ierr); y += 3; break; case 2: alpha0 = alpha[0]; alpha1 = alpha[1]; alpha +=2; ierr = VecCUSPGetArrayRead(y[0],&yy0);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[1],&yy1);CHKERRQ(ierr); try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( xarray->begin(), thrust::make_constant_iterator(alpha0), yy0->begin(), thrust::make_constant_iterator(alpha1), yy1->begin())), thrust::make_zip_iterator( thrust::make_tuple( xarray->end(), thrust::make_constant_iterator(alpha0), yy0->end(), thrust::make_constant_iterator(alpha1), yy1->end())), VecCUSPMAXPY2()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } y +=2; break; case 1: alpha0 = *alpha++; ierr = VecAXPY_SeqCUSP(xin,alpha0,y[0]); y +=1; break; } for (j=j_rem; j<nv; j+=4) { alpha0 = alpha[0]; alpha1 = alpha[1]; alpha2 = alpha[2]; alpha3 = alpha[3]; alpha += 4; ierr = VecCUSPGetArrayRead(y[0],&yy0);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[1],&yy1);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[2],&yy2);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[3],&yy3);CHKERRQ(ierr); try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( xarray->begin(), thrust::make_constant_iterator(alpha0), yy0->begin(), thrust::make_constant_iterator(alpha1), yy1->begin(), thrust::make_constant_iterator(alpha2), yy2->begin(), thrust::make_constant_iterator(alpha3), yy3->begin())), thrust::make_zip_iterator( thrust::make_tuple( xarray->end(), thrust::make_constant_iterator(alpha0), yy0->end(), thrust::make_constant_iterator(alpha1), yy1->end(), thrust::make_constant_iterator(alpha2), yy2->end(), thrust::make_constant_iterator(alpha3), yy3->end())), VecCUSPMAXPY4()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(y[0],&yy0);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(y[1],&yy1);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(y[2],&yy2);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(y[3],&yy3);CHKERRQ(ierr); y += 4; } ierr = VecCUSPRestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecDot_SeqCUSP" PetscErrorCode VecDot_SeqCUSP(Vec xin,Vec yin,PetscScalar *z) { CUSPARRAY *xarray,*yarray; PetscErrorCode ierr; // PetscScalar *xptr,*yptr,*zgpu; //PetscReal tmp; 
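/* PETSc defines VecDot(x,y) as y^H x, which is why yarray is passed first to
   dotc()/dot() below: any conjugation falls on y rather than on x. */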
PetscFunctionBegin; //VecNorm_SeqCUSP(xin, NORM_2, &tmp); //VecNorm_SeqCUSP(yin, NORM_2, &tmp); ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); try { #if defined(PETSC_USE_COMPLEX) *z = cusp::blas::dotc(*yarray,*xarray); #else *z = cusp::blas::dot(*yarray,*xarray); #endif } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = WaitForGPU();CHKERRCUSP(ierr); if (xin->map->n >0) { ierr = PetscLogFlops(2.0*xin->map->n-1);CHKERRQ(ierr); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); PetscFunctionReturn(0); } // // CUDA kernels for MDot to follow // // set work group size to be a power of 2 (128 is usually a good compromise between portability and speed) #define MDOT_WORKGROUP_SIZE 128 #define MDOT_WORKGROUP_NUM 128 // M = 2: __global__ void VecMDot_SeqCUSP_kernel2(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[2*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = min((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[MDOT_WORKGROUP_SIZE]; } } // M = 3: __global__ void VecMDot_SeqCUSP_kernel3(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[3*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = min((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; PetscScalar group_sum2 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! 
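/* each thread accumulates one strided partial sum per y vector; the partial sums are
   staged in tmp_buffer (one MDOT_WORKGROUP_SIZE slice per vector) and combined by the
   shared-memory tree reduction further down, leaving one value per block in group_results */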
group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; group_sum2 += entry_x * y2[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x ] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE]; } } // M = 4: __global__ void VecMDot_SeqCUSP_kernel4(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,const PetscScalar *y3, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[4*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = min((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; PetscScalar group_sum2 = 0; PetscScalar group_sum3 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; group_sum2 += entry_x * y2[i]; group_sum3 += entry_x * y3[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] = group_sum3; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 3 * MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x ] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 3 * gridDim.x] = tmp_buffer[3 * MDOT_WORKGROUP_SIZE]; } } // M = 8: __global__ void VecMDot_SeqCUSP_kernel8(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,const PetscScalar *y3, const PetscScalar *y4,const PetscScalar *y5,const PetscScalar *y6,const PetscScalar *y7, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[8*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 
1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = min((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; PetscScalar group_sum2 = 0; PetscScalar group_sum3 = 0; PetscScalar group_sum4 = 0; PetscScalar group_sum5 = 0; PetscScalar group_sum6 = 0; PetscScalar group_sum7 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; group_sum2 += entry_x * y2[i]; group_sum3 += entry_x * y3[i]; group_sum4 += entry_x * y4[i]; group_sum5 += entry_x * y5[i]; group_sum6 += entry_x * y6[i]; group_sum7 += entry_x * y7[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] = group_sum3; tmp_buffer[threadIdx.x + 4 * MDOT_WORKGROUP_SIZE] = group_sum4; tmp_buffer[threadIdx.x + 5 * MDOT_WORKGROUP_SIZE] = group_sum5; tmp_buffer[threadIdx.x + 6 * MDOT_WORKGROUP_SIZE] = group_sum6; tmp_buffer[threadIdx.x + 7 * MDOT_WORKGROUP_SIZE] = group_sum7; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 3 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 4 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 4 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 5 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 5 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 6 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 6 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 7 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 7 * MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x ] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 3 * gridDim.x] = tmp_buffer[3 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 4 * gridDim.x] = tmp_buffer[4 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 5 * gridDim.x] = tmp_buffer[5 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 6 * gridDim.x] = tmp_buffer[6 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 7 * gridDim.x] = tmp_buffer[7 * MDOT_WORKGROUP_SIZE]; } } #undef __FUNCT__ #define __FUNCT__ "VecMDot_SeqCUSP" PetscErrorCode VecMDot_SeqCUSP(Vec xin,PetscInt nv,const Vec yin[],PetscScalar *z) { PetscErrorCode ierr; PetscInt i,j,n = xin->map->n,current_y_index = 0; CUSPARRAY *xarray,*y0array,*y1array,*y2array,*y3array,*y4array,*y5array,*y6array,*y7array; PetscScalar *group_results_gpu,*xptr,*y0ptr,*y1ptr,*y2ptr,*y3ptr,*y4ptr,*y5ptr,*y6ptr,*y7ptr; PetscScalar group_results_cpu[MDOT_WORKGROUP_NUM * 8]; // we process at most eight vectors in one kernel hipError_t cuda_ierr; PetscFunctionBegin; // 
allocate scratchpad memory for the results of individual work groups: if (nv <= 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"Number of vectors provided to VecMDot_SeqCUSP not positive."); cuda_ierr = hipMalloc((void**)&group_results_gpu, sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 8); if (cuda_ierr != hipSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not allocate CUDA work memory. Error code: %d", (int)cuda_ierr); ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); xptr = thrust::raw_pointer_cast(xarray->data()); while (current_y_index < nv) { switch (nv - current_y_index) { case 7: case 6: case 5: case 4: ierr = VecCUSPGetArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+3],&y3array);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) z[current_y_index] = cusp::blas::dot(*y0array,*xarray); z[current_y_index+1] = cusp::blas::dot(*y1array,*xarray); z[current_y_index+2] = cusp::blas::dot(*y2array,*xarray); z[current_y_index+3] = cusp::blas::dot(*y3array,*xarray); #else // extract raw device pointers: y0ptr = thrust::raw_pointer_cast(y0array->data()); y1ptr = thrust::raw_pointer_cast(y1array->data()); y2ptr = thrust::raw_pointer_cast(y2array->data()); y3ptr = thrust::raw_pointer_cast(y3array->data()); // run kernel: hipLaunchKernelGGL(( VecMDot_SeqCUSP_kernel4), dim3(MDOT_WORKGROUP_NUM),dim3(MDOT_WORKGROUP_SIZE), 0, 0, xptr,y0ptr,y1ptr,y2ptr,y3ptr,n,group_results_gpu); // copy results back to cuda_ierr = hipMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 4,hipMemcpyDeviceToHost); if (cuda_ierr != hipSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not copy CUDA buffer to host. Error code: %d", (int)cuda_ierr); // sum group results into z: for (j=0; j<4; ++j) { z[current_y_index + j] = 0; for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i]; } #endif ierr = VecCUSPRestoreArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+3],&y3array);CHKERRQ(ierr); current_y_index += 4; break; case 3: ierr = VecCUSPGetArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) z[current_y_index] = cusp::blas::dot(*y0array,*xarray); z[current_y_index+1] = cusp::blas::dot(*y1array,*xarray); z[current_y_index+2] = cusp::blas::dot(*y2array,*xarray); #else // extract raw device pointers: y0ptr = thrust::raw_pointer_cast(y0array->data()); y1ptr = thrust::raw_pointer_cast(y1array->data()); y2ptr = thrust::raw_pointer_cast(y2array->data()); // run kernel: hipLaunchKernelGGL(( VecMDot_SeqCUSP_kernel3), dim3(MDOT_WORKGROUP_NUM),dim3(MDOT_WORKGROUP_SIZE), 0, 0, xptr,y0ptr,y1ptr,y2ptr,n,group_results_gpu); // copy results back to cuda_ierr = hipMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 3,hipMemcpyDeviceToHost); if (cuda_ierr != hipSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not copy CUDA buffer to host. 
Error code: %d", (int)cuda_ierr); // sum group results into z: for (j=0; j<3; ++j) { z[current_y_index + j] = 0; for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i]; } #endif ierr = VecCUSPRestoreArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); current_y_index += 3; break; case 2: ierr = VecCUSPGetArrayRead(yin[current_y_index],&y0array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) z[current_y_index] = cusp::blas::dot(*y0array,*xarray); z[current_y_index+1] = cusp::blas::dot(*y1array,*xarray); #else // extract raw device pointers: y0ptr = thrust::raw_pointer_cast(y0array->data()); y1ptr = thrust::raw_pointer_cast(y1array->data()); // run kernel: hipLaunchKernelGGL(( VecMDot_SeqCUSP_kernel2), dim3(MDOT_WORKGROUP_NUM),dim3(MDOT_WORKGROUP_SIZE), 0, 0, xptr,y0ptr,y1ptr,n,group_results_gpu); // copy results back to cuda_ierr = hipMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 2,hipMemcpyDeviceToHost); if (cuda_ierr != hipSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not copy CUDA buffer to host. Error code: %d", (int)cuda_ierr); // sum group results into z: for (j=0; j<2; ++j) { z[current_y_index + j] = 0; for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i]; } #endif ierr = VecCUSPRestoreArrayRead(yin[current_y_index],&y0array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); current_y_index += 2; break; case 1: ierr = VecCUSPGetArrayRead(yin[current_y_index],&y0array);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) z[current_y_index] = cusp::blas::dotc(*y0array, *xarray); #else z[current_y_index] = cusp::blas::dot(*xarray, *y0array); #endif ierr = VecCUSPRestoreArrayRead(yin[current_y_index],&y0array);CHKERRQ(ierr); current_y_index += 1; break; default: // 8 or more vectors left ierr = VecCUSPGetArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+3],&y3array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+4],&y4array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+5],&y5array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+6],&y6array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+7],&y7array);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) z[current_y_index] = cusp::blas::dot(*y0array,*xarray); z[current_y_index+1] = cusp::blas::dot(*y1array,*xarray); z[current_y_index+2] = cusp::blas::dot(*y2array,*xarray); z[current_y_index+3] = cusp::blas::dot(*y3array,*xarray); z[current_y_index+4] = cusp::blas::dot(*y4array,*xarray); z[current_y_index+5] = cusp::blas::dot(*y5array,*xarray); z[current_y_index+6] = cusp::blas::dot(*y6array,*xarray); z[current_y_index+7] = cusp::blas::dot(*y7array,*xarray); #else // extract raw device pointers: y0ptr = thrust::raw_pointer_cast(y0array->data()); y1ptr = thrust::raw_pointer_cast(y1array->data()); y2ptr = thrust::raw_pointer_cast(y2array->data()); y3ptr = thrust::raw_pointer_cast(y3array->data()); y4ptr = thrust::raw_pointer_cast(y4array->data()); y5ptr = 
thrust::raw_pointer_cast(y5array->data()); y6ptr = thrust::raw_pointer_cast(y6array->data()); y7ptr = thrust::raw_pointer_cast(y7array->data()); // run kernel: hipLaunchKernelGGL(( VecMDot_SeqCUSP_kernel8), dim3(MDOT_WORKGROUP_NUM),dim3(MDOT_WORKGROUP_SIZE), 0, 0, xptr,y0ptr,y1ptr,y2ptr,y3ptr,y4ptr,y5ptr,y6ptr,y7ptr,n,group_results_gpu); // copy results back to cuda_ierr = hipMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 8,hipMemcpyDeviceToHost); if (cuda_ierr != hipSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not copy CUDA buffer to host. Error code: %d", (int)cuda_ierr); // sum group results into z: for (j=0; j<8; ++j) { z[current_y_index + j] = 0; for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i]; } #endif ierr = VecCUSPRestoreArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+3],&y3array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+4],&y4array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+5],&y5array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+6],&y6array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+7],&y7array);CHKERRQ(ierr); current_y_index += 8; break; } } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); cuda_ierr = hipFree(group_results_gpu); if (cuda_ierr != hipSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not copy CUDA buffer to host: %d", (int)cuda_ierr); ierr = PetscLogFlops(PetscMax(nv*(2.0*n-1),0.0));CHKERRQ(ierr); PetscFunctionReturn(0); } #undef MDOT_WORKGROUP_SIZE #undef MDOT_WORKGROUP_NUM #undef __FUNCT__ #define __FUNCT__ "VecSet_SeqCUSP" PetscErrorCode VecSet_SeqCUSP(Vec xin,PetscScalar alpha) { CUSPARRAY *xarray=NULL; PetscErrorCode ierr; PetscFunctionBegin; /* if there's a faster way to do the case alpha=0.0 on the GPU we should do that*/ ierr = VecCUSPGetArrayWrite(xin,&xarray);CHKERRQ(ierr); try { cusp::blas::fill(*xarray,alpha); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayWrite(xin,&xarray); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecScale_SeqCUSP" PetscErrorCode VecScale_SeqCUSP(Vec xin, PetscScalar alpha) { CUSPARRAY *xarray; PetscErrorCode ierr; PetscFunctionBegin; if (alpha == 0.0) { ierr = VecSet_SeqCUSP(xin,alpha);CHKERRQ(ierr); } else if (alpha != 1.0) { ierr = VecCUSPGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr); try { cusp::blas::scal(*xarray,alpha); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr); } ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = PetscLogFlops(xin->map->n);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecTDot_SeqCUSP" PetscErrorCode VecTDot_SeqCUSP(Vec xin,Vec yin,PetscScalar *z) { CUSPARRAY *xarray,*yarray; PetscErrorCode ierr; PetscFunctionBegin; //#if defined(PETSC_USE_COMPLEX) /*Not working for complex*/ //#else ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); try { *z = cusp::blas::dot(*xarray,*yarray); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } //#endif ierr = 
WaitForGPU();CHKERRCUSP(ierr); if (xin->map->n > 0) { ierr = PetscLogFlops(2.0*xin->map->n-1);CHKERRQ(ierr); } ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCopy_SeqCUSP" PetscErrorCode VecCopy_SeqCUSP(Vec xin,Vec yin) { CUSPARRAY *xarray,*yarray; PetscErrorCode ierr; PetscFunctionBegin; if (xin != yin) { if (xin->valid_GPU_array == PETSC_CUSP_GPU) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::copy(*xarray,*yarray); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayWrite(yin,&yarray);CHKERRQ(ierr); } else if (xin->valid_GPU_array == PETSC_CUSP_CPU) { /* copy in CPU if we are on the CPU*/ ierr = VecCopy_SeqCUSP_Private(xin,yin);CHKERRQ(ierr); } else if (xin->valid_GPU_array == PETSC_CUSP_BOTH) { /* if xin is valid in both places, see where yin is and copy there (because it's probably where we'll want to next use it) */ if (yin->valid_GPU_array == PETSC_CUSP_CPU) { /* copy in CPU */ ierr = VecCopy_SeqCUSP_Private(xin,yin);CHKERRQ(ierr); } else if (yin->valid_GPU_array == PETSC_CUSP_GPU) { /* copy in GPU */ ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::copy(*xarray,*yarray); ierr = WaitForGPU();CHKERRCUSP(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayWrite(yin,&yarray);CHKERRQ(ierr); } else if (yin->valid_GPU_array == PETSC_CUSP_BOTH) { /* xin and yin are both valid in both places (or yin was unallocated before the earlier call to allocatecheck default to copy in GPU (this is an arbitrary choice) */ ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::copy(*xarray,*yarray); ierr = WaitForGPU();CHKERRCUSP(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayWrite(yin,&yarray);CHKERRQ(ierr); } else { ierr = VecCopy_SeqCUSP_Private(xin,yin);CHKERRQ(ierr); } } } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecSwap_SeqCUSP" PetscErrorCode VecSwap_SeqCUSP(Vec xin,Vec yin) { PetscErrorCode ierr; PetscBLASInt one = 1,bn; CUSPARRAY *xarray,*yarray; PetscFunctionBegin; ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr); if (xin != yin) { ierr = VecCUSPGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) #if defined(PETSC_USE_REAL_SINGLE) hipblasCswap(bn,(cuFloatComplex*)VecCUSPCastToRawPtr(*xarray),one,(cuFloatComplex*)VecCUSPCastToRawPtr(*yarray),one); #else hipblasZswap(bn,(hipDoubleComplex*)VecCUSPCastToRawPtr(*xarray),one,(hipDoubleComplex*)VecCUSPCastToRawPtr(*yarray),one); #endif #else #if defined(PETSC_USE_REAL_SINGLE) hipblasSswap(bn,VecCUSPCastToRawPtr(*xarray),one,VecCUSPCastToRawPtr(*yarray),one); #else hipblasDswap(bn,VecCUSPCastToRawPtr(*xarray),one,VecCUSPCastToRawPtr(*yarray),one); #endif #endif ierr = hipblasGetError();CHKERRCUSP(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = 
VecCUSPRestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr); } PetscFunctionReturn(0); } struct VecCUSPAX { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t)*thrust::get<2>(t); } }; #undef __FUNCT__ #define __FUNCT__ "VecAXPBY_SeqCUSP" PetscErrorCode VecAXPBY_SeqCUSP(Vec yin,PetscScalar alpha,PetscScalar beta,Vec xin) { PetscErrorCode ierr; PetscScalar a = alpha,b = beta; CUSPARRAY *xarray,*yarray; PetscFunctionBegin; if (a == 0.0) { ierr = VecScale_SeqCUSP(yin,beta);CHKERRQ(ierr); } else if (b == 1.0) { ierr = VecAXPY_SeqCUSP(yin,alpha,xin);CHKERRQ(ierr); } else if (a == 1.0) { ierr = VecAYPX_SeqCUSP(yin,beta,xin);CHKERRQ(ierr); } else if (b == 0.0) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr); try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( yarray->begin(), thrust::make_constant_iterator(a), xarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( yarray->end(), thrust::make_constant_iterator(a), xarray->end())), VecCUSPAX()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(xin->map->n);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr); } else { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::axpby(*xarray,*yarray,*yarray,a,b); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = PetscLogFlops(3.0*xin->map->n);CHKERRQ(ierr); } PetscFunctionReturn(0); } /* structs below are for special cases of VecAXPBYPCZ_SeqCUSP */ struct VecCUSPXPBYPCZ { /* z = x + b*y + c*z */ template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t)*thrust::get<0>(t)+thrust::get<2>(t)+thrust::get<4>(t)*thrust::get<3>(t); } }; struct VecCUSPAXPBYPZ { /* z = ax + b*y + z */ template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) += thrust::get<2>(t)*thrust::get<1>(t)+thrust::get<4>(t)*thrust::get<3>(t); } }; #undef __FUNCT__ #define __FUNCT__ "VecAXPBYPCZ_SeqCUSP" PetscErrorCode VecAXPBYPCZ_SeqCUSP(Vec zin,PetscScalar alpha,PetscScalar beta,PetscScalar gamma,Vec xin,Vec yin) { PetscErrorCode ierr; PetscInt n = zin->map->n; CUSPARRAY *xarray,*yarray,*zarray; PetscFunctionBegin; ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(zin,&zarray);CHKERRQ(ierr); if (alpha == 1.0) { try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( zarray->begin(), thrust::make_constant_iterator(gamma), xarray->begin(), yarray->begin(), thrust::make_constant_iterator(beta))), thrust::make_zip_iterator( thrust::make_tuple( zarray->end(), thrust::make_constant_iterator(gamma), xarray->end(), yarray->end(), thrust::make_constant_iterator(beta))), VecCUSPXPBYPCZ()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(4.0*n);CHKERRQ(ierr); } else if (gamma == 1.0) { try { thrust::for_each( thrust::make_zip_iterator( 
thrust::make_tuple( zarray->begin(), xarray->begin(), thrust::make_constant_iterator(alpha), yarray->begin(), thrust::make_constant_iterator(beta))), thrust::make_zip_iterator( thrust::make_tuple( zarray->end(), xarray->end(), thrust::make_constant_iterator(alpha), yarray->end(), thrust::make_constant_iterator(beta))), VecCUSPAXPBYPZ()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(4.0*n);CHKERRQ(ierr); } else { try { cusp::blas::axpbypcz(*xarray,*yarray,*zarray,*zarray,alpha,beta,gamma); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayReadWrite(zin,&zarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogFlops(5.0*n);CHKERRQ(ierr); } ierr = WaitForGPU();CHKERRCUSP(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecPointwiseMult_SeqCUSP" PetscErrorCode VecPointwiseMult_SeqCUSP(Vec win,Vec xin,Vec yin) { PetscErrorCode ierr; PetscInt n = win->map->n; CUSPARRAY *xarray,*yarray,*warray; PetscFunctionBegin; ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(win,&warray);CHKERRQ(ierr); try { cusp::blas::xmy(*xarray,*yarray,*warray); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(win,&warray);CHKERRQ(ierr); ierr = PetscLogFlops(n);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); PetscFunctionReturn(0); } /* should do infinity norm in cusp */ #undef __FUNCT__ #define __FUNCT__ "VecNorm_SeqCUSP" PetscErrorCode VecNorm_SeqCUSP(Vec xin,NormType type,PetscReal *z) { const PetscScalar *xx; PetscErrorCode ierr; PetscInt n = xin->map->n; PetscBLASInt one = 1, bn; CUSPARRAY *xarray; PetscFunctionBegin; ierr = PetscBLASIntCast(n,&bn);CHKERRQ(ierr); if (type == NORM_2 || type == NORM_FROBENIUS) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); try { *z = cusp::blas::nrm2(*xarray); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = PetscLogFlops(PetscMax(2.0*n-1,0.0));CHKERRQ(ierr); } else if (type == NORM_INFINITY) { PetscInt i; PetscReal max = 0.0,tmp; ierr = VecGetArrayRead(xin,&xx);CHKERRQ(ierr); for (i=0; i<n; i++) { if ((tmp = PetscAbsScalar(*xx)) > max) max = tmp; /* check special case of tmp == NaN */ if (tmp != tmp) {max = tmp; break;} xx++; } ierr = VecRestoreArrayRead(xin,&xx);CHKERRQ(ierr); *z = max; } else if (type == NORM_1) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) #if defined(PETSC_USE_REAL_SINGLE) *z = hipblasScasum(bn,(cuFloatComplex*)VecCUSPCastToRawPtr(*xarray),one); #else *z = hipblasDzasum(bn,(hipDoubleComplex*)VecCUSPCastToRawPtr(*xarray),one); #endif #else #if defined(PETSC_USE_REAL_SINGLE) *z = hipblasSasum(bn,VecCUSPCastToRawPtr(*xarray),one); #else *z = hipblasDasum(bn,VecCUSPCastToRawPtr(*xarray),one); #endif #endif ierr = hipblasGetError();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = PetscLogFlops(PetscMax(n-1.0,0.0));CHKERRQ(ierr); } else if (type == NORM_1_AND_2) { ierr = 
VecNorm_SeqCUSP(xin,NORM_1,z);CHKERRQ(ierr); ierr = VecNorm_SeqCUSP(xin,NORM_2,z+1);CHKERRQ(ierr); } PetscFunctionReturn(0); } /*the following few functions should be modified to actually work with the GPU so they don't force unneccesary allocation of CPU memory */ #undef __FUNCT__ #define __FUNCT__ "VecSetRandom_SeqCUSP" PetscErrorCode VecSetRandom_SeqCUSP(Vec xin,PetscRandom r) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecSetRandom_SeqCUSP_Private(xin,r);CHKERRQ(ierr); xin->valid_GPU_array = PETSC_CUSP_CPU; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecResetArray_SeqCUSP" PetscErrorCode VecResetArray_SeqCUSP(Vec vin) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyFromGPU(vin);CHKERRQ(ierr); ierr = VecResetArray_SeqCUSP_Private(vin);CHKERRQ(ierr); vin->valid_GPU_array = PETSC_CUSP_CPU; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecPlaceArray_SeqCUSP" PetscErrorCode VecPlaceArray_SeqCUSP(Vec vin,const PetscScalar *a) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyFromGPU(vin);CHKERRQ(ierr); ierr = VecPlaceArray_Seq(vin,a);CHKERRQ(ierr); vin->valid_GPU_array = PETSC_CUSP_CPU; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecReplaceArray_SeqCUSP" PetscErrorCode VecReplaceArray_SeqCUSP(Vec vin,const PetscScalar *a) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyFromGPU(vin);CHKERRQ(ierr); ierr = VecReplaceArray_Seq(vin,a);CHKERRQ(ierr); vin->valid_GPU_array = PETSC_CUSP_CPU; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCreateSeqCUSP" /*@ VecCreateSeqCUSP - Creates a standard, sequential array-style vector. Collective on MPI_Comm Input Parameter: + comm - the communicator, should be PETSC_COMM_SELF - n - the vector length Output Parameter: . V - the vector Notes: Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the same type as an existing vector. Level: intermediate Concepts: vectors^creating sequential .seealso: VecCreateMPI(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost() @*/ PetscErrorCode VecCreateSeqCUSP(MPI_Comm comm,PetscInt n,Vec *v) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCreate(comm,v);CHKERRQ(ierr); ierr = VecSetSizes(*v,n,n);CHKERRQ(ierr); ierr = VecSetType(*v,VECSEQCUSP);CHKERRQ(ierr); PetscFunctionReturn(0); } /*The following template functions are for VecDotNorm2_SeqCUSP. 
Note that there is no complex support as currently written*/ template <typename T> struct cuspdotnormcalculate : thrust::unary_function<T,T> { __host__ __device__ T operator()(T x) { #if defined(PETSC_USE_COMPLEX) //return thrust::make_tuple(thrust::get<0>(x)*thrust::get<1>(x), thrust::get<1>(x)*thrust::get<1>(x)); #else return thrust::make_tuple(thrust::get<0>(x)*thrust::get<1>(x), thrust::get<1>(x)*thrust::get<1>(x)); #endif } }; template <typename T> struct cuspdotnormreduce : thrust::binary_function<T,T,T> { __host__ __device__ T operator()(T x,T y) { return thrust::make_tuple(thrust::get<0>(x)+thrust::get<0>(y), thrust::get<1>(x)+thrust::get<1>(y)); } }; #undef __FUNCT__ #define __FUNCT__ "VecDotNorm2_SeqCUSP" PetscErrorCode VecDotNorm2_SeqCUSP(Vec s, Vec t, PetscScalar *dp, PetscScalar *nm) { PetscErrorCode ierr; PetscScalar zero = 0.0; PetscReal n=s->map->n; thrust::tuple<PetscScalar,PetscScalar> result; CUSPARRAY *sarray,*tarray; PetscFunctionBegin; /*ierr = VecCUSPCopyToGPU(s);CHKERRQ(ierr); ierr = VecCUSPCopyToGPU(t);CHKERRQ(ierr);*/ ierr = VecCUSPGetArrayRead(s,&sarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(t,&tarray);CHKERRQ(ierr); try { #if defined(PETSC_USE_COMPLEX) ierr = VecDot_SeqCUSP(s,t,dp);CHKERRQ(ierr); ierr = VecDot_SeqCUSP(t,t,nm);CHKERRQ(ierr); //printf("VecDotNorm2_SeqCUSP=%1.5g,%1.5g\n",PetscRealPart(*dp),PetscImaginaryPart(*dp)); //printf("VecDotNorm2_SeqCUSP=%1.5g,%1.5g\n",PetscRealPart(*nm),PetscImaginaryPart(*nm)); #else result = thrust::transform_reduce( thrust::make_zip_iterator( thrust::make_tuple( sarray->begin(), tarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( sarray->end(), tarray->end())), cuspdotnormcalculate<thrust::tuple<PetscScalar,PetscScalar> >(), thrust::make_tuple(zero,zero), /*init */ cuspdotnormreduce<thrust::tuple<PetscScalar, PetscScalar> >()); /* binary function */ *dp = thrust::get<0>(result); *nm = thrust::get<1>(result); #endif } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(s,&sarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(t,&tarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = PetscLogFlops(4.0*n);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecDuplicate_SeqCUSP" PetscErrorCode VecDuplicate_SeqCUSP(Vec win,Vec *V) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCreateSeqCUSP(PetscObjectComm((PetscObject)win),win->map->n,V);CHKERRQ(ierr); ierr = PetscLayoutReference(win->map,&(*V)->map);CHKERRQ(ierr); ierr = PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*V))->olist);CHKERRQ(ierr); ierr = PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*V))->qlist);CHKERRQ(ierr); (*V)->stash.ignorenegidx = win->stash.ignorenegidx; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecDestroy_SeqCUSP" PetscErrorCode VecDestroy_SeqCUSP(Vec v) { PetscErrorCode ierr; Vec_Seq *s = (Vec_Seq*)v->data; hipError_t err; PetscFunctionBegin; try { if (v->spptr) { delete ((Vec_CUSP*)v->spptr)->GPUarray; err = hipStreamDestroy(((Vec_CUSP*)v->spptr)->stream);CHKERRCUSP(err); /* If the host array has been registered as (page-locked) mapped, one must unregister the buffer */ if (((Vec_CUSP*)v->spptr)->hostDataRegisteredAsPageLocked) { err = hipHostUnregister(s->array);CHKERRCUSP(err); } delete (Vec_CUSP*) v->spptr; } } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecDestroy_SeqCUSP_Private(v);CHKERRQ(ierr); PetscFunctionReturn(0); 
} #if defined(PETSC_USE_COMPLEX) struct conjugate { __host__ __device__ PetscScalar operator()(PetscScalar x) { return cusp::conj(x); } }; #endif #undef __FUNCT__ #define __FUNCT__ "VecConjugate_SeqCUSP" PetscErrorCode VecConjugate_SeqCUSP(Vec xin) { PetscErrorCode ierr; CUSPARRAY *xarray; PetscFunctionBegin; ierr = VecCUSPGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) thrust::transform(xarray->begin(), xarray->end(), xarray->begin(), conjugate()); #endif ierr = VecCUSPRestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCreate_SeqCUSP" PETSC_EXTERN PetscErrorCode VecCreate_SeqCUSP(Vec V) { PetscErrorCode ierr; PetscMPIInt size; PetscFunctionBegin; ierr = MPI_Comm_size(PetscObjectComm((PetscObject)V),&size);CHKERRQ(ierr); if (size > 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Cannot create VECSEQCUSP on more than one process"); ierr = VecCreate_Seq_Private(V,0);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)V,VECSEQCUSP);CHKERRQ(ierr); V->ops->dot = VecDot_SeqCUSP; V->ops->norm = VecNorm_SeqCUSP; V->ops->tdot = VecTDot_SeqCUSP; V->ops->scale = VecScale_SeqCUSP; V->ops->copy = VecCopy_SeqCUSP; V->ops->set = VecSet_SeqCUSP; V->ops->swap = VecSwap_SeqCUSP; V->ops->axpy = VecAXPY_SeqCUSP; V->ops->axpby = VecAXPBY_SeqCUSP; V->ops->axpbypcz = VecAXPBYPCZ_SeqCUSP; V->ops->pointwisemult = VecPointwiseMult_SeqCUSP; V->ops->pointwisedivide = VecPointwiseDivide_SeqCUSP; V->ops->setrandom = VecSetRandom_SeqCUSP; V->ops->dot_local = VecDot_SeqCUSP; V->ops->tdot_local = VecTDot_SeqCUSP; V->ops->norm_local = VecNorm_SeqCUSP; V->ops->mdot_local = VecMDot_SeqCUSP; V->ops->maxpy = VecMAXPY_SeqCUSP; V->ops->mdot = VecMDot_SeqCUSP; V->ops->aypx = VecAYPX_SeqCUSP; V->ops->waxpy = VecWAXPY_SeqCUSP; V->ops->dotnorm2 = VecDotNorm2_SeqCUSP; V->ops->placearray = VecPlaceArray_SeqCUSP; V->ops->replacearray = VecReplaceArray_SeqCUSP; V->ops->resetarray = VecResetArray_SeqCUSP; V->ops->destroy = VecDestroy_SeqCUSP; V->ops->duplicate = VecDuplicate_SeqCUSP; V->ops->conjugate = VecConjugate_SeqCUSP; ierr = VecCUSPAllocateCheck(V);CHKERRQ(ierr); V->valid_GPU_array = PETSC_CUSP_GPU; ierr = VecSet(V,0.0);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPGetArrayReadWrite" PETSC_EXTERN PetscErrorCode VecCUSPGetArrayReadWrite(Vec v, CUSPARRAY **a) { PetscErrorCode ierr; PetscFunctionBegin; *a = 0; ierr = VecCUSPCopyToGPU(v);CHKERRQ(ierr); *a = ((Vec_CUSP*)v->spptr)->GPUarray; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPRestoreArrayReadWrite" PETSC_EXTERN PetscErrorCode VecCUSPRestoreArrayReadWrite(Vec v, CUSPARRAY **a) { PetscErrorCode ierr; PetscFunctionBegin; v->valid_GPU_array = PETSC_CUSP_GPU; ierr = PetscObjectStateIncrease((PetscObject)v);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPGetArrayRead" PETSC_EXTERN PetscErrorCode VecCUSPGetArrayRead(Vec v, CUSPARRAY **a) { PetscErrorCode ierr; PetscFunctionBegin; *a = 0; ierr = VecCUSPCopyToGPU(v);CHKERRQ(ierr); *a = ((Vec_CUSP*)v->spptr)->GPUarray; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPRestoreArrayRead" PETSC_EXTERN PetscErrorCode VecCUSPRestoreArrayRead(Vec v, CUSPARRAY **a) { PetscFunctionBegin; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPGetArrayWrite" PETSC_EXTERN PetscErrorCode VecCUSPGetArrayWrite(Vec v, CUSPARRAY **a) { PetscErrorCode ierr; PetscFunctionBegin; *a = 0; ierr = 
VecCUSPAllocateCheck(v);CHKERRQ(ierr); *a = ((Vec_CUSP*)v->spptr)->GPUarray; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPRestoreArrayWrite" PETSC_EXTERN PetscErrorCode VecCUSPRestoreArrayWrite(Vec v, CUSPARRAY **a) { PetscErrorCode ierr; PetscFunctionBegin; v->valid_GPU_array = PETSC_CUSP_GPU; ierr = PetscObjectStateIncrease((PetscObject)v);CHKERRQ(ierr); PetscFunctionReturn(0); }
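/* A minimal usage sketch, not part of the implementation above: it assumes only the
   public PETSc Vec API plus VecCreateSeqCUSP() as defined in this file, and the routine
   name ex_seqcusp_usage is hypothetical. It creates a VECSEQCUSP vector pair, performs a
   few of the GPU-dispatched operations implemented above, and reads back scalar results. */
static PetscErrorCode ex_seqcusp_usage(void)
{
  Vec            x,y;
  PetscScalar    dot;
  PetscReal      nrm;
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = VecCreateSeqCUSP(PETSC_COMM_SELF,128,&x);CHKERRQ(ierr); /* allocates the CUSPARRAY on the GPU */
  ierr = VecDuplicate(x,&y);CHKERRQ(ierr);                       /* dispatches to VecDuplicate_SeqCUSP */
  ierr = VecSet(x,1.0);CHKERRQ(ierr);                            /* VecSet_SeqCUSP -> cusp::blas::fill */
  ierr = VecSet(y,2.0);CHKERRQ(ierr);
  ierr = VecAXPY(y,3.0,x);CHKERRQ(ierr);                         /* VecAXPY_SeqCUSP -> cusp::blas::axpy */
  ierr = VecDot(x,y,&dot);CHKERRQ(ierr);                         /* VecDot_SeqCUSP, returns y^H x */
  ierr = VecNorm(y,NORM_2,&nrm);CHKERRQ(ierr);                   /* VecNorm_SeqCUSP -> cusp::blas::nrm2 */
  ierr = VecDestroy(&x);CHKERRQ(ierr);
  ierr = VecDestroy(&y);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}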
882844634b2083db6c4aa7013e9548ba39de1eb8.cu
/* Implements the sequential cusp vectors. */ #include <petscconf.h> PETSC_CUDA_EXTERN_C_BEGIN #include <petsc-private/vecimpl.h> /*I "petscvec.h" I*/ #include <../src/vec/vec/impls/dvecimpl.h> PETSC_CUDA_EXTERN_C_END #include <../src/vec/vec/impls/seq/seqcusp/cuspvecimpl.h> #include <cuda_runtime.h> #undef __FUNCT__ #define __FUNCT__ "VecCUSPAllocateCheckHost" /* Allocates space for the vector array on the Host if it does not exist. Does NOT change the PetscCUSPFlag for the vector Does NOT zero the CUSP array */ PetscErrorCode VecCUSPAllocateCheckHost(Vec v) { PetscErrorCode ierr; cudaError_t err; PetscScalar *array; Vec_Seq *s; PetscInt n = v->map->n; PetscFunctionBegin; s = (Vec_Seq*)v->data; ierr = VecCUSPAllocateCheck(v);CHKERRQ(ierr); if (s->array == 0) { ierr = PetscMalloc(n*sizeof(PetscScalar),&array);CHKERRQ(ierr); ierr = PetscLogObjectMemory((PetscObject)v,n*sizeof(PetscScalar));CHKERRQ(ierr); s->array = array; s->array_allocated = array; err = cudaHostRegister(s->array, n*sizeof(PetscScalar),cudaHostRegisterMapped);CHKERRCUSP(err); ((Vec_CUSP*)v->spptr)->hostDataRegisteredAsPageLocked = PETSC_TRUE; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPAllocateCheck" /* Allocates space for the vector array on the GPU if it does not exist. Does NOT change the PetscCUSPFlag for the vector Does NOT zero the CUSP array */ PetscErrorCode VecCUSPAllocateCheck(Vec v) { cudaError_t err; cudaStream_t stream; Vec_Seq *s = (Vec_Seq*)v->data; PetscFunctionBegin; // First allocate memory on the GPU if needed if (!v->spptr) { try { v->spptr = new Vec_CUSP; ((Vec_CUSP*)v->spptr)->GPUarray = new CUSPARRAY; ((Vec_CUSP*)v->spptr)->GPUarray->resize((PetscBLASInt)v->map->n); err = cudaStreamCreate(&stream);CHKERRCUSP(err); ((Vec_CUSP*)v->spptr)->stream = stream; ((Vec_CUSP*)v->spptr)->hostDataRegisteredAsPageLocked = PETSC_FALSE; /* If the array is already allocated, one can register it as (page-locked) mapped. 
This can substantially accelerate data transfer across the PCI Express */ if (s->array) { err = cudaHostRegister(s->array, v->map->n*sizeof(PetscScalar),cudaHostRegisterMapped);CHKERRCUSP(err); ((Vec_CUSP*)v->spptr)->hostDataRegisteredAsPageLocked = PETSC_TRUE; } v->ops->destroy = VecDestroy_SeqCUSP; } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyToGPU" /* Copies a vector from the CPU to the GPU unless we already have an up-to-date copy on the GPU */ PetscErrorCode VecCUSPCopyToGPU(Vec v) { PetscErrorCode ierr; cudaError_t err; Vec_CUSP *veccusp; CUSPARRAY *varray; cudaStream_t stream; PetscFunctionBegin; ierr = VecCUSPAllocateCheck(v);CHKERRQ(ierr); if (v->valid_GPU_array == PETSC_CUSP_CPU) { ierr = PetscLogEventBegin(VEC_CUSPCopyToGPU,v,0,0,0);CHKERRQ(ierr); try { veccusp=(Vec_CUSP*)v->spptr; varray=veccusp->GPUarray; stream=veccusp->stream; err = cudaMemcpyAsync(varray->data().get(), *(PetscScalar**)v->data, v->map->n*sizeof(PetscScalar), cudaMemcpyHostToDevice, stream);CHKERRCUSP(err); err = cudaStreamSynchronize(stream);CHKERRCUSP(err); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogEventEnd(VEC_CUSPCopyToGPU,v,0,0,0);CHKERRQ(ierr); v->valid_GPU_array = PETSC_CUSP_BOTH; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyToGPUSome" static PetscErrorCode VecCUSPCopyToGPUSome(Vec v, PetscCUSPIndices ci) { CUSPARRAY *varray; PetscErrorCode ierr; cudaError_t err; PetscScalar *cpuPtr, *gpuPtr; cudaStream_t stream; Vec_Seq *s; PetscFunctionBegin; ierr = VecCUSPAllocateCheck(v);CHKERRQ(ierr); if (v->valid_GPU_array == PETSC_CUSP_CPU) { stream=((Vec_CUSP*)v->spptr)->stream; s = (Vec_Seq*)v->data; ierr = PetscLogEventBegin(VEC_CUSPCopyToGPUSome,v,0,0,0);CHKERRQ(ierr); varray = ((Vec_CUSP*)v->spptr)->GPUarray; gpuPtr = varray->data().get() + ci->recvLowestIndex; cpuPtr = s->array + ci->recvLowestIndex; /* Note : this code copies the smallest contiguous chunk of data containing ALL of the indices */ err = cudaMemcpyAsync(gpuPtr, cpuPtr, ci->nr*sizeof(PetscScalar), cudaMemcpyHostToDevice, stream);CHKERRCUSP(err); err = cudaStreamSynchronize(stream);CHKERRCUSP(err); #if 0 Vec_Seq *s; s = (Vec_Seq*)v->data; CUSPINTARRAYCPU *indicesCPU=&ci->recvIndicesCPU; CUSPINTARRAYGPU *indicesGPU=&ci->recvIndicesGPU; thrust::copy(thrust::make_permutation_iterator(s->array,indicesCPU->begin()), thrust::make_permutation_iterator(s->array,indicesCPU->end()), thrust::make_permutation_iterator(varray->begin(),indicesGPU->begin())); #endif // Set the buffer states v->valid_GPU_array = PETSC_CUSP_BOTH; ierr = PetscLogEventEnd(VEC_CUSPCopyToGPUSome,v,0,0,0);CHKERRQ(ierr); } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyFromGPU" /* VecCUSPCopyFromGPU - Copies a vector from the GPU to the CPU unless we already have an up-to-date copy on the CPU */ PetscErrorCode VecCUSPCopyFromGPU(Vec v) { PetscErrorCode ierr; cudaError_t err; Vec_CUSP *veccusp; CUSPARRAY *varray; cudaStream_t stream; PetscFunctionBegin; ierr = VecCUSPAllocateCheckHost(v);CHKERRQ(ierr); if (v->valid_GPU_array == PETSC_CUSP_GPU) { ierr = PetscLogEventBegin(VEC_CUSPCopyFromGPU,v,0,0,0);CHKERRQ(ierr); try { veccusp=(Vec_CUSP*)v->spptr; varray=veccusp->GPUarray; stream=veccusp->stream; err = cudaMemcpyAsync(*(PetscScalar**)v->data, varray->data().get(), v->map->n*sizeof(PetscScalar), cudaMemcpyDeviceToHost, stream);CHKERRCUSP(err); err = 
cudaStreamSynchronize(stream);CHKERRCUSP(err); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogEventEnd(VEC_CUSPCopyFromGPU,v,0,0,0);CHKERRQ(ierr); v->valid_GPU_array = PETSC_CUSP_BOTH; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyFromGPUSome" /* Note that this function only copies *some* of the values up from the GPU to CPU, which means that we need recombine the data at some point before using any of the standard functions. We could add another few flag-types to keep track of this, or treat things like VecGetArray VecRestoreArray where you have to always call in pairs */ PetscErrorCode VecCUSPCopyFromGPUSome(Vec v, PetscCUSPIndices ci) { CUSPARRAY *varray; PetscErrorCode ierr; cudaError_t err; PetscScalar *cpuPtr, *gpuPtr; cudaStream_t stream; Vec_Seq *s; PetscFunctionBegin; ierr = VecCUSPAllocateCheckHost(v);CHKERRQ(ierr); if (v->valid_GPU_array == PETSC_CUSP_GPU) { ierr = PetscLogEventBegin(VEC_CUSPCopyFromGPUSome,v,0,0,0);CHKERRQ(ierr); stream=((Vec_CUSP*)v->spptr)->stream; varray=((Vec_CUSP*)v->spptr)->GPUarray; s = (Vec_Seq*)v->data; gpuPtr = varray->data().get() + ci->sendLowestIndex; cpuPtr = s->array + ci->sendLowestIndex; /* Note : this code copies the smallest contiguous chunk of data containing ALL of the indices */ err = cudaMemcpyAsync(cpuPtr, gpuPtr, ci->ns*sizeof(PetscScalar), cudaMemcpyDeviceToHost, stream);CHKERRCUSP(err); err = cudaStreamSynchronize(stream);CHKERRCUSP(err); #if 0 Vec_Seq *s; s = (Vec_Seq*)v->data; CUSPINTARRAYCPU *indicesCPU=&ci->sendIndicesCPU; CUSPINTARRAYGPU *indicesGPU=&ci->sendIndicesGPU; thrust::copy(thrust::make_permutation_iterator(varray->begin(),indicesGPU->begin()), thrust::make_permutation_iterator(varray->begin(),indicesGPU->end()), thrust::make_permutation_iterator(s->array,indicesCPU->begin())); #endif ierr = VecCUSPRestoreArrayRead(v,&varray);CHKERRQ(ierr); ierr = PetscLogEventEnd(VEC_CUSPCopyFromGPUSome,v,0,0,0);CHKERRQ(ierr); v->valid_GPU_array = PETSC_CUSP_BOTH; } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCopy_SeqCUSP_Private" static PetscErrorCode VecCopy_SeqCUSP_Private(Vec xin,Vec yin) { PetscScalar *ya; const PetscScalar *xa; PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPAllocateCheckHost(xin); ierr = VecCUSPAllocateCheckHost(yin); if (xin != yin) { ierr = VecGetArrayRead(xin,&xa);CHKERRQ(ierr); ierr = VecGetArray(yin,&ya);CHKERRQ(ierr); ierr = PetscMemcpy(ya,xa,xin->map->n*sizeof(PetscScalar));CHKERRQ(ierr); ierr = VecRestoreArrayRead(xin,&xa);CHKERRQ(ierr); ierr = VecRestoreArray(yin,&ya);CHKERRQ(ierr); } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecSetRandom_SeqCUSP_Private" static PetscErrorCode VecSetRandom_SeqCUSP_Private(Vec xin,PetscRandom r) { PetscErrorCode ierr; PetscInt n = xin->map->n,i; PetscScalar *xx; PetscFunctionBegin; ierr = VecGetArray(xin,&xx);CHKERRQ(ierr); for (i=0; i<n; i++) {ierr = PetscRandomGetValue(r,&xx[i]);CHKERRQ(ierr);} ierr = VecRestoreArray(xin,&xx);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecDestroy_SeqCUSP_Private" static PetscErrorCode VecDestroy_SeqCUSP_Private(Vec v) { Vec_Seq *vs = (Vec_Seq*)v->data; PetscErrorCode ierr; PetscFunctionBegin; ierr = PetscObjectAMSViewOff(v);CHKERRQ(ierr); #if defined(PETSC_USE_LOG) PetscLogObjectState((PetscObject)v,"Length=%D",v->map->n); #endif if (vs->array_allocated) ierr = PetscFree(vs->array_allocated);CHKERRQ(ierr); ierr = PetscFree(vs);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef 
__FUNCT__ #define __FUNCT__ "VecResetArray_SeqCUSP_Private" static PetscErrorCode VecResetArray_SeqCUSP_Private(Vec vin) { Vec_Seq *v = (Vec_Seq*)vin->data; PetscFunctionBegin; v->array = v->unplacedarray; v->unplacedarray = 0; PetscFunctionReturn(0); } /* these following 3 public versions are necessary because we use CUSP in the regular PETSc code and these need to be called from plain C code. */ #undef __FUNCT__ #define __FUNCT__ "VecCUSPAllocateCheck_Public" PetscErrorCode VecCUSPAllocateCheck_Public(Vec v) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPAllocateCheck(v);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyToGPU_Public" PetscErrorCode VecCUSPCopyToGPU_Public(Vec v) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyToGPU(v);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "PetscCUSPIndicesCreate" /* PetscCUSPIndicesCreate - creates the data structure needed by VecCUSPCopyToGPUSome_Public() Input Parameters: + n - the number of indices - indices - integer list of indices Output Parameter: . ci - the CUSPIndices object suitable to pass to VecCUSPCopyToGPUSome_Public() .seealso: PetscCUSPIndicesDestroy(), VecCUSPCopyToGPUSome_Public() */ PetscErrorCode PetscCUSPIndicesCreate(PetscInt ns,PetscInt *sendIndices,PetscInt nr,PetscInt *recvIndices,PetscCUSPIndices *ci) { PetscCUSPIndices cci; PetscFunctionBegin; cci = new struct _p_PetscCUSPIndices; /* this calculation assumes that the input indices are sorted */ cci->ns = sendIndices[ns-1]-sendIndices[0]+1; cci->sendLowestIndex = sendIndices[0]; cci->nr = recvIndices[nr-1]-recvIndices[0]+1; cci->recvLowestIndex = recvIndices[0]; *ci = cci; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "PetscCUSPIndicesDestroy" /* PetscCUSPIndicesDestroy - destroys the data structure needed by VecCUSPCopyToGPUSome_Public() Input Parameters: . ci - the CUSPIndices object suitable to pass to VecCUSPCopyToGPUSome_Public() .seealso: PetscCUSPIndicesCreate(), VecCUSPCopyToGPUSome_Public() */ PetscErrorCode PetscCUSPIndicesDestroy(PetscCUSPIndices *ci) { PetscFunctionBegin; if (!(*ci)) PetscFunctionReturn(0); try { if (ci) delete *ci; } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } *ci = 0; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyToGPUSome_Public" /* VecCUSPCopyToGPUSome_Public - Copies certain entries down to the GPU from the CPU of a vector Input Parameters: + v - the vector - indices - the requested indices, this should be created with CUSPIndicesCreate() */ PetscErrorCode VecCUSPCopyToGPUSome_Public(Vec v, PetscCUSPIndices ci) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyToGPUSome(v,ci);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPCopyFromGPUSome_Public" /* VecCUSPCopyFromGPUSome_Public - Copies certain entries up to the CPU from the GPU of a vector Input Parameters: + v - the vector - indices - the requested indices, this should be created with CUSPIndicesCreate() */ PetscErrorCode VecCUSPCopyFromGPUSome_Public(Vec v, PetscCUSPIndices ci) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyFromGPUSome(v,ci);CHKERRQ(ierr); PetscFunctionReturn(0); } /*MC VECSEQCUSP - VECSEQCUSP = "seqcusp" - The basic sequential vector, modified to use CUSP Options Database Keys: . 
-vec_type seqcusp - sets the vector type to VECSEQCUSP during a call to VecSetFromOptions() Level: beginner .seealso: VecCreate(), VecSetType(), VecSetFromOptions(), VecCreateSeqWithArray(), VECMPI, VecType, VecCreateMPI(), VecCreateSeq() M*/ /* for VecAYPX_SeqCUSP*/ namespace cusp { namespace blas { namespace detail { template <typename T> struct AYPX : public thrust::binary_function<T,T,T> { T alpha; AYPX(T _alpha) : alpha(_alpha) {} __host__ __device__ T operator()(T x, T y) { return alpha * y + x; } }; } template <typename ForwardIterator1, typename ForwardIterator2, typename ScalarType> void aypx(ForwardIterator1 first1,ForwardIterator1 last1,ForwardIterator2 first2,ScalarType alpha) { thrust::transform(first1,last1,first2,first2,detail::AYPX<ScalarType>(alpha)); } template <typename Array1, typename Array2, typename ScalarType> void aypx(const Array1& x, Array2& y, ScalarType alpha) { detail::assert_same_dimensions(x,y); aypx(x.begin(),x.end(),y.begin(),alpha); } } } #undef __FUNCT__ #define __FUNCT__ "VecAYPX_SeqCUSP" PetscErrorCode VecAYPX_SeqCUSP(Vec yin, PetscScalar alpha, Vec xin) { CUSPARRAY *xarray,*yarray; PetscErrorCode ierr; PetscFunctionBegin; if (alpha != 0.0) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::aypx(*xarray,*yarray,alpha); ierr = WaitForGPU();CHKERRCUSP(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogFlops(2.0*yin->map->n);CHKERRQ(ierr); } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecAXPY_SeqCUSP" PetscErrorCode VecAXPY_SeqCUSP(Vec yin,PetscScalar alpha,Vec xin) { CUSPARRAY *xarray,*yarray; PetscErrorCode ierr; PetscFunctionBegin; if (alpha != 0.0) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::axpy(*xarray,*yarray,alpha); ierr = WaitForGPU();CHKERRCUSP(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogFlops(2.0*yin->map->n);CHKERRQ(ierr); } PetscFunctionReturn(0); } struct VecCUSPPointwiseDivide { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t) / thrust::get<2>(t); } }; #undef __FUNCT__ #define __FUNCT__ "VecPointwiseDivide_SeqCUSP" PetscErrorCode VecPointwiseDivide_SeqCUSP(Vec win, Vec xin, Vec yin) { CUSPARRAY *warray=NULL,*xarray=NULL,*yarray=NULL; PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayWrite(win,&warray);CHKERRQ(ierr); try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( warray->begin(), xarray->begin(), yarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( warray->end(), xarray->end(), yarray->end())), VecCUSPPointwiseDivide()); ierr = WaitForGPU();CHKERRCUSP(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(win->map->n);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayWrite(win,&warray);CHKERRQ(ierr); 
PetscFunctionReturn(0); } struct VecCUSPWAXPY { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t) + thrust::get<2>(t)*thrust::get<3>(t); } }; struct VecCUSPSum { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t) + thrust::get<2>(t); } }; struct VecCUSPDiff { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t) - thrust::get<2>(t); } }; #undef __FUNCT__ #define __FUNCT__ "VecWAXPY_SeqCUSP" PetscErrorCode VecWAXPY_SeqCUSP(Vec win,PetscScalar alpha,Vec xin, Vec yin) { CUSPARRAY *xarray=NULL,*yarray=NULL,*warray=NULL; PetscErrorCode ierr; PetscFunctionBegin; if (alpha == 0.0) { ierr = VecCopy_SeqCUSP(yin,win);CHKERRQ(ierr); } else { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayWrite(win,&warray);CHKERRQ(ierr); if (alpha == 1.0) { try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( warray->begin(), yarray->begin(), xarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( warray->end(), yarray->end(), xarray->end())), VecCUSPSum()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(win->map->n);CHKERRQ(ierr); } else if (alpha == -1.0) { try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( warray->begin(), yarray->begin(), xarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( warray->end(), yarray->end(), xarray->end())), VecCUSPDiff()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(win->map->n);CHKERRQ(ierr); } else { try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( warray->begin(), yarray->begin(), thrust::make_constant_iterator(alpha), xarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( warray->end(), yarray->end(), thrust::make_constant_iterator(alpha), xarray->end())), VecCUSPWAXPY()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(2*win->map->n);CHKERRQ(ierr); } ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayWrite(win,&warray);CHKERRQ(ierr); } PetscFunctionReturn(0); } /* These functions are for the CUSP implementation of MAXPY with the loop unrolled on the CPU */ struct VecCUSPMAXPY4 { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { /*y += a1*x1 +a2*x2 + 13*x3 +a4*x4 */ thrust::get<0>(t) += thrust::get<1>(t)*thrust::get<2>(t)+thrust::get<3>(t)*thrust::get<4>(t)+thrust::get<5>(t)*thrust::get<6>(t)+thrust::get<7>(t)*thrust::get<8>(t); } }; struct VecCUSPMAXPY3 { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { /*y += a1*x1 +a2*x2 + a3*x3 */ thrust::get<0>(t) += thrust::get<1>(t)*thrust::get<2>(t)+thrust::get<3>(t)*thrust::get<4>(t)+thrust::get<5>(t)*thrust::get<6>(t); } }; struct VecCUSPMAXPY2 { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { /*y += a1*x1 +a2*x2*/ thrust::get<0>(t) += thrust::get<1>(t)*thrust::get<2>(t)+thrust::get<3>(t)*thrust::get<4>(t); } }; #undef __FUNCT__ #define __FUNCT__ "VecMAXPY_SeqCUSP" PetscErrorCode VecMAXPY_SeqCUSP(Vec xin, PetscInt nv,const PetscScalar *alpha,Vec *y) { PetscErrorCode ierr; CUSPARRAY *xarray,*yy0,*yy1,*yy2,*yy3; 
PetscInt n = xin->map->n,j,j_rem; PetscScalar alpha0,alpha1,alpha2,alpha3; PetscFunctionBegin; ierr = PetscLogFlops(nv*2.0*n);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr); switch (j_rem=nv&0x3) { case 3: alpha0 = alpha[0]; alpha1 = alpha[1]; alpha2 = alpha[2]; alpha += 3; ierr = VecCUSPGetArrayRead(y[0],&yy0);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[1],&yy1);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[2],&yy2);CHKERRQ(ierr); try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( xarray->begin(), thrust::make_constant_iterator(alpha0), yy0->begin(), thrust::make_constant_iterator(alpha1), yy1->begin(), thrust::make_constant_iterator(alpha2), yy2->begin())), thrust::make_zip_iterator( thrust::make_tuple( xarray->end(), thrust::make_constant_iterator(alpha0), yy0->end(), thrust::make_constant_iterator(alpha1), yy1->end(), thrust::make_constant_iterator(alpha2), yy2->end())), VecCUSPMAXPY3()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(y[0],&yy0);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(y[1],&yy1);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(y[2],&yy2);CHKERRQ(ierr); y += 3; break; case 2: alpha0 = alpha[0]; alpha1 = alpha[1]; alpha +=2; ierr = VecCUSPGetArrayRead(y[0],&yy0);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[1],&yy1);CHKERRQ(ierr); try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( xarray->begin(), thrust::make_constant_iterator(alpha0), yy0->begin(), thrust::make_constant_iterator(alpha1), yy1->begin())), thrust::make_zip_iterator( thrust::make_tuple( xarray->end(), thrust::make_constant_iterator(alpha0), yy0->end(), thrust::make_constant_iterator(alpha1), yy1->end())), VecCUSPMAXPY2()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } y +=2; break; case 1: alpha0 = *alpha++; ierr = VecAXPY_SeqCUSP(xin,alpha0,y[0]); y +=1; break; } for (j=j_rem; j<nv; j+=4) { alpha0 = alpha[0]; alpha1 = alpha[1]; alpha2 = alpha[2]; alpha3 = alpha[3]; alpha += 4; ierr = VecCUSPGetArrayRead(y[0],&yy0);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[1],&yy1);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[2],&yy2);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(y[3],&yy3);CHKERRQ(ierr); try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( xarray->begin(), thrust::make_constant_iterator(alpha0), yy0->begin(), thrust::make_constant_iterator(alpha1), yy1->begin(), thrust::make_constant_iterator(alpha2), yy2->begin(), thrust::make_constant_iterator(alpha3), yy3->begin())), thrust::make_zip_iterator( thrust::make_tuple( xarray->end(), thrust::make_constant_iterator(alpha0), yy0->end(), thrust::make_constant_iterator(alpha1), yy1->end(), thrust::make_constant_iterator(alpha2), yy2->end(), thrust::make_constant_iterator(alpha3), yy3->end())), VecCUSPMAXPY4()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(y[0],&yy0);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(y[1],&yy1);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(y[2],&yy2);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(y[3],&yy3);CHKERRQ(ierr); y += 4; } ierr = VecCUSPRestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecDot_SeqCUSP" PetscErrorCode VecDot_SeqCUSP(Vec xin,Vec yin,PetscScalar *z) { CUSPARRAY *xarray,*yarray; PetscErrorCode ierr; // PetscScalar *xptr,*yptr,*zgpu; //PetscReal tmp; 
PetscFunctionBegin; //VecNorm_SeqCUSP(xin, NORM_2, &tmp); //VecNorm_SeqCUSP(yin, NORM_2, &tmp); ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); try { #if defined(PETSC_USE_COMPLEX) *z = cusp::blas::dotc(*yarray,*xarray); #else *z = cusp::blas::dot(*yarray,*xarray); #endif } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = WaitForGPU();CHKERRCUSP(ierr); if (xin->map->n >0) { ierr = PetscLogFlops(2.0*xin->map->n-1);CHKERRQ(ierr); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); PetscFunctionReturn(0); } // // CUDA kernels for MDot to follow // // set work group size to be a power of 2 (128 is usually a good compromise between portability and speed) #define MDOT_WORKGROUP_SIZE 128 #define MDOT_WORKGROUP_NUM 128 // M = 2: __global__ void VecMDot_SeqCUSP_kernel2(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[2*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = min((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[MDOT_WORKGROUP_SIZE]; } } // M = 3: __global__ void VecMDot_SeqCUSP_kernel3(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[3*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = min((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; PetscScalar group_sum2 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! 
group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; group_sum2 += entry_x * y2[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x ] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE]; } } // M = 4: __global__ void VecMDot_SeqCUSP_kernel4(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,const PetscScalar *y3, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[4*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = min((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; PetscScalar group_sum2 = 0; PetscScalar group_sum3 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; group_sum2 += entry_x * y2[i]; group_sum3 += entry_x * y3[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] = group_sum3; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 3 * MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x ] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 3 * gridDim.x] = tmp_buffer[3 * MDOT_WORKGROUP_SIZE]; } } // M = 8: __global__ void VecMDot_SeqCUSP_kernel8(const PetscScalar *x,const PetscScalar *y0,const PetscScalar *y1,const PetscScalar *y2,const PetscScalar *y3, const PetscScalar *y4,const PetscScalar *y5,const PetscScalar *y6,const PetscScalar *y7, PetscInt size, PetscScalar *group_results) { __shared__ PetscScalar tmp_buffer[8*MDOT_WORKGROUP_SIZE]; PetscInt entries_per_group = (size - 1) / gridDim.x + 1; entries_per_group = (entries_per_group == 0) ? 
1 : entries_per_group; // for very small vectors, a group should still do some work PetscInt vec_start_index = blockIdx.x * entries_per_group; PetscInt vec_stop_index = min((blockIdx.x + 1) * entries_per_group, size); // don't go beyond vec size PetscScalar entry_x = 0; PetscScalar group_sum0 = 0; PetscScalar group_sum1 = 0; PetscScalar group_sum2 = 0; PetscScalar group_sum3 = 0; PetscScalar group_sum4 = 0; PetscScalar group_sum5 = 0; PetscScalar group_sum6 = 0; PetscScalar group_sum7 = 0; for (PetscInt i = vec_start_index + threadIdx.x; i < vec_stop_index; i += blockDim.x) { entry_x = x[i]; // load only once from global memory! group_sum0 += entry_x * y0[i]; group_sum1 += entry_x * y1[i]; group_sum2 += entry_x * y2[i]; group_sum3 += entry_x * y3[i]; group_sum4 += entry_x * y4[i]; group_sum5 += entry_x * y5[i]; group_sum6 += entry_x * y6[i]; group_sum7 += entry_x * y7[i]; } tmp_buffer[threadIdx.x] = group_sum0; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] = group_sum1; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] = group_sum2; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] = group_sum3; tmp_buffer[threadIdx.x + 4 * MDOT_WORKGROUP_SIZE] = group_sum4; tmp_buffer[threadIdx.x + 5 * MDOT_WORKGROUP_SIZE] = group_sum5; tmp_buffer[threadIdx.x + 6 * MDOT_WORKGROUP_SIZE] = group_sum6; tmp_buffer[threadIdx.x + 7 * MDOT_WORKGROUP_SIZE] = group_sum7; // parallel reduction for (PetscInt stride = blockDim.x/2; stride > 0; stride /= 2) { __syncthreads(); if (threadIdx.x < stride) { tmp_buffer[threadIdx.x ] += tmp_buffer[threadIdx.x+stride ]; tmp_buffer[threadIdx.x + MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 2 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 2 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 3 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 3 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 4 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 4 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 5 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 5 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 6 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 6 * MDOT_WORKGROUP_SIZE]; tmp_buffer[threadIdx.x + 7 * MDOT_WORKGROUP_SIZE] += tmp_buffer[threadIdx.x+stride + 7 * MDOT_WORKGROUP_SIZE]; } } // write result of group to group_results if (threadIdx.x == 0) { group_results[blockIdx.x ] = tmp_buffer[0]; group_results[blockIdx.x + gridDim.x] = tmp_buffer[ MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 2 * gridDim.x] = tmp_buffer[2 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 3 * gridDim.x] = tmp_buffer[3 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 4 * gridDim.x] = tmp_buffer[4 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 5 * gridDim.x] = tmp_buffer[5 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 6 * gridDim.x] = tmp_buffer[6 * MDOT_WORKGROUP_SIZE]; group_results[blockIdx.x + 7 * gridDim.x] = tmp_buffer[7 * MDOT_WORKGROUP_SIZE]; } } #undef __FUNCT__ #define __FUNCT__ "VecMDot_SeqCUSP" PetscErrorCode VecMDot_SeqCUSP(Vec xin,PetscInt nv,const Vec yin[],PetscScalar *z) { PetscErrorCode ierr; PetscInt i,j,n = xin->map->n,current_y_index = 0; CUSPARRAY *xarray,*y0array,*y1array,*y2array,*y3array,*y4array,*y5array,*y6array,*y7array; PetscScalar *group_results_gpu,*xptr,*y0ptr,*y1ptr,*y2ptr,*y3ptr,*y4ptr,*y5ptr,*y6ptr,*y7ptr; PetscScalar group_results_cpu[MDOT_WORKGROUP_NUM * 8]; // we process at most eight vectors in one kernel cudaError_t cuda_ierr; PetscFunctionBegin; // 
allocate scratchpad memory for the results of individual work groups: if (nv <= 0) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_LIB,"Number of vectors provided to VecMDot_SeqCUSP not positive."); cuda_ierr = cudaMalloc((void**)&group_results_gpu, sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 8); if (cuda_ierr != cudaSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not allocate CUDA work memory. Error code: %d", (int)cuda_ierr); ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); xptr = thrust::raw_pointer_cast(xarray->data()); while (current_y_index < nv) { switch (nv - current_y_index) { case 7: case 6: case 5: case 4: ierr = VecCUSPGetArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+3],&y3array);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) z[current_y_index] = cusp::blas::dot(*y0array,*xarray); z[current_y_index+1] = cusp::blas::dot(*y1array,*xarray); z[current_y_index+2] = cusp::blas::dot(*y2array,*xarray); z[current_y_index+3] = cusp::blas::dot(*y3array,*xarray); #else // extract raw device pointers: y0ptr = thrust::raw_pointer_cast(y0array->data()); y1ptr = thrust::raw_pointer_cast(y1array->data()); y2ptr = thrust::raw_pointer_cast(y2array->data()); y3ptr = thrust::raw_pointer_cast(y3array->data()); // run kernel: VecMDot_SeqCUSP_kernel4<<<MDOT_WORKGROUP_NUM,MDOT_WORKGROUP_SIZE>>>(xptr,y0ptr,y1ptr,y2ptr,y3ptr,n,group_results_gpu); // copy results back to cuda_ierr = cudaMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 4,cudaMemcpyDeviceToHost); if (cuda_ierr != cudaSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not copy CUDA buffer to host. Error code: %d", (int)cuda_ierr); // sum group results into z: for (j=0; j<4; ++j) { z[current_y_index + j] = 0; for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i]; } #endif ierr = VecCUSPRestoreArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+3],&y3array);CHKERRQ(ierr); current_y_index += 4; break; case 3: ierr = VecCUSPGetArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) z[current_y_index] = cusp::blas::dot(*y0array,*xarray); z[current_y_index+1] = cusp::blas::dot(*y1array,*xarray); z[current_y_index+2] = cusp::blas::dot(*y2array,*xarray); #else // extract raw device pointers: y0ptr = thrust::raw_pointer_cast(y0array->data()); y1ptr = thrust::raw_pointer_cast(y1array->data()); y2ptr = thrust::raw_pointer_cast(y2array->data()); // run kernel: VecMDot_SeqCUSP_kernel3<<<MDOT_WORKGROUP_NUM,MDOT_WORKGROUP_SIZE>>>(xptr,y0ptr,y1ptr,y2ptr,n,group_results_gpu); // copy results back to cuda_ierr = cudaMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 3,cudaMemcpyDeviceToHost); if (cuda_ierr != cudaSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not copy CUDA buffer to host. 
Error code: %d", (int)cuda_ierr); // sum group results into z: for (j=0; j<3; ++j) { z[current_y_index + j] = 0; for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i]; } #endif ierr = VecCUSPRestoreArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); current_y_index += 3; break; case 2: ierr = VecCUSPGetArrayRead(yin[current_y_index],&y0array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) z[current_y_index] = cusp::blas::dot(*y0array,*xarray); z[current_y_index+1] = cusp::blas::dot(*y1array,*xarray); #else // extract raw device pointers: y0ptr = thrust::raw_pointer_cast(y0array->data()); y1ptr = thrust::raw_pointer_cast(y1array->data()); // run kernel: VecMDot_SeqCUSP_kernel2<<<MDOT_WORKGROUP_NUM,MDOT_WORKGROUP_SIZE>>>(xptr,y0ptr,y1ptr,n,group_results_gpu); // copy results back to cuda_ierr = cudaMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 2,cudaMemcpyDeviceToHost); if (cuda_ierr != cudaSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not copy CUDA buffer to host. Error code: %d", (int)cuda_ierr); // sum group results into z: for (j=0; j<2; ++j) { z[current_y_index + j] = 0; for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i]; } #endif ierr = VecCUSPRestoreArrayRead(yin[current_y_index],&y0array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); current_y_index += 2; break; case 1: ierr = VecCUSPGetArrayRead(yin[current_y_index],&y0array);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) z[current_y_index] = cusp::blas::dotc(*y0array, *xarray); #else z[current_y_index] = cusp::blas::dot(*xarray, *y0array); #endif ierr = VecCUSPRestoreArrayRead(yin[current_y_index],&y0array);CHKERRQ(ierr); current_y_index += 1; break; default: // 8 or more vectors left ierr = VecCUSPGetArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+3],&y3array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+4],&y4array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+5],&y5array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+6],&y6array);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin[current_y_index+7],&y7array);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) z[current_y_index] = cusp::blas::dot(*y0array,*xarray); z[current_y_index+1] = cusp::blas::dot(*y1array,*xarray); z[current_y_index+2] = cusp::blas::dot(*y2array,*xarray); z[current_y_index+3] = cusp::blas::dot(*y3array,*xarray); z[current_y_index+4] = cusp::blas::dot(*y4array,*xarray); z[current_y_index+5] = cusp::blas::dot(*y5array,*xarray); z[current_y_index+6] = cusp::blas::dot(*y6array,*xarray); z[current_y_index+7] = cusp::blas::dot(*y7array,*xarray); #else // extract raw device pointers: y0ptr = thrust::raw_pointer_cast(y0array->data()); y1ptr = thrust::raw_pointer_cast(y1array->data()); y2ptr = thrust::raw_pointer_cast(y2array->data()); y3ptr = thrust::raw_pointer_cast(y3array->data()); y4ptr = thrust::raw_pointer_cast(y4array->data()); y5ptr = thrust::raw_pointer_cast(y5array->data()); 
y6ptr = thrust::raw_pointer_cast(y6array->data()); y7ptr = thrust::raw_pointer_cast(y7array->data()); // run kernel: VecMDot_SeqCUSP_kernel8<<<MDOT_WORKGROUP_NUM,MDOT_WORKGROUP_SIZE>>>(xptr,y0ptr,y1ptr,y2ptr,y3ptr,y4ptr,y5ptr,y6ptr,y7ptr,n,group_results_gpu); // copy results back to cuda_ierr = cudaMemcpy(group_results_cpu,group_results_gpu,sizeof(PetscScalar) * MDOT_WORKGROUP_NUM * 8,cudaMemcpyDeviceToHost); if (cuda_ierr != cudaSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not copy CUDA buffer to host. Error code: %d", (int)cuda_ierr); // sum group results into z: for (j=0; j<8; ++j) { z[current_y_index + j] = 0; for (i=j*MDOT_WORKGROUP_NUM; i<(j+1)*MDOT_WORKGROUP_NUM; ++i) z[current_y_index + j] += group_results_cpu[i]; } #endif ierr = VecCUSPRestoreArrayRead(yin[current_y_index ],&y0array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+1],&y1array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+2],&y2array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+3],&y3array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+4],&y4array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+5],&y5array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+6],&y6array);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin[current_y_index+7],&y7array);CHKERRQ(ierr); current_y_index += 8; break; } } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); cuda_ierr = cudaFree(group_results_gpu); if (cuda_ierr != cudaSuccess) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"Could not copy CUDA buffer to host: %d", (int)cuda_ierr); ierr = PetscLogFlops(PetscMax(nv*(2.0*n-1),0.0));CHKERRQ(ierr); PetscFunctionReturn(0); } #undef MDOT_WORKGROUP_SIZE #undef MDOT_WORKGROUP_NUM #undef __FUNCT__ #define __FUNCT__ "VecSet_SeqCUSP" PetscErrorCode VecSet_SeqCUSP(Vec xin,PetscScalar alpha) { CUSPARRAY *xarray=NULL; PetscErrorCode ierr; PetscFunctionBegin; /* if there's a faster way to do the case alpha=0.0 on the GPU we should do that*/ ierr = VecCUSPGetArrayWrite(xin,&xarray);CHKERRQ(ierr); try { cusp::blas::fill(*xarray,alpha); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayWrite(xin,&xarray); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecScale_SeqCUSP" PetscErrorCode VecScale_SeqCUSP(Vec xin, PetscScalar alpha) { CUSPARRAY *xarray; PetscErrorCode ierr; PetscFunctionBegin; if (alpha == 0.0) { ierr = VecSet_SeqCUSP(xin,alpha);CHKERRQ(ierr); } else if (alpha != 1.0) { ierr = VecCUSPGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr); try { cusp::blas::scal(*xarray,alpha); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr); } ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = PetscLogFlops(xin->map->n);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecTDot_SeqCUSP" PetscErrorCode VecTDot_SeqCUSP(Vec xin,Vec yin,PetscScalar *z) { CUSPARRAY *xarray,*yarray; PetscErrorCode ierr; PetscFunctionBegin; //#if defined(PETSC_USE_COMPLEX) /*Not working for complex*/ //#else ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); try { *z = cusp::blas::dot(*xarray,*yarray); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } //#endif ierr = WaitForGPU();CHKERRCUSP(ierr); if (xin->map->n > 0) { ierr = 
PetscLogFlops(2.0*xin->map->n-1);CHKERRQ(ierr); } ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCopy_SeqCUSP" PetscErrorCode VecCopy_SeqCUSP(Vec xin,Vec yin) { CUSPARRAY *xarray,*yarray; PetscErrorCode ierr; PetscFunctionBegin; if (xin != yin) { if (xin->valid_GPU_array == PETSC_CUSP_GPU) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::copy(*xarray,*yarray); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayWrite(yin,&yarray);CHKERRQ(ierr); } else if (xin->valid_GPU_array == PETSC_CUSP_CPU) { /* copy in CPU if we are on the CPU*/ ierr = VecCopy_SeqCUSP_Private(xin,yin);CHKERRQ(ierr); } else if (xin->valid_GPU_array == PETSC_CUSP_BOTH) { /* if xin is valid in both places, see where yin is and copy there (because it's probably where we'll want to next use it) */ if (yin->valid_GPU_array == PETSC_CUSP_CPU) { /* copy in CPU */ ierr = VecCopy_SeqCUSP_Private(xin,yin);CHKERRQ(ierr); } else if (yin->valid_GPU_array == PETSC_CUSP_GPU) { /* copy in GPU */ ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::copy(*xarray,*yarray); ierr = WaitForGPU();CHKERRCUSP(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayWrite(yin,&yarray);CHKERRQ(ierr); } else if (yin->valid_GPU_array == PETSC_CUSP_BOTH) { /* xin and yin are both valid in both places (or yin was unallocated before the earlier call to allocatecheck default to copy in GPU (this is an arbitrary choice) */ ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::copy(*xarray,*yarray); ierr = WaitForGPU();CHKERRCUSP(ierr); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayWrite(yin,&yarray);CHKERRQ(ierr); } else { ierr = VecCopy_SeqCUSP_Private(xin,yin);CHKERRQ(ierr); } } } PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecSwap_SeqCUSP" PetscErrorCode VecSwap_SeqCUSP(Vec xin,Vec yin) { PetscErrorCode ierr; PetscBLASInt one = 1,bn; CUSPARRAY *xarray,*yarray; PetscFunctionBegin; ierr = PetscBLASIntCast(xin->map->n,&bn);CHKERRQ(ierr); if (xin != yin) { ierr = VecCUSPGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) #if defined(PETSC_USE_REAL_SINGLE) cublasCswap(bn,(cuFloatComplex*)VecCUSPCastToRawPtr(*xarray),one,(cuFloatComplex*)VecCUSPCastToRawPtr(*yarray),one); #else cublasZswap(bn,(cuDoubleComplex*)VecCUSPCastToRawPtr(*xarray),one,(cuDoubleComplex*)VecCUSPCastToRawPtr(*yarray),one); #endif #else #if defined(PETSC_USE_REAL_SINGLE) cublasSswap(bn,VecCUSPCastToRawPtr(*xarray),one,VecCUSPCastToRawPtr(*yarray),one); #else cublasDswap(bn,VecCUSPCastToRawPtr(*xarray),one,VecCUSPCastToRawPtr(*yarray),one); #endif #endif ierr = cublasGetError();CHKERRCUSP(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr); ierr = 
VecCUSPRestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr); } PetscFunctionReturn(0); } struct VecCUSPAX { template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t)*thrust::get<2>(t); } }; #undef __FUNCT__ #define __FUNCT__ "VecAXPBY_SeqCUSP" PetscErrorCode VecAXPBY_SeqCUSP(Vec yin,PetscScalar alpha,PetscScalar beta,Vec xin) { PetscErrorCode ierr; PetscScalar a = alpha,b = beta; CUSPARRAY *xarray,*yarray; PetscFunctionBegin; if (a == 0.0) { ierr = VecScale_SeqCUSP(yin,beta);CHKERRQ(ierr); } else if (b == 1.0) { ierr = VecAXPY_SeqCUSP(yin,alpha,xin);CHKERRQ(ierr); } else if (a == 1.0) { ierr = VecAYPX_SeqCUSP(yin,beta,xin);CHKERRQ(ierr); } else if (b == 0.0) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr); try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( yarray->begin(), thrust::make_constant_iterator(a), xarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( yarray->end(), thrust::make_constant_iterator(a), xarray->end())), VecCUSPAX()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(xin->map->n);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr); } else { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(yin,&yarray);CHKERRQ(ierr); try { cusp::blas::axpby(*xarray,*yarray,*yarray,a,b); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(yin,&yarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = PetscLogFlops(3.0*xin->map->n);CHKERRQ(ierr); } PetscFunctionReturn(0); } /* structs below are for special cases of VecAXPBYPCZ_SeqCUSP */ struct VecCUSPXPBYPCZ { /* z = x + b*y + c*z */ template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) = thrust::get<1>(t)*thrust::get<0>(t)+thrust::get<2>(t)+thrust::get<4>(t)*thrust::get<3>(t); } }; struct VecCUSPAXPBYPZ { /* z = ax + b*y + z */ template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<0>(t) += thrust::get<2>(t)*thrust::get<1>(t)+thrust::get<4>(t)*thrust::get<3>(t); } }; #undef __FUNCT__ #define __FUNCT__ "VecAXPBYPCZ_SeqCUSP" PetscErrorCode VecAXPBYPCZ_SeqCUSP(Vec zin,PetscScalar alpha,PetscScalar beta,PetscScalar gamma,Vec xin,Vec yin) { PetscErrorCode ierr; PetscInt n = zin->map->n; CUSPARRAY *xarray,*yarray,*zarray; PetscFunctionBegin; ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(zin,&zarray);CHKERRQ(ierr); if (alpha == 1.0) { try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( zarray->begin(), thrust::make_constant_iterator(gamma), xarray->begin(), yarray->begin(), thrust::make_constant_iterator(beta))), thrust::make_zip_iterator( thrust::make_tuple( zarray->end(), thrust::make_constant_iterator(gamma), xarray->end(), yarray->end(), thrust::make_constant_iterator(beta))), VecCUSPXPBYPCZ()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(4.0*n);CHKERRQ(ierr); } else if (gamma == 1.0) { try { thrust::for_each( thrust::make_zip_iterator( thrust::make_tuple( zarray->begin(), xarray->begin(), 
thrust::make_constant_iterator(alpha), yarray->begin(), thrust::make_constant_iterator(beta))), thrust::make_zip_iterator( thrust::make_tuple( zarray->end(), xarray->end(), thrust::make_constant_iterator(alpha), yarray->end(), thrust::make_constant_iterator(beta))), VecCUSPAXPBYPZ()); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = PetscLogFlops(4.0*n);CHKERRQ(ierr); } else { try { cusp::blas::axpbypcz(*xarray,*yarray,*zarray,*zarray,alpha,beta,gamma); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayReadWrite(zin,&zarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = PetscLogFlops(5.0*n);CHKERRQ(ierr); } ierr = WaitForGPU();CHKERRCUSP(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecPointwiseMult_SeqCUSP" PetscErrorCode VecPointwiseMult_SeqCUSP(Vec win,Vec xin,Vec yin) { PetscErrorCode ierr; PetscInt n = win->map->n; CUSPARRAY *xarray,*yarray,*warray; PetscFunctionBegin; ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayReadWrite(win,&warray);CHKERRQ(ierr); try { cusp::blas::xmy(*xarray,*yarray,*warray); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(yin,&yarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayReadWrite(win,&warray);CHKERRQ(ierr); ierr = PetscLogFlops(n);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); PetscFunctionReturn(0); } /* should do infinity norm in cusp */ #undef __FUNCT__ #define __FUNCT__ "VecNorm_SeqCUSP" PetscErrorCode VecNorm_SeqCUSP(Vec xin,NormType type,PetscReal *z) { const PetscScalar *xx; PetscErrorCode ierr; PetscInt n = xin->map->n; PetscBLASInt one = 1, bn; CUSPARRAY *xarray; PetscFunctionBegin; ierr = PetscBLASIntCast(n,&bn);CHKERRQ(ierr); if (type == NORM_2 || type == NORM_FROBENIUS) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); try { *z = cusp::blas::nrm2(*xarray); } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = PetscLogFlops(PetscMax(2.0*n-1,0.0));CHKERRQ(ierr); } else if (type == NORM_INFINITY) { PetscInt i; PetscReal max = 0.0,tmp; ierr = VecGetArrayRead(xin,&xx);CHKERRQ(ierr); for (i=0; i<n; i++) { if ((tmp = PetscAbsScalar(*xx)) > max) max = tmp; /* check special case of tmp == NaN */ if (tmp != tmp) {max = tmp; break;} xx++; } ierr = VecRestoreArrayRead(xin,&xx);CHKERRQ(ierr); *z = max; } else if (type == NORM_1) { ierr = VecCUSPGetArrayRead(xin,&xarray);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) #if defined(PETSC_USE_REAL_SINGLE) *z = cublasScasum(bn,(cuFloatComplex*)VecCUSPCastToRawPtr(*xarray),one); #else *z = cublasDzasum(bn,(cuDoubleComplex*)VecCUSPCastToRawPtr(*xarray),one); #endif #else #if defined(PETSC_USE_REAL_SINGLE) *z = cublasSasum(bn,VecCUSPCastToRawPtr(*xarray),one); #else *z = cublasDasum(bn,VecCUSPCastToRawPtr(*xarray),one); #endif #endif ierr = cublasGetError();CHKERRCUSP(ierr); ierr = VecCUSPRestoreArrayRead(xin,&xarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = PetscLogFlops(PetscMax(n-1.0,0.0));CHKERRQ(ierr); } else if (type == NORM_1_AND_2) { ierr = VecNorm_SeqCUSP(xin,NORM_1,z);CHKERRQ(ierr); ierr = 
VecNorm_SeqCUSP(xin,NORM_2,z+1);CHKERRQ(ierr); } PetscFunctionReturn(0); } /*the following few functions should be modified to actually work with the GPU so they don't force unneccesary allocation of CPU memory */ #undef __FUNCT__ #define __FUNCT__ "VecSetRandom_SeqCUSP" PetscErrorCode VecSetRandom_SeqCUSP(Vec xin,PetscRandom r) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecSetRandom_SeqCUSP_Private(xin,r);CHKERRQ(ierr); xin->valid_GPU_array = PETSC_CUSP_CPU; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecResetArray_SeqCUSP" PetscErrorCode VecResetArray_SeqCUSP(Vec vin) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyFromGPU(vin);CHKERRQ(ierr); ierr = VecResetArray_SeqCUSP_Private(vin);CHKERRQ(ierr); vin->valid_GPU_array = PETSC_CUSP_CPU; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecPlaceArray_SeqCUSP" PetscErrorCode VecPlaceArray_SeqCUSP(Vec vin,const PetscScalar *a) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyFromGPU(vin);CHKERRQ(ierr); ierr = VecPlaceArray_Seq(vin,a);CHKERRQ(ierr); vin->valid_GPU_array = PETSC_CUSP_CPU; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecReplaceArray_SeqCUSP" PetscErrorCode VecReplaceArray_SeqCUSP(Vec vin,const PetscScalar *a) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCUSPCopyFromGPU(vin);CHKERRQ(ierr); ierr = VecReplaceArray_Seq(vin,a);CHKERRQ(ierr); vin->valid_GPU_array = PETSC_CUSP_CPU; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCreateSeqCUSP" /*@ VecCreateSeqCUSP - Creates a standard, sequential array-style vector. Collective on MPI_Comm Input Parameter: + comm - the communicator, should be PETSC_COMM_SELF - n - the vector length Output Parameter: . V - the vector Notes: Use VecDuplicate() or VecDuplicateVecs() to form additional vectors of the same type as an existing vector. Level: intermediate Concepts: vectors^creating sequential .seealso: VecCreateMPI(), VecCreate(), VecDuplicate(), VecDuplicateVecs(), VecCreateGhost() @*/ PetscErrorCode VecCreateSeqCUSP(MPI_Comm comm,PetscInt n,Vec *v) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCreate(comm,v);CHKERRQ(ierr); ierr = VecSetSizes(*v,n,n);CHKERRQ(ierr); ierr = VecSetType(*v,VECSEQCUSP);CHKERRQ(ierr); PetscFunctionReturn(0); } /*The following template functions are for VecDotNorm2_SeqCUSP. 
Note that there is no complex support as currently written*/ template <typename T> struct cuspdotnormcalculate : thrust::unary_function<T,T> { __host__ __device__ T operator()(T x) { #if defined(PETSC_USE_COMPLEX) //return thrust::make_tuple(thrust::get<0>(x)*thrust::get<1>(x), thrust::get<1>(x)*thrust::get<1>(x)); #else return thrust::make_tuple(thrust::get<0>(x)*thrust::get<1>(x), thrust::get<1>(x)*thrust::get<1>(x)); #endif } }; template <typename T> struct cuspdotnormreduce : thrust::binary_function<T,T,T> { __host__ __device__ T operator()(T x,T y) { return thrust::make_tuple(thrust::get<0>(x)+thrust::get<0>(y), thrust::get<1>(x)+thrust::get<1>(y)); } }; #undef __FUNCT__ #define __FUNCT__ "VecDotNorm2_SeqCUSP" PetscErrorCode VecDotNorm2_SeqCUSP(Vec s, Vec t, PetscScalar *dp, PetscScalar *nm) { PetscErrorCode ierr; PetscScalar zero = 0.0; PetscReal n=s->map->n; thrust::tuple<PetscScalar,PetscScalar> result; CUSPARRAY *sarray,*tarray; PetscFunctionBegin; /*ierr = VecCUSPCopyToGPU(s);CHKERRQ(ierr); ierr = VecCUSPCopyToGPU(t);CHKERRQ(ierr);*/ ierr = VecCUSPGetArrayRead(s,&sarray);CHKERRQ(ierr); ierr = VecCUSPGetArrayRead(t,&tarray);CHKERRQ(ierr); try { #if defined(PETSC_USE_COMPLEX) ierr = VecDot_SeqCUSP(s,t,dp);CHKERRQ(ierr); ierr = VecDot_SeqCUSP(t,t,nm);CHKERRQ(ierr); //printf("VecDotNorm2_SeqCUSP=%1.5g,%1.5g\n",PetscRealPart(*dp),PetscImaginaryPart(*dp)); //printf("VecDotNorm2_SeqCUSP=%1.5g,%1.5g\n",PetscRealPart(*nm),PetscImaginaryPart(*nm)); #else result = thrust::transform_reduce( thrust::make_zip_iterator( thrust::make_tuple( sarray->begin(), tarray->begin())), thrust::make_zip_iterator( thrust::make_tuple( sarray->end(), tarray->end())), cuspdotnormcalculate<thrust::tuple<PetscScalar,PetscScalar> >(), thrust::make_tuple(zero,zero), /*init */ cuspdotnormreduce<thrust::tuple<PetscScalar, PetscScalar> >()); /* binary function */ *dp = thrust::get<0>(result); *nm = thrust::get<1>(result); #endif } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecCUSPRestoreArrayRead(s,&sarray);CHKERRQ(ierr); ierr = VecCUSPRestoreArrayRead(t,&tarray);CHKERRQ(ierr); ierr = WaitForGPU();CHKERRCUSP(ierr); ierr = PetscLogFlops(4.0*n);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecDuplicate_SeqCUSP" PetscErrorCode VecDuplicate_SeqCUSP(Vec win,Vec *V) { PetscErrorCode ierr; PetscFunctionBegin; ierr = VecCreateSeqCUSP(PetscObjectComm((PetscObject)win),win->map->n,V);CHKERRQ(ierr); ierr = PetscLayoutReference(win->map,&(*V)->map);CHKERRQ(ierr); ierr = PetscObjectListDuplicate(((PetscObject)win)->olist,&((PetscObject)(*V))->olist);CHKERRQ(ierr); ierr = PetscFunctionListDuplicate(((PetscObject)win)->qlist,&((PetscObject)(*V))->qlist);CHKERRQ(ierr); (*V)->stash.ignorenegidx = win->stash.ignorenegidx; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecDestroy_SeqCUSP" PetscErrorCode VecDestroy_SeqCUSP(Vec v) { PetscErrorCode ierr; Vec_Seq *s = (Vec_Seq*)v->data; cudaError_t err; PetscFunctionBegin; try { if (v->spptr) { delete ((Vec_CUSP*)v->spptr)->GPUarray; err = cudaStreamDestroy(((Vec_CUSP*)v->spptr)->stream);CHKERRCUSP(err); /* If the host array has been registered as (page-locked) mapped, one must unregister the buffer */ if (((Vec_CUSP*)v->spptr)->hostDataRegisteredAsPageLocked) { err = cudaHostUnregister(s->array);CHKERRCUSP(err); } delete (Vec_CUSP*) v->spptr; } } catch(char *ex) { SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_LIB,"CUSP error: %s", ex); } ierr = VecDestroy_SeqCUSP_Private(v);CHKERRQ(ierr); 
PetscFunctionReturn(0); } #if defined(PETSC_USE_COMPLEX) struct conjugate { __host__ __device__ PetscScalar operator()(PetscScalar x) { return cusp::conj(x); } }; #endif #undef __FUNCT__ #define __FUNCT__ "VecConjugate_SeqCUSP" PetscErrorCode VecConjugate_SeqCUSP(Vec xin) { PetscErrorCode ierr; CUSPARRAY *xarray; PetscFunctionBegin; ierr = VecCUSPGetArrayReadWrite(xin,&xarray);CHKERRQ(ierr); #if defined(PETSC_USE_COMPLEX) thrust::transform(xarray->begin(), xarray->end(), xarray->begin(), conjugate()); #endif ierr = VecCUSPRestoreArrayReadWrite(xin,&xarray);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCreate_SeqCUSP" PETSC_EXTERN PetscErrorCode VecCreate_SeqCUSP(Vec V) { PetscErrorCode ierr; PetscMPIInt size; PetscFunctionBegin; ierr = MPI_Comm_size(PetscObjectComm((PetscObject)V),&size);CHKERRQ(ierr); if (size > 1) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_ARG_WRONG,"Cannot create VECSEQCUSP on more than one process"); ierr = VecCreate_Seq_Private(V,0);CHKERRQ(ierr); ierr = PetscObjectChangeTypeName((PetscObject)V,VECSEQCUSP);CHKERRQ(ierr); V->ops->dot = VecDot_SeqCUSP; V->ops->norm = VecNorm_SeqCUSP; V->ops->tdot = VecTDot_SeqCUSP; V->ops->scale = VecScale_SeqCUSP; V->ops->copy = VecCopy_SeqCUSP; V->ops->set = VecSet_SeqCUSP; V->ops->swap = VecSwap_SeqCUSP; V->ops->axpy = VecAXPY_SeqCUSP; V->ops->axpby = VecAXPBY_SeqCUSP; V->ops->axpbypcz = VecAXPBYPCZ_SeqCUSP; V->ops->pointwisemult = VecPointwiseMult_SeqCUSP; V->ops->pointwisedivide = VecPointwiseDivide_SeqCUSP; V->ops->setrandom = VecSetRandom_SeqCUSP; V->ops->dot_local = VecDot_SeqCUSP; V->ops->tdot_local = VecTDot_SeqCUSP; V->ops->norm_local = VecNorm_SeqCUSP; V->ops->mdot_local = VecMDot_SeqCUSP; V->ops->maxpy = VecMAXPY_SeqCUSP; V->ops->mdot = VecMDot_SeqCUSP; V->ops->aypx = VecAYPX_SeqCUSP; V->ops->waxpy = VecWAXPY_SeqCUSP; V->ops->dotnorm2 = VecDotNorm2_SeqCUSP; V->ops->placearray = VecPlaceArray_SeqCUSP; V->ops->replacearray = VecReplaceArray_SeqCUSP; V->ops->resetarray = VecResetArray_SeqCUSP; V->ops->destroy = VecDestroy_SeqCUSP; V->ops->duplicate = VecDuplicate_SeqCUSP; V->ops->conjugate = VecConjugate_SeqCUSP; ierr = VecCUSPAllocateCheck(V);CHKERRQ(ierr); V->valid_GPU_array = PETSC_CUSP_GPU; ierr = VecSet(V,0.0);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPGetArrayReadWrite" PETSC_EXTERN PetscErrorCode VecCUSPGetArrayReadWrite(Vec v, CUSPARRAY **a) { PetscErrorCode ierr; PetscFunctionBegin; *a = 0; ierr = VecCUSPCopyToGPU(v);CHKERRQ(ierr); *a = ((Vec_CUSP*)v->spptr)->GPUarray; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPRestoreArrayReadWrite" PETSC_EXTERN PetscErrorCode VecCUSPRestoreArrayReadWrite(Vec v, CUSPARRAY **a) { PetscErrorCode ierr; PetscFunctionBegin; v->valid_GPU_array = PETSC_CUSP_GPU; ierr = PetscObjectStateIncrease((PetscObject)v);CHKERRQ(ierr); PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPGetArrayRead" PETSC_EXTERN PetscErrorCode VecCUSPGetArrayRead(Vec v, CUSPARRAY **a) { PetscErrorCode ierr; PetscFunctionBegin; *a = 0; ierr = VecCUSPCopyToGPU(v);CHKERRQ(ierr); *a = ((Vec_CUSP*)v->spptr)->GPUarray; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPRestoreArrayRead" PETSC_EXTERN PetscErrorCode VecCUSPRestoreArrayRead(Vec v, CUSPARRAY **a) { PetscFunctionBegin; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPGetArrayWrite" PETSC_EXTERN PetscErrorCode VecCUSPGetArrayWrite(Vec v, CUSPARRAY **a) { PetscErrorCode ierr; PetscFunctionBegin; *a = 0; ierr = 
VecCUSPAllocateCheck(v);CHKERRQ(ierr); *a = ((Vec_CUSP*)v->spptr)->GPUarray; PetscFunctionReturn(0); } #undef __FUNCT__ #define __FUNCT__ "VecCUSPRestoreArrayWrite" PETSC_EXTERN PetscErrorCode VecCUSPRestoreArrayWrite(Vec v, CUSPARRAY **a) { PetscErrorCode ierr; PetscFunctionBegin; v->valid_GPU_array = PETSC_CUSP_GPU; ierr = PetscObjectStateIncrease((PetscObject)v);CHKERRQ(ierr); PetscFunctionReturn(0); }
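VecDotNorm2_SeqCUSP above fuses the two reductions it needs (the dot product s.t and the squared norm of t) into a single pass by running thrust::transform_reduce over a zip iterator of the two device arrays. For readers unfamiliar with that pattern, the following is a self-contained, real-valued sketch of the same idea outside PETSc; every name in it is illustrative and nothing below belongs to the PETSc source.

/* fused_dot_norm.cu -- illustrative sketch of the transform_reduce pattern used above */
#include <thrust/device_vector.h>
#include <thrust/iterator/zip_iterator.h>
#include <thrust/transform_reduce.h>
#include <thrust/tuple.h>
#include <cstdio>

typedef thrust::tuple<double,double> Pair;

struct DotNormMap {
  /* maps (s_i, t_i) to (s_i*t_i, t_i*t_i); taken by value, as in the functor above */
  __host__ __device__ Pair operator()(Pair x) {
    return thrust::make_tuple(thrust::get<0>(x)*thrust::get<1>(x),
                              thrust::get<1>(x)*thrust::get<1>(x));
  }
};

struct PairSum {
  /* componentwise sum of the two running totals */
  __host__ __device__ Pair operator()(Pair a, Pair b) {
    return thrust::make_tuple(thrust::get<0>(a)+thrust::get<0>(b),
                              thrust::get<1>(a)+thrust::get<1>(b));
  }
};

int main() {
  thrust::device_vector<double> s(4, 2.0), t(4, 3.0);
  Pair r = thrust::transform_reduce(
      thrust::make_zip_iterator(thrust::make_tuple(s.begin(), t.begin())),
      thrust::make_zip_iterator(thrust::make_tuple(s.end(),   t.end())),
      DotNormMap(), thrust::make_tuple(0.0, 0.0), PairSum());
  printf("dot = %g, ||t||^2 = %g\n", thrust::get<0>(r), thrust::get<1>(r)); /* expect 24 and 36 */
  return 0;
}

The complex case needs the conjugation handled in the map stage, which is why the PETSc routine above simply falls back to two separate VecDot_SeqCUSP calls when PETSC_USE_COMPLEX is defined.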
bad6e20a7613ae7f3a554a52ebc1bde6d87f64bf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* @author:chenzhengqiang @date:2018-09-25 */ #include "cvutils.h" #include "opencv_common.h" #include "cuda_common.h" #include <iostream> #include <vector> using std::vector; __global__ void calc_image_histo(const cuda::PtrStepSz<uchar> src_image, int* histo) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if ( x < src_image.rows && y < src_image.cols ) { int index = static_cast<int>(src_image(x, y)); atomicAdd(&(histo[index]), 1); } } int get_max_entropy( BUFFER::GLOBAL_BUFFER & global_buffer ) { hipMemset(global_buffer.HISTO, 0, 256 * sizeof(int)); hipLaunchKernelGGL(( calc_image_histo), dim3(global_buffer.BLOCKS), dim3(global_buffer.THREADS), 0, 0, global_buffer.saliency_map, global_buffer.HISTO); int histo[256]={0}; hipMemcpy( histo, global_buffer.HISTO, 256 * sizeof(unsigned int), hipMemcpyDeviceToHost ); int pixel_index = 0; double property = 0.0; double max_entropy = -1.0; double front_entropy = 0.0; double back_entropy = 0.0; for (int i = 0; i< 256; i++) { double back_total = 0; for (int j = 0; j < i; j++) { back_total += histo[j]; } for (int j = 0; j < i; j++) { if (histo[j] != 0) { property = histo[j] / back_total; back_entropy += -property * logf((float)property); } } for (int k = i; k < 256; k++) { if (histo[k] != 0) { property = histo[k] / (global_buffer.IMAGE_PIXELS - back_total); front_entropy += -property * logf((float)property); } } if ((front_entropy + back_entropy) > max_entropy) { max_entropy = front_entropy + back_entropy; pixel_index = i; } front_entropy = 0.0; back_entropy = 0.0; } return pixel_index; } int remove_area_by_adaptive_threshold(Mat & src_image) { vector< vector<Point> > contours; vector<Vec4i> hierarchy; vector<double> areas; findContours(src_image, contours, hierarchy, CV_RETR_LIST, CHAIN_APPROX_NONE, Point(0, 0)); int ret = 0; double threshold_area = 0; int area_size = (int) contours.size(); if ( area_size <= 0 ) return -1; for (int i = 0; i < area_size; ++i ) { //areas.push_back(contourArea(contours[i], false)); threshold_area += contourArea(contours[i]); } threshold_area = threshold_area / area_size; //calculate the max threashold area /*sort(areas.begin(), areas.end()); double max_sub = -1; int max_sub_index = 0; for (int i = 0; i < areas.size(); i++) { int j = i+1; if ( j == areas.size() ) break; double area_sub = areas[j]-areas[i]; if ( area_sub > max_sub ) { max_sub_index = j; max_sub = area_sub; } } threshold_area = areas[max_sub_index];*/ vector< vector< Point> > contours2; vector<vector<Point> >::iterator iter = contours.begin(); while ( iter != contours.end() ) { if( contourArea(*iter, false) < threshold_area) { //iter = contours.erase(iter); contours2.push_back(*iter); } ++iter; /*else { ++ret; ++iter; }*/ } drawContours(src_image, contours2, -1, Scalar(0), CV_FILLED); return ret; }
bad6e20a7613ae7f3a554a52ebc1bde6d87f64bf.cu
/* @author:chenzhengqiang @date:2018-09-25 */ #include "cvutils.h" #include "opencv_common.h" #include "cuda_common.h" #include <iostream> #include <vector> using std::vector; __global__ void calc_image_histo(const cuda::PtrStepSz<uchar> src_image, int* histo) { int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; if ( x < src_image.rows && y < src_image.cols ) { int index = static_cast<int>(src_image(x, y)); atomicAdd(&(histo[index]), 1); } } int get_max_entropy( BUFFER::GLOBAL_BUFFER & global_buffer ) { cudaMemset(global_buffer.HISTO, 0, 256 * sizeof(int)); calc_image_histo<<<global_buffer.BLOCKS, global_buffer.THREADS>>>(global_buffer.saliency_map, global_buffer.HISTO); int histo[256]={0}; cudaMemcpy( histo, global_buffer.HISTO, 256 * sizeof(unsigned int), cudaMemcpyDeviceToHost ); int pixel_index = 0; double property = 0.0; double max_entropy = -1.0; double front_entropy = 0.0; double back_entropy = 0.0; for (int i = 0; i< 256; i++) { double back_total = 0; for (int j = 0; j < i; j++) { back_total += histo[j]; } for (int j = 0; j < i; j++) { if (histo[j] != 0) { property = histo[j] / back_total; back_entropy += -property * logf((float)property); } } for (int k = i; k < 256; k++) { if (histo[k] != 0) { property = histo[k] / (global_buffer.IMAGE_PIXELS - back_total); front_entropy += -property * logf((float)property); } } if ((front_entropy + back_entropy) > max_entropy) { max_entropy = front_entropy + back_entropy; pixel_index = i; } front_entropy = 0.0; back_entropy = 0.0; } return pixel_index; } int remove_area_by_adaptive_threshold(Mat & src_image) { vector< vector<Point> > contours; vector<Vec4i> hierarchy; vector<double> areas; findContours(src_image, contours, hierarchy, CV_RETR_LIST, CHAIN_APPROX_NONE, Point(0, 0)); int ret = 0; double threshold_area = 0; int area_size = (int) contours.size(); if ( area_size <= 0 ) return -1; for (int i = 0; i < area_size; ++i ) { //areas.push_back(contourArea(contours[i], false)); threshold_area += contourArea(contours[i]); } threshold_area = threshold_area / area_size; //calculate the max threashold area /*sort(areas.begin(), areas.end()); double max_sub = -1; int max_sub_index = 0; for (int i = 0; i < areas.size(); i++) { int j = i+1; if ( j == areas.size() ) break; double area_sub = areas[j]-areas[i]; if ( area_sub > max_sub ) { max_sub_index = j; max_sub = area_sub; } } threshold_area = areas[max_sub_index];*/ vector< vector< Point> > contours2; vector<vector<Point> >::iterator iter = contours.begin(); while ( iter != contours.end() ) { if( contourArea(*iter, false) < threshold_area) { //iter = contours.erase(iter); contours2.push_back(*iter); } ++iter; /*else { ++ret; ++iter; }*/ } drawContours(src_image, contours2, -1, Scalar(0), CV_FILLED); return ret; }
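Aside from the injected hip/hip_runtime.h include and the one-for-one cuda-to-hip API renames (hipMemset, hipMemcpy, ...), the main mechanical change hipify makes in the pair above is the kernel launch: CUDA's triple-chevron syntax becomes a hipLaunchKernelGGL call whose fourth and fifth arguments carry the dynamic shared-memory size and the stream. A minimal sketch of that mapping with a hypothetical kernel:

__global__ void scale(float *x, int n, float a) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= a;
}

/* CUDA launch, as written in the .cu file above:
 *     scale<<<grid, block>>>(d_x, n, 2.0f);
 * Equivalent HIP launch emitted by hipify, as in the .hip file above:
 *     hipLaunchKernelGGL(scale, dim3(grid), dim3(block), 0, 0, d_x, n, 2.0f);
 * where the two zeros are the dynamic shared-memory bytes and the stream handle. */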
26e62b1cf8a14a675d1ba611c764601b92171aae.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <string.h> #include <stdlib.h> #include <math.h> #include <time.h> #include "genmatrix.h" #include <hip/hip_runtime.h> template <int BLOCK_SIZE> __global__ void matrixMulSharedMemBasic(float *C, float *A, float *B, int width) { int a_start = width * BLOCK_SIZE * blockIdx.y, a_offset, b_start = BLOCK_SIZE * blockIdx.x, b_offset; __shared__ float A_shared[BLOCK_SIZE*BLOCK_SIZE]; __shared__ float B_shared[BLOCK_SIZE*BLOCK_SIZE]; float C_local = 0.0f; for(int index = 0; index < gridDim.x; index++) // rwnie dobrze mogoby by gridDim.y bo s rwne { a_offset = index * BLOCK_SIZE; b_offset = index * BLOCK_SIZE * width; A_shared[threadIdx.y * blockDim.x + threadIdx.x] = A[a_start + a_offset + threadIdx.y * width + threadIdx.x]; B_shared[threadIdx.y * blockDim.x + threadIdx.x] = B[b_start + b_offset + threadIdx.y * width + threadIdx.x]; __syncthreads(); for(int k = 0; k < BLOCK_SIZE; k++) { C_local += A_shared[threadIdx.y * BLOCK_SIZE + k] * B_shared[k * BLOCK_SIZE + threadIdx.x]; } __syncthreads(); if(index * BLOCK_SIZE >= width) break; } int c_start = blockIdx.y * width * BLOCK_SIZE, c_offset = blockIdx.x * BLOCK_SIZE; C[c_start + c_offset + width * threadIdx.y + threadIdx.x] = C_local; } static float totalTime = 0.0f; int performSharedMemTest(dim3 block_size, int width) { hipError_t error; float *A = (float*)malloc(width*width*sizeof(float)); float *B = (float*)malloc(width*width*sizeof(float)); generateTestMatrix(A, width); generateTestMatrix(B, width); float *C = (float*)malloc(width*width*sizeof(float)); memset(C, 0, width*width*sizeof(float)); float *A_d, *B_d, *C_d; error = hipMalloc((void**)&A_d, width*width*sizeof(float)); if(error != hipSuccess) { fprintf(stderr, "Could not allocate memory on the device for matrix A: %s (line: %d)\n", hipGetErrorString(error), __LINE__); return -1; } error = hipMalloc((void**)&B_d, width*width*sizeof(float)); if(error != hipSuccess) { fprintf(stderr, "Could not allocate memory on the device for matrix B: %s (line: %d)\n", hipGetErrorString(error), __LINE__); return -1; } error = hipMalloc((void**)&C_d, width*width*sizeof(float)); if(error != hipSuccess) { fprintf(stderr, "Could not allocate memory on the device for matrix C: %s (line: %d)\n", hipGetErrorString(error), __LINE__); return -1; } error = hipMemcpy(A_d, A, width*width*sizeof(float), hipMemcpyHostToDevice); if(error != hipSuccess) { fprintf(stderr, "Could not copy data from host to device: %s (line: %d)\n", hipGetErrorString(error), __LINE__); return -1; } error = hipMemcpy(B_d, B, width*width*sizeof(float), hipMemcpyHostToDevice); if(error != hipSuccess) { fprintf(stderr, "Could not copy data from host to device: %s (line: %d)\n", hipGetErrorString(error), __LINE__); return -1; } hipEvent_t start; error = hipEventCreate(&start); if (error != hipSuccess) { fprintf(stderr, "Failed to create start event: %s (line: %d)\n", hipGetErrorString(error), __LINE__); return -1; } hipEvent_t stop; error = hipEventCreate(&stop); if (error != hipSuccess) { fprintf(stderr, "Failed to create stop event: %s (line: %d)\n", hipGetErrorString(error), __LINE__); return -1; } error = hipEventRecord(start, NULL); if(error != hipSuccess) { fprintf(stderr, "Could not record start event: %s (line: %d)\n", hipGetErrorString(error), __LINE__); return -1; } int grid_side = (int)ceil((float)width/(float)block_size.x); for(int current_test = 0; current_test < TEST_COUNT; current_test++) { switch(block_size.x) { case 8: hipLaunchKernelGGL(( 
matrixMulSharedMemBasic<8>), dim3(dim3(grid_side, grid_side)), dim3(block_size), 0, 0, C_d, A_d, B_d, width); break; case 16: hipLaunchKernelGGL(( matrixMulSharedMemBasic<16>), dim3(dim3(grid_side, grid_side)), dim3(block_size), 0, 0, C_d, A_d, B_d, width); break; case 22: hipLaunchKernelGGL(( matrixMulSharedMemBasic<22>), dim3(dim3(grid_side, grid_side)), dim3(block_size), 0, 0, C_d, A_d, B_d, width); break; case 32: hipLaunchKernelGGL(( matrixMulSharedMemBasic<32>), dim3(dim3(grid_side, grid_side)), dim3(block_size), 0, 0, C_d, A_d, B_d, width); break; } hipDeviceSynchronize(); } error = hipEventRecord(stop, NULL); if(error != hipSuccess) { fprintf(stderr, "Could not record stop event: %s (line: %d)\n", hipGetErrorString(error), __LINE__); return -1; } error = hipEventSynchronize(stop); if(error != hipSuccess) { fprintf(stderr, "Could not synchronize with stop event: %s (line: %d)\n", hipGetErrorString(error), __LINE__); return -1; } totalTime = 0.0f; error = hipEventElapsedTime(&totalTime, start, stop); if(error != hipSuccess) { fprintf(stderr, "Could not calculate elapsed time: %s (line: %d)\n", hipGetErrorString(error), __LINE__); return -1; } float msecPerMatrixMul = totalTime / (float)TEST_COUNT; double flopsPerMatrixMul = 2.0 * (double)width * (double)width * (double)width; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf("%dx%d\t%dx%d\t%dx%d\t%.3f\t%.2f\n", width, width, block_size.x, block_size.y, grid_side, grid_side, msecPerMatrixMul, gigaFlops); error = hipMemcpy(C, C_d, width*width*sizeof(float), hipMemcpyDeviceToHost); if(error != hipSuccess) { fprintf(stderr, "Could not copy data from device to host: %s (line: %d)\n", hipGetErrorString(error), __LINE__); return -1; } hipEventDestroy(start); hipEventDestroy(stop); hipFree(C_d); hipFree(B_d); hipFree(A_d); free(C); free(B); free(A); return 0; } void performSharedMemTests(void) { srand((unsigned int)time(NULL)); dim3 blockSizes[] = { dim3(8,8), dim3(16,16), dim3(22,22), dim3(32,32)}; int matrixSizes[] = { 32, 64, 128 }; for(int i = 0; i < sizeof(matrixSizes)/sizeof(int); i++) { //printf("+++ %dx%d matrix +++\n", matrixSizes[i], matrixSizes[i]); for(int j = 0; j < sizeof(blockSizes)/sizeof(dim3); j++) { //printf("%dx%d block\n", blockSizes[i].x, blockSizes[i].y); performSharedMemTest(blockSizes[j], matrixSizes[i]); } } hipDeviceReset(); }
26e62b1cf8a14a675d1ba611c764601b92171aae.cu
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <math.h> #include <time.h> #include "genmatrix.h" #include <cuda_runtime.h> template <int BLOCK_SIZE> __global__ void matrixMulSharedMemBasic(float *C, float *A, float *B, int width) { int a_start = width * BLOCK_SIZE * blockIdx.y, a_offset, b_start = BLOCK_SIZE * blockIdx.x, b_offset; __shared__ float A_shared[BLOCK_SIZE*BLOCK_SIZE]; __shared__ float B_shared[BLOCK_SIZE*BLOCK_SIZE]; float C_local = 0.0f; for(int index = 0; index < gridDim.x; index++) // równie dobrze mogłoby być gridDim.y bo są równe { a_offset = index * BLOCK_SIZE; b_offset = index * BLOCK_SIZE * width; A_shared[threadIdx.y * blockDim.x + threadIdx.x] = A[a_start + a_offset + threadIdx.y * width + threadIdx.x]; B_shared[threadIdx.y * blockDim.x + threadIdx.x] = B[b_start + b_offset + threadIdx.y * width + threadIdx.x]; __syncthreads(); for(int k = 0; k < BLOCK_SIZE; k++) { C_local += A_shared[threadIdx.y * BLOCK_SIZE + k] * B_shared[k * BLOCK_SIZE + threadIdx.x]; } __syncthreads(); if(index * BLOCK_SIZE >= width) break; } int c_start = blockIdx.y * width * BLOCK_SIZE, c_offset = blockIdx.x * BLOCK_SIZE; C[c_start + c_offset + width * threadIdx.y + threadIdx.x] = C_local; } static float totalTime = 0.0f; int performSharedMemTest(dim3 block_size, int width) { cudaError_t error; float *A = (float*)malloc(width*width*sizeof(float)); float *B = (float*)malloc(width*width*sizeof(float)); generateTestMatrix(A, width); generateTestMatrix(B, width); float *C = (float*)malloc(width*width*sizeof(float)); memset(C, 0, width*width*sizeof(float)); float *A_d, *B_d, *C_d; error = cudaMalloc((void**)&A_d, width*width*sizeof(float)); if(error != cudaSuccess) { fprintf(stderr, "Could not allocate memory on the device for matrix A: %s (line: %d)\n", cudaGetErrorString(error), __LINE__); return -1; } error = cudaMalloc((void**)&B_d, width*width*sizeof(float)); if(error != cudaSuccess) { fprintf(stderr, "Could not allocate memory on the device for matrix B: %s (line: %d)\n", cudaGetErrorString(error), __LINE__); return -1; } error = cudaMalloc((void**)&C_d, width*width*sizeof(float)); if(error != cudaSuccess) { fprintf(stderr, "Could not allocate memory on the device for matrix C: %s (line: %d)\n", cudaGetErrorString(error), __LINE__); return -1; } error = cudaMemcpy(A_d, A, width*width*sizeof(float), cudaMemcpyHostToDevice); if(error != cudaSuccess) { fprintf(stderr, "Could not copy data from host to device: %s (line: %d)\n", cudaGetErrorString(error), __LINE__); return -1; } error = cudaMemcpy(B_d, B, width*width*sizeof(float), cudaMemcpyHostToDevice); if(error != cudaSuccess) { fprintf(stderr, "Could not copy data from host to device: %s (line: %d)\n", cudaGetErrorString(error), __LINE__); return -1; } cudaEvent_t start; error = cudaEventCreate(&start); if (error != cudaSuccess) { fprintf(stderr, "Failed to create start event: %s (line: %d)\n", cudaGetErrorString(error), __LINE__); return -1; } cudaEvent_t stop; error = cudaEventCreate(&stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to create stop event: %s (line: %d)\n", cudaGetErrorString(error), __LINE__); return -1; } error = cudaEventRecord(start, NULL); if(error != cudaSuccess) { fprintf(stderr, "Could not record start event: %s (line: %d)\n", cudaGetErrorString(error), __LINE__); return -1; } int grid_side = (int)ceil((float)width/(float)block_size.x); for(int current_test = 0; current_test < TEST_COUNT; current_test++) { switch(block_size.x) { case 8: matrixMulSharedMemBasic<8><<<dim3(grid_side, 
grid_side), block_size>>>(C_d, A_d, B_d, width); break; case 16: matrixMulSharedMemBasic<16><<<dim3(grid_side, grid_side), block_size>>>(C_d, A_d, B_d, width); break; case 22: matrixMulSharedMemBasic<22><<<dim3(grid_side, grid_side), block_size>>>(C_d, A_d, B_d, width); break; case 32: matrixMulSharedMemBasic<32><<<dim3(grid_side, grid_side), block_size>>>(C_d, A_d, B_d, width); break; } cudaDeviceSynchronize(); } error = cudaEventRecord(stop, NULL); if(error != cudaSuccess) { fprintf(stderr, "Could not record stop event: %s (line: %d)\n", cudaGetErrorString(error), __LINE__); return -1; } error = cudaEventSynchronize(stop); if(error != cudaSuccess) { fprintf(stderr, "Could not synchronize with stop event: %s (line: %d)\n", cudaGetErrorString(error), __LINE__); return -1; } totalTime = 0.0f; error = cudaEventElapsedTime(&totalTime, start, stop); if(error != cudaSuccess) { fprintf(stderr, "Could not calculate elapsed time: %s (line: %d)\n", cudaGetErrorString(error), __LINE__); return -1; } float msecPerMatrixMul = totalTime / (float)TEST_COUNT; double flopsPerMatrixMul = 2.0 * (double)width * (double)width * (double)width; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf("%dx%d\t%dx%d\t%dx%d\t%.3f\t%.2f\n", width, width, block_size.x, block_size.y, grid_side, grid_side, msecPerMatrixMul, gigaFlops); error = cudaMemcpy(C, C_d, width*width*sizeof(float), cudaMemcpyDeviceToHost); if(error != cudaSuccess) { fprintf(stderr, "Could not copy data from device to host: %s (line: %d)\n", cudaGetErrorString(error), __LINE__); return -1; } cudaEventDestroy(start); cudaEventDestroy(stop); cudaFree(C_d); cudaFree(B_d); cudaFree(A_d); free(C); free(B); free(A); return 0; } void performSharedMemTests(void) { srand((unsigned int)time(NULL)); dim3 blockSizes[] = { dim3(8,8), dim3(16,16), dim3(22,22), dim3(32,32)}; int matrixSizes[] = { 32, 64, 128 }; for(int i = 0; i < sizeof(matrixSizes)/sizeof(int); i++) { //printf("+++ %dx%d matrix +++\n", matrixSizes[i], matrixSizes[i]); for(int j = 0; j < sizeof(blockSizes)/sizeof(dim3); j++) { //printf("%dx%d block\n", blockSizes[i].x, blockSizes[i].y); performSharedMemTest(blockSizes[j], matrixSizes[i]); } } cudaDeviceReset(); }
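The benchmark above times matrixMulSharedMemBasic and reports GFLOP/s, but it never verifies the product it copies back into C. If validation were wanted, a plain host reference such as the following could be compared element-wise against the device result after the final memcpy; the function name and the tolerance are illustrative, not part of the original test.

/* Naive O(width^3) host reference for square row-major matrices, illustrative only. */
static void matMulReference(const float *A, const float *B, float *C, int width)
{
    for (int i = 0; i < width; i++) {
        for (int j = 0; j < width; j++) {
            float acc = 0.0f;
            for (int k = 0; k < width; k++)
                acc += A[i * width + k] * B[k * width + j];
            C[i * width + j] = acc;
        }
    }
}
/* e.g. flag a mismatch when fabsf(C_gpu[i] - C_ref[i]) > 1e-3f * (1.0f + fabsf(C_ref[i])) */

Such a check would also expose the 22x22 block configuration: 22 divides none of the tested widths (32, 64, 128), so the boundary tiles read and write past the ends of the matrices.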
5e08c0de70608d136270f6e44140f95bd4bd10e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void VectorMultiplicationKernel(int *array, int arrayCount) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < arrayCount) { array[idx] *= array[idx]; } }
5e08c0de70608d136270f6e44140f95bd4bd10e5.cu
#include "includes.h" __global__ void VectorMultiplicationKernel(int *array, int arrayCount) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < arrayCount) { array[idx] *= array[idx]; } }
b604c186aac26a83834082a24c0a9e43d07bba71.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/detection/density_prior_box_op.h" namespace paddle { namespace operators { template <typename T> static __device__ inline T Clip(T in) { return min(max(in, 0.), 1.); } template <typename T> static __global__ void GenDensityPriorBox( const int height, const int width, const int im_height, const int im_width, const T offset, const T step_width, const T step_height, const int num_priors, const T* ratios_shift, bool is_clip, const T var_xmin, const T var_ymin, const T var_xmax, const T var_ymax, T* out, T* var) { int gidx = blockIdx.x * blockDim.x + threadIdx.x; int gidy = blockIdx.y * blockDim.y + threadIdx.y; int step_x = blockDim.x * gridDim.x; int step_y = blockDim.y * gridDim.y; const T* width_ratio = ratios_shift; const T* height_ratio = ratios_shift + num_priors; const T* width_shift = ratios_shift + 2 * num_priors; const T* height_shift = ratios_shift + 3 * num_priors; for (int j = gidy; j < height; j += step_y) { for (int i = gidx; i < width * num_priors; i += step_x) { int h = j; int w = i / num_priors; int k = i % num_priors; T center_x = (w + offset) * step_width; T center_y = (h + offset) * step_height; T center_x_temp = center_x + width_shift[k]; T center_y_temp = center_y + height_shift[k]; T box_width_ratio = width_ratio[k] / 2.; T box_height_ratio = height_ratio[k] / 2.; T xmin = max((center_x_temp - box_width_ratio) / im_width, 0.); T ymin = max((center_y_temp - box_height_ratio) / im_height, 0.); T xmax = min((center_x_temp + box_width_ratio) / im_width, 1.); T ymax = min((center_y_temp + box_height_ratio) / im_height, 1.); int out_offset = (j * width * num_priors + i) * 4; out[out_offset] = is_clip ? Clip<T>(xmin) : xmin; out[out_offset + 1] = is_clip ? Clip<T>(ymin) : ymin; out[out_offset + 2] = is_clip ? Clip<T>(xmax) : xmax; out[out_offset + 3] = is_clip ? 
Clip<T>(ymax) : ymax; var[out_offset] = var_xmin; var[out_offset + 1] = var_ymin; var[out_offset + 2] = var_xmax; var[out_offset + 3] = var_ymax; } } } template <typename T> class DensityPriorBoxOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<paddle::framework::Tensor>("Input"); auto* image = ctx.Input<paddle::framework::Tensor>("Image"); auto* boxes = ctx.Output<paddle::framework::Tensor>("Boxes"); auto* vars = ctx.Output<paddle::framework::Tensor>("Variances"); auto variances = ctx.Attr<std::vector<float>>("variances"); auto is_clip = ctx.Attr<bool>("clip"); auto fixed_sizes = ctx.Attr<std::vector<float>>("fixed_sizes"); auto fixed_ratios = ctx.Attr<std::vector<float>>("fixed_ratios"); auto densities = ctx.Attr<std::vector<int>>("densities"); T step_w = static_cast<T>(ctx.Attr<float>("step_w")); T step_h = static_cast<T>(ctx.Attr<float>("step_h")); T offset = static_cast<T>(ctx.Attr<float>("offset")); auto img_width = image->dims()[3]; auto img_height = image->dims()[2]; auto feature_width = input->dims()[3]; auto feature_height = input->dims()[2]; T step_width, step_height; if (step_w == 0 || step_h == 0) { step_width = static_cast<T>(img_width) / feature_width; step_height = static_cast<T>(img_height) / feature_height; } else { step_width = step_w; step_height = step_h; } int num_priors = 0; for (size_t i = 0; i < densities.size(); ++i) { num_priors += (fixed_ratios.size()) * (pow(densities[i], 2)); } int step_average = static_cast<int>((step_width + step_height) * 0.5); framework::Tensor h_temp; T* tdata = h_temp.mutable_data<T>({num_priors * 4}, platform::CPUPlace()); int idx = 0; for (size_t s = 0; s < fixed_sizes.size(); ++s) { auto fixed_size = fixed_sizes[s]; int density = densities[s]; for (size_t r = 0; r < fixed_ratios.size(); ++r) { float ar = fixed_ratios[r]; int shift = step_average / density; float box_width_ratio = fixed_size * sqrt(ar); float box_height_ratio = fixed_size / sqrt(ar); for (int di = 0; di < density; ++di) { for (int dj = 0; dj < density; ++dj) { float center_x_temp = shift / 2. + dj * shift - step_average / 2.; float center_y_temp = shift / 2. + di * shift - step_average / 2.; tdata[idx] = box_width_ratio; tdata[num_priors + idx] = box_height_ratio; tdata[2 * num_priors + idx] = center_x_temp; tdata[3 * num_priors + idx] = center_y_temp; idx++; } } } } boxes->mutable_data<T>(ctx.GetPlace()); vars->mutable_data<T>(ctx.GetPlace()); framework::Tensor d_temp; framework::TensorCopySync(h_temp, ctx.GetPlace(), &d_temp); // At least use 32 threads, at most 512 threads. // blockx is multiple of 32. int blockx = ::min(((feature_width * num_priors + 31) >> 5) << 5, 512L); int gridx = (feature_width * num_priors + blockx - 1) / blockx; dim3 threads(blockx, 1); dim3 grids(gridx, feature_height); auto stream = ctx.template device_context<platform::CUDADeviceContext>().stream(); hipLaunchKernelGGL(( GenDensityPriorBox<T>), dim3(grids), dim3(threads), 0, stream, feature_height, feature_width, img_height, img_width, offset, step_width, step_height, num_priors, d_temp.data<T>(), is_clip, variances[0], variances[1], variances[2], variances[3], boxes->data<T>(), vars->data<T>()); } }; // namespace operators } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(density_prior_box, ops::DensityPriorBoxOpCUDAKernel<float>, ops::DensityPriorBoxOpCUDAKernel<double>);
b604c186aac26a83834082a24c0a9e43d07bba71.cu
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/detection/density_prior_box_op.h" namespace paddle { namespace operators { template <typename T> static __device__ inline T Clip(T in) { return min(max(in, 0.), 1.); } template <typename T> static __global__ void GenDensityPriorBox( const int height, const int width, const int im_height, const int im_width, const T offset, const T step_width, const T step_height, const int num_priors, const T* ratios_shift, bool is_clip, const T var_xmin, const T var_ymin, const T var_xmax, const T var_ymax, T* out, T* var) { int gidx = blockIdx.x * blockDim.x + threadIdx.x; int gidy = blockIdx.y * blockDim.y + threadIdx.y; int step_x = blockDim.x * gridDim.x; int step_y = blockDim.y * gridDim.y; const T* width_ratio = ratios_shift; const T* height_ratio = ratios_shift + num_priors; const T* width_shift = ratios_shift + 2 * num_priors; const T* height_shift = ratios_shift + 3 * num_priors; for (int j = gidy; j < height; j += step_y) { for (int i = gidx; i < width * num_priors; i += step_x) { int h = j; int w = i / num_priors; int k = i % num_priors; T center_x = (w + offset) * step_width; T center_y = (h + offset) * step_height; T center_x_temp = center_x + width_shift[k]; T center_y_temp = center_y + height_shift[k]; T box_width_ratio = width_ratio[k] / 2.; T box_height_ratio = height_ratio[k] / 2.; T xmin = max((center_x_temp - box_width_ratio) / im_width, 0.); T ymin = max((center_y_temp - box_height_ratio) / im_height, 0.); T xmax = min((center_x_temp + box_width_ratio) / im_width, 1.); T ymax = min((center_y_temp + box_height_ratio) / im_height, 1.); int out_offset = (j * width * num_priors + i) * 4; out[out_offset] = is_clip ? Clip<T>(xmin) : xmin; out[out_offset + 1] = is_clip ? Clip<T>(ymin) : ymin; out[out_offset + 2] = is_clip ? Clip<T>(xmax) : xmax; out[out_offset + 3] = is_clip ? 
Clip<T>(ymax) : ymax; var[out_offset] = var_xmin; var[out_offset + 1] = var_ymin; var[out_offset + 2] = var_xmax; var[out_offset + 3] = var_ymax; } } } template <typename T> class DensityPriorBoxOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input = ctx.Input<paddle::framework::Tensor>("Input"); auto* image = ctx.Input<paddle::framework::Tensor>("Image"); auto* boxes = ctx.Output<paddle::framework::Tensor>("Boxes"); auto* vars = ctx.Output<paddle::framework::Tensor>("Variances"); auto variances = ctx.Attr<std::vector<float>>("variances"); auto is_clip = ctx.Attr<bool>("clip"); auto fixed_sizes = ctx.Attr<std::vector<float>>("fixed_sizes"); auto fixed_ratios = ctx.Attr<std::vector<float>>("fixed_ratios"); auto densities = ctx.Attr<std::vector<int>>("densities"); T step_w = static_cast<T>(ctx.Attr<float>("step_w")); T step_h = static_cast<T>(ctx.Attr<float>("step_h")); T offset = static_cast<T>(ctx.Attr<float>("offset")); auto img_width = image->dims()[3]; auto img_height = image->dims()[2]; auto feature_width = input->dims()[3]; auto feature_height = input->dims()[2]; T step_width, step_height; if (step_w == 0 || step_h == 0) { step_width = static_cast<T>(img_width) / feature_width; step_height = static_cast<T>(img_height) / feature_height; } else { step_width = step_w; step_height = step_h; } int num_priors = 0; for (size_t i = 0; i < densities.size(); ++i) { num_priors += (fixed_ratios.size()) * (pow(densities[i], 2)); } int step_average = static_cast<int>((step_width + step_height) * 0.5); framework::Tensor h_temp; T* tdata = h_temp.mutable_data<T>({num_priors * 4}, platform::CPUPlace()); int idx = 0; for (size_t s = 0; s < fixed_sizes.size(); ++s) { auto fixed_size = fixed_sizes[s]; int density = densities[s]; for (size_t r = 0; r < fixed_ratios.size(); ++r) { float ar = fixed_ratios[r]; int shift = step_average / density; float box_width_ratio = fixed_size * sqrt(ar); float box_height_ratio = fixed_size / sqrt(ar); for (int di = 0; di < density; ++di) { for (int dj = 0; dj < density; ++dj) { float center_x_temp = shift / 2. + dj * shift - step_average / 2.; float center_y_temp = shift / 2. + di * shift - step_average / 2.; tdata[idx] = box_width_ratio; tdata[num_priors + idx] = box_height_ratio; tdata[2 * num_priors + idx] = center_x_temp; tdata[3 * num_priors + idx] = center_y_temp; idx++; } } } } boxes->mutable_data<T>(ctx.GetPlace()); vars->mutable_data<T>(ctx.GetPlace()); framework::Tensor d_temp; framework::TensorCopySync(h_temp, ctx.GetPlace(), &d_temp); // At least use 32 threads, at most 512 threads. // blockx is multiple of 32. int blockx = std::min(((feature_width * num_priors + 31) >> 5) << 5, 512L); int gridx = (feature_width * num_priors + blockx - 1) / blockx; dim3 threads(blockx, 1); dim3 grids(gridx, feature_height); auto stream = ctx.template device_context<platform::CUDADeviceContext>().stream(); GenDensityPriorBox<T><<<grids, threads, 0, stream>>>( feature_height, feature_width, img_height, img_width, offset, step_width, step_height, num_priors, d_temp.data<T>(), is_clip, variances[0], variances[1], variances[2], variances[3], boxes->data<T>(), vars->data<T>()); } }; // namespace operators } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL(density_prior_box, ops::DensityPriorBoxOpCUDAKernel<float>, ops::DensityPriorBoxOpCUDAKernel<double>);
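Both versions of the operator size the launch by rounding feature_width * num_priors up to a multiple of 32 (the warp size) with ((x + 31) >> 5) << 5 and capping the result at 512 threads per block. A small worked example of that arithmetic, purely illustrative:

/* Illustrative only: the rounding used for blockx above. */
static_assert((((70 + 31) >> 5) << 5) == 96,   "70 work items round up to 96 threads (3 warps)");
static_assert((((512 + 31) >> 5) << 5) == 512, "an exact multiple of 32 is left unchanged");
/* blockx = min(96, 512) = 96; gridx = (70 + 96 - 1) / 96 = 1 block along x. */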
79c92e7769c8b6b43a2571eab8a029845f0a1214.hip
// !!! This is a file automatically generated by hipify!!! #include "ATen/ATen.h" #include "ATen/NativeFunctions.h" #include "ATen/Dispatch.h" #include "ATen/hip/HIPApplyUtils.cuh" namespace { using namespace at; template<typename scalar_t> void kl_div_backward_kernel(const Tensor& grad_input, const Tensor& target, const Tensor& grad) { at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>( grad_input, target, grad, [] __device__( scalar_t& grad_input_val, const scalar_t& target_val, const scalar_t& grad_val) { if (target_val > 0) { grad_input_val = -target_val * grad_val; } }); } } // namespace namespace at { namespace native { Tensor kl_div_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, int64_t reduction) { auto grad_input = at::zeros_like(input); Tensor grad_expand = grad.expand_as(input); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "kl_div_backward", [&]() { kl_div_backward_kernel<scalar_t>(grad_input, target, grad_expand); }); if (reduction == Reduction::Mean) { return grad_input / input.numel(); } return grad_input; } }} // namespace at::native
79c92e7769c8b6b43a2571eab8a029845f0a1214.cu
#include "ATen/ATen.h" #include "ATen/NativeFunctions.h" #include "ATen/Dispatch.h" #include "ATen/cuda/CUDAApplyUtils.cuh" namespace { using namespace at; template<typename scalar_t> void kl_div_backward_kernel(const Tensor& grad_input, const Tensor& target, const Tensor& grad) { at::cuda::CUDA_tensor_apply3<scalar_t, scalar_t, scalar_t>( grad_input, target, grad, [] __device__( scalar_t& grad_input_val, const scalar_t& target_val, const scalar_t& grad_val) { if (target_val > 0) { grad_input_val = -target_val * grad_val; } }); } } // namespace namespace at { namespace native { Tensor kl_div_backward_cuda(const Tensor& grad, const Tensor& input, const Tensor& target, int64_t reduction) { auto grad_input = at::zeros_like(input); Tensor grad_expand = grad.expand_as(input); AT_DISPATCH_FLOATING_TYPES_AND_HALF(input.type(), "kl_div_backward", [&]() { kl_div_backward_kernel<scalar_t>(grad_input, target, grad_expand); }); if (reduction == Reduction::Mean) { return grad_input / input.numel(); } return grad_input; } }} // namespace at::native
377101bb26b6899996736efc4638ee7b88ea50cc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @generated s Tue Dec 17 13:18:45 2013 @author Stan Tomov */ #include "common_magma.h" #define PRECISION_s #include "commonblas.h" __global__ void stranspose_32( float *B, int ldb, const float *A, int lda ) { __shared__ float a[32][SSIZE_1SHARED+1]; int inx = threadIdx.x; int iny = threadIdx.y; int ibx = blockIdx.x*32; int iby = blockIdx.y*32; A += ibx + inx + __mul24( iby + iny, lda ); B += iby + inx + __mul24( ibx + iny, ldb ); a[iny+0][inx] = A[0*lda]; a[iny+8][inx] = A[8*lda]; a[iny+16][inx] = A[16*lda]; a[iny+24][inx] = A[24*lda]; __syncthreads(); #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) B[0*ldb] = a[inx][iny+0]; B[8*ldb] = a[inx][iny+8]; B[16*ldb] = a[inx][iny+16]; B[24*ldb] = a[inx][iny+24]; #else /* defined(PRECISION_z) */ B[0*ldb] = a[inx][iny+0]; B[8*ldb] = a[inx][iny+8]; B[0*ldb+16] = a[inx+16][iny+0]; B[8*ldb+16] = a[inx+16][iny+8]; __syncthreads(); A += SSIZE_1SHARED; B += __mul24( 16, ldb); a[iny+0][inx] = A[0*lda]; a[iny+8][inx] = A[8*lda]; a[iny+16][inx] = A[16*lda]; a[iny+24][inx] = A[24*lda]; __syncthreads(); B[0*ldb] = a[inx][iny+0]; B[8*ldb] = a[inx][iny+8]; B[0*ldb+16] = a[inx+16][iny+0]; B[8*ldb+16] = a[inx+16][iny+8]; #endif } // // m, n - dimensions in the source matrix // This version works when m and n are divisible by 32. // extern "C" void magmablas_stranspose(float *odata, magma_int_t ldo, const float *idata, magma_int_t ldi, magma_int_t m, magma_int_t n ) { //assert( (m%32) == 0 && (n%32) == 0, "misaligned transpose" ); dim3 threads( SSIZE_1SHARED, 8, 1 ); dim3 grid( m/32, n/32, 1 ); hipLaunchKernelGGL(( stranspose_32), dim3(grid), dim3(threads), 0, magma_stream , odata, ldo, idata, ldi ); }
377101bb26b6899996736efc4638ee7b88ea50cc.cu
/* -- MAGMA (version 1.4.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver December 2013 @generated s Tue Dec 17 13:18:45 2013 @author Stan Tomov */ #include "common_magma.h" #define PRECISION_s #include "commonblas.h" __global__ void stranspose_32( float *B, int ldb, const float *A, int lda ) { __shared__ float a[32][SSIZE_1SHARED+1]; int inx = threadIdx.x; int iny = threadIdx.y; int ibx = blockIdx.x*32; int iby = blockIdx.y*32; A += ibx + inx + __mul24( iby + iny, lda ); B += iby + inx + __mul24( ibx + iny, ldb ); a[iny+0][inx] = A[0*lda]; a[iny+8][inx] = A[8*lda]; a[iny+16][inx] = A[16*lda]; a[iny+24][inx] = A[24*lda]; __syncthreads(); #if defined(PRECISION_s) || defined(PRECISION_d) || defined(PRECISION_c) B[0*ldb] = a[inx][iny+0]; B[8*ldb] = a[inx][iny+8]; B[16*ldb] = a[inx][iny+16]; B[24*ldb] = a[inx][iny+24]; #else /* defined(PRECISION_z) */ B[0*ldb] = a[inx][iny+0]; B[8*ldb] = a[inx][iny+8]; B[0*ldb+16] = a[inx+16][iny+0]; B[8*ldb+16] = a[inx+16][iny+8]; __syncthreads(); A += SSIZE_1SHARED; B += __mul24( 16, ldb); a[iny+0][inx] = A[0*lda]; a[iny+8][inx] = A[8*lda]; a[iny+16][inx] = A[16*lda]; a[iny+24][inx] = A[24*lda]; __syncthreads(); B[0*ldb] = a[inx][iny+0]; B[8*ldb] = a[inx][iny+8]; B[0*ldb+16] = a[inx+16][iny+0]; B[8*ldb+16] = a[inx+16][iny+8]; #endif } // // m, n - dimensions in the source matrix // This version works when m and n are divisible by 32. // extern "C" void magmablas_stranspose(float *odata, magma_int_t ldo, const float *idata, magma_int_t ldi, magma_int_t m, magma_int_t n ) { //assert( (m%32) == 0 && (n%32) == 0, "misaligned transpose" ); dim3 threads( SSIZE_1SHARED, 8, 1 ); dim3 grid( m/32, n/32, 1 ); stranspose_32<<< grid, threads, 0, magma_stream >>>( odata, ldo, idata, ldi ); }
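The shared-memory tile in stranspose_32 is declared as a[32][SSIZE_1SHARED+1]; the extra +1 column of padding is the standard device for avoiding shared-memory bank conflicts when the transposed tile is read column-wise. A brief illustration, taking SSIZE_1SHARED = 32 (its actual value comes from commonblas.h) and assuming the common configuration of 32 four-byte banks:

/* With float a[32][32], element a[row][col] lives in bank (row*32 + col) % 32 == col,
 * so reading one column (col fixed, row varying) hits the same bank 32 times over.
 * With float a[32][33], a[row][col] lives in bank (row*33 + col) % 32 == (row + col) % 32,
 * so a column read touches 32 distinct banks and proceeds without serialization. */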
6dafb5a2b3f72ffe677950d2efd14dfbce6ebf6b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe2/operators/sigmoid_op.h" #include <algorithm> #include <functional> #include "caffe2/core/context_gpu.h" namespace caffe2 { namespace { template <typename T> __global__ void SigmoidCUDAKernel(const int N, const T* X, T* Y); template <> __global__ void SigmoidCUDAKernel<float>(const int N, const float* X, float* Y) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 Y[i] = 1.0f / (1.0f + expf(-__ldg(X + i))); #else Y[i] = 1.0f / (1.0f + expf(-X[i])); #endif } } template <typename T> __global__ void SigmoidGradientCUDAKernel(const int N, const T* dY, const T* Y, T* dX) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 dX[i] = __ldg(dY + i) * __ldg(Y + i) * (T(1) - __ldg(Y + i)); #else dX[i] = dY[i] * Y[i] * (T(1) - Y[i]); #endif } } } // namespace template <> template <typename T> bool SigmoidFunctor<CUDAContext>:: operator()(const int N, const T* X, T* Y, CUDAContext* context) const { hipLaunchKernelGGL(( SigmoidCUDAKernel<T>) , dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), N, X, Y); C10_HIP_KERNEL_LAUNCH_CHECK(); return true; } template <> template <typename T> bool SigmoidGradientFunctor<CUDAContext>::Forward( const std::vector<int>& Y_dims, const std::vector<int>& /* dY_dims */, const T* Y, const T* dY, T* dX, CUDAContext* context) const { const int size = std::accumulate( Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>()); hipLaunchKernelGGL(( SigmoidGradientCUDAKernel<T>) , dim3(CAFFE_GET_BLOCKS(size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), size, dY, Y, dX); C10_HIP_KERNEL_LAUNCH_CHECK(); return true; } REGISTER_CUDA_OPERATOR( Sigmoid, UnaryElementwiseOp< TensorTypes<float>, CUDAContext, SigmoidFunctor<CUDAContext>>); REGISTER_CUDA_OPERATOR( SigmoidGradient, BinaryElementwiseOp< TensorTypes<float>, CUDAContext, SigmoidGradientFunctor<CUDAContext>>); } // namespace caffe2
6dafb5a2b3f72ffe677950d2efd14dfbce6ebf6b.cu
#include "caffe2/operators/sigmoid_op.h" #include <algorithm> #include <functional> #include "caffe2/core/context_gpu.h" namespace caffe2 { namespace { template <typename T> __global__ void SigmoidCUDAKernel(const int N, const T* X, T* Y); template <> __global__ void SigmoidCUDAKernel<float>(const int N, const float* X, float* Y) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 Y[i] = 1.0f / (1.0f + expf(-__ldg(X + i))); #else Y[i] = 1.0f / (1.0f + expf(-X[i])); #endif } } template <typename T> __global__ void SigmoidGradientCUDAKernel(const int N, const T* dY, const T* Y, T* dX) { CUDA_1D_KERNEL_LOOP(i, N) { #if __CUDA_ARCH__ >= 350 dX[i] = __ldg(dY + i) * __ldg(Y + i) * (T(1) - __ldg(Y + i)); #else dX[i] = dY[i] * Y[i] * (T(1) - Y[i]); #endif } } } // namespace template <> template <typename T> bool SigmoidFunctor<CUDAContext>:: operator()(const int N, const T* X, T* Y, CUDAContext* context) const { SigmoidCUDAKernel<T> <<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(N, X, Y); C10_CUDA_KERNEL_LAUNCH_CHECK(); return true; } template <> template <typename T> bool SigmoidGradientFunctor<CUDAContext>::Forward( const std::vector<int>& Y_dims, const std::vector<int>& /* dY_dims */, const T* Y, const T* dY, T* dX, CUDAContext* context) const { const int size = std::accumulate( Y_dims.cbegin(), Y_dims.cend(), 1, std::multiplies<int>()); SigmoidGradientCUDAKernel<T> <<<CAFFE_GET_BLOCKS(size), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(size, dY, Y, dX); C10_CUDA_KERNEL_LAUNCH_CHECK(); return true; } REGISTER_CUDA_OPERATOR( Sigmoid, UnaryElementwiseOp< TensorTypes<float>, CUDAContext, SigmoidFunctor<CUDAContext>>); REGISTER_CUDA_OPERATOR( SigmoidGradient, BinaryElementwiseOp< TensorTypes<float>, CUDAContext, SigmoidGradientFunctor<CUDAContext>>); } // namespace caffe2
0ec6baa1485cb3a9c375319abd838013e2ed7423.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <time.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "common.h" #define SHARED_ARRAY_SIZE 128 __global__ void smem_static_test(int * in, int * out, int size) { int tid = threadIdx.x; int gid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ int smem[SHARED_ARRAY_SIZE]; if (gid < size) { smem[tid] = in[gid]; out[gid] = smem[tid]; } } __global__ void smem_dynamic_test(int * in, int * out, int size) { int tid = threadIdx.x; int gid = blockIdx.x * blockDim.x + threadIdx.x; extern __shared__ int smem[]; if (gid < size) { smem[tid] = in[gid]; out[gid] = smem[tid]; } } int main(int argc, char ** argv) { int size = 1 << 22; int block_size = SHARED_ARRAY_SIZE; bool dynamic = false; if (argc > 1) { dynamic = atoi(argv[1]); } //number of bytes needed to hold element count size_t NO_BYTES = size * sizeof(int); // host pointers int *h_in, *h_ref, *d_in, *d_out; // allocate memory for host size pointers h_in = (int *)malloc(NO_BYTES); h_ref = (int *)malloc(NO_BYTES); initialize(h_in, size, INIT_ONE_TO_TEN); hipMalloc((int **)&d_in, NO_BYTES); hipMalloc((int **)&d_out, NO_BYTES); // kernel launch parameters dim3 block(block_size); dim3 grid((size / block.x) + 1); hipMemcpy(d_in, h_in, NO_BYTES, hipMemcpyHostToDevice); if (!dynamic) { printf("Static smem kernel \n"); smem_static_test << <grid, block >> > (d_in, d_out, size); } else { printf("Dynamic smem kernel \n"); smem_dynamic_test << <grid, block, sizeof(int)* SHARED_ARRAY_SIZE >> > (d_in, d_out, size); } hipDeviceSynchronize(); hipMemcpy(h_ref, d_out, NO_BYTES, hipMemcpyDeviceToHost); hipFree(d_in); hipFree(d_out); free(h_in); free(h_ref); hipDeviceReset(); return EXIT_SUCCESS; }
0ec6baa1485cb3a9c375319abd838013e2ed7423.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "common.h" #define SHARED_ARRAY_SIZE 128 __global__ void smem_static_test(int * in, int * out, int size) { int tid = threadIdx.x; int gid = blockIdx.x * blockDim.x + threadIdx.x; __shared__ int smem[SHARED_ARRAY_SIZE]; if (gid < size) { smem[tid] = in[gid]; out[gid] = smem[tid]; } } __global__ void smem_dynamic_test(int * in, int * out, int size) { int tid = threadIdx.x; int gid = blockIdx.x * blockDim.x + threadIdx.x; extern __shared__ int smem[]; if (gid < size) { smem[tid] = in[gid]; out[gid] = smem[tid]; } } int main(int argc, char ** argv) { int size = 1 << 22; int block_size = SHARED_ARRAY_SIZE; bool dynamic = false; if (argc > 1) { dynamic = atoi(argv[1]); } //number of bytes needed to hold element count size_t NO_BYTES = size * sizeof(int); // host pointers int *h_in, *h_ref, *d_in, *d_out; // allocate memory for host size pointers h_in = (int *)malloc(NO_BYTES); h_ref = (int *)malloc(NO_BYTES); initialize(h_in, size, INIT_ONE_TO_TEN); cudaMalloc((int **)&d_in, NO_BYTES); cudaMalloc((int **)&d_out, NO_BYTES); // kernel launch parameters dim3 block(block_size); dim3 grid((size / block.x) + 1); cudaMemcpy(d_in, h_in, NO_BYTES, cudaMemcpyHostToDevice); if (!dynamic) { printf("Static smem kernel \n"); smem_static_test << <grid, block >> > (d_in, d_out, size); } else { printf("Dynamic smem kernel \n"); smem_dynamic_test << <grid, block, sizeof(int)* SHARED_ARRAY_SIZE >> > (d_in, d_out, size); } cudaDeviceSynchronize(); cudaMemcpy(h_ref, d_out, NO_BYTES, cudaMemcpyDeviceToHost); cudaFree(d_in); cudaFree(d_out); free(h_in); free(h_ref); cudaDeviceReset(); return EXIT_SUCCESS; }
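Two details of the host code above are worth spelling out. First, the dynamic variant passes the shared-memory size, sizeof(int) * SHARED_ARRAY_SIZE, as the third execution-configuration parameter, which is what the extern __shared__ declaration in smem_dynamic_test requires. Second, the grid is sized as (size / block.x) + 1; with size = 1 << 22 and a 128-thread block that is 32769 blocks, one of which does no work because size divides evenly and the gid < size guard filters it out. The usual rounding idiom avoids the idle block:

/* Illustrative: grid sizing for size = 1 << 22 and block.x = 128.
 *   (size / block.x) + 1            -> 32768 + 1 = 32769 blocks (one block idle)
 *   (size + block.x - 1) / block.x  -> 32768 blocks exactly                     */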
a53180f5ce81122238c001e6df98aa37ba955a97.hip
// !!! This is a file automatically generated by hipify!!! /** * 3mm.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include <hip/hip_runtime.h> #include "../../common/polybenchUtilFuncts.h" #define GPU_DEVICE 0 //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 /* Problem size. */ # define NI 512 # define NJ 512 # define NK 512 # define NL 512 # define NM 512 /* Thread block dimensions */ #define DIM_THREAD_BLOCK_X 32 #define DIM_THREAD_BLOCK_Y 8 /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D) { int i, j; for (i = 0; i < NI; i++) { for (j = 0; j < NK; j++) { A[i*NK + j] = ((DATA_TYPE) i*j) / NI; } } for (i = 0; i < NK; i++) { for (j = 0; j < NJ; j++) { B[i*NJ + j] = ((DATA_TYPE) i*(j+1)) / NJ; } } for (i = 0; i < NJ; i++) { for (j = 0; j < NM; j++) { C[i*NM + j] = ((DATA_TYPE) i*(j+3)) / NL; } } for (i = 0; i < NM; i++) { for (j = 0; j < NL; j++) { D[i*NL + j] = ((DATA_TYPE) i*(j+2)) / NK; } } } void compareResults(DATA_TYPE *G, DATA_TYPE *G_outputFromGpu) { int i,j,fail; fail = 0; for (i=0; i < NI; i++) { for (j=0; j < NL; j++) { if (percentDiff(G[i*NL + j], G_outputFromGpu[i*NL + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); hipSetDevice( GPU_DEVICE ); } __global__ void mm3_kernel1(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *E) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NI) && (j < NJ)) { int k; for(k=0; k < NK; k++) { E[i * NJ + j] += A[i * NK + k] * B[k * NJ + j]; } } } __global__ void mm3_kernel2(DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *F) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NJ) && (j < NL)) { int k; for(k=0; k < NM; k++) { F[i * NL + j] += C[i * NM + k] * D[k * NL +j]; } } } __global__ void mm3_kernel3(DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NI) && (j < NL)) { int k; for(k=0; k < NJ; k++) { G[i * NL + j] += E[i * NJ + k] * F[k * NL + j]; } } } void mm3_cpu(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G) { int i,j,k; /* E := A*B */ for (i = 0; i < NI; i++) { for (j = 0; j < NJ; j++) { E[i*NJ + j] = 0; for (k = 0; k < NK; ++k) { E[i*NJ + j] += A[i*NK + k] * B[k*NJ + j]; } } } /* F := C*D */ for (i = 0; i < NJ; i++) { for (j = 0; j < NL; j++) { F[i*NL + j] = 0; for (k = 0; k < NM; ++k) { F[i*NL + j] += C[i*NM + k] * D[k*NL + j]; } } } /* G := E*F */ for (i = 0; i < NI; i++) { for (j = 0; j < NL; j++) { G[i*NL + j] = 0; for (k = 0; k < NJ; ++k) { G[i*NL + j] += E[i*NJ + k] * F[k*NL + j]; } } } } void mm3Cuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E, DATA_TYPE* F, DATA_TYPE* G, DATA_TYPE* G_outputFromGpu) { double t_start, 
t_end; DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *C_gpu; DATA_TYPE *D_gpu; DATA_TYPE *E_gpu; DATA_TYPE *F_gpu; DATA_TYPE *G_gpu; hipMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NK); hipMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NK * NJ); hipMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * NJ * NM); hipMalloc((void **)&D_gpu, sizeof(DATA_TYPE) * NM * NL); hipMalloc((void **)&E_gpu, sizeof(DATA_TYPE) * NI * NJ); hipMalloc((void **)&F_gpu, sizeof(DATA_TYPE) * NJ * NL); hipMalloc((void **)&G_gpu, sizeof(DATA_TYPE) * NI * NL); hipMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NK, hipMemcpyHostToDevice); hipMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NK * NJ, hipMemcpyHostToDevice); hipMemcpy(C_gpu, C, sizeof(DATA_TYPE) * NJ * NM, hipMemcpyHostToDevice); hipMemcpy(D_gpu, D, sizeof(DATA_TYPE) * NM * NL, hipMemcpyHostToDevice); hipMemcpy(E_gpu, E, sizeof(DATA_TYPE) * NI * NJ, hipMemcpyHostToDevice); hipMemcpy(F_gpu, F, sizeof(DATA_TYPE) * NJ * NL, hipMemcpyHostToDevice); hipMemcpy(G_gpu, G, sizeof(DATA_TYPE) * NI * NL, hipMemcpyHostToDevice); dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid1((size_t)(ceil( ((float)NJ) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) ))); dim3 grid2((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NJ/ ((float)DIM_THREAD_BLOCK_Y) ))); dim3 grid3((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) ))); t_start = rtclock(); hipLaunchKernelGGL(( mm3_kernel1), dim3(grid1),dim3(block), 0, 0, A_gpu, B_gpu, E_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( mm3_kernel2), dim3(grid2),dim3(block), 0, 0, C_gpu, D_gpu, F_gpu); hipDeviceSynchronize(); hipLaunchKernelGGL(( mm3_kernel3), dim3(grid3),dim3(block), 0, 0, E_gpu, F_gpu, G_gpu); hipDeviceSynchronize(); t_end = rtclock(); hipMemcpy(G_outputFromGpu, G_gpu, sizeof(DATA_TYPE) * NI * NL, hipMemcpyDeviceToHost); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); hipFree(A_gpu); hipFree(B_gpu); hipFree(C_gpu); hipFree(D_gpu); hipFree(E_gpu); hipFree(F_gpu); hipFree(G_gpu); } int main(int argc, char** argv) { double t_start, t_end; DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* C; DATA_TYPE* D; DATA_TYPE* E; DATA_TYPE* F; DATA_TYPE* G; DATA_TYPE* G_outputFromGpu; A = (DATA_TYPE*)malloc(NI*NK*sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(NK*NJ*sizeof(DATA_TYPE)); C = (DATA_TYPE*)malloc(NJ*NM*sizeof(DATA_TYPE)); D = (DATA_TYPE*)malloc(NM*NL*sizeof(DATA_TYPE)); E = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE)); F = (DATA_TYPE*)malloc(NJ*NL*sizeof(DATA_TYPE)); G = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE)); G_outputFromGpu = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE)); init_array(A, B, C, D); GPU_argv_init(); mm3Cuda(A, B, C, D, E, F, G, G_outputFromGpu); t_start = rtclock(); mm3_cpu(A, B, C, D, E, F, G); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(G, G_outputFromGpu); free(A); free(B); free(C); free(D); free(E); free(F); free(G); free(G_outputFromGpu); return 0; }
a53180f5ce81122238c001e6df98aa37ba955a97.cu
/** * 3mm.cu: This file is part of the PolyBench/GPU 1.0 test suite. * * * Contact: Scott Grauer-Gray <[email protected]> * Louis-Noel Pouchet <[email protected]> * Web address: http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU */ #include <stdio.h> #include <stdlib.h> #include <math.h> #include <assert.h> #include <unistd.h> #include <sys/time.h> #include <cuda.h> #include "../../common/polybenchUtilFuncts.h" #define GPU_DEVICE 0 //define the error threshold for the results "not matching" #define PERCENT_DIFF_ERROR_THRESHOLD 0.05 /* Problem size. */ # define NI 512 # define NJ 512 # define NK 512 # define NL 512 # define NM 512 /* Thread block dimensions */ #define DIM_THREAD_BLOCK_X 32 #define DIM_THREAD_BLOCK_Y 8 /* Can switch DATA_TYPE between float and double */ typedef float DATA_TYPE; void init_array(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D) { int i, j; for (i = 0; i < NI; i++) { for (j = 0; j < NK; j++) { A[i*NK + j] = ((DATA_TYPE) i*j) / NI; } } for (i = 0; i < NK; i++) { for (j = 0; j < NJ; j++) { B[i*NJ + j] = ((DATA_TYPE) i*(j+1)) / NJ; } } for (i = 0; i < NJ; i++) { for (j = 0; j < NM; j++) { C[i*NM + j] = ((DATA_TYPE) i*(j+3)) / NL; } } for (i = 0; i < NM; i++) { for (j = 0; j < NL; j++) { D[i*NL + j] = ((DATA_TYPE) i*(j+2)) / NK; } } } void compareResults(DATA_TYPE *G, DATA_TYPE *G_outputFromGpu) { int i,j,fail; fail = 0; for (i=0; i < NI; i++) { for (j=0; j < NL; j++) { if (percentDiff(G[i*NL + j], G_outputFromGpu[i*NL + j]) > PERCENT_DIFF_ERROR_THRESHOLD) { fail++; } } } // print results printf("Non-Matching CPU-GPU Outputs Beyond Error Threshold of %4.2f Percent: %d\n", PERCENT_DIFF_ERROR_THRESHOLD, fail); } void GPU_argv_init() { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, GPU_DEVICE); printf("setting device %d with name %s\n",GPU_DEVICE,deviceProp.name); cudaSetDevice( GPU_DEVICE ); } __global__ void mm3_kernel1(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *E) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NI) && (j < NJ)) { int k; for(k=0; k < NK; k++) { E[i * NJ + j] += A[i * NK + k] * B[k * NJ + j]; } } } __global__ void mm3_kernel2(DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *F) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NJ) && (j < NL)) { int k; for(k=0; k < NM; k++) { F[i * NL + j] += C[i * NM + k] * D[k * NL +j]; } } } __global__ void mm3_kernel3(DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G) { int j = blockIdx.x * blockDim.x + threadIdx.x; int i = blockIdx.y * blockDim.y + threadIdx.y; if ((i < NI) && (j < NL)) { int k; for(k=0; k < NJ; k++) { G[i * NL + j] += E[i * NJ + k] * F[k * NL + j]; } } } void mm3_cpu(DATA_TYPE *A, DATA_TYPE *B, DATA_TYPE *C, DATA_TYPE *D, DATA_TYPE *E, DATA_TYPE *F, DATA_TYPE *G) { int i,j,k; /* E := A*B */ for (i = 0; i < NI; i++) { for (j = 0; j < NJ; j++) { E[i*NJ + j] = 0; for (k = 0; k < NK; ++k) { E[i*NJ + j] += A[i*NK + k] * B[k*NJ + j]; } } } /* F := C*D */ for (i = 0; i < NJ; i++) { for (j = 0; j < NL; j++) { F[i*NL + j] = 0; for (k = 0; k < NM; ++k) { F[i*NL + j] += C[i*NM + k] * D[k*NL + j]; } } } /* G := E*F */ for (i = 0; i < NI; i++) { for (j = 0; j < NL; j++) { G[i*NL + j] = 0; for (k = 0; k < NJ; ++k) { G[i*NL + j] += E[i*NJ + k] * F[k*NL + j]; } } } } void mm3Cuda(DATA_TYPE* A, DATA_TYPE* B, DATA_TYPE* C, DATA_TYPE* D, DATA_TYPE* E, DATA_TYPE* F, DATA_TYPE* G, DATA_TYPE* G_outputFromGpu) { double t_start, t_end; DATA_TYPE *A_gpu; DATA_TYPE *B_gpu; DATA_TYPE *C_gpu; DATA_TYPE 
*D_gpu; DATA_TYPE *E_gpu; DATA_TYPE *F_gpu; DATA_TYPE *G_gpu; cudaMalloc((void **)&A_gpu, sizeof(DATA_TYPE) * NI * NK); cudaMalloc((void **)&B_gpu, sizeof(DATA_TYPE) * NK * NJ); cudaMalloc((void **)&C_gpu, sizeof(DATA_TYPE) * NJ * NM); cudaMalloc((void **)&D_gpu, sizeof(DATA_TYPE) * NM * NL); cudaMalloc((void **)&E_gpu, sizeof(DATA_TYPE) * NI * NJ); cudaMalloc((void **)&F_gpu, sizeof(DATA_TYPE) * NJ * NL); cudaMalloc((void **)&G_gpu, sizeof(DATA_TYPE) * NI * NL); cudaMemcpy(A_gpu, A, sizeof(DATA_TYPE) * NI * NK, cudaMemcpyHostToDevice); cudaMemcpy(B_gpu, B, sizeof(DATA_TYPE) * NK * NJ, cudaMemcpyHostToDevice); cudaMemcpy(C_gpu, C, sizeof(DATA_TYPE) * NJ * NM, cudaMemcpyHostToDevice); cudaMemcpy(D_gpu, D, sizeof(DATA_TYPE) * NM * NL, cudaMemcpyHostToDevice); cudaMemcpy(E_gpu, E, sizeof(DATA_TYPE) * NI * NJ, cudaMemcpyHostToDevice); cudaMemcpy(F_gpu, F, sizeof(DATA_TYPE) * NJ * NL, cudaMemcpyHostToDevice); cudaMemcpy(G_gpu, G, sizeof(DATA_TYPE) * NI * NL, cudaMemcpyHostToDevice); dim3 block(DIM_THREAD_BLOCK_X, DIM_THREAD_BLOCK_Y); dim3 grid1((size_t)(ceil( ((float)NJ) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) ))); dim3 grid2((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NJ/ ((float)DIM_THREAD_BLOCK_Y) ))); dim3 grid3((size_t)(ceil( ((float)NL) / ((float)DIM_THREAD_BLOCK_X) )),(size_t)(ceil((float)NI/ ((float)DIM_THREAD_BLOCK_Y) ))); t_start = rtclock(); mm3_kernel1<<<grid1,block>>>(A_gpu, B_gpu, E_gpu); cudaThreadSynchronize(); mm3_kernel2<<<grid2,block>>>(C_gpu, D_gpu, F_gpu); cudaThreadSynchronize(); mm3_kernel3<<<grid3,block>>>(E_gpu, F_gpu, G_gpu); cudaThreadSynchronize(); t_end = rtclock(); cudaMemcpy(G_outputFromGpu, G_gpu, sizeof(DATA_TYPE) * NI * NL, cudaMemcpyDeviceToHost); fprintf(stdout, "GPU Runtime: %0.6lfs\n", t_end - t_start); cudaFree(A_gpu); cudaFree(B_gpu); cudaFree(C_gpu); cudaFree(D_gpu); cudaFree(E_gpu); cudaFree(F_gpu); cudaFree(G_gpu); } int main(int argc, char** argv) { double t_start, t_end; DATA_TYPE* A; DATA_TYPE* B; DATA_TYPE* C; DATA_TYPE* D; DATA_TYPE* E; DATA_TYPE* F; DATA_TYPE* G; DATA_TYPE* G_outputFromGpu; A = (DATA_TYPE*)malloc(NI*NK*sizeof(DATA_TYPE)); B = (DATA_TYPE*)malloc(NK*NJ*sizeof(DATA_TYPE)); C = (DATA_TYPE*)malloc(NJ*NM*sizeof(DATA_TYPE)); D = (DATA_TYPE*)malloc(NM*NL*sizeof(DATA_TYPE)); E = (DATA_TYPE*)malloc(NI*NJ*sizeof(DATA_TYPE)); F = (DATA_TYPE*)malloc(NJ*NL*sizeof(DATA_TYPE)); G = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE)); G_outputFromGpu = (DATA_TYPE*)malloc(NI*NL*sizeof(DATA_TYPE)); init_array(A, B, C, D); GPU_argv_init(); mm3Cuda(A, B, C, D, E, F, G, G_outputFromGpu); t_start = rtclock(); mm3_cpu(A, B, C, D, E, F, G); t_end = rtclock(); fprintf(stdout, "CPU Runtime: %0.6lfs\n", t_end - t_start); compareResults(G, G_outputFromGpu); free(A); free(B); free(C); free(D); free(E); free(F); free(G); free(G_outputFromGpu); return 0; }
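One caveat about this benchmark, present in both versions: E, F and G are malloc'd on the host and copied to the device without being initialized, yet all three kernels accumulate into them with +=, so the GPU result silently depends on those freshly allocated pages happening to be zero. The CPU path, by contrast, zeroes each element before accumulating. A hedged sketch of an explicit device-side initialization that removes the dependency; it reuses the macros and pointer names from the file above and is not part of the original:

/* Zero the device-side accumulators before launching the three kernels,
 * instead of copying uninitialized host buffers into them
 * (all-bits-zero is 0.0f for float, so memset is valid here). */
static void zeroAccumulators(DATA_TYPE *E_gpu, DATA_TYPE *F_gpu, DATA_TYPE *G_gpu)
{
    cudaMemset(E_gpu, 0, sizeof(DATA_TYPE) * NI * NJ);
    cudaMemset(F_gpu, 0, sizeof(DATA_TYPE) * NJ * NL);
    cudaMemset(G_gpu, 0, sizeof(DATA_TYPE) * NI * NL);
}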
670bdccfcd318da34e712b1b9c8d3adc12b7bd71.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <vector> #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { #ifdef USE_ROCM template<typename Dtype> __global__ void ReLUForward(const int n, const Dtype* in, Dtype* out, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; } } #endif // USE_ROCM template<typename Dtype> void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM // NOLINT_NEXT_LINE(whitespace/operators) ReLUForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, bottom_data, top_data, negative_slope); CUDA_POST_KERNEL_CHECK; #endif // USE_ROCM } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_->id()); viennacl::ocl::kernel &oclk_relu_forward = program.get_kernel( CL_KERNEL_SELECT("relu_forward")); viennacl::ocl::enqueue( oclk_relu_forward(count, WrapHandle((cl_mem) bottom_data, &ctx), WrapHandle((cl_mem) top_data, &ctx), negative_slope), ctx.get_queue()); ctx.get_queue().finish(); #endif // USE_GREENTEA } // << " count: " << count << " bottom_data: " // << (unsigned long)bottom_data // << " top_data: " << (unsigned long)top_data // << " blocks: " << CAFFE_GET_BLOCKS(count) // << " threads: " << CAFFE_CUDA_NUM_THREADS; } #ifdef USE_ROCM template<typename Dtype> __global__ void ReLUBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); } } #endif // USE_ROCM template<typename Dtype> void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM // NOLINT_NEXT_LINE(whitespace/operators) ReLUBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, top_diff, bottom_data, bottom_diff, negative_slope); CUDA_POST_KERNEL_CHECK; #endif // USE_ROCM } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_->id()); viennacl::ocl::kernel &oclk_relu_backward = program.get_kernel( CL_KERNEL_SELECT("relu_backward")); viennacl::ocl::enqueue( oclk_relu_backward(count, WrapHandle((cl_mem) top_diff, &ctx), WrapHandle((cl_mem) bottom_data, &ctx), WrapHandle((cl_mem) bottom_diff, &ctx), negative_slope), ctx.get_queue()); ctx.get_queue().finish(); #endif // USE_GREENTEA } } } 
INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer); } // namespace caffe
670bdccfcd318da34e712b1b9c8d3adc12b7bd71.cu
#include <algorithm> #include <vector> #include "caffe/layer.hpp" #include "caffe/vision_layers.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { #ifdef USE_CUDA template<typename Dtype> __global__ void ReLUForward(const int n, const Dtype* in, Dtype* out, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] > 0 ? in[index] : in[index] * negative_slope; } } #endif // USE_CUDA template<typename Dtype> void ReLULayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) ReLUForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, bottom_data, top_data, negative_slope); CUDA_POST_KERNEL_CHECK; #endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_->id()); viennacl::ocl::kernel &oclk_relu_forward = program.get_kernel( CL_KERNEL_SELECT("relu_forward")); viennacl::ocl::enqueue( oclk_relu_forward(count, WrapHandle((cl_mem) bottom_data, &ctx), WrapHandle((cl_mem) top_data, &ctx), negative_slope), ctx.get_queue()); ctx.get_queue().finish(); #endif // USE_GREENTEA } // << " count: " << count << " bottom_data: " // << (unsigned long)bottom_data // << " top_data: " << (unsigned long)top_data // << " blocks: " << CAFFE_GET_BLOCKS(count) // << " threads: " << CAFFE_CUDA_NUM_THREADS; } #ifdef USE_CUDA template<typename Dtype> __global__ void ReLUBackward(const int n, const Dtype* in_diff, const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * ((in_data[index] > 0) + (in_data[index] <= 0) * negative_slope); } } #endif // USE_CUDA template<typename Dtype> void ReLULayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* bottom_data = bottom[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const int count = bottom[0]->count(); Dtype negative_slope = this->layer_param_.relu_param().negative_slope(); if (this->device_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA // NOLINT_NEXT_LINE(whitespace/operators) ReLUBackward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, top_diff, bottom_data, bottom_diff, negative_slope); CUDA_POST_KERNEL_CHECK; #endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_->id()); viennacl::ocl::kernel &oclk_relu_backward = program.get_kernel( CL_KERNEL_SELECT("relu_backward")); viennacl::ocl::enqueue( oclk_relu_backward(count, WrapHandle((cl_mem) top_diff, &ctx), WrapHandle((cl_mem) bottom_data, &ctx), WrapHandle((cl_mem) bottom_diff, &ctx), negative_slope), ctx.get_queue()); ctx.get_queue().finish(); #endif // USE_GREENTEA } } } INSTANTIATE_LAYER_GPU_FUNCS(ReLULayer); } // namespace caffe
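The ReLU kernels above iterate with CUDA_KERNEL_LOOP, which neither listing defines. To the best of my knowledge, upstream Caffe defines it as the grid-stride loop sketched below (reproduced from memory for readability only; the authoritative definition lives in Caffe's device_alternate.hpp and may differ in this fork).

// Approximate upstream Caffe definition (assumption, not part of this file):
#define CUDA_KERNEL_LOOP(i, n)                            \
  for (int i = blockIdx.x * blockDim.x + threadIdx.x;     \
       i < (n);                                           \
       i += blockDim.x * gridDim.x)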
562bb455a36db9e5443536f0040373d6cc6a24fb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // Created by Yurii Shyrma on 02.01.2018 // #include <ops/declarable/helpers/stack.h> #include <helpers/ShapeUtils.h> #include <array/ResultSet.h> #include <exceptions/cuda_exception.h> #include <helpers/TAD.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template <typename T> static __global__ void stackScalarsCuda(void* pVx, void* vz, const Nd4jLong* zShapeInfo) { T* z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong zLen, totalThreads; if (threadIdx.x == 0) { zLen = shape::length(zShapeInfo); totalThreads = gridDim.x * blockDim.x; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < zLen; i += totalThreads) { const T *x = reinterpret_cast<const T*>(reinterpret_cast<void**>(pVx)[i]); z[shape::getIndexOffset(i, zShapeInfo)] = *x; } } /////////////////////////////////////////////////////////////////// template<typename T> __host__ static void stackScalarsCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, void* pVx, void* vz, const Nd4jLong* zShapeInfo) { hipLaunchKernelGGL(( stackScalarsCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 256, *stream, pVx, vz, zShapeInfo); } /////////////////////////////////////////////////////////////////// template <typename T> static void stack_(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output, const int dim) { const int numOfSubArrs = inArrs.size(); NDArray::prepareSpecialUse({&output}, inArrs); if(inArrs[0]->rankOf() == 0) { std::vector<void const*> hInBuffers(numOfSubArrs); for(int i = 0; i < numOfSubArrs; ++i) hInBuffers[i] = inArrs[i]->specialBuffer(); PointersManager manager(context, "helpers::stack cuda"); void* dInBuffers = manager.replicatePointer(hInBuffers.data(), hInBuffers.size() * sizeof(void*)); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; stackScalarsCudaLauncher<T>(blocksPerGrid, threadsPerBlock, context->getCudaStream(), dInBuffers, output.specialBuffer(), output.specialShapeInfo()); manager.synchronize(); } else { auto zTadPack = ConstantTadHelper::getInstance().tadForDimensions(output.shapeInfo(), ShapeUtils::evalDimsToExclude(output.rankOf(), {dim})); auto zTadShapeInfo = zTadPack.primaryShapeInfo(); for (uint i = 0; i < numOfSubArrs; ++i) { void* zBuff = output.specialBufferWithOffset(zTadPack.primaryOffsets()[i]); NativeOpExecutioner::execTransformAny(context, transform::Assign, nullptr, inArrs[i]->shapeInfo(), inArrs[i]->specialBuffer(), 
inArrs[i]->specialShapeInfo(), nullptr, zTadShapeInfo, zBuff, zTadPack.specialShapeInfo(), nullptr, nullptr, nullptr, false/*allowParallelism*/); } } NDArray::registerSpecialUse({&output}, inArrs); } //////////////////////////////////////////////////////////////////////// void stack(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output, const int dim) { BUILD_SINGLE_SELECTOR(output.dataType(), stack_, (context, inArrs, output, dim), LIBND4J_TYPES); } BUILD_SINGLE_TEMPLATE(template void stack_ , (sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output, const int dim), LIBND4J_TYPES); /////////////////////////////////////////////////////////////////// template <typename T> static __global__ void unstackScalarsCuda(const void* vx, const Nd4jLong* xShapeInfo, void* pVz) { const T* x = reinterpret_cast<const T*>(vx); __shared__ Nd4jLong xLen, totalThreads; if (threadIdx.x == 0) { xLen = shape::length(xShapeInfo); totalThreads = gridDim.x * blockDim.x; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < xLen; i += totalThreads) { T* z = reinterpret_cast<T*>(reinterpret_cast<void**>(pVz)[i]); *z = x[shape::getIndexOffset(i, xShapeInfo)]; } } /////////////////////////////////////////////////////////////////// template<typename T> __host__ static void unstackScalarsCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* pVz) { hipLaunchKernelGGL(( unstackScalarsCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), 256, *stream, vx, xShapeInfo, pVz); } /////////////////////////////////////////////////////////////////// template <typename T> static void unstack_(sd::LaunchContext* context, const NDArray& input, const std::vector<NDArray*>& outArrs, const int dim) { const int numOfSubArrs = outArrs.size(); // NDArray::prepareSpecialUse(outArrs, {&input}); input.syncToDevice(); for (const auto a : outArrs) a->getDataBuffer()->allocateSpecial(); if(outArrs[0]->rankOf() == 0) { std::vector<void*> hOutBuffers(numOfSubArrs); for(int i = 0; i < numOfSubArrs; ++i) hOutBuffers[i] = outArrs[i]->specialBuffer(); PointersManager manager(context, "helpers::unstack cuda"); void* dOutBuffers = manager.replicatePointer(hOutBuffers.data(), hOutBuffers.size() * sizeof(void*)); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; unstackScalarsCudaLauncher<T>(blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), dOutBuffers); manager.synchronize(); } else { auto xTadPack = ConstantTadHelper::getInstance().tadForDimensions(input.shapeInfo(), ShapeUtils::evalDimsToExclude(input.rankOf(), {dim})); auto xTadShapeInfo = xTadPack.primaryShapeInfo(); for (uint i = 0; i < numOfSubArrs; ++i) { auto xBuff = input.specialBufferWithOffset(xTadPack.primaryOffsets()[i]); NativeOpExecutioner::execTransformAny(input.getContext(), transform::Assign, nullptr, xTadShapeInfo, xBuff, xTadPack.specialShapeInfo(), nullptr, outArrs[i]->shapeInfo(), outArrs[i]->specialBuffer(), outArrs[i]->specialShapeInfo(), nullptr, nullptr, nullptr, false/*allowParallelism*/); } } // NDArray::registerSpecialUse(outArrs, {&input}); input.tickReadDevice(); for (const auto p : outArrs) p->tickWriteDevice(); } //////////////////////////////////////////////////////////////////////// void unstack(sd::LaunchContext* context, const 
NDArray& input, const std::vector<NDArray*>& outArrs, const int dim) { BUILD_SINGLE_SELECTOR(input.dataType(), unstack_, (context, input, outArrs, dim), LIBND4J_TYPES); } BUILD_SINGLE_TEMPLATE(template void unstack_, (sd::LaunchContext* context, const NDArray& input, const std::vector<NDArray*>& outArrs, const int dim), LIBND4J_TYPES); /////////////////////////////////////////////////////////////////// // template <typename T> // static __global__ void unstackCuda(const void* vx, const Nd4jLong* xShapeInfo, void* pVz, const Nd4jLong* zTadShapeInfo, const int axis) { // const T* x = reinterpret_cast<const T*>(vx); // __shared__ Nd4jLong xLen, totalThreads; // __shared__ int xRank; // if (threadIdx.x == 0) { // xLen = shape::length(xShapeInfo); // xRank = shape::rank(xShapeInfo); // totalThreads = gridDim.x * blockDim.x; // } // __syncthreads(); // const auto tid = blockIdx.x * blockDim.x + threadIdx.x; // Nd4jLong coords[MAX_RANK]; // for (uint64_t i = tid; i < xLen; i += totalThreads) { // shape::index2coords(i, xShapeInfo, coords); // const auto xOffset = shape::getOffset(xShapeInfo, coords); // T *z = reinterpret_cast<T*>(reinterpret_cast<void **>(pVz)[coords[axis]]); // for (uint j = axis; j < xRank - 1; ++j) // shift coords staring from axis position // coords[j] = coords[j + 1]; // const auto zOffset = shape::getOffset(zTadShapeInfo, coords); // z[zOffset] = x[xOffset]; // } // } // /////////////////////////////////////////////////////////////////// // template<typename T> // __host__ static void unstackCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, // const void* vx, const Nd4jLong* xShapeInfo, void* pVz, const Nd4jLong* zTadShapeInfo, const int axis) { // unstackCuda<T><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(vx, xShapeInfo, pVz, zTadShapeInfo, axis); // } // BUILD_SINGLE_TEMPLATE(template void unstackCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* pVz, const Nd4jLong* zTadShapeInfo, const int axis), LIBND4J_TYPES); // /////////////////////////////////////////////////////////////////// // void unstack(sd::LaunchContext* context, const NDArray& input, const std::vector<const NDArray*>& outArrs, const int axis) { // const int threadsPerBlock = MAX_NUM_THREADS / 2; // const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; // const int numOfSubArrs = outArrs.size(); // std::vector<void*> hOutBuffers(numOfSubArrs); // for(int i = 0; i < numOfSubArrs; ++i) // hOutBuffers[i] = outArrs[i]->specialBuffer(); // PointersManager manager(context, "helpers::unstack"); // void* dOutBuffers = manager.replicatePointer(hOutBuffers.data(), hOutBuffers.size() * sizeof(void*)); // for(uint i = 0; i < numOfSubArrs; ++i) // outArrs[i]->syncToDevice(); // input.syncToDevice(); // BUILD_SINGLE_SELECTOR(input.dataType(), unstackCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), dOutBuffers, outArrs[0]->special(), axis), LIBND4J_TYPES); // manager.synchronize(); // for(uint i = 0; i < numOfSubArrs; ++i) // outArrs[i]->tickReadDevice(); // input.tickWriteDevice(); // } // /////////////////////////////////////////////////////////////////// // template <typename T> // static __global__ void stackCuda(void* pVx, const Nd4jLong* xTadShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int axis) { // T* z = reinterpret_cast<T*>(vz); // __shared__ Nd4jLong zLen, 
totalThreads; // __shared__ int zRank; // if (threadIdx.x == 0) { // zLen = shape::length(zShapeInfo); // zRank = shape::rank(zShapeInfo); // totalThreads = gridDim.x * blockDim.x; // } // __syncthreads(); // const auto tid = blockIdx.x * blockDim.x + threadIdx.x; // Nd4jLong coords[MAX_RANK]; // for (uint64_t i = tid; i < zLen; i += totalThreads) { // shape::index2coords(i, zShapeInfo, coords); // const auto zOffset = shape::getOffset(zShapeInfo, coords); // const T *x = reinterpret_cast<const T*>(reinterpret_cast<void**>(pVx)[coords[axis]]); // for (uint j = axis; j < zRank - 1; ++j) // shift coords staring from axis position // coords[j] = coords[j + 1]; // const auto xOffset = shape::getOffset(xTadShapeInfo, coords); // z[zOffset] = x[xOffset]; // } // } // /////////////////////////////////////////////////////////////////// // template<typename T> // __host__ static void stackCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, // void* pVx, const Nd4jLong* xTadShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int axis) { // stackCuda<T><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(pVx, xTadShapeInfo, vz, zShapeInfo, axis); // } // BUILD_SINGLE_TEMPLATE(template void stackCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const hipStream_t *stream, void* pVx, const Nd4jLong* xTadShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int axis), LIBND4J_TYPES); // /////////////////////////////////////////////////////////////////// // void stack(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output, const int axis) { // const int threadsPerBlock = MAX_NUM_THREADS / 2; // const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; // const int numOfSubArrs = inArrs.size(); // std::vector<void*> hInBuffers(numOfSubArrs); // for(int i = 0; i < numOfSubArrs; ++i) // hInBuffers[i] = inArrs[i]->specialBuffer(); // PointersManager manager(context, "helpers::stack"); // void* dInBuffers = manager.replicatePointer(hInBuffers.data(), hInBuffers.size() * sizeof(void*)); // for(uint i = 0; i < numOfSubArrs; ++i) // inArrs[i]->syncToDevice(); // output.syncToDevice(); // BUILD_SINGLE_SELECTOR(output.dataType(), stackCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), dInBuffers, inArrs[0]->specialShapeInfo(), output.specialBuffer(), output.special(), axis), LIBND4J_TYPES); // manager.synchronize(); // for(uint i = 0; i < numOfSubArrs; ++i) // inArrs[i]->tickReadDevice(); // output.tickWriteDevice(); // } } } }
562bb455a36db9e5443536f0040373d6cc6a24fb.cu
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ // // Created by Yurii Shyrma on 02.01.2018 // #include <ops/declarable/helpers/stack.h> #include <helpers/ShapeUtils.h> #include <array/ResultSet.h> #include <exceptions/cuda_exception.h> #include <helpers/TAD.h> #include <helpers/PointersManager.h> #include <helpers/ConstantTadHelper.h> namespace sd { namespace ops { namespace helpers { /////////////////////////////////////////////////////////////////// template <typename T> static __global__ void stackScalarsCuda(void* pVx, void* vz, const Nd4jLong* zShapeInfo) { T* z = reinterpret_cast<T*>(vz); __shared__ Nd4jLong zLen, totalThreads; if (threadIdx.x == 0) { zLen = shape::length(zShapeInfo); totalThreads = gridDim.x * blockDim.x; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < zLen; i += totalThreads) { const T *x = reinterpret_cast<const T*>(reinterpret_cast<void**>(pVx)[i]); z[shape::getIndexOffset(i, zShapeInfo)] = *x; } } /////////////////////////////////////////////////////////////////// template<typename T> __host__ static void stackScalarsCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, void* pVx, void* vz, const Nd4jLong* zShapeInfo) { stackScalarsCuda<T><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(pVx, vz, zShapeInfo); } /////////////////////////////////////////////////////////////////// template <typename T> static void stack_(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output, const int dim) { const int numOfSubArrs = inArrs.size(); NDArray::prepareSpecialUse({&output}, inArrs); if(inArrs[0]->rankOf() == 0) { std::vector<void const*> hInBuffers(numOfSubArrs); for(int i = 0; i < numOfSubArrs; ++i) hInBuffers[i] = inArrs[i]->specialBuffer(); PointersManager manager(context, "helpers::stack cuda"); void* dInBuffers = manager.replicatePointer(hInBuffers.data(), hInBuffers.size() * sizeof(void*)); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; stackScalarsCudaLauncher<T>(blocksPerGrid, threadsPerBlock, context->getCudaStream(), dInBuffers, output.specialBuffer(), output.specialShapeInfo()); manager.synchronize(); } else { auto zTadPack = ConstantTadHelper::getInstance().tadForDimensions(output.shapeInfo(), ShapeUtils::evalDimsToExclude(output.rankOf(), {dim})); auto zTadShapeInfo = zTadPack.primaryShapeInfo(); for (uint i = 0; i < numOfSubArrs; ++i) { void* zBuff = output.specialBufferWithOffset(zTadPack.primaryOffsets()[i]); NativeOpExecutioner::execTransformAny(context, transform::Assign, nullptr, inArrs[i]->shapeInfo(), inArrs[i]->specialBuffer(), inArrs[i]->specialShapeInfo(), nullptr, zTadShapeInfo, zBuff, zTadPack.specialShapeInfo(), nullptr, nullptr, nullptr, 
false/*allowParallelism*/); } } NDArray::registerSpecialUse({&output}, inArrs); } //////////////////////////////////////////////////////////////////////// void stack(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output, const int dim) { BUILD_SINGLE_SELECTOR(output.dataType(), stack_, (context, inArrs, output, dim), LIBND4J_TYPES); } BUILD_SINGLE_TEMPLATE(template void stack_ , (sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output, const int dim), LIBND4J_TYPES); /////////////////////////////////////////////////////////////////// template <typename T> static __global__ void unstackScalarsCuda(const void* vx, const Nd4jLong* xShapeInfo, void* pVz) { const T* x = reinterpret_cast<const T*>(vx); __shared__ Nd4jLong xLen, totalThreads; if (threadIdx.x == 0) { xLen = shape::length(xShapeInfo); totalThreads = gridDim.x * blockDim.x; } __syncthreads(); const auto tid = blockIdx.x * blockDim.x + threadIdx.x; for (Nd4jLong i = tid; i < xLen; i += totalThreads) { T* z = reinterpret_cast<T*>(reinterpret_cast<void**>(pVz)[i]); *z = x[shape::getIndexOffset(i, xShapeInfo)]; } } /////////////////////////////////////////////////////////////////// template<typename T> __host__ static void unstackScalarsCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* pVz) { unstackScalarsCuda<T><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(vx, xShapeInfo, pVz); } /////////////////////////////////////////////////////////////////// template <typename T> static void unstack_(sd::LaunchContext* context, const NDArray& input, const std::vector<NDArray*>& outArrs, const int dim) { const int numOfSubArrs = outArrs.size(); // NDArray::prepareSpecialUse(outArrs, {&input}); input.syncToDevice(); for (const auto a : outArrs) a->getDataBuffer()->allocateSpecial(); if(outArrs[0]->rankOf() == 0) { std::vector<void*> hOutBuffers(numOfSubArrs); for(int i = 0; i < numOfSubArrs; ++i) hOutBuffers[i] = outArrs[i]->specialBuffer(); PointersManager manager(context, "helpers::unstack cuda"); void* dOutBuffers = manager.replicatePointer(hOutBuffers.data(), hOutBuffers.size() * sizeof(void*)); const int threadsPerBlock = MAX_NUM_THREADS / 2; const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; unstackScalarsCudaLauncher<T>(blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), dOutBuffers); manager.synchronize(); } else { auto xTadPack = ConstantTadHelper::getInstance().tadForDimensions(input.shapeInfo(), ShapeUtils::evalDimsToExclude(input.rankOf(), {dim})); auto xTadShapeInfo = xTadPack.primaryShapeInfo(); for (uint i = 0; i < numOfSubArrs; ++i) { auto xBuff = input.specialBufferWithOffset(xTadPack.primaryOffsets()[i]); NativeOpExecutioner::execTransformAny(input.getContext(), transform::Assign, nullptr, xTadShapeInfo, xBuff, xTadPack.specialShapeInfo(), nullptr, outArrs[i]->shapeInfo(), outArrs[i]->specialBuffer(), outArrs[i]->specialShapeInfo(), nullptr, nullptr, nullptr, false/*allowParallelism*/); } } // NDArray::registerSpecialUse(outArrs, {&input}); input.tickReadDevice(); for (const auto p : outArrs) p->tickWriteDevice(); } //////////////////////////////////////////////////////////////////////// void unstack(sd::LaunchContext* context, const NDArray& input, const std::vector<NDArray*>& outArrs, const int dim) { BUILD_SINGLE_SELECTOR(input.dataType(), unstack_, (context, input, outArrs, dim), 
LIBND4J_TYPES); } BUILD_SINGLE_TEMPLATE(template void unstack_, (sd::LaunchContext* context, const NDArray& input, const std::vector<NDArray*>& outArrs, const int dim), LIBND4J_TYPES); /////////////////////////////////////////////////////////////////// // template <typename T> // static __global__ void unstackCuda(const void* vx, const Nd4jLong* xShapeInfo, void* pVz, const Nd4jLong* zTadShapeInfo, const int axis) { // const T* x = reinterpret_cast<const T*>(vx); // __shared__ Nd4jLong xLen, totalThreads; // __shared__ int xRank; // if (threadIdx.x == 0) { // xLen = shape::length(xShapeInfo); // xRank = shape::rank(xShapeInfo); // totalThreads = gridDim.x * blockDim.x; // } // __syncthreads(); // const auto tid = blockIdx.x * blockDim.x + threadIdx.x; // Nd4jLong coords[MAX_RANK]; // for (uint64_t i = tid; i < xLen; i += totalThreads) { // shape::index2coords(i, xShapeInfo, coords); // const auto xOffset = shape::getOffset(xShapeInfo, coords); // T *z = reinterpret_cast<T*>(reinterpret_cast<void **>(pVz)[coords[axis]]); // for (uint j = axis; j < xRank - 1; ++j) // shift coords staring from axis position // coords[j] = coords[j + 1]; // const auto zOffset = shape::getOffset(zTadShapeInfo, coords); // z[zOffset] = x[xOffset]; // } // } // /////////////////////////////////////////////////////////////////// // template<typename T> // __host__ static void unstackCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, // const void* vx, const Nd4jLong* xShapeInfo, void* pVz, const Nd4jLong* zTadShapeInfo, const int axis) { // unstackCuda<T><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(vx, xShapeInfo, pVz, zTadShapeInfo, axis); // } // BUILD_SINGLE_TEMPLATE(template void unstackCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, const void* vx, const Nd4jLong* xShapeInfo, void* pVz, const Nd4jLong* zTadShapeInfo, const int axis), LIBND4J_TYPES); // /////////////////////////////////////////////////////////////////// // void unstack(sd::LaunchContext* context, const NDArray& input, const std::vector<const NDArray*>& outArrs, const int axis) { // const int threadsPerBlock = MAX_NUM_THREADS / 2; // const int blocksPerGrid = (input.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; // const int numOfSubArrs = outArrs.size(); // std::vector<void*> hOutBuffers(numOfSubArrs); // for(int i = 0; i < numOfSubArrs; ++i) // hOutBuffers[i] = outArrs[i]->specialBuffer(); // PointersManager manager(context, "helpers::unstack"); // void* dOutBuffers = manager.replicatePointer(hOutBuffers.data(), hOutBuffers.size() * sizeof(void*)); // for(uint i = 0; i < numOfSubArrs; ++i) // outArrs[i]->syncToDevice(); // input.syncToDevice(); // BUILD_SINGLE_SELECTOR(input.dataType(), unstackCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), input.specialBuffer(), input.specialShapeInfo(), dOutBuffers, outArrs[0]->special(), axis), LIBND4J_TYPES); // manager.synchronize(); // for(uint i = 0; i < numOfSubArrs; ++i) // outArrs[i]->tickReadDevice(); // input.tickWriteDevice(); // } // /////////////////////////////////////////////////////////////////// // template <typename T> // static __global__ void stackCuda(void* pVx, const Nd4jLong* xTadShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int axis) { // T* z = reinterpret_cast<T*>(vz); // __shared__ Nd4jLong zLen, totalThreads; // __shared__ int zRank; // if (threadIdx.x == 0) { // zLen = shape::length(zShapeInfo); // zRank = shape::rank(zShapeInfo); // totalThreads 
= gridDim.x * blockDim.x; // } // __syncthreads(); // const auto tid = blockIdx.x * blockDim.x + threadIdx.x; // Nd4jLong coords[MAX_RANK]; // for (uint64_t i = tid; i < zLen; i += totalThreads) { // shape::index2coords(i, zShapeInfo, coords); // const auto zOffset = shape::getOffset(zShapeInfo, coords); // const T *x = reinterpret_cast<const T*>(reinterpret_cast<void**>(pVx)[coords[axis]]); // for (uint j = axis; j < zRank - 1; ++j) // shift coords staring from axis position // coords[j] = coords[j + 1]; // const auto xOffset = shape::getOffset(xTadShapeInfo, coords); // z[zOffset] = x[xOffset]; // } // } // /////////////////////////////////////////////////////////////////// // template<typename T> // __host__ static void stackCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, // void* pVx, const Nd4jLong* xTadShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int axis) { // stackCuda<T><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(pVx, xTadShapeInfo, vz, zShapeInfo, axis); // } // BUILD_SINGLE_TEMPLATE(template void stackCudaLauncher, (const int blocksPerGrid, const int threadsPerBlock, const cudaStream_t *stream, void* pVx, const Nd4jLong* xTadShapeInfo, void* vz, const Nd4jLong* zShapeInfo, const int axis), LIBND4J_TYPES); // /////////////////////////////////////////////////////////////////// // void stack(sd::LaunchContext* context, const std::vector<const NDArray*>& inArrs, NDArray& output, const int axis) { // const int threadsPerBlock = MAX_NUM_THREADS / 2; // const int blocksPerGrid = (output.lengthOf() + threadsPerBlock - 1) / threadsPerBlock; // const int numOfSubArrs = inArrs.size(); // std::vector<void*> hInBuffers(numOfSubArrs); // for(int i = 0; i < numOfSubArrs; ++i) // hInBuffers[i] = inArrs[i]->specialBuffer(); // PointersManager manager(context, "helpers::stack"); // void* dInBuffers = manager.replicatePointer(hInBuffers.data(), hInBuffers.size() * sizeof(void*)); // for(uint i = 0; i < numOfSubArrs; ++i) // inArrs[i]->syncToDevice(); // output.syncToDevice(); // BUILD_SINGLE_SELECTOR(output.dataType(), stackCudaLauncher, (blocksPerGrid, threadsPerBlock, context->getCudaStream(), dInBuffers, inArrs[0]->specialShapeInfo(), output.specialBuffer(), output.special(), axis), LIBND4J_TYPES); // manager.synchronize(); // for(uint i = 0; i < numOfSubArrs; ++i) // inArrs[i]->tickReadDevice(); // output.tickWriteDevice(); // } } } }
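The stack/unstack pair above is a convenient place to see the mechanical rewrite hipify applies to triple-chevron launches: the <<<grid, block, shmem, stream>>> form becomes a hipLaunchKernelGGL call carrying the same launch configuration, followed by the kernel arguments. The two lines below are taken verbatim from the launchers above (CUDA first, HIP second).

// CUDA launcher (562bb455...cu):
stackScalarsCuda<T><<<blocksPerGrid, threadsPerBlock, 256, *stream>>>(pVx, vz, zShapeInfo);

// HIP launcher (562bb455...hip), as emitted by hipify:
hipLaunchKernelGGL(( stackScalarsCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock),
                   256, *stream, pVx, vz, zShapeInfo);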
f79f3886f486048034635fb3ddb55073d43b3e7a.hip
// !!! This is a file automatically generated by hipify!!!
#include <assert.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>

/**
 * CUDA kernels for transforming color spaces from ARGB -> NV12
 *
 * NOTE: THESE KERNELS WERE PULLED FROM NVIDIA'S GITHUB
 * I did not write these
 *
 */

__forceinline__ __device__ float clamp(float x, float a, float b)
{
    return max(a, min(b, x));
}

__forceinline__ __device__ float RGBA2Y(uchar4 argb)
{
    return clamp((0.257*argb.x) + (0.504*argb.y) + (0.098*argb.z) + 16, 0, 255);
}

__global__ static void CudaProcessY(int w, int h, uchar4 * pARGBImage, unsigned char * pNV12ImageY)
{
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int j = blockIdx.y*blockDim.y + threadIdx.y;
    if (i < w && j < h)
    {
        uchar4 argb = pARGBImage[w*j + i];
        pNV12ImageY[w*j + i] = RGBA2Y(argb);
    }
}

__forceinline__ __device__ float RGBA2U(uchar4 argb)
{
    return clamp(-(0.148*argb.x) - (0.291*argb.y) + (0.439*argb.z) + 128.0, 0, 255);
}

__forceinline__ __device__ float RGBA2V(uchar4 argb)
{
    return clamp((0.439*argb.x) - (0.368*argb.y) - (0.0701*argb.z) + 128.0, 0, 255);
}

__global__ static void CudaProcessUV(int w, int h, uchar4* pARGBImage, unsigned char* pNV12ImageUV)
{
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int j = blockIdx.y*blockDim.y + threadIdx.y;
    unsigned int fi = i*2;//full size image i
    unsigned int fj = j*2;//full size image j
    unsigned int fw = w*2;//full size image w
    unsigned int fh = h*2;//full size image h
    unsigned int u_idx = i*2 + 1 + j*w*2;
    unsigned int v_idx = i*2 + j*w*2;
    if(fi<fw-1 && fj<fh-1)
    {
        uchar4 argb1 = pARGBImage[fj*fw + fi];
        uchar4 argb2 = pARGBImage[fj*fw + fi + 1];
        uchar4 argb3 = pARGBImage[(fj + 1)*fw + fi];
        uchar4 argb4 = pARGBImage[(fj + 1)*fw + fi+1];
        float U = RGBA2U(argb1);
        float U2 = RGBA2U(argb2);
        float U3 = RGBA2U(argb3);
        float U4 = RGBA2U(argb4);
        float V = RGBA2V(argb1);
        float V2 = RGBA2V(argb2);
        float V3 = RGBA2V(argb3);
        float V4 = RGBA2V(argb4);
        pNV12ImageUV[u_idx] = (U+U2+U3+U4)/4.0;
        pNV12ImageUV[v_idx] = (V+V2+V3+V4)/4.0;
    }
}

// Need extern C here to allow access from main program
// CUDA is weird
extern "C"
{
    hipError_t launch_CudaARGB2NV12Process(int w, int h, hipDeviceptr_t pARGBImage, hipDeviceptr_t pNV12Image)
    {
        {
            dim3 dimBlock(16, 16, 1);
            dim3 dimGrid(((w) + dimBlock.x - 1)/dimBlock.x, ((h) + dimBlock.y - 1)/dimBlock.y, 1);
            hipLaunchKernelGGL(( CudaProcessY), dim3(dimGrid), dim3(dimBlock), 0, 0, w, h, (uchar4*)pARGBImage, (unsigned char*)pNV12Image);
        }
        {
            dim3 dimBlock(16, 16, 1);
            dim3 dimGrid(((w/2) + dimBlock.x - 1)/dimBlock.x, ((h/2) + dimBlock.y - 1)/dimBlock.y, 1);
            hipLaunchKernelGGL(( CudaProcessUV), dim3(dimGrid), dim3(dimBlock), 0, 0, w/2, h/2, (uchar4*)pARGBImage, ((unsigned char*)pNV12Image) + w*h);
        }
        hipError_t err = hipGetLastError();
        return err;
    }
}
f79f3886f486048034635fb3ddb55073d43b3e7a.cu
#include <assert.h>
#include <cuda.h>
#include <cuda_runtime.h>

/**
 * CUDA kernels for transforming color spaces from ARGB -> NV12
 *
 * NOTE: THESE KERNELS WERE PULLED FROM NVIDIA'S GITHUB
 * I did not write these
 *
 */

__forceinline__ __device__ float clamp(float x, float a, float b)
{
    return max(a, min(b, x));
}

__forceinline__ __device__ float RGBA2Y(uchar4 argb)
{
    return clamp((0.257*argb.x) + (0.504*argb.y) + (0.098*argb.z) + 16, 0, 255);
}

__global__ static void CudaProcessY(int w, int h, uchar4 * pARGBImage, unsigned char * pNV12ImageY)
{
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int j = blockIdx.y*blockDim.y + threadIdx.y;
    if (i < w && j < h)
    {
        uchar4 argb = pARGBImage[w*j + i];
        pNV12ImageY[w*j + i] = RGBA2Y(argb);
    }
}

__forceinline__ __device__ float RGBA2U(uchar4 argb)
{
    return clamp(-(0.148*argb.x) - (0.291*argb.y) + (0.439*argb.z) + 128.0, 0, 255);
}

__forceinline__ __device__ float RGBA2V(uchar4 argb)
{
    return clamp((0.439*argb.x) - (0.368*argb.y) - (0.0701*argb.z) + 128.0, 0, 255);
}

__global__ static void CudaProcessUV(int w, int h, uchar4* pARGBImage, unsigned char* pNV12ImageUV)
{
    unsigned int i = blockIdx.x*blockDim.x + threadIdx.x;
    unsigned int j = blockIdx.y*blockDim.y + threadIdx.y;
    unsigned int fi = i*2;//full size image i
    unsigned int fj = j*2;//full size image j
    unsigned int fw = w*2;//full size image w
    unsigned int fh = h*2;//full size image h
    unsigned int u_idx = i*2 + 1 + j*w*2;
    unsigned int v_idx = i*2 + j*w*2;
    if(fi<fw-1 && fj<fh-1)
    {
        uchar4 argb1 = pARGBImage[fj*fw + fi];
        uchar4 argb2 = pARGBImage[fj*fw + fi + 1];
        uchar4 argb3 = pARGBImage[(fj + 1)*fw + fi];
        uchar4 argb4 = pARGBImage[(fj + 1)*fw + fi+1];
        float U = RGBA2U(argb1);
        float U2 = RGBA2U(argb2);
        float U3 = RGBA2U(argb3);
        float U4 = RGBA2U(argb4);
        float V = RGBA2V(argb1);
        float V2 = RGBA2V(argb2);
        float V3 = RGBA2V(argb3);
        float V4 = RGBA2V(argb4);
        pNV12ImageUV[u_idx] = (U+U2+U3+U4)/4.0;
        pNV12ImageUV[v_idx] = (V+V2+V3+V4)/4.0;
    }
}

// Need extern C here to allow access from main program
// CUDA is weird
extern "C"
{
    cudaError launch_CudaARGB2NV12Process(int w, int h, CUdeviceptr pARGBImage, CUdeviceptr pNV12Image)
    {
        {
            dim3 dimBlock(16, 16, 1);
            dim3 dimGrid(((w) + dimBlock.x - 1)/dimBlock.x, ((h) + dimBlock.y - 1)/dimBlock.y, 1);
            CudaProcessY<<<dimGrid, dimBlock>>>(w, h, (uchar4*)pARGBImage, (unsigned char*)pNV12Image);
        }
        {
            dim3 dimBlock(16, 16, 1);
            dim3 dimGrid(((w/2) + dimBlock.x - 1)/dimBlock.x, ((h/2) + dimBlock.y - 1)/dimBlock.y, 1);
            CudaProcessUV<<<dimGrid, dimBlock>>>(w/2, h/2, (uchar4*)pARGBImage, ((unsigned char*)pNV12Image) + w*h);
        }
        cudaError err = cudaGetLastError();
        return err;
    }
}
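For context, a hedged usage sketch of the launcher above follows (CUDA flavor, matching the .cu listing). It assumes a tightly packed w x h ARGB frame with even dimensions and allocates the standard NV12 footprint of w*h luma bytes plus w*h/2 interleaved chroma bytes; the frame size and buffer names are illustrative and not part of the original file.

#include <cuda.h>
#include <cuda_runtime.h>
#include <stdint.h>
#include <stdio.h>

extern "C" cudaError launch_CudaARGB2NV12Process(int w, int h,
                                                 CUdeviceptr pARGBImage,
                                                 CUdeviceptr pNV12Image);

int main(void)
{
    const int w = 1920, h = 1080;   /* example frame size (assumed even) */
    void *d_argb = NULL, *d_nv12 = NULL;

    cudaMalloc(&d_argb, (size_t)w * h * sizeof(uchar4));   /* 4 bytes per ARGB pixel */
    cudaMalloc(&d_nv12, (size_t)w * h * 3 / 2);            /* NV12: Y plane + interleaved UV plane */
    /* ... fill d_argb with a frame (cudaMemcpy or a producer kernel) ... */

    cudaError err = launch_CudaARGB2NV12Process(w, h,
                                                (CUdeviceptr)(uintptr_t)d_argb,
                                                (CUdeviceptr)(uintptr_t)d_nv12);
    if (err != cudaSuccess)
        fprintf(stderr, "ARGB->NV12 failed: %s\n", cudaGetErrorString(err));

    cudaFree(d_argb);
    cudaFree(d_nv12);
    return 0;
}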
d613df4d559bbf501241da5308876fc2abfa6dd0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/temporal_shift_op.h" #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using framework::Tensor; template <typename T> __global__ void KeTemporalShiftFwNCHW(const T* input, T* output, const int ntchw, const int tchw, const int chw, const int hw, const int t, const int c1, const int c2) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int src_it = 0; for (; tid < ntchw; tid += stride) { int it = (tid % tchw) / chw; int ic = (tid % chw) / hw; if (ic < c1) { src_it = it - 1; } else if (ic < c2) { src_it = it + 1; } else { src_it = it; } if (src_it < 0 || src_it >= t) { output[tid] = 0; } else { output[tid] = input[tid + (src_it - it) * chw]; } } } template <typename T> __global__ void KeTemporalShiftFwNHWC(const T* input, T* output, const int nthwc, const int thwc, const int hwc, const int t, const int c, const int c1, const int c2) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int src_it = 0; for (; tid < nthwc; tid += stride) { int it = (tid % thwc) / hwc; int ic = tid % c; if (ic < c1) { src_it = it - 1; } else if (ic < c2) { src_it = it + 1; } else { src_it = it; } if (src_it < 0 || src_it >= t) { output[tid] = 0; } else { output[tid] = input[tid + (src_it - it) * hwc]; } } } template <typename T> __global__ void KeTemporalShiftBwNCHW(const T* output_grad, T* input_grad, const int ntchw, const int tchw, const int chw, const int hw, const int t, const int c1, const int c2) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int src_it = 0; for (; tid < ntchw; tid += stride) { int it = (tid % tchw) / chw; int ic = (tid % chw) / hw; if (ic < c1) { src_it = it + 1; } else if (ic < c2) { src_it = it - 1; } else { src_it = it; } if (src_it >= 0 && src_it < t) { input_grad[tid] = output_grad[tid + (src_it - it) * chw]; } else { input_grad[tid] = 0; } } } template <typename T> __global__ void KeTemporalShiftBwNHWC(const T* output_grad, T* input_grad, const int nthwc, const int thwc, const int hwc, const int t, const int c, const int c1, const int c2) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int src_it = 0; for (; tid < nthwc; tid += stride) { int it = (tid % thwc) / hwc; int ic = tid % c; if (ic < c1) { src_it = it + 1; } else if (ic < c2) { src_it = it - 1; } else { src_it = it; } if (src_it >= 0 && src_it < t) { input_grad[tid] = output_grad[tid + (src_it - it) * hwc]; } else { input_grad[tid] = 0; } } } template <typename T> class TemporalShiftOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), 
true, platform::errors::InvalidArgument( "This kernel only runs on GPU device.")); auto* input = ctx.Input<Tensor>("X"); auto* output = ctx.Output<Tensor>("Out"); int t = ctx.Attr<int>("seg_num"); float shift_ratio = ctx.Attr<float>("shift_ratio"); const std::string data_format_str = ctx.Attr<std::string>("data_format"); const DataLayout data_layout = framework::StringToDataLayout(data_format_str); const int nt = input->dims()[0]; const int c = (data_layout == DataLayout::kNCHW ? input->dims()[1] : input->dims()[3]); const int h = (data_layout == DataLayout::kNCHW ? input->dims()[2] : input->dims()[1]); const int w = (data_layout == DataLayout::kNCHW ? input->dims()[3] : input->dims()[2]); const int hw = h * w; const int chw = c * hw; const int tchw = t * chw; const int ntchw = nt * chw; const int c1 = static_cast<int>(c * shift_ratio); const int c2 = static_cast<int>(c * 2 * shift_ratio); framework::DDim out_dims = (data_layout == DataLayout::kNCHW ? phi::make_ddim({nt, c, h, w}) : phi::make_ddim({nt, h, w, c})); const T* input_data = input->data<T>(); T* output_data = output->mutable_data<T>(out_dims, ctx.GetPlace()); int pixelNum = nt * chw; int threads = 1024; int grid = (pixelNum + threads - 1) / threads; const auto& dev_ctx = ctx.cuda_device_context(); int blocks_per_sm = dev_ctx.GetMaxPhysicalThreadCount() / threads; grid = ::min(dev_ctx.GetSMCount() * blocks_per_sm, grid); if (data_layout == DataLayout::kNCHW) { hipLaunchKernelGGL(( KeTemporalShiftFwNCHW< T>), dim3(grid), dim3(threads), 0, ctx.cuda_device_context().stream(), input_data, output_data, ntchw, tchw, chw, hw, t, c1, c2); } else { hipLaunchKernelGGL(( KeTemporalShiftFwNHWC< T>), dim3(grid), dim3(threads), 0, ctx.cuda_device_context().stream(), input_data, output_data, ntchw, tchw, chw, t, c, c1, c2); } } }; template <typename T> class TemporalShiftGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); int t = ctx.Attr<int>("seg_num"); float shift_ratio = ctx.Attr<float>("shift_ratio"); const std::string data_format_str = ctx.Attr<std::string>("data_format"); const DataLayout data_layout = framework::StringToDataLayout(data_format_str); const int nt = output_grad->dims()[0]; const int c = (data_layout == DataLayout::kNCHW ? output_grad->dims()[1] : output_grad->dims()[3]); const int h = (data_layout == DataLayout::kNCHW ? output_grad->dims()[2] : output_grad->dims()[1]); const int w = (data_layout == DataLayout::kNCHW ? output_grad->dims()[3] : output_grad->dims()[2]); const int hw = h * w; const int chw = c * hw; const int tchw = t * chw; const int ntchw = nt * chw; const int c1 = static_cast<int>(c * shift_ratio); const int c2 = static_cast<int>(c * 2 * shift_ratio); framework::DDim in_grad_dims = (data_layout == DataLayout::kNCHW ? 
phi::make_ddim({nt, c, h, w}) : phi::make_ddim({nt, h, w, c})); const T* output_grad_data = output_grad->data<T>(); T* input_grad_data = input_grad->mutable_data<T>(in_grad_dims, ctx.GetPlace()); int pixelNum = nt * chw; int threads = 1024; int grid = (pixelNum + threads - 1) / threads; const auto& dev_ctx = ctx.cuda_device_context(); int blocks_per_sm = dev_ctx.GetMaxPhysicalThreadCount() / threads; grid = ::min(dev_ctx.GetSMCount() * blocks_per_sm, grid); if (data_layout == DataLayout::kNCHW) { hipLaunchKernelGGL(( KeTemporalShiftBwNCHW< T>), dim3(grid), dim3(threads), 0, ctx.cuda_device_context().stream(), output_grad_data, input_grad_data, ntchw, tchw, chw, hw, t, c1, c2); } else { hipLaunchKernelGGL(( KeTemporalShiftBwNHWC< T>), dim3(grid), dim3(threads), 0, ctx.cuda_device_context().stream(), output_grad_data, input_grad_data, ntchw, tchw, chw, t, c, c1, c2); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( temporal_shift, ops::TemporalShiftOpCUDAKernel<float>, ops::TemporalShiftOpCUDAKernel<double>, ops::TemporalShiftOpCUDAKernel<paddle::platform::float16>); REGISTER_OP_CUDA_KERNEL( temporal_shift_grad, ops::TemporalShiftGradOpCUDAKernel<float>, ops::TemporalShiftGradOpCUDAKernel<double>, ops::TemporalShiftGradOpCUDAKernel<paddle::platform::float16>);
d613df4d559bbf501241da5308876fc2abfa6dd0.cu
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserve. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/temporal_shift_op.h" #include "paddle/fluid/platform/device/gpu/gpu_launch_config.h" #include "paddle/fluid/platform/device/gpu/gpu_primitives.h" namespace paddle { namespace operators { using framework::Tensor; template <typename T> __global__ void KeTemporalShiftFwNCHW(const T* input, T* output, const int ntchw, const int tchw, const int chw, const int hw, const int t, const int c1, const int c2) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int src_it = 0; for (; tid < ntchw; tid += stride) { int it = (tid % tchw) / chw; int ic = (tid % chw) / hw; if (ic < c1) { src_it = it - 1; } else if (ic < c2) { src_it = it + 1; } else { src_it = it; } if (src_it < 0 || src_it >= t) { output[tid] = 0; } else { output[tid] = input[tid + (src_it - it) * chw]; } } } template <typename T> __global__ void KeTemporalShiftFwNHWC(const T* input, T* output, const int nthwc, const int thwc, const int hwc, const int t, const int c, const int c1, const int c2) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int src_it = 0; for (; tid < nthwc; tid += stride) { int it = (tid % thwc) / hwc; int ic = tid % c; if (ic < c1) { src_it = it - 1; } else if (ic < c2) { src_it = it + 1; } else { src_it = it; } if (src_it < 0 || src_it >= t) { output[tid] = 0; } else { output[tid] = input[tid + (src_it - it) * hwc]; } } } template <typename T> __global__ void KeTemporalShiftBwNCHW(const T* output_grad, T* input_grad, const int ntchw, const int tchw, const int chw, const int hw, const int t, const int c1, const int c2) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int src_it = 0; for (; tid < ntchw; tid += stride) { int it = (tid % tchw) / chw; int ic = (tid % chw) / hw; if (ic < c1) { src_it = it + 1; } else if (ic < c2) { src_it = it - 1; } else { src_it = it; } if (src_it >= 0 && src_it < t) { input_grad[tid] = output_grad[tid + (src_it - it) * chw]; } else { input_grad[tid] = 0; } } } template <typename T> __global__ void KeTemporalShiftBwNHWC(const T* output_grad, T* input_grad, const int nthwc, const int thwc, const int hwc, const int t, const int c, const int c1, const int c2) { int tid = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; int src_it = 0; for (; tid < nthwc; tid += stride) { int it = (tid % thwc) / hwc; int ic = tid % c; if (ic < c1) { src_it = it + 1; } else if (ic < c2) { src_it = it - 1; } else { src_it = it; } if (src_it >= 0 && src_it < t) { input_grad[tid] = output_grad[tid + (src_it - it) * hwc]; } else { input_grad[tid] = 0; } } } template <typename T> class TemporalShiftOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { PADDLE_ENFORCE_EQ(platform::is_gpu_place(ctx.GetPlace()), true, platform::errors::InvalidArgument( "This kernel only runs on GPU device.")); auto* 
input = ctx.Input<Tensor>("X"); auto* output = ctx.Output<Tensor>("Out"); int t = ctx.Attr<int>("seg_num"); float shift_ratio = ctx.Attr<float>("shift_ratio"); const std::string data_format_str = ctx.Attr<std::string>("data_format"); const DataLayout data_layout = framework::StringToDataLayout(data_format_str); const int nt = input->dims()[0]; const int c = (data_layout == DataLayout::kNCHW ? input->dims()[1] : input->dims()[3]); const int h = (data_layout == DataLayout::kNCHW ? input->dims()[2] : input->dims()[1]); const int w = (data_layout == DataLayout::kNCHW ? input->dims()[3] : input->dims()[2]); const int hw = h * w; const int chw = c * hw; const int tchw = t * chw; const int ntchw = nt * chw; const int c1 = static_cast<int>(c * shift_ratio); const int c2 = static_cast<int>(c * 2 * shift_ratio); framework::DDim out_dims = (data_layout == DataLayout::kNCHW ? phi::make_ddim({nt, c, h, w}) : phi::make_ddim({nt, h, w, c})); const T* input_data = input->data<T>(); T* output_data = output->mutable_data<T>(out_dims, ctx.GetPlace()); int pixelNum = nt * chw; int threads = 1024; int grid = (pixelNum + threads - 1) / threads; const auto& dev_ctx = ctx.cuda_device_context(); int blocks_per_sm = dev_ctx.GetMaxPhysicalThreadCount() / threads; grid = std::min(dev_ctx.GetSMCount() * blocks_per_sm, grid); if (data_layout == DataLayout::kNCHW) { KeTemporalShiftFwNCHW< T><<<grid, threads, 0, ctx.cuda_device_context().stream()>>>( input_data, output_data, ntchw, tchw, chw, hw, t, c1, c2); } else { KeTemporalShiftFwNHWC< T><<<grid, threads, 0, ctx.cuda_device_context().stream()>>>( input_data, output_data, ntchw, tchw, chw, t, c, c1, c2); } } }; template <typename T> class TemporalShiftGradOpCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext& ctx) const override { auto* input_grad = ctx.Output<Tensor>(framework::GradVarName("X")); auto* output_grad = ctx.Input<Tensor>(framework::GradVarName("Out")); int t = ctx.Attr<int>("seg_num"); float shift_ratio = ctx.Attr<float>("shift_ratio"); const std::string data_format_str = ctx.Attr<std::string>("data_format"); const DataLayout data_layout = framework::StringToDataLayout(data_format_str); const int nt = output_grad->dims()[0]; const int c = (data_layout == DataLayout::kNCHW ? output_grad->dims()[1] : output_grad->dims()[3]); const int h = (data_layout == DataLayout::kNCHW ? output_grad->dims()[2] : output_grad->dims()[1]); const int w = (data_layout == DataLayout::kNCHW ? output_grad->dims()[3] : output_grad->dims()[2]); const int hw = h * w; const int chw = c * hw; const int tchw = t * chw; const int ntchw = nt * chw; const int c1 = static_cast<int>(c * shift_ratio); const int c2 = static_cast<int>(c * 2 * shift_ratio); framework::DDim in_grad_dims = (data_layout == DataLayout::kNCHW ? 
phi::make_ddim({nt, c, h, w}) : phi::make_ddim({nt, h, w, c})); const T* output_grad_data = output_grad->data<T>(); T* input_grad_data = input_grad->mutable_data<T>(in_grad_dims, ctx.GetPlace()); int pixelNum = nt * chw; int threads = 1024; int grid = (pixelNum + threads - 1) / threads; const auto& dev_ctx = ctx.cuda_device_context(); int blocks_per_sm = dev_ctx.GetMaxPhysicalThreadCount() / threads; grid = std::min(dev_ctx.GetSMCount() * blocks_per_sm, grid); if (data_layout == DataLayout::kNCHW) { KeTemporalShiftBwNCHW< T><<<grid, threads, 0, ctx.cuda_device_context().stream()>>>( output_grad_data, input_grad_data, ntchw, tchw, chw, hw, t, c1, c2); } else { KeTemporalShiftBwNHWC< T><<<grid, threads, 0, ctx.cuda_device_context().stream()>>>( output_grad_data, input_grad_data, ntchw, tchw, chw, t, c, c1, c2); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; REGISTER_OP_CUDA_KERNEL( temporal_shift, ops::TemporalShiftOpCUDAKernel<float>, ops::TemporalShiftOpCUDAKernel<double>, ops::TemporalShiftOpCUDAKernel<paddle::platform::float16>); REGISTER_OP_CUDA_KERNEL( temporal_shift_grad, ops::TemporalShiftGradOpCUDAKernel<float>, ops::TemporalShiftGradOpCUDAKernel<double>, ops::TemporalShiftGradOpCUDAKernel<paddle::platform::float16>);
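All four KeTemporalShift kernels above use the same grid-stride idiom, which is why the host code can cap grid at GetSMCount() * blocks_per_sm instead of sizing it to the element count. The kernel below is a minimal, generic illustration of that loop shape only; it is not Paddle API and the names are made up.

// Generic grid-stride pattern (illustration): each thread handles elements
// tid, tid + stride, tid + 2*stride, ... so any grid size covers the full range.
template <typename T>
__global__ void GridStrideCopy(const T* in, T* out, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    int stride = blockDim.x * gridDim.x;
    for (; tid < n; tid += stride) {
        out[tid] = in[tid];
    }
}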
16f1e3b2a4e5cb2f3ba49a1f46d0ac68082917a8.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <raft/matrix/matrix.cuh> #include <raft/random/rng.cuh> #include "../test_utils.h" namespace raft { namespace matrix { template <typename T> struct MatrixInputs { T tolerance; int n_row; int n_col; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const MatrixInputs<T> &dims) { return os; } template <typename T> class MatrixTest : public ::testing::TestWithParam<MatrixInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<MatrixInputs<T>>::GetParam(); raft::random::Rng r(params.seed); int len = params.n_row * params.n_col; hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); raft::allocate(in1, len); raft::allocate(in2, len); raft::allocate(in1_revr, len); r.uniform(in1, len, T(-1.0), T(1.0), stream); copy(in1, in2, params.n_row, params.n_col, stream); // copy(in1, in1_revr, params.n_row, params.n_col); // colReverse(in1_revr, params.n_row, params.n_col); T *outTrunc; raft::allocate(outTrunc, 6); truncZeroOrigin(in1, params.n_row, outTrunc, 3, 2, stream); CUDA_CHECK(hipStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(hipFree(in1)); CUDA_CHECK(hipFree(in2)); // CUDA_CHECK(hipFree(in1_revr)); } protected: MatrixInputs<T> params; T *in1, *in2, *in1_revr; }; const std::vector<MatrixInputs<float>> inputsf2 = {{0.000001f, 4, 4, 1234ULL}}; const std::vector<MatrixInputs<double>> inputsd2 = { {0.00000001, 4, 4, 1234ULL}}; typedef MatrixTest<float> MatrixTestF; TEST_P(MatrixTestF, Result) { ASSERT_TRUE(raft::devArrMatch(in1, in2, params.n_row * params.n_col, raft::CompareApprox<float>(params.tolerance))); } typedef MatrixTest<double> MatrixTestD; TEST_P(MatrixTestD, Result) { ASSERT_TRUE(raft::devArrMatch(in1, in2, params.n_row * params.n_col, raft::CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_SUITE_P(MatrixTests, MatrixTestF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_SUITE_P(MatrixTests, MatrixTestD, ::testing::ValuesIn(inputsd2)); template <typename T> class MatrixCopyRowsTest : public ::testing::Test { using math_t = typename std::tuple_element<0, T>::type; using idx_t = typename std::tuple_element<1, T>::type; using idx_array_t = typename std::tuple_element<2, T>::type; protected: MatrixCopyRowsTest() : allocator(handle.get_device_allocator()), input(allocator, handle.get_stream(), n_cols * n_rows), indices(allocator, handle.get_stream(), n_selected), output(allocator, handle.get_stream(), n_cols * n_selected) { CUDA_CHECK(hipStreamCreate(&stream)); handle.set_stream(stream); raft::update_device(indices.data(), indices_host, n_selected, stream); // Init input array thrust::counting_iterator<idx_t> first(0); thrust::device_ptr<math_t> ptr(input.data()); thrust::copy(thrust::hip::par.on(stream), first, first + n_cols * n_rows, ptr); } void TearDown() override { 
CUDA_CHECK(hipStreamDestroy(stream)); } void testCopyRows() { copyRows(input.data(), n_rows, n_cols, output.data(), indices.data(), n_selected, stream, false); EXPECT_TRUE(raft::devArrMatchHost(output_exp_colmajor, output.data(), n_selected * n_cols, raft::Compare<math_t>())); copyRows(input.data(), n_rows, n_cols, output.data(), indices.data(), n_selected, stream, true); EXPECT_TRUE(raft::devArrMatchHost(output_exp_rowmajor, output.data(), n_selected * n_cols, raft::Compare<math_t>())); } protected: int n_rows = 10; int n_cols = 3; int n_selected = 5; idx_array_t indices_host[5] = {0, 3, 4, 7, 9}; math_t output_exp_colmajor[15] = {0, 3, 4, 7, 9, 10, 13, 14, 17, 19, 20, 23, 24, 27, 29}; math_t output_exp_rowmajor[15] = {0, 1, 2, 9, 10, 11, 12, 13, 14, 21, 22, 23, 27, 28, 29}; raft::handle_t handle; hipStream_t stream; std::shared_ptr<raft::mr::device::allocator> allocator; raft::mr::device::buffer<math_t> input; raft::mr::device::buffer<math_t> output; raft::mr::device::buffer<idx_array_t> indices; }; using TypeTuple = ::testing::Types<std::tuple<float, int, int>, std::tuple<float, int64_t, int>, std::tuple<double, int, int>, std::tuple<double, int64_t, int>>; TYPED_TEST_CASE(MatrixCopyRowsTest, TypeTuple); TYPED_TEST(MatrixCopyRowsTest, CopyRows) { this->testCopyRows(); } } // namespace matrix } // namespace raft
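// Editor's illustration (not part of the original dataset entry): a minimal,
// self-contained sketch of the stream lifecycle that hipify rewrites in the
// test fixture above. The .cu twin that follows uses cudaStream_t,
// cudaStreamCreate and cudaStreamDestroy; the hipified version above uses the
// hip* equivalents while RAFT wrappers such as CUDA_CHECK keep their names.
// HIP_TRY below is a stand-in error-check macro invented for this sketch.
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

#define HIP_TRY(call)                                                \
  do {                                                               \
    hipError_t err_ = (call);                                        \
    if (err_ != hipSuccess) {                                        \
      std::fprintf(stderr, "HIP error %d at %s:%d\n",                \
                   static_cast<int>(err_), __FILE__, __LINE__);      \
      std::exit(EXIT_FAILURE);                                       \
    }                                                                \
  } while (0)

int main() {
  hipStream_t stream;                  // hipify: cudaStream_t -> hipStream_t
  HIP_TRY(hipStreamCreate(&stream));   // cudaStreamCreate -> hipStreamCreate
  HIP_TRY(hipStreamSynchronize(stream));
  HIP_TRY(hipStreamDestroy(stream));   // cudaStreamDestroy -> hipStreamDestroy
  return 0;
}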
16f1e3b2a4e5cb2f3ba49a1f46d0ac68082917a8.cu
/* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <raft/matrix/matrix.cuh> #include <raft/random/rng.cuh> #include "../test_utils.h" namespace raft { namespace matrix { template <typename T> struct MatrixInputs { T tolerance; int n_row; int n_col; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const MatrixInputs<T> &dims) { return os; } template <typename T> class MatrixTest : public ::testing::TestWithParam<MatrixInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<MatrixInputs<T>>::GetParam(); raft::random::Rng r(params.seed); int len = params.n_row * params.n_col; cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); raft::allocate(in1, len); raft::allocate(in2, len); raft::allocate(in1_revr, len); r.uniform(in1, len, T(-1.0), T(1.0), stream); copy(in1, in2, params.n_row, params.n_col, stream); // copy(in1, in1_revr, params.n_row, params.n_col); // colReverse(in1_revr, params.n_row, params.n_col); T *outTrunc; raft::allocate(outTrunc, 6); truncZeroOrigin(in1, params.n_row, outTrunc, 3, 2, stream); CUDA_CHECK(cudaStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(cudaFree(in1)); CUDA_CHECK(cudaFree(in2)); // CUDA_CHECK(cudaFree(in1_revr)); } protected: MatrixInputs<T> params; T *in1, *in2, *in1_revr; }; const std::vector<MatrixInputs<float>> inputsf2 = {{0.000001f, 4, 4, 1234ULL}}; const std::vector<MatrixInputs<double>> inputsd2 = { {0.00000001, 4, 4, 1234ULL}}; typedef MatrixTest<float> MatrixTestF; TEST_P(MatrixTestF, Result) { ASSERT_TRUE(raft::devArrMatch(in1, in2, params.n_row * params.n_col, raft::CompareApprox<float>(params.tolerance))); } typedef MatrixTest<double> MatrixTestD; TEST_P(MatrixTestD, Result) { ASSERT_TRUE(raft::devArrMatch(in1, in2, params.n_row * params.n_col, raft::CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_SUITE_P(MatrixTests, MatrixTestF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_SUITE_P(MatrixTests, MatrixTestD, ::testing::ValuesIn(inputsd2)); template <typename T> class MatrixCopyRowsTest : public ::testing::Test { using math_t = typename std::tuple_element<0, T>::type; using idx_t = typename std::tuple_element<1, T>::type; using idx_array_t = typename std::tuple_element<2, T>::type; protected: MatrixCopyRowsTest() : allocator(handle.get_device_allocator()), input(allocator, handle.get_stream(), n_cols * n_rows), indices(allocator, handle.get_stream(), n_selected), output(allocator, handle.get_stream(), n_cols * n_selected) { CUDA_CHECK(cudaStreamCreate(&stream)); handle.set_stream(stream); raft::update_device(indices.data(), indices_host, n_selected, stream); // Init input array thrust::counting_iterator<idx_t> first(0); thrust::device_ptr<math_t> ptr(input.data()); thrust::copy(thrust::cuda::par.on(stream), first, first + n_cols * n_rows, ptr); } void TearDown() override { CUDA_CHECK(cudaStreamDestroy(stream)); } void testCopyRows() 
{ copyRows(input.data(), n_rows, n_cols, output.data(), indices.data(), n_selected, stream, false); EXPECT_TRUE(raft::devArrMatchHost(output_exp_colmajor, output.data(), n_selected * n_cols, raft::Compare<math_t>())); copyRows(input.data(), n_rows, n_cols, output.data(), indices.data(), n_selected, stream, true); EXPECT_TRUE(raft::devArrMatchHost(output_exp_rowmajor, output.data(), n_selected * n_cols, raft::Compare<math_t>())); } protected: int n_rows = 10; int n_cols = 3; int n_selected = 5; idx_array_t indices_host[5] = {0, 3, 4, 7, 9}; math_t output_exp_colmajor[15] = {0, 3, 4, 7, 9, 10, 13, 14, 17, 19, 20, 23, 24, 27, 29}; math_t output_exp_rowmajor[15] = {0, 1, 2, 9, 10, 11, 12, 13, 14, 21, 22, 23, 27, 28, 29}; raft::handle_t handle; cudaStream_t stream; std::shared_ptr<raft::mr::device::allocator> allocator; raft::mr::device::buffer<math_t> input; raft::mr::device::buffer<math_t> output; raft::mr::device::buffer<idx_array_t> indices; }; using TypeTuple = ::testing::Types<std::tuple<float, int, int>, std::tuple<float, int64_t, int>, std::tuple<double, int, int>, std::tuple<double, int64_t, int>>; TYPED_TEST_CASE(MatrixCopyRowsTest, TypeTuple); TYPED_TEST(MatrixCopyRowsTest, CopyRows) { this->testCopyRows(); } } // namespace matrix } // namespace raft
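// Editor's illustration (host-side, not RAFT code): the index math behind the
// expected arrays in MatrixCopyRowsTest above. The input is filled with the
// counting sequence 0..n_rows*n_cols-1 over a 10x3 matrix, and rows
// {0, 3, 4, 7, 9} are gathered under both layouts:
//   column-major: element(r, c) = r + c * n_rows
//   row-major:    element(r, c) = r * n_cols + c
#include <cassert>
#include <vector>

int main() {
  const int n_rows = 10, n_cols = 3;
  const std::vector<int> rows = {0, 3, 4, 7, 9};
  std::vector<int> in(n_rows * n_cols);
  for (int i = 0; i < n_rows * n_cols; ++i) in[i] = i;  // counting_iterator

  std::vector<int> out_col, out_row;
  for (int c = 0; c < n_cols; ++c)                       // column-major gather
    for (int r : rows) out_col.push_back(in[r + c * n_rows]);
  for (int r : rows)                                     // row-major gather
    for (int c = 0; c < n_cols; ++c) out_row.push_back(in[r * n_cols + c]);

  // Matches output_exp_colmajor and output_exp_rowmajor in the test above.
  assert(out_col == std::vector<int>({0, 3, 4, 7, 9, 10, 13, 14, 17, 19,
                                      20, 23, 24, 27, 29}));
  assert(out_row == std::vector<int>({0, 1, 2, 9, 10, 11, 12, 13, 14,
                                      21, 22, 23, 27, 28, 29}));
  return 0;
}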
02bd489c70d540bae78d2be3ee04af9c901721d0.hip
// !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------

/**
 * @file graphsum_app.cu
 *
 * @brief gcn graphsum application
 */

#include <gunrock/gunrock.h>

// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>

// Graph definitions
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>

// graphsum includes
#include <gunrock/app/gcn/graphsum/graphsum_enactor.cuh>
#include <gunrock/app/gcn/module.h>

/**
 * @brief graphsum layer of GCN
 *
 * @param parameters The parameters
 * @param graph The graph
 * @param[in] dim dimension of the feature vector
 * @param in the input to the graphsum layer
 * @param out output matrix
 *
 * @tparam GraphT type of the graph
 * @tparam ValueT type of the value, double by default
 *
 * @return time elapsed to execute
 */
namespace gunrock {
namespace app {
namespace graphsum {

hipError_t UseParameters(util::Parameters &parameters) {
  hipError_t retval = hipSuccess;
  GUARD_CU(UseParameters_app(parameters));
  GUARD_CU(UseParameters_problem(parameters));
  GUARD_CU(UseParameters_enactor(parameters));

  GUARD_CU(parameters.Use<std::string>(
      "in",
      util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::REQUIRED_PARAMETER,
      "invalid", "input file name to feature matrix", __FILE__, __LINE__));

  GUARD_CU(parameters.Use<int>(
      "dim",
      util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::REQUIRED_PARAMETER,
      -1, "feature vector dimension", __FILE__, __LINE__));

  GUARD_CU(parameters.Use<std::string>(
      "out",
      util::OPTIONAL_ARGUMENT | util::SINGLE_VALUE | util::REQUIRED_PARAMETER,
      "out", "output file name", __FILE__, __LINE__));

  return retval;
}

}  // namespace graphsum
}  // namespace app
}  // namespace gunrock

using namespace gunrock;

template <typename SizeT, typename ValueT, typename GraphT>
struct graph_sum : module {
  typedef app::graphsum::Problem<GraphT> ProblemT;
  typedef app::graphsum::Enactor<ProblemT> EnactorT;

  GraphT *a;
  util::Array1D<SizeT, ValueT> b, c, b_grad, c_grad;
  ProblemT *problem;
  EnactorT *enactor;
  int dim;
  float *fw_time, *bw_time;

  graph_sum(util::Parameters &p, GraphT &_a, util::Array1D<SizeT, ValueT> &_b,
            util::Array1D<SizeT, ValueT> &_b_grad,
            util::Array1D<SizeT, ValueT> &_c,
            util::Array1D<SizeT, ValueT> &_c_grad, int _dim, float *_fw,
            float *_bw)
      : a(&_a),
        b(_b),
        c(_c),
        b_grad(_b_grad),
        c_grad(_c_grad),
        dim(_dim),
        fw_time(_fw),
        bw_time(_bw) {
    problem = new ProblemT(p);
    enactor = new EnactorT();
    problem->Init(_a, dim);
    enactor->Init(*problem);
  }

  virtual void forward(bool train) override {
    timer.Start();
    // Here the b array is used to create the new c array;
    // in fact c is just b aggregated with the graph coefficients.
    problem->Reset(1, b, c);
    enactor->Reset();
    enactor->Enact();
    timer.Stop();
    *fw_time += timer.ElapsedMillis();
  }

  virtual void backward() override {
    timer.Start();
    // Here c_grad (the gradient w.r.t. this layer's output, produced by the
    // downstream module's backward pass) is used to create the b_grad array.
    problem->Reset(0, c_grad, b_grad);
    enactor->Reset();
    enactor->Enact();
    timer.Stop();
    *bw_time += timer.ElapsedMillis();
  }
};

template <typename GraphT, typename ValueT = typename GraphT::ValueT>
double gcn_graphsum(gunrock::util::Parameters &parameters, GraphT &graph,
                    const int dim, ValueT *in, ValueT *out) {
  typedef typename GraphT::VertexT VertexT;
  typedef gunrock::app::graphsum::Problem<GraphT> ProblemT;
  typedef gunrock::app::graphsum::Enactor<ProblemT> EnactorT;
  gunrock::util::CpuTimer cpu_timer;
  gunrock::util::Location target = gunrock::util::DEVICE;
  double total_time = 0;
  if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);

  // Allocate problem and enactor on GPU, and initialize them
  ProblemT problem(parameters);
  EnactorT enactor;
  problem.Init(graph, dim, in, target);
  enactor.Init(problem, target);

  problem.Reset(in);
  enactor.Reset();

  cpu_timer.Start();
  enactor.Enact();
  cpu_timer.Stop();

  total_time += cpu_timer.ElapsedMillis();
  problem.Extract(out);

  enactor.Release(target);
  problem.Release(target);

  return total_time;
}

/**
 * @brief Simple interface that takes in a graph in CSR format
 *
 * @param[in] num_nodes Number of vertices in the input graph
 * @param[in] num_edges Number of edges in the input graph
 * @param[in] row_offsets CSR-formatted graph input row offsets
 * @param[in] col_indices CSR-formatted graph input column indices
 * @param[in] dim The dimension of the feature vector
 * @param in The input to graphsum layer
 * @param out The output of graphsum layer
 *
 * @tparam VertexT type of vertex id, default to int
 *
 * @return double Return accumulated elapsed times for all runs
 */
template <typename VertexT = int, typename SizeT = int,
          typename ValueT = double>
double graphsum(const SizeT num_nodes, const SizeT num_edges,
                const SizeT *row_offsets, const VertexT *col_indices,
                const int dim, ValueT *in, ValueT *out) {
  typedef typename gunrock::app::TestGraph<VertexT, SizeT, ValueT,
                                           gunrock::graph::HAS_CSR>
      GraphT;
  typedef typename GraphT::CsrT CsrT;

  // Setup parameters
  gunrock::util::Parameters parameters("sparseMatMul");
  gunrock::graphio::UseParameters(parameters);
  gunrock::app::graphsum::UseParameters(parameters);
  gunrock::app::UseParameters_test(parameters);
  parameters.Parse_CommandLine(0, NULL);
  parameters.Set("graph-type", "by-pass");
  bool quiet = parameters.Get<bool>("quiet");

  GraphT graph;
  // Assign pointers into gunrock graph format
  graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
  graph.CsrT::row_offsets.SetPointer(row_offsets, num_nodes + 1,
                                     gunrock::util::HOST);
  graph.CsrT::column_indices.SetPointer(col_indices, num_edges,
                                        gunrock::util::HOST);
  // graph.FromCsr(graph.csr());
  gunrock::graphio::LoadGraph(parameters, graph);

  // Run the gcn_graphsum
  double elapsed_time = gcn_graphsum(parameters, graph, dim, in, out);

  // Cleanup
  graph.Release();

  return elapsed_time;
}

// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
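// Editor's sketch (not Gunrock code): a CPU reference for what the graphsum
// layer computes, to make the forward()/backward() reuse above concrete.
// Forward aggregates neighbour features, c = A_hat * b; backward applies the
// same symmetric operator to the output gradient, b_grad = A_hat^T * c_grad.
// The 1/sqrt(deg_u * deg_v) coefficient below is the usual GCN normalization
// and is only an assumption here -- the real coefficients are computed inside
// the Problem/Enactor pair.
#include <algorithm>
#include <cmath>
#include <vector>

void graphsum_cpu(int num_nodes, const std::vector<int>& row_offsets,
                  const std::vector<int>& col_indices, int dim,
                  const std::vector<double>& b, std::vector<double>& c) {
  c.assign(static_cast<size_t>(num_nodes) * dim, 0.0);
  auto deg = [&](int v) { return row_offsets[v + 1] - row_offsets[v]; };
  for (int v = 0; v < num_nodes; ++v) {
    for (int e = row_offsets[v]; e < row_offsets[v + 1]; ++e) {
      int u = col_indices[e];
      int du = std::max(deg(u), 1), dv = std::max(deg(v), 1);
      double coef = 1.0 / std::sqrt(double(du) * double(dv));  // assumed
      for (int k = 0; k < dim; ++k) c[v * dim + k] += coef * b[u * dim + k];
    }
  }
}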
02bd489c70d540bae78d2be3ee04af9c901721d0.cu
// ----------------------------------------------------------------------------
// Gunrock -- Fast and Efficient GPU Graph Library
// ----------------------------------------------------------------------------
// This source code is distributed under the terms of LICENSE.TXT
// in the root directory of this source distribution.
// ----------------------------------------------------------------------------

/**
 * @file graphsum_app.cu
 *
 * @brief gcn graphsum application
 */

#include <gunrock/gunrock.h>

// Utilities and correctness-checking
#include <gunrock/util/test_utils.cuh>

// Graph definitions
#include <gunrock/graphio/graphio.cuh>
#include <gunrock/app/app_base.cuh>
#include <gunrock/app/test_base.cuh>

// graphsum includes
#include <gunrock/app/gcn/graphsum/graphsum_enactor.cuh>
#include <gunrock/app/gcn/module.h>

/**
 * @brief graphsum layer of GCN
 *
 * @param parameters The parameters
 * @param graph The graph
 * @param[in] dim dimension of the feature vector
 * @param in the input to the graphsum layer
 * @param out output matrix
 *
 * @tparam GraphT type of the graph
 * @tparam ValueT type of the value, double by default
 *
 * @return time elapsed to execute
 */
namespace gunrock {
namespace app {
namespace graphsum {

cudaError_t UseParameters(util::Parameters &parameters) {
  cudaError_t retval = cudaSuccess;
  GUARD_CU(UseParameters_app(parameters));
  GUARD_CU(UseParameters_problem(parameters));
  GUARD_CU(UseParameters_enactor(parameters));

  GUARD_CU(parameters.Use<std::string>(
      "in",
      util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::REQUIRED_PARAMETER,
      "invalid", "input file name to feature matrix", __FILE__, __LINE__));

  GUARD_CU(parameters.Use<int>(
      "dim",
      util::REQUIRED_ARGUMENT | util::SINGLE_VALUE | util::REQUIRED_PARAMETER,
      -1, "feature vector dimension", __FILE__, __LINE__));

  GUARD_CU(parameters.Use<std::string>(
      "out",
      util::OPTIONAL_ARGUMENT | util::SINGLE_VALUE | util::REQUIRED_PARAMETER,
      "out", "output file name", __FILE__, __LINE__));

  return retval;
}

}  // namespace graphsum
}  // namespace app
}  // namespace gunrock

using namespace gunrock;

template <typename SizeT, typename ValueT, typename GraphT>
struct graph_sum : module {
  typedef app::graphsum::Problem<GraphT> ProblemT;
  typedef app::graphsum::Enactor<ProblemT> EnactorT;

  GraphT *a;
  util::Array1D<SizeT, ValueT> b, c, b_grad, c_grad;
  ProblemT *problem;
  EnactorT *enactor;
  int dim;
  float *fw_time, *bw_time;

  graph_sum(util::Parameters &p, GraphT &_a, util::Array1D<SizeT, ValueT> &_b,
            util::Array1D<SizeT, ValueT> &_b_grad,
            util::Array1D<SizeT, ValueT> &_c,
            util::Array1D<SizeT, ValueT> &_c_grad, int _dim, float *_fw,
            float *_bw)
      : a(&_a),
        b(_b),
        c(_c),
        b_grad(_b_grad),
        c_grad(_c_grad),
        dim(_dim),
        fw_time(_fw),
        bw_time(_bw) {
    problem = new ProblemT(p);
    enactor = new EnactorT();
    problem->Init(_a, dim);
    enactor->Init(*problem);
  }

  virtual void forward(bool train) override {
    timer.Start();
    // Here the b array is used to create the new c array;
    // in fact c is just b aggregated with the graph coefficients.
    problem->Reset(1, b, c);
    enactor->Reset();
    enactor->Enact();
    timer.Stop();
    *fw_time += timer.ElapsedMillis();
  }

  virtual void backward() override {
    timer.Start();
    // Here c_grad (the gradient w.r.t. this layer's output, produced by the
    // downstream module's backward pass) is used to create the b_grad array.
    problem->Reset(0, c_grad, b_grad);
    enactor->Reset();
    enactor->Enact();
    timer.Stop();
    *bw_time += timer.ElapsedMillis();
  }
};

template <typename GraphT, typename ValueT = typename GraphT::ValueT>
double gcn_graphsum(gunrock::util::Parameters &parameters, GraphT &graph,
                    const int dim, ValueT *in, ValueT *out) {
  typedef typename GraphT::VertexT VertexT;
  typedef gunrock::app::graphsum::Problem<GraphT> ProblemT;
  typedef gunrock::app::graphsum::Enactor<ProblemT> EnactorT;
  gunrock::util::CpuTimer cpu_timer;
  gunrock::util::Location target = gunrock::util::DEVICE;
  double total_time = 0;
  if (parameters.UseDefault("quiet")) parameters.Set("quiet", true);

  // Allocate problem and enactor on GPU, and initialize them
  ProblemT problem(parameters);
  EnactorT enactor;
  problem.Init(graph, dim, in, target);
  enactor.Init(problem, target);

  problem.Reset(in);
  enactor.Reset();

  cpu_timer.Start();
  enactor.Enact();
  cpu_timer.Stop();

  total_time += cpu_timer.ElapsedMillis();
  problem.Extract(out);

  enactor.Release(target);
  problem.Release(target);

  return total_time;
}

/**
 * @brief Simple interface that takes in a graph in CSR format
 *
 * @param[in] num_nodes Number of vertices in the input graph
 * @param[in] num_edges Number of edges in the input graph
 * @param[in] row_offsets CSR-formatted graph input row offsets
 * @param[in] col_indices CSR-formatted graph input column indices
 * @param[in] dim The dimension of the feature vector
 * @param in The input to graphsum layer
 * @param out The output of graphsum layer
 *
 * @tparam VertexT type of vertex id, default to int
 *
 * @return double Return accumulated elapsed times for all runs
 */
template <typename VertexT = int, typename SizeT = int,
          typename ValueT = double>
double graphsum(const SizeT num_nodes, const SizeT num_edges,
                const SizeT *row_offsets, const VertexT *col_indices,
                const int dim, ValueT *in, ValueT *out) {
  typedef typename gunrock::app::TestGraph<VertexT, SizeT, ValueT,
                                           gunrock::graph::HAS_CSR>
      GraphT;
  typedef typename GraphT::CsrT CsrT;

  // Setup parameters
  gunrock::util::Parameters parameters("sparseMatMul");
  gunrock::graphio::UseParameters(parameters);
  gunrock::app::graphsum::UseParameters(parameters);
  gunrock::app::UseParameters_test(parameters);
  parameters.Parse_CommandLine(0, NULL);
  parameters.Set("graph-type", "by-pass");
  bool quiet = parameters.Get<bool>("quiet");

  GraphT graph;
  // Assign pointers into gunrock graph format
  graph.CsrT::Allocate(num_nodes, num_edges, gunrock::util::HOST);
  graph.CsrT::row_offsets.SetPointer(row_offsets, num_nodes + 1,
                                     gunrock::util::HOST);
  graph.CsrT::column_indices.SetPointer(col_indices, num_edges,
                                        gunrock::util::HOST);
  // graph.FromCsr(graph.csr());
  gunrock::graphio::LoadGraph(parameters, graph);

  // Run the gcn_graphsum
  double elapsed_time = gcn_graphsum(parameters, graph, dim, in, out);

  // Cleanup
  graph.Release();

  return elapsed_time;
}

// Leave this at the end of the file
// Local Variables:
// mode:c++
// c-file-style: "NVIDIA"
// End:
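// Editor's sketch of how the simple CSR entry point declared above could be
// called. The 3-node undirected path graph and dim = 2 features are made-up
// example data; the call assumes the graphsum<>() template above is visible
// to this translation unit and that the build links against Gunrock.
#include <vector>

int example_graphsum_call() {
  const int num_nodes = 3, num_edges = 4, dim = 2;
  // Undirected path 0-1-2 stored as a symmetric CSR.
  std::vector<int> row_offsets = {0, 1, 3, 4};
  std::vector<int> col_indices = {1, 0, 2, 1};
  std::vector<double> in = {1, 2, 3, 4, 5, 6};  // num_nodes x dim, row-major
  std::vector<double> out(num_nodes * dim, 0.0);

  double ms = graphsum<int, int, double>(num_nodes, num_edges,
                                         row_offsets.data(),
                                         col_indices.data(), dim, in.data(),
                                         out.data());
  return ms >= 0.0 ? 0 : 1;
}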
0a72ddc46760a3d148b1a4e7ed4ebfa7b70478fd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "orc_common.h" #include "orc_gpu.h" #include <cudf/column/column_device_view.cuh> #include <cudf/lists/lists_column_view.hpp> #include <cudf/utilities/bit.hpp> #include <io/utilities/block_utils.cuh> #include <io/utilities/config_utils.hpp> #include <io/utilities/time_utils.cuh> #include <hipcub/hipcub.hpp> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <nvcomp/snappy.h> namespace cudf { namespace io { namespace orc { namespace gpu { using cudf::detail::device_2dspan; constexpr int scratch_buffer_size = 512 * 4; // Apache ORC reader does not handle zero-length patch lists for RLEv2 mode2 // Workaround replaces zero-length patch lists by a dummy zero patch constexpr bool zero_pll_war = true; static __device__ __constant__ int64_t kORCTimeToUTC = 1420070400; // Seconds from January 1st, 1970 to January 1st, 2015 struct byterle_enc_state_s { uint32_t literal_run; uint32_t repeat_run; volatile uint32_t rpt_map[(512 / 32) + 1]; }; struct intrle_enc_state_s { uint32_t literal_run; uint32_t delta_run; uint32_t literal_mode; uint32_t literal_w; uint32_t hdr_bytes; uint32_t pl_bytes; volatile uint32_t delta_map[(512 / 32) + 1]; }; struct strdata_enc_state_s { uint32_t char_count; uint32_t lengths_red[(512 / 32)]; const char* str_data[512]; }; struct orcenc_state_s { uint32_t cur_row; // Current row in group uint32_t present_rows; // # of rows in present buffer uint32_t present_out; // # of rows in present buffer that have been flushed uint32_t nrows; // # of rows in current batch uint32_t numvals; // # of non-zero values in current batch (<=nrows) uint32_t numlengths; // # of non-zero values in DATA2 batch uint32_t nnz; // Running count of non-null values encoder_chunk_streams stream; EncChunk chunk; uint32_t strm_pos[CI_NUM_STREAMS]; uint8_t valid_buf[512]; // valid map bits union { byterle_enc_state_s byterle; intrle_enc_state_s intrle; strdata_enc_state_s strenc; StripeDictionary dict_stripe; } u; union { uint8_t u8[scratch_buffer_size]; // gblock_vminscratch buffer uint32_t u32[scratch_buffer_size / 4]; } buf; union { uint8_t u8[2048]; uint32_t u32[1024]; int32_t i32[1024]; uint64_t u64[1024]; int64_t i64[1024]; } vals; union { uint8_t u8[2048]; uint32_t u32[1024]; uint64_t u64[1024]; } lengths; }; static inline __device__ uint32_t zigzag(uint32_t v) { return v; } static inline __device__ uint32_t zigzag(int32_t v) { int32_t s = (v >> 31); return ((v ^ s) * 2) - s; } static inline __device__ uint64_t zigzag(uint64_t v) { return v; } static inline __device__ uint64_t zigzag(int64_t v) { int64_t s = (v < 0) ? 1 : 0; return ((v ^ -s) * 2) + s; } static inline __device__ __uint128_t zigzag(__int128_t v) { int64_t s = (v < 0) ? 
1 : 0; return ((v ^ -s) * 2) + s; } static inline __device__ uint32_t CountLeadingBytes32(uint32_t v) { return __clz(v) >> 3; } static inline __device__ uint32_t CountLeadingBytes64(uint64_t v) { return __clzll(v) >> 3; } /** * @brief Raw data output * * @tparam cid stream type (strm_pos[cid] will be updated and output stored at * streams[cid]+strm_pos[cid]) * @tparam inmask input buffer position mask for circular buffers * @param[in] s encoder state * @param[in] inbuf base input buffer * @param[in] inpos position in input buffer * @param[in] count number of bytes to encode * @param[in] t thread id */ template <StreamIndexType cid, uint32_t inmask> static __device__ void StoreBytes( orcenc_state_s* s, const uint8_t* inbuf, uint32_t inpos, uint32_t count, int t) { uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid]; while (count > 0) { uint32_t n = min(count, 512); if (t < n) { dst[t] = inbuf[(inpos + t) & inmask]; } dst += n; inpos += n; count -= n; } __syncthreads(); if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); } } /** * @brief ByteRLE encoder * * @tparam cid stream type (strm_pos[cid] will be updated and output stored at * streams[cid]+strm_pos[cid]) * @tparam inmask input buffer position mask for circular buffers * @param[in] s encoder state * @param[in] inbuf base input buffer * @param[in] inpos position in input buffer * @param[in] numvals max number of values to encode * @param[in] flush encode all remaining values if nonzero * @param[in] t thread id * * @return number of input values encoded */ template <StreamIndexType cid, uint32_t inmask> static __device__ uint32_t ByteRLE( orcenc_state_s* s, const uint8_t* inbuf, uint32_t inpos, uint32_t numvals, uint32_t flush, int t) { uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid]; uint32_t out_cnt = 0; while (numvals > 0) { uint8_t v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0; uint8_t v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0; uint32_t rpt_map = ballot(t + 1 < numvals && v0 == v1), literal_run, repeat_run, maxvals = min(numvals, 512); if (!(t & 0x1f)) s->u.byterle.rpt_map[t >> 5] = rpt_map; __syncthreads(); if (t == 0) { // Find the start of an identical 3-byte sequence // TBD: The two loops below could be eliminated using more ballot+ffs using warp0 literal_run = 0; repeat_run = 0; while (literal_run < maxvals) { uint32_t next = s->u.byterle.rpt_map[(literal_run >> 5) + 1]; uint32_t mask = rpt_map & __funnelshift_r(rpt_map, next, 1); if (mask) { uint32_t literal_run_ofs = __ffs(mask) - 1; literal_run += literal_run_ofs; repeat_run = __ffs(~((rpt_map >> literal_run_ofs) >> 1)); if (repeat_run + literal_run_ofs == 32) { while (next == ~0) { uint32_t next_idx = ((literal_run + repeat_run) >> 5) + 1; next = (next_idx < 512 / 32) ? s->u.byterle.rpt_map[next_idx] : 0; repeat_run += 32; } repeat_run += __ffs(~next) - 1; } repeat_run = min(repeat_run + 1, maxvals - min(literal_run, maxvals)); if (repeat_run < 3) { literal_run += (flush && literal_run + repeat_run >= numvals) ? repeat_run : 0; repeat_run = 0; } break; } rpt_map = next; literal_run += 32; } if (repeat_run >= 130) { // Limit large runs to multiples of 130 repeat_run = (repeat_run >= 3 * 130) ? 3 * 130 : (repeat_run >= 2 * 130) ? 
2 * 130 : 130; } else if (literal_run && literal_run + repeat_run == maxvals) { repeat_run = 0; // Try again at next iteration } s->u.byterle.repeat_run = repeat_run; s->u.byterle.literal_run = min(literal_run, maxvals); } __syncthreads(); literal_run = s->u.byterle.literal_run; if (!flush && literal_run == numvals) { literal_run &= ~0x7f; if (!literal_run) break; } if (literal_run > 0) { uint32_t num_runs = (literal_run + 0x7f) >> 7; if (t < literal_run) { uint32_t run_id = t >> 7; uint32_t run = min(literal_run - run_id * 128, 128); if (!(t & 0x7f)) dst[run_id + t] = 0x100 - run; dst[run_id + t + 1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0; } dst += num_runs + literal_run; out_cnt += literal_run; numvals -= literal_run; inpos += literal_run; } repeat_run = s->u.byterle.repeat_run; if (repeat_run > 0) { while (repeat_run >= 130) { if (t == literal_run) // repeat_run follows literal_run { dst[0] = 0x7f; dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0; } dst += 2; out_cnt += 130; numvals -= 130; inpos += 130; repeat_run -= 130; } if (!flush && repeat_run == numvals) { // Wait for more data in case we can continue the run later break; } if (repeat_run >= 3) { if (t == literal_run) // repeat_run follows literal_run { dst[0] = repeat_run - 3; dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0; } dst += 2; out_cnt += repeat_run; numvals -= repeat_run; inpos += repeat_run; } } } if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); } return out_cnt; } /** * @brief Maps the symbol size in bytes to RLEv2 5-bit length code */ static const __device__ __constant__ uint8_t kByteLengthToRLEv2_W[9] = { 0, 7, 15, 23, 27, 28, 29, 30, 31}; /** * @brief Encode a varint value, return the number of bytes written */ static inline __device__ uint32_t StoreVarint(uint8_t* dst, __uint128_t v) { uint32_t bytecnt = 0; for (;;) { auto c = static_cast<uint32_t>(v & 0x7f); v >>= 7u; if (v == 0) { dst[bytecnt++] = c; break; } else { dst[bytecnt++] = c + 0x80; } } return bytecnt; } template <class T> static inline __device__ void StoreBytesBigEndian(uint8_t* dst, T v, uint32_t w) { for (uint32_t i = 0, b = w * 8; i < w; ++i) { b -= 8; dst[i] = static_cast<uint8_t>(v >> b); } } // Combine and store bits for symbol widths less than 8 static inline __device__ void StoreBitsBigEndian( uint8_t* dst, uint32_t v, uint32_t w, int num_vals, int t) { if (t <= (num_vals | 0x1f)) { uint32_t mask; if (w <= 1) { v = (v << 1) | (shuffle_xor(v, 1) & 0x1); v = (v << 2) | (shuffle_xor(v, 2) & 0x3); v = (v << 4) | (shuffle_xor(v, 4) & 0xf); mask = 0x7; } else if (w <= 2) { v = (v << 2) | (shuffle_xor(v, 1) & 0x3); v = (v << 4) | (shuffle_xor(v, 2) & 0xf); mask = 0x3; } else // if (w <= 4) { v = (v << 4) | (shuffle_xor(v, 1) & 0xf); mask = 0x1; } if (t < num_vals && !(t & mask)) { dst[(t * w) >> 3] = static_cast<uint8_t>(v); } } } /** * @brief Integer RLEv2 encoder * * @tparam cid stream type (strm_pos[cid] will be updated and output stored at * streams[cid]+strm_pos[cid]) * @tparam inmask input buffer position mask for circular buffers * @param[in] s encoder state * @param[in] inbuf base input buffer * @param[in] inpos position in input buffer * @param[in] numvals max number of values to encode * @param[in] flush encode all remaining values if nonzero * @param[in] t thread id * @param[in] temp_storage shared memory storage to perform block reduce * * @return number of input values encoded */ template <StreamIndexType cid, class T, bool is_signed, uint32_t inmask, int block_size, typename Storage> 
static __device__ uint32_t IntegerRLE( orcenc_state_s* s, const T* inbuf, uint32_t inpos, uint32_t numvals, int t, Storage& temp_storage) { using block_reduce = hipcub::BlockReduce<T, block_size>; uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid]; uint32_t out_cnt = 0; __shared__ volatile uint64_t block_vmin; while (numvals > 0) { T v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0; T v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0; T v2 = (t + 2 < numvals) ? inbuf[(inpos + t + 2) & inmask] : 0; uint32_t delta_map = ballot(t + 2 < numvals && v1 - v0 == v2 - v1), maxvals = min(numvals, 512), literal_run, delta_run; if (!(t & 0x1f)) s->u.intrle.delta_map[t >> 5] = delta_map; __syncthreads(); if (!t) { // Find the start of the next delta run (2 consecutive values with the same delta) literal_run = delta_run = 0; while (literal_run < maxvals) { if (delta_map != 0) { uint32_t literal_run_ofs = __ffs(delta_map) - 1; literal_run += literal_run_ofs; delta_run = __ffs(~((delta_map >> literal_run_ofs) >> 1)); if (literal_run_ofs + delta_run == 32) { for (;;) { uint32_t delta_idx = (literal_run + delta_run) >> 5; delta_map = (delta_idx < 512 / 32) ? s->u.intrle.delta_map[delta_idx] : 0; if (delta_map != ~0) break; delta_run += 32; } delta_run += __ffs(~delta_map) - 1; } delta_run += 2; break; } literal_run += 32; delta_map = s->u.intrle.delta_map[(literal_run >> 5)]; } literal_run = min(literal_run, maxvals); s->u.intrle.literal_run = literal_run; s->u.intrle.delta_run = min(delta_run, maxvals - literal_run); } __syncthreads(); literal_run = s->u.intrle.literal_run; // Find minimum and maximum values if (literal_run > 0) { // Find min & max T vmin = (t < literal_run) ? v0 : std::numeric_limits<T>::max(); T vmax = (t < literal_run) ? v0 : std::numeric_limits<T>::min(); uint32_t literal_mode, literal_w; vmin = block_reduce(temp_storage).Reduce(vmin, hipcub::Min()); __syncthreads(); vmax = block_reduce(temp_storage).Reduce(vmax, hipcub::Max()); if (t == 0) { uint32_t mode1_w, mode2_w; typename std::make_unsigned<T>::type vrange_mode1, vrange_mode2; block_vmin = static_cast<uint64_t>(vmin); if (sizeof(T) > 4) { vrange_mode1 = (is_signed) ? max(zigzag(vmin), zigzag(vmax)) : vmax; vrange_mode2 = vmax - vmin; mode1_w = 8 - min(CountLeadingBytes64(vrange_mode1), 7); mode2_w = 8 - min(CountLeadingBytes64(vrange_mode2), 7); } else { vrange_mode1 = (is_signed) ? max(zigzag(vmin), zigzag(vmax)) : vmax; vrange_mode2 = vmax - vmin; mode1_w = 4 - min(CountLeadingBytes32(vrange_mode1), 3); mode2_w = 4 - min(CountLeadingBytes32(vrange_mode2), 3); } // Decide between mode1 & mode2 (also mode3 for length=2 repeat) if (vrange_mode2 == 0 && mode1_w > 1) { // Should only occur if literal_run==2 (otherwise would have resulted in repeat_run >= // 3) uint32_t bytecnt = 2; dst[0] = 0xC0 + ((literal_run - 1) >> 8); dst[1] = (literal_run - 1) & 0xff; bytecnt += StoreVarint(dst + 2, vrange_mode1); dst[bytecnt++] = 0; // Zero delta s->u.intrle.literal_mode = 3; s->u.intrle.literal_w = bytecnt; } else { uint32_t range, w; if (mode1_w > mode2_w && (literal_run - 1) * (mode1_w - mode2_w) > 4) { s->u.intrle.literal_mode = 2; w = mode2_w; range = (uint32_t)vrange_mode2; } else { s->u.intrle.literal_mode = 1; w = mode1_w; range = (uint32_t)vrange_mode1; } if (w == 1) w = (range >= 16) ? w << 3 : (range >= 4) ? 4 : (range >= 2) ? 
2 : 1; else w <<= 3; // bytes -> bits s->u.intrle.literal_w = w; } } __syncthreads(); vmin = static_cast<T>(block_vmin); literal_mode = s->u.intrle.literal_mode; literal_w = s->u.intrle.literal_w; if (literal_mode == 1) { // Direct mode if (!t) { dst[0] = 0x40 + ((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 + ((literal_run - 1) >> 8); dst[1] = (literal_run - 1) & 0xff; } dst += 2; typename std::make_unsigned<T>::type zzv0 = v0; if (t < literal_run) { zzv0 = zigzag(v0); } if (literal_w < 8) { StoreBitsBigEndian(dst, zzv0, literal_w, literal_run, t); } else if (t < literal_run) { StoreBytesBigEndian(dst + t * (literal_w >> 3), zzv0, (literal_w >> 3)); } } else if (literal_mode == 2) { // Patched base mode if (!t) { uint32_t bw, pw = 1, pll, pgw = 1, bv_scale = (is_signed) ? 0 : 1; vmax = (is_signed) ? ((vmin < 0) ? -vmin : vmin) * 2 : vmin; bw = (sizeof(T) > 4) ? (8 - min(CountLeadingBytes64(vmax << bv_scale), 7)) : (4 - min(CountLeadingBytes32(vmax << bv_scale), 3)); if (zero_pll_war) { // Insert a dummy zero patch pll = 1; dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 0] = 0; dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 1] = 0; } else { pll = 0; } dst[0] = 0x80 + ((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 + ((literal_run - 1) >> 8); dst[1] = (literal_run - 1) & 0xff; dst[2] = ((bw - 1) << 5) | kByteLengthToRLEv2_W[pw]; dst[3] = ((pgw - 1) << 5) | pll; if (is_signed) { vmax >>= 1; vmax |= vmin & ((T)1 << (bw * 8 - 1)); } StoreBytesBigEndian(dst + 4, vmax, bw); s->u.intrle.hdr_bytes = 4 + bw; s->u.intrle.pl_bytes = (pll * (pw * 8 + pgw) + 7) >> 3; } __syncthreads(); dst += s->u.intrle.hdr_bytes; v0 -= (t < literal_run) ? vmin : 0; if (literal_w < 8) StoreBitsBigEndian(dst, (uint32_t)v0, literal_w, literal_run, t); else if (t < literal_run) StoreBytesBigEndian(dst + t * (literal_w >> 3), v0, (literal_w >> 3)); dst += s->u.intrle.pl_bytes; } else { // Delta mode dst += literal_w; literal_w = 0; } dst += (literal_run * literal_w + 7) >> 3; numvals -= literal_run; inpos += literal_run; out_cnt += literal_run; __syncthreads(); } delta_run = s->u.intrle.delta_run; if (delta_run > 0) { if (t == literal_run) { int64_t delta = (int64_t)v1 - (int64_t)v0; uint64_t delta_base = zigzag(v0); if (delta == 0 && delta_run >= 3 && delta_run <= 10) { // Short repeat uint32_t delta_bw = 8 - min(CountLeadingBytes64(delta_base), 7); dst[0] = ((delta_bw - 1) << 3) + (delta_run - 3); for (uint32_t i = 0, b = delta_bw * 8; i < delta_bw; i++) { b -= 8; dst[1 + i] = static_cast<uint8_t>(delta_base >> b); } s->u.intrle.hdr_bytes = 1 + delta_bw; } else { // Delta uint64_t delta_u = zigzag(delta); uint32_t bytecnt = 2; dst[0] = 0xC0 + ((delta_run - 1) >> 8); dst[1] = (delta_run - 1) & 0xff; bytecnt += StoreVarint(dst + bytecnt, delta_base); bytecnt += StoreVarint(dst + bytecnt, delta_u); s->u.intrle.hdr_bytes = bytecnt; } } __syncthreads(); dst += s->u.intrle.hdr_bytes; numvals -= delta_run; inpos += delta_run; out_cnt += delta_run; } } if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); } __syncthreads(); return out_cnt; } /** * @brief Store a group of strings as a single concatenated string * * @param[in] dst destination buffer * @param[in] strenc string encoder state * @param[in] len(t) string length (per thread) * @param[in] t thread id */ static __device__ void StoreStringData(uint8_t* dst, strdata_enc_state_s* strenc, uint32_t len, int t) { // Start with summing up all the lengths uint32_t pos = len; 
uint32_t wt = t & 0x1f; for (uint32_t n = 1; n < 32; n <<= 1) { uint32_t tmp = shuffle(pos, (wt & ~n) | (n - 1)); pos += (wt & n) ? tmp : 0; } if (wt == 0x1f) { strenc->lengths_red[t >> 5] = pos; } dst += pos - len; __syncthreads(); if (t < 32) { uint32_t wlen = (wt < 16) ? strenc->lengths_red[wt] : 0; uint32_t wpos = wlen; for (uint32_t n = 1; n < 16; n <<= 1) { uint32_t tmp = shuffle(wpos, (wt & ~n) | (n - 1)); wpos += (wt & n) ? tmp : 0; } if (wt < 16) { strenc->lengths_red[wt] = wpos - wlen; } if (wt == 0xf) { strenc->char_count = wpos; // Update stream position } } __syncthreads(); // TBD: Might be more efficient to loop over 4 strings and copy 8 consecutive character at a time // rather than have each thread to a memcpy if (len > 0) { memcpy(dst + strenc->lengths_red[t >> 5], strenc->str_data[t], len); } } /** * @brief In-place conversion from lengths to positions * * @param[in] vals input values * @param[in] numvals number of values * @param[in] t thread id */ template <class T> inline __device__ void lengths_to_positions(volatile T* vals, uint32_t numvals, unsigned int t) { for (uint32_t n = 1; n < numvals; n <<= 1) { __syncthreads(); if ((t & n) && (t < numvals)) vals[t] += vals[(t & ~n) | (n - 1)]; } } template <int block_size, typename Storage> static __device__ void encode_null_mask(orcenc_state_s* s, bitmask_type const* pushdown_mask, Storage& scan_storage, int t) { if (s->stream.ids[CI_PRESENT] < 0) return; auto const column = *s->chunk.column; while (s->present_rows < s->chunk.null_mask_num_rows or s->numvals > 0) { // Number of rows read so far auto present_rows = s->present_rows; // valid_buf capacity is byte per thread in block auto const buf_available_bits = encode_block_size * 8 - s->numvals; // Number of rows for the block to process in this iteration auto const nrows = min(s->chunk.null_mask_num_rows - present_rows, buf_available_bits); // Number of rows for this thread to process in this iteration auto const t_nrows = min(max(static_cast<int32_t>(nrows) - t * 8, 0), 8); auto const row = s->chunk.null_mask_start_row + present_rows + t * 8; auto get_mask_byte = [&](bitmask_type const* mask, size_type offset) -> uint8_t { if (t_nrows == 0) return 0; if (mask == nullptr) return 0xff; auto const begin_offset = row + offset; auto const end_offset = min(begin_offset + 8, offset + column.size()); auto const mask_word = cudf::detail::get_mask_offset_word(mask, 0, begin_offset, end_offset); return mask_word & 0xff; }; uint8_t pd_byte = (1 << t_nrows) - 1; uint32_t pd_set_cnt = t_nrows; uint32_t offset = t_nrows != 0 ? 
t * 8 : nrows; if (pushdown_mask != nullptr) { pd_byte = get_mask_byte(pushdown_mask, 0) & ((1 << t_nrows) - 1); pd_set_cnt = __popc(pd_byte); // Scan the number of valid bits to get dst offset for each thread hipcub::BlockScan<uint32_t, block_size>(scan_storage).ExclusiveSum(pd_set_cnt, offset); } auto const mask_byte = get_mask_byte(column.null_mask(), column.offset()); auto dst_offset = offset + s->nnz; auto vbuf_bit_idx = [](int row) { // valid_buf is a circular buffer with validity of 8 rows in each element return row % (encode_block_size * 8); }; if (dst_offset % 8 == 0 and pd_set_cnt == 8) { s->valid_buf[vbuf_bit_idx(dst_offset) / 8] = mask_byte; } else { for (auto bit_idx = 0; bit_idx < t_nrows; ++bit_idx) { // skip bits where pushdown mask is not set if (not(pd_byte & (1 << bit_idx))) continue; if (mask_byte & (1 << bit_idx)) { set_bit(reinterpret_cast<uint32_t*>(s->valid_buf), vbuf_bit_idx(dst_offset++)); } else { clear_bit(reinterpret_cast<uint32_t*>(s->valid_buf), vbuf_bit_idx(dst_offset++)); } } } __syncthreads(); if (t == block_size - 1) { // Number of loaded rows, available for encode s->numvals += offset + pd_set_cnt; // Number of loaded rows (different from present_rows because of pushdown masks) s->nnz += offset + pd_set_cnt; } present_rows += nrows; if (!t) { s->present_rows = present_rows; } __syncthreads(); // RLE encode the present stream if (s->numvals > ((present_rows < s->chunk.null_mask_num_rows) ? 130 * 8 : 0)) { auto const flush = (present_rows < s->chunk.null_mask_num_rows) ? 0 : 7; auto const nbytes_out = (s->numvals + flush) / 8; auto const nrows_encoded = ByteRLE<CI_PRESENT, 0x1ff>(s, s->valid_buf, s->present_out / 8, nbytes_out, flush, t) * 8; if (!t) { // Number of rows encoded so far s->present_out += nrows_encoded; s->numvals -= min(s->numvals, nrows_encoded); } __syncthreads(); } } // reset shared state if (t == 0) { s->nnz = 0; s->numvals = 0; } } /** * @brief Encode column data * * @param[in] chunks encoder chunks device array [column][rowgroup] * @param[in, out] streams chunk streams device array [column][rowgroup] */ // blockDim {`encode_block_size`,1,1} template <int block_size> __global__ void __launch_bounds__(block_size) gpuEncodeOrcColumnData(device_2dspan<EncChunk const> chunks, device_2dspan<encoder_chunk_streams> streams) { __shared__ __align__(16) orcenc_state_s state_g; __shared__ union { typename hipcub::BlockScan<uint32_t, block_size>::TempStorage scan_u32; typename hipcub::BlockReduce<int32_t, block_size>::TempStorage i32; typename hipcub::BlockReduce<int64_t, block_size>::TempStorage i64; typename hipcub::BlockReduce<uint32_t, block_size>::TempStorage u32; typename hipcub::BlockReduce<uint64_t, block_size>::TempStorage u64; } temp_storage; orcenc_state_s* const s = &state_g; uint32_t col_id = blockIdx.x; uint32_t group_id = blockIdx.y; int t = threadIdx.x; if (t == 0) { s->chunk = chunks[col_id][group_id]; s->stream = streams[col_id][group_id]; s->cur_row = 0; s->present_rows = 0; s->present_out = 0; s->numvals = 0; s->numlengths = 0; s->nnz = 0; s->strm_pos[CI_DATA] = 0; s->strm_pos[CI_PRESENT] = 0; s->strm_pos[CI_INDEX] = 0; // Dictionary data is encoded in a separate kernel s->strm_pos[CI_DATA2] = s->chunk.encoding_kind == DICTIONARY_V2 ? s->stream.lengths[CI_DATA2] : 0; s->strm_pos[CI_DICTIONARY] = s->chunk.encoding_kind == DICTIONARY_V2 ? 
s->stream.lengths[CI_DICTIONARY] : 0; } __syncthreads(); auto const pushdown_mask = [&]() -> cudf::bitmask_type const* { auto const parent_index = s->chunk.column->parent_index; if (!parent_index.has_value()) return nullptr; return chunks[parent_index.value()][0].column->pushdown_mask; }(); encode_null_mask<block_size>(s, pushdown_mask, temp_storage.scan_u32, t); __syncthreads(); auto const column = *s->chunk.column; while (s->cur_row < s->chunk.num_rows || s->numvals + s->numlengths != 0) { // Fetch non-null values auto const length_stream_only = s->chunk.type_kind == LIST or s->chunk.type_kind == MAP; if (not length_stream_only && s->stream.data_ptrs[CI_DATA] == nullptr) { // Pass-through __syncthreads(); if (!t) { s->cur_row = s->chunk.num_rows; s->strm_pos[CI_DATA] = s->chunk.num_rows * s->chunk.dtype_len; } } else if (s->cur_row < s->chunk.num_rows) { uint32_t maxnumvals = (s->chunk.type_kind == BOOLEAN) ? 2048 : 1024; uint32_t nrows = min(min(s->chunk.num_rows - s->cur_row, maxnumvals - max(s->numvals, s->numlengths)), encode_block_size); auto const row = s->chunk.start_row + s->cur_row + t; auto const is_value_valid = [&]() { if (t >= nrows) return false; return bit_value_or(pushdown_mask, column.offset() + row, true) and bit_value_or(column.null_mask(), column.offset() + row, true); }(); s->buf.u32[t] = is_value_valid ? 1u : 0u; // TODO: Could use a faster reduction relying on _popc() for the initial phase lengths_to_positions(s->buf.u32, encode_block_size, t); __syncthreads(); if (is_value_valid) { int nz_idx = (s->nnz + s->buf.u32[t] - 1) & (maxnumvals - 1); switch (s->chunk.type_kind) { case INT: case DATE: case FLOAT: s->vals.u32[nz_idx] = column.element<uint32_t>(row); break; case DOUBLE: case LONG: s->vals.u64[nz_idx] = column.element<uint64_t>(row); break; case SHORT: s->vals.u32[nz_idx] = column.element<uint16_t>(row); break; case BOOLEAN: case BYTE: s->vals.u8[nz_idx] = column.element<uint8_t>(row); break; case TIMESTAMP: { int64_t ts = column.element<int64_t>(row); int32_t ts_scale = powers_of_ten[9 - min(s->chunk.scale, 9)]; int64_t seconds = ts / ts_scale; int64_t nanos = (ts - seconds * ts_scale); // There is a bug in the ORC spec such that for negative timestamps, it is understood // between the writer and reader that nanos will be adjusted to their positive component // but the negative seconds will be left alone. 
This means that -2.6 is encoded as // seconds = -2 and nanos = 1+(-0.6) = 0.4 // This leads to an error in decoding time where -1 < time (s) < 0 // Details: https://github.com/rapidsai/cudf/pull/5529#issuecomment-648768925 if (nanos < 0) { nanos += ts_scale; } s->vals.i64[nz_idx] = seconds - kORCTimeToUTC; if (nanos != 0) { // Trailing zeroes are encoded in the lower 3-bits uint32_t zeroes = 0; nanos *= powers_of_ten[min(s->chunk.scale, 9)]; if (!(nanos % 100)) { nanos /= 100; zeroes = 1; while (zeroes < 7 && !(nanos % 10)) { nanos /= 10; zeroes++; } } nanos = (nanos << 3) + zeroes; } s->lengths.u64[nz_idx] = nanos; break; } case STRING: if (s->chunk.encoding_kind == DICTIONARY_V2) { uint32_t dict_idx = s->chunk.dict_index[row]; if (dict_idx > 0x7fffffffu) { dict_idx = s->chunk.dict_index[dict_idx & 0x7fffffffu]; } s->vals.u32[nz_idx] = dict_idx; } else { string_view value = column.element<string_view>(row); s->u.strenc.str_data[s->buf.u32[t] - 1] = value.data(); s->lengths.u32[nz_idx] = value.size_bytes(); } break; // Reusing the lengths array for the scale stream // Note: can be written in a faster manner, given that all values are equal case DECIMAL: s->lengths.u32[nz_idx] = zigzag(s->chunk.scale); break; case LIST: case MAP: { auto const& offsets = column.child(lists_column_view::offsets_column_index); // Compute list length from the offsets s->lengths.u32[nz_idx] = offsets.element<size_type>(row + 1 + column.offset()) - offsets.element<size_type>(row + column.offset()); } break; default: break; } } __syncthreads(); if (s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2) { // Store string data uint32_t nz = s->buf.u32[511]; uint32_t nz_idx = (s->nnz + t) & 0x3ff; uint32_t len = (t < nz && s->u.strenc.str_data[t]) ? s->lengths.u32[nz_idx] : 0; StoreStringData(s->stream.data_ptrs[CI_DATA] + s->strm_pos[CI_DATA], &s->u.strenc, len, t); if (!t) { s->strm_pos[CI_DATA] += s->u.strenc.char_count; } __syncthreads(); } else if (s->chunk.type_kind == BOOLEAN) { // bool8 -> 8x bool1 uint32_t nz = s->buf.u32[511]; uint8_t n = ((s->nnz + nz) - (s->nnz & ~7) + 7) >> 3; if (t < n) { uint32_t idx8 = (s->nnz & ~7) + (t << 3); s->lengths.u8[((s->nnz >> 3) + t) & 0x1ff] = ((s->vals.u8[(idx8 + 0) & 0x7ff] & 1) << 7) | ((s->vals.u8[(idx8 + 1) & 0x7ff] & 1) << 6) | ((s->vals.u8[(idx8 + 2) & 0x7ff] & 1) << 5) | ((s->vals.u8[(idx8 + 3) & 0x7ff] & 1) << 4) | ((s->vals.u8[(idx8 + 4) & 0x7ff] & 1) << 3) | ((s->vals.u8[(idx8 + 5) & 0x7ff] & 1) << 2) | ((s->vals.u8[(idx8 + 6) & 0x7ff] & 1) << 1) | ((s->vals.u8[(idx8 + 7) & 0x7ff] & 1) << 0); } __syncthreads(); } if (!t) { uint32_t nz = s->buf.u32[511]; s->nnz += nz; s->numvals += nz; s->numlengths += (s->chunk.type_kind == TIMESTAMP || s->chunk.type_kind == DECIMAL || s->chunk.type_kind == LIST || s->chunk.type_kind == MAP || (s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2)) ? nz : 0; s->cur_row += nrows; } __syncthreads(); // Encode values if (s->numvals > 0) { uint32_t flush = (s->cur_row == s->chunk.num_rows) ? 
7 : 0, n; switch (s->chunk.type_kind) { case SHORT: case INT: case DATE: n = IntegerRLE<CI_DATA, int32_t, true, 0x3ff, block_size>( s, s->vals.i32, s->nnz - s->numvals, s->numvals, t, temp_storage.i32); break; case LONG: case TIMESTAMP: n = IntegerRLE<CI_DATA, int64_t, true, 0x3ff, block_size>( s, s->vals.i64, s->nnz - s->numvals, s->numvals, t, temp_storage.i64); break; case BYTE: n = ByteRLE<CI_DATA, 0x3ff>(s, s->vals.u8, s->nnz - s->numvals, s->numvals, flush, t); break; case BOOLEAN: n = ByteRLE<CI_DATA, 0x1ff>(s, s->lengths.u8, (s->nnz - s->numvals + flush) >> 3, (s->numvals + flush) >> 3, flush, t) * 8; break; case FLOAT: StoreBytes<CI_DATA, 0xfff>(s, s->vals.u8, (s->nnz - s->numvals) * 4, s->numvals * 4, t); n = s->numvals; break; case DOUBLE: StoreBytes<CI_DATA, 0x1fff>( s, s->vals.u8, (s->nnz - s->numvals) * 8, s->numvals * 8, t); n = s->numvals; break; case STRING: if (s->chunk.encoding_kind == DICTIONARY_V2) { n = IntegerRLE<CI_DATA, uint32_t, false, 0x3ff, block_size>( s, s->vals.u32, s->nnz - s->numvals, s->numvals, t, temp_storage.u32); } else { n = s->numvals; } break; case DECIMAL: { if (is_value_valid) { auto const id = column.type().id(); __uint128_t const zz_val = id == type_id::DECIMAL32 ? zigzag(column.element<int32_t>(row)) : id == type_id::DECIMAL64 ? zigzag(column.element<int64_t>(row)) : zigzag(column.element<__int128_t>(row)); auto const offset = (row == s->chunk.start_row) ? 0 : s->chunk.decimal_offsets[row - 1]; StoreVarint(s->stream.data_ptrs[CI_DATA] + offset, zz_val); } n = s->numvals; } break; default: n = s->numvals; break; } __syncthreads(); if (!t) { s->numvals -= min(n, s->numvals); } } // Encode secondary stream values if (s->numlengths > 0) { uint32_t n; switch (s->chunk.type_kind) { case TIMESTAMP: n = IntegerRLE<CI_DATA2, uint64_t, false, 0x3ff, block_size>( s, s->lengths.u64, s->nnz - s->numlengths, s->numlengths, t, temp_storage.u64); break; case DECIMAL: case LIST: case MAP: case STRING: n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff, block_size>( s, s->lengths.u32, s->nnz - s->numlengths, s->numlengths, t, temp_storage.u32); break; default: n = s->numlengths; break; } __syncthreads(); if (!t) { s->numlengths -= min(n, s->numlengths); } } } __syncthreads(); } __syncthreads(); if (t <= CI_PRESENT && s->stream.ids[t] >= 0) { // Update actual compressed length // (not needed for decimal data, whose exact size is known before encode) if (!(t == CI_DATA && s->chunk.type_kind == DECIMAL)) streams[col_id][group_id].lengths[t] = s->strm_pos[t]; if (!s->stream.data_ptrs[t]) { streams[col_id][group_id].data_ptrs[t] = static_cast<uint8_t*>(const_cast<void*>(column.head())) + (column.offset() + s->chunk.start_row) * s->chunk.dtype_len; } } } /** * @brief Encode column dictionaries * * @param[in] stripes Stripe dictionaries device array [stripe][string_column] * @param[in] chunks EncChunk device array [rowgroup][column] * @param[in] num_columns Number of columns */ // blockDim {512,1,1} template <int block_size> __global__ void __launch_bounds__(block_size) gpuEncodeStringDictionaries(StripeDictionary const* stripes, device_2dspan<EncChunk const> chunks, device_2dspan<encoder_chunk_streams> streams) { __shared__ __align__(16) orcenc_state_s state_g; __shared__ typename hipcub::BlockReduce<uint32_t, block_size>::TempStorage temp_storage; orcenc_state_s* const s = &state_g; uint32_t stripe_id = blockIdx.x; uint32_t cid = (blockIdx.y) ? 
CI_DICTIONARY : CI_DATA2; int t = threadIdx.x; if (t == 0) s->u.dict_stripe = stripes[stripe_id]; __syncthreads(); auto const strm_ptr = &streams[s->u.dict_stripe.column_id][s->u.dict_stripe.start_chunk]; if (t == 0) { s->chunk = chunks[s->u.dict_stripe.column_id][s->u.dict_stripe.start_chunk]; s->stream = *strm_ptr; s->strm_pos[cid] = 0; s->numlengths = 0; s->nrows = s->u.dict_stripe.num_strings; s->cur_row = 0; } auto const string_column = s->u.dict_stripe.leaf_column; auto const dict_data = s->u.dict_stripe.dict_data; __syncthreads(); if (s->chunk.encoding_kind != DICTIONARY_V2) { return; // This column isn't using dictionary encoding -> bail out } while (s->cur_row < s->nrows || s->numlengths != 0) { uint32_t numvals = min(s->nrows - s->cur_row, min(1024 - s->numlengths, 512)); uint32_t string_idx = (t < numvals) ? dict_data[s->cur_row + t] : 0; if (cid == CI_DICTIONARY) { // Encoding string contents const char* ptr = 0; uint32_t count = 0; if (t < numvals) { auto string_val = string_column->element<string_view>(string_idx); ptr = string_val.data(); count = string_val.size_bytes(); } s->u.strenc.str_data[t] = ptr; StoreStringData(s->stream.data_ptrs[CI_DICTIONARY] + s->strm_pos[CI_DICTIONARY], &s->u.strenc, (ptr) ? count : 0, t); if (!t) { s->strm_pos[CI_DICTIONARY] += s->u.strenc.char_count; } } else { // Encoding string lengths uint32_t count = (t < numvals) ? static_cast<uint32_t>(string_column->element<string_view>(string_idx).size_bytes()) : 0; uint32_t nz_idx = (s->cur_row + t) & 0x3ff; if (t < numvals) s->lengths.u32[nz_idx] = count; __syncthreads(); if (s->numlengths + numvals > 0) { uint32_t n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff, block_size>( s, s->lengths.u32, s->cur_row, s->numlengths + numvals, t, temp_storage); __syncthreads(); if (!t) { s->numlengths += numvals; s->numlengths -= min(n, s->numlengths); } } } if (t == 0) { s->cur_row += numvals; } __syncthreads(); } if (t == 0) { strm_ptr->lengths[cid] = s->strm_pos[cid]; } } /** * @brief Merge chunked column data into a single contiguous stream * * @param[in,out] strm_desc StripeStream device array [stripe][stream] * @param[in,out] streams List of encoder chunk streams [column][rowgroup] */ // blockDim {1024,1,1} __global__ void __launch_bounds__(1024) gpuCompactOrcDataStreams(device_2dspan<StripeStream> strm_desc, device_2dspan<encoder_chunk_streams> streams) { __shared__ __align__(16) StripeStream ss; __shared__ __align__(16) encoder_chunk_streams strm0; __shared__ uint8_t* volatile ck_curptr_g; __shared__ uint32_t volatile ck_curlen_g; auto const stripe_id = blockIdx.x; auto const stream_id = blockIdx.y; uint32_t t = threadIdx.x; if (t == 0) { ss = strm_desc[stripe_id][stream_id]; strm0 = streams[ss.column_id][ss.first_chunk_id]; } __syncthreads(); auto const cid = ss.stream_type; auto dst_ptr = strm0.data_ptrs[cid] + strm0.lengths[cid]; for (auto group = ss.first_chunk_id + 1; group < ss.first_chunk_id + ss.num_chunks; ++group) { uint8_t* src_ptr; uint32_t len; if (t == 0) { src_ptr = streams[ss.column_id][group].data_ptrs[cid]; len = streams[ss.column_id][group].lengths[cid]; if (src_ptr != dst_ptr) { streams[ss.column_id][group].data_ptrs[cid] = dst_ptr; } ck_curptr_g = src_ptr; ck_curlen_g = len; } __syncthreads(); src_ptr = ck_curptr_g; len = ck_curlen_g; if (len > 0 && src_ptr != dst_ptr) { for (uint32_t i = 0; i < len; i += 1024) { uint8_t v = (i + t < len) ? 
src_ptr[i + t] : 0; __syncthreads(); if (i + t < len) { dst_ptr[i + t] = v; } } } dst_ptr += len; __syncthreads(); } if (!t) { strm_desc[stripe_id][stream_id].stream_size = dst_ptr - strm0.data_ptrs[cid]; } } /** * @brief Initializes compression input/output structures * * @param[in] strm_desc StripeStream device array [stripe][stream] * @param[in] chunks EncChunk device array [rowgroup][column] * @param[out] comp_in Per-block compression input parameters * @param[out] comp_out Per-block compression status * @param[in] compressed_bfr Compression output buffer * @param[in] comp_blk_size Compression block size * @param[in] max_comp_blk_size Max size of any block after compression */ // blockDim {256,1,1} __global__ void __launch_bounds__(256) gpuInitCompressionBlocks(device_2dspan<StripeStream const> strm_desc, device_2dspan<encoder_chunk_streams> streams, // const? device_span<gpu_inflate_input_s> comp_in, device_span<gpu_inflate_status_s> comp_out, uint8_t* compressed_bfr, uint32_t comp_blk_size, uint32_t max_comp_blk_size) { __shared__ __align__(16) StripeStream ss; __shared__ uint8_t* volatile uncomp_base_g; auto const stripe_id = blockIdx.x; auto const stream_id = blockIdx.y; uint32_t t = threadIdx.x; uint32_t num_blocks; uint8_t *src, *dst; if (t == 0) { ss = strm_desc[stripe_id][stream_id]; uncomp_base_g = streams[ss.column_id][ss.first_chunk_id].data_ptrs[ss.stream_type]; } __syncthreads(); src = uncomp_base_g; dst = compressed_bfr + ss.bfr_offset; num_blocks = (ss.stream_size > 0) ? (ss.stream_size - 1) / comp_blk_size + 1 : 1; for (uint32_t b = t; b < num_blocks; b += 256) { gpu_inflate_input_s* blk_in = &comp_in[ss.first_block + b]; gpu_inflate_status_s* blk_out = &comp_out[ss.first_block + b]; uint32_t blk_size = min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size)); blk_in->srcDevice = src + b * comp_blk_size; blk_in->srcSize = blk_size; blk_in->dstDevice = dst + b * (BLOCK_HEADER_SIZE + max_comp_blk_size) + BLOCK_HEADER_SIZE; blk_in->dstSize = max_comp_blk_size; blk_out->bytes_written = blk_size; blk_out->status = 1; blk_out->reserved = 0; } } /** * @brief Compacts compressed blocks in a single contiguous stream, and update 3-byte block length *fields * * @param[in,out] strm_desc StripeStream device array [stripe][stream] * @param[in] chunks EncChunk device array [rowgroup][column] * @param[in] comp_in Per-block compression input parameters * @param[in] comp_out Per-block compression status * @param[in] compressed_bfr Compression output buffer * @param[in] comp_blk_size Compression block size * @param[in] max_comp_blk_size Max size of any block after compression */ // blockDim {1024,1,1} __global__ void __launch_bounds__(1024) gpuCompactCompressedBlocks(device_2dspan<StripeStream> strm_desc, device_span<gpu_inflate_input_s> comp_in, device_span<gpu_inflate_status_s> comp_out, uint8_t* compressed_bfr, uint32_t comp_blk_size, uint32_t max_comp_blk_size) { __shared__ __align__(16) StripeStream ss; __shared__ const uint8_t* volatile comp_src_g; __shared__ uint32_t volatile comp_len_g; auto const stripe_id = blockIdx.x; auto const stream_id = blockIdx.y; uint32_t t = threadIdx.x; uint32_t num_blocks, b, blk_size; const uint8_t* src; uint8_t* dst; if (t == 0) ss = strm_desc[stripe_id][stream_id]; __syncthreads(); num_blocks = (ss.stream_size > 0) ? 
(ss.stream_size - 1) / comp_blk_size + 1 : 0; dst = compressed_bfr + ss.bfr_offset; b = 0; do { if (t == 0) { gpu_inflate_input_s* blk_in = &comp_in[ss.first_block + b]; gpu_inflate_status_s* blk_out = &comp_out[ss.first_block + b]; uint32_t src_len = min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size)); uint32_t dst_len = (blk_out->status == 0) ? blk_out->bytes_written : src_len; uint32_t blk_size24; if (dst_len >= src_len) { // Copy from uncompressed source src = static_cast<const uint8_t*>(blk_in->srcDevice); blk_out->bytes_written = src_len; dst_len = src_len; blk_size24 = dst_len * 2 + 1; } else { // Compressed block src = static_cast<const uint8_t*>(blk_in->dstDevice); blk_size24 = dst_len * 2 + 0; } dst[0] = static_cast<uint8_t>(blk_size24 >> 0); dst[1] = static_cast<uint8_t>(blk_size24 >> 8); dst[2] = static_cast<uint8_t>(blk_size24 >> 16); comp_src_g = src; comp_len_g = dst_len; } __syncthreads(); src = comp_src_g; blk_size = comp_len_g; dst += 3; // skip over length written by thread0 if (src != dst) { for (uint32_t i = 0; i < blk_size; i += 1024) { uint8_t v = (i + t < blk_size) ? src[i + t] : 0; __syncthreads(); if (i + t < blk_size) { dst[i + t] = v; } } } dst += blk_size; __syncthreads(); } while (++b < num_blocks); // Update stripe stream with the compressed size if (t == 0) { strm_desc[stripe_id][stream_id].stream_size = static_cast<uint32_t>(dst - (compressed_bfr + ss.bfr_offset)); } } void EncodeOrcColumnData(device_2dspan<EncChunk const> chunks, device_2dspan<encoder_chunk_streams> streams, rmm::cuda_stream_view stream) { dim3 dim_block(encode_block_size, 1); // `encode_block_size` threads per chunk dim3 dim_grid(chunks.size().first, chunks.size().second); hipLaunchKernelGGL(( gpuEncodeOrcColumnData<encode_block_size>) , dim3(dim_grid), dim3(dim_block), 0, stream.value(), chunks, streams); } void EncodeStripeDictionaries(StripeDictionary const* stripes, device_2dspan<EncChunk const> chunks, uint32_t num_string_columns, uint32_t num_stripes, device_2dspan<encoder_chunk_streams> enc_streams, rmm::cuda_stream_view stream) { dim3 dim_block(512, 1); // 512 threads per dictionary dim3 dim_grid(num_string_columns * num_stripes, 2); hipLaunchKernelGGL(( gpuEncodeStringDictionaries<512>) , dim3(dim_grid), dim3(dim_block), 0, stream.value(), stripes, chunks, enc_streams); } void CompactOrcDataStreams(device_2dspan<StripeStream> strm_desc, device_2dspan<encoder_chunk_streams> enc_streams, rmm::cuda_stream_view stream) { dim3 dim_block(1024, 1); dim3 dim_grid(strm_desc.size().first, strm_desc.size().second); hipLaunchKernelGGL(( gpuCompactOrcDataStreams), dim3(dim_grid), dim3(dim_block), 0, stream.value(), strm_desc, enc_streams); } void CompressOrcDataStreams(uint8_t* compressed_data, uint32_t num_compressed_blocks, CompressionKind compression, uint32_t comp_blk_size, uint32_t max_comp_blk_size, device_2dspan<StripeStream> strm_desc, device_2dspan<encoder_chunk_streams> enc_streams, device_span<gpu_inflate_input_s> comp_in, device_span<gpu_inflate_status_s> comp_out, rmm::cuda_stream_view stream) { dim3 dim_block_init(256, 1); dim3 dim_grid(strm_desc.size().first, strm_desc.size().second); hipLaunchKernelGGL(( gpuInitCompressionBlocks), dim3(dim_grid), dim3(dim_block_init), 0, stream.value(), strm_desc, enc_streams, comp_in, comp_out, compressed_data, comp_blk_size, max_comp_blk_size); if (compression == SNAPPY) { if (detail::nvcomp_integration::is_stable_enabled()) { try { size_t temp_size; nvcompStatus_t nvcomp_status = 
nvcompBatchedSnappyCompressGetTempSize( num_compressed_blocks, comp_blk_size, nvcompBatchedSnappyDefaultOpts, &temp_size); CUDF_EXPECTS(nvcomp_status == nvcompStatus_t::nvcompSuccess, "Error in getting snappy compression scratch size"); rmm::device_buffer scratch(temp_size, stream); rmm::device_uvector<void const*> uncompressed_data_ptrs(num_compressed_blocks, stream); rmm::device_uvector<size_t> uncompressed_data_sizes(num_compressed_blocks, stream); rmm::device_uvector<void*> compressed_data_ptrs(num_compressed_blocks, stream); rmm::device_uvector<size_t> compressed_bytes_written(num_compressed_blocks, stream); auto comp_it = thrust::make_zip_iterator(uncompressed_data_ptrs.begin(), uncompressed_data_sizes.begin(), compressed_data_ptrs.begin()); thrust::transform(rmm::exec_policy(stream), comp_in.begin(), comp_in.end(), comp_it, [] __device__(gpu_inflate_input_s in) { return thrust::make_tuple(in.srcDevice, in.srcSize, in.dstDevice); }); nvcomp_status = nvcompBatchedSnappyCompressAsync(uncompressed_data_ptrs.data(), uncompressed_data_sizes.data(), max_comp_blk_size, num_compressed_blocks, scratch.data(), scratch.size(), compressed_data_ptrs.data(), compressed_bytes_written.data(), nvcompBatchedSnappyDefaultOpts, stream.value()); CUDF_EXPECTS(nvcomp_status == nvcompStatus_t::nvcompSuccess, "Error in snappy compression"); thrust::transform(rmm::exec_policy(stream), compressed_bytes_written.begin(), compressed_bytes_written.end(), comp_out.begin(), [] __device__(size_t size) { gpu_inflate_status_s status{}; status.bytes_written = size; return status; }); } catch (...) { // If we reach this then there was an error in compressing so set an error status for each // block thrust::for_each(rmm::exec_policy(stream), comp_out.begin(), comp_out.end(), [] __device__(gpu_inflate_status_s & stat) { stat.status = 1; }); }; } else { gpu_snap(comp_in.data(), comp_out.data(), num_compressed_blocks, stream); } } dim3 dim_block_compact(1024, 1); hipLaunchKernelGGL(( gpuCompactCompressedBlocks), dim3(dim_grid), dim3(dim_block_compact), 0, stream.value(), strm_desc, comp_in, comp_out, compressed_data, comp_blk_size, max_comp_blk_size); } } // namespace gpu } // namespace orc } // namespace io } // namespace cudf
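/*
 * Illustrative sketch (not part of the hipified file above): gpuCompactCompressedBlocks
 * prefixes every block with a 3-byte little-endian header in which the length is shifted
 * left by one bit and the low bit flags a block that was stored uncompressed, i.e.
 * blk_size24 = dst_len * 2 + is_original. A minimal host-side pack/unpack of that header,
 * using hypothetical helper names:
 */
#include <cstdint>

// Pack a block length and "stored uncompressed" flag into a 3-byte ORC block header.
inline void pack_block_header(uint8_t* dst, uint32_t dst_len, bool is_original)
{
  uint32_t blk_size24 = dst_len * 2 + (is_original ? 1u : 0u);
  dst[0] = static_cast<uint8_t>(blk_size24 >> 0);
  dst[1] = static_cast<uint8_t>(blk_size24 >> 8);
  dst[2] = static_cast<uint8_t>(blk_size24 >> 16);
}

// Recover the block length and flag from the same 3-byte header.
inline void unpack_block_header(uint8_t const* src, uint32_t* dst_len, bool* is_original)
{
  uint32_t blk_size24 = src[0] | (src[1] << 8) | (src[2] << 16);
  *is_original = (blk_size24 & 1u) != 0;
  *dst_len     = blk_size24 >> 1;
}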
0a72ddc46760a3d148b1a4e7ed4ebfa7b70478fd.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "orc_common.h" #include "orc_gpu.h" #include <cudf/column/column_device_view.cuh> #include <cudf/lists/lists_column_view.hpp> #include <cudf/utilities/bit.hpp> #include <io/utilities/block_utils.cuh> #include <io/utilities/config_utils.hpp> #include <io/utilities/time_utils.cuh> #include <cub/cub.cuh> #include <rmm/cuda_stream_view.hpp> #include <rmm/exec_policy.hpp> #include <nvcomp/snappy.h> namespace cudf { namespace io { namespace orc { namespace gpu { using cudf::detail::device_2dspan; constexpr int scratch_buffer_size = 512 * 4; // Apache ORC reader does not handle zero-length patch lists for RLEv2 mode2 // Workaround replaces zero-length patch lists by a dummy zero patch constexpr bool zero_pll_war = true; static __device__ __constant__ int64_t kORCTimeToUTC = 1420070400; // Seconds from January 1st, 1970 to January 1st, 2015 struct byterle_enc_state_s { uint32_t literal_run; uint32_t repeat_run; volatile uint32_t rpt_map[(512 / 32) + 1]; }; struct intrle_enc_state_s { uint32_t literal_run; uint32_t delta_run; uint32_t literal_mode; uint32_t literal_w; uint32_t hdr_bytes; uint32_t pl_bytes; volatile uint32_t delta_map[(512 / 32) + 1]; }; struct strdata_enc_state_s { uint32_t char_count; uint32_t lengths_red[(512 / 32)]; const char* str_data[512]; }; struct orcenc_state_s { uint32_t cur_row; // Current row in group uint32_t present_rows; // # of rows in present buffer uint32_t present_out; // # of rows in present buffer that have been flushed uint32_t nrows; // # of rows in current batch uint32_t numvals; // # of non-zero values in current batch (<=nrows) uint32_t numlengths; // # of non-zero values in DATA2 batch uint32_t nnz; // Running count of non-null values encoder_chunk_streams stream; EncChunk chunk; uint32_t strm_pos[CI_NUM_STREAMS]; uint8_t valid_buf[512]; // valid map bits union { byterle_enc_state_s byterle; intrle_enc_state_s intrle; strdata_enc_state_s strenc; StripeDictionary dict_stripe; } u; union { uint8_t u8[scratch_buffer_size]; // gblock_vminscratch buffer uint32_t u32[scratch_buffer_size / 4]; } buf; union { uint8_t u8[2048]; uint32_t u32[1024]; int32_t i32[1024]; uint64_t u64[1024]; int64_t i64[1024]; } vals; union { uint8_t u8[2048]; uint32_t u32[1024]; uint64_t u64[1024]; } lengths; }; static inline __device__ uint32_t zigzag(uint32_t v) { return v; } static inline __device__ uint32_t zigzag(int32_t v) { int32_t s = (v >> 31); return ((v ^ s) * 2) - s; } static inline __device__ uint64_t zigzag(uint64_t v) { return v; } static inline __device__ uint64_t zigzag(int64_t v) { int64_t s = (v < 0) ? 1 : 0; return ((v ^ -s) * 2) + s; } static inline __device__ __uint128_t zigzag(__int128_t v) { int64_t s = (v < 0) ? 
1 : 0; return ((v ^ -s) * 2) + s; } static inline __device__ uint32_t CountLeadingBytes32(uint32_t v) { return __clz(v) >> 3; } static inline __device__ uint32_t CountLeadingBytes64(uint64_t v) { return __clzll(v) >> 3; } /** * @brief Raw data output * * @tparam cid stream type (strm_pos[cid] will be updated and output stored at * streams[cid]+strm_pos[cid]) * @tparam inmask input buffer position mask for circular buffers * @param[in] s encoder state * @param[in] inbuf base input buffer * @param[in] inpos position in input buffer * @param[in] count number of bytes to encode * @param[in] t thread id */ template <StreamIndexType cid, uint32_t inmask> static __device__ void StoreBytes( orcenc_state_s* s, const uint8_t* inbuf, uint32_t inpos, uint32_t count, int t) { uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid]; while (count > 0) { uint32_t n = min(count, 512); if (t < n) { dst[t] = inbuf[(inpos + t) & inmask]; } dst += n; inpos += n; count -= n; } __syncthreads(); if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); } } /** * @brief ByteRLE encoder * * @tparam cid stream type (strm_pos[cid] will be updated and output stored at * streams[cid]+strm_pos[cid]) * @tparam inmask input buffer position mask for circular buffers * @param[in] s encoder state * @param[in] inbuf base input buffer * @param[in] inpos position in input buffer * @param[in] numvals max number of values to encode * @param[in] flush encode all remaining values if nonzero * @param[in] t thread id * * @return number of input values encoded */ template <StreamIndexType cid, uint32_t inmask> static __device__ uint32_t ByteRLE( orcenc_state_s* s, const uint8_t* inbuf, uint32_t inpos, uint32_t numvals, uint32_t flush, int t) { uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid]; uint32_t out_cnt = 0; while (numvals > 0) { uint8_t v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0; uint8_t v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0; uint32_t rpt_map = ballot(t + 1 < numvals && v0 == v1), literal_run, repeat_run, maxvals = min(numvals, 512); if (!(t & 0x1f)) s->u.byterle.rpt_map[t >> 5] = rpt_map; __syncthreads(); if (t == 0) { // Find the start of an identical 3-byte sequence // TBD: The two loops below could be eliminated using more ballot+ffs using warp0 literal_run = 0; repeat_run = 0; while (literal_run < maxvals) { uint32_t next = s->u.byterle.rpt_map[(literal_run >> 5) + 1]; uint32_t mask = rpt_map & __funnelshift_r(rpt_map, next, 1); if (mask) { uint32_t literal_run_ofs = __ffs(mask) - 1; literal_run += literal_run_ofs; repeat_run = __ffs(~((rpt_map >> literal_run_ofs) >> 1)); if (repeat_run + literal_run_ofs == 32) { while (next == ~0) { uint32_t next_idx = ((literal_run + repeat_run) >> 5) + 1; next = (next_idx < 512 / 32) ? s->u.byterle.rpt_map[next_idx] : 0; repeat_run += 32; } repeat_run += __ffs(~next) - 1; } repeat_run = min(repeat_run + 1, maxvals - min(literal_run, maxvals)); if (repeat_run < 3) { literal_run += (flush && literal_run + repeat_run >= numvals) ? repeat_run : 0; repeat_run = 0; } break; } rpt_map = next; literal_run += 32; } if (repeat_run >= 130) { // Limit large runs to multiples of 130 repeat_run = (repeat_run >= 3 * 130) ? 3 * 130 : (repeat_run >= 2 * 130) ? 
2 * 130 : 130; } else if (literal_run && literal_run + repeat_run == maxvals) { repeat_run = 0; // Try again at next iteration } s->u.byterle.repeat_run = repeat_run; s->u.byterle.literal_run = min(literal_run, maxvals); } __syncthreads(); literal_run = s->u.byterle.literal_run; if (!flush && literal_run == numvals) { literal_run &= ~0x7f; if (!literal_run) break; } if (literal_run > 0) { uint32_t num_runs = (literal_run + 0x7f) >> 7; if (t < literal_run) { uint32_t run_id = t >> 7; uint32_t run = min(literal_run - run_id * 128, 128); if (!(t & 0x7f)) dst[run_id + t] = 0x100 - run; dst[run_id + t + 1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0; } dst += num_runs + literal_run; out_cnt += literal_run; numvals -= literal_run; inpos += literal_run; } repeat_run = s->u.byterle.repeat_run; if (repeat_run > 0) { while (repeat_run >= 130) { if (t == literal_run) // repeat_run follows literal_run { dst[0] = 0x7f; dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0; } dst += 2; out_cnt += 130; numvals -= 130; inpos += 130; repeat_run -= 130; } if (!flush && repeat_run == numvals) { // Wait for more data in case we can continue the run later break; } if (repeat_run >= 3) { if (t == literal_run) // repeat_run follows literal_run { dst[0] = repeat_run - 3; dst[1] = (cid == CI_PRESENT) ? __brev(v0) >> 24 : v0; } dst += 2; out_cnt += repeat_run; numvals -= repeat_run; inpos += repeat_run; } } } if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); } return out_cnt; } /** * @brief Maps the symbol size in bytes to RLEv2 5-bit length code */ static const __device__ __constant__ uint8_t kByteLengthToRLEv2_W[9] = { 0, 7, 15, 23, 27, 28, 29, 30, 31}; /** * @brief Encode a varint value, return the number of bytes written */ static inline __device__ uint32_t StoreVarint(uint8_t* dst, __uint128_t v) { uint32_t bytecnt = 0; for (;;) { auto c = static_cast<uint32_t>(v & 0x7f); v >>= 7u; if (v == 0) { dst[bytecnt++] = c; break; } else { dst[bytecnt++] = c + 0x80; } } return bytecnt; } template <class T> static inline __device__ void StoreBytesBigEndian(uint8_t* dst, T v, uint32_t w) { for (uint32_t i = 0, b = w * 8; i < w; ++i) { b -= 8; dst[i] = static_cast<uint8_t>(v >> b); } } // Combine and store bits for symbol widths less than 8 static inline __device__ void StoreBitsBigEndian( uint8_t* dst, uint32_t v, uint32_t w, int num_vals, int t) { if (t <= (num_vals | 0x1f)) { uint32_t mask; if (w <= 1) { v = (v << 1) | (shuffle_xor(v, 1) & 0x1); v = (v << 2) | (shuffle_xor(v, 2) & 0x3); v = (v << 4) | (shuffle_xor(v, 4) & 0xf); mask = 0x7; } else if (w <= 2) { v = (v << 2) | (shuffle_xor(v, 1) & 0x3); v = (v << 4) | (shuffle_xor(v, 2) & 0xf); mask = 0x3; } else // if (w <= 4) { v = (v << 4) | (shuffle_xor(v, 1) & 0xf); mask = 0x1; } if (t < num_vals && !(t & mask)) { dst[(t * w) >> 3] = static_cast<uint8_t>(v); } } } /** * @brief Integer RLEv2 encoder * * @tparam cid stream type (strm_pos[cid] will be updated and output stored at * streams[cid]+strm_pos[cid]) * @tparam inmask input buffer position mask for circular buffers * @param[in] s encoder state * @param[in] inbuf base input buffer * @param[in] inpos position in input buffer * @param[in] numvals max number of values to encode * @param[in] flush encode all remaining values if nonzero * @param[in] t thread id * @param[in] temp_storage shared memory storage to perform block reduce * * @return number of input values encoded */ template <StreamIndexType cid, class T, bool is_signed, uint32_t inmask, int block_size, typename Storage> 
static __device__ uint32_t IntegerRLE( orcenc_state_s* s, const T* inbuf, uint32_t inpos, uint32_t numvals, int t, Storage& temp_storage) { using block_reduce = cub::BlockReduce<T, block_size>; uint8_t* dst = s->stream.data_ptrs[cid] + s->strm_pos[cid]; uint32_t out_cnt = 0; __shared__ volatile uint64_t block_vmin; while (numvals > 0) { T v0 = (t < numvals) ? inbuf[(inpos + t) & inmask] : 0; T v1 = (t + 1 < numvals) ? inbuf[(inpos + t + 1) & inmask] : 0; T v2 = (t + 2 < numvals) ? inbuf[(inpos + t + 2) & inmask] : 0; uint32_t delta_map = ballot(t + 2 < numvals && v1 - v0 == v2 - v1), maxvals = min(numvals, 512), literal_run, delta_run; if (!(t & 0x1f)) s->u.intrle.delta_map[t >> 5] = delta_map; __syncthreads(); if (!t) { // Find the start of the next delta run (2 consecutive values with the same delta) literal_run = delta_run = 0; while (literal_run < maxvals) { if (delta_map != 0) { uint32_t literal_run_ofs = __ffs(delta_map) - 1; literal_run += literal_run_ofs; delta_run = __ffs(~((delta_map >> literal_run_ofs) >> 1)); if (literal_run_ofs + delta_run == 32) { for (;;) { uint32_t delta_idx = (literal_run + delta_run) >> 5; delta_map = (delta_idx < 512 / 32) ? s->u.intrle.delta_map[delta_idx] : 0; if (delta_map != ~0) break; delta_run += 32; } delta_run += __ffs(~delta_map) - 1; } delta_run += 2; break; } literal_run += 32; delta_map = s->u.intrle.delta_map[(literal_run >> 5)]; } literal_run = min(literal_run, maxvals); s->u.intrle.literal_run = literal_run; s->u.intrle.delta_run = min(delta_run, maxvals - literal_run); } __syncthreads(); literal_run = s->u.intrle.literal_run; // Find minimum and maximum values if (literal_run > 0) { // Find min & max T vmin = (t < literal_run) ? v0 : std::numeric_limits<T>::max(); T vmax = (t < literal_run) ? v0 : std::numeric_limits<T>::min(); uint32_t literal_mode, literal_w; vmin = block_reduce(temp_storage).Reduce(vmin, cub::Min()); __syncthreads(); vmax = block_reduce(temp_storage).Reduce(vmax, cub::Max()); if (t == 0) { uint32_t mode1_w, mode2_w; typename std::make_unsigned<T>::type vrange_mode1, vrange_mode2; block_vmin = static_cast<uint64_t>(vmin); if (sizeof(T) > 4) { vrange_mode1 = (is_signed) ? max(zigzag(vmin), zigzag(vmax)) : vmax; vrange_mode2 = vmax - vmin; mode1_w = 8 - min(CountLeadingBytes64(vrange_mode1), 7); mode2_w = 8 - min(CountLeadingBytes64(vrange_mode2), 7); } else { vrange_mode1 = (is_signed) ? max(zigzag(vmin), zigzag(vmax)) : vmax; vrange_mode2 = vmax - vmin; mode1_w = 4 - min(CountLeadingBytes32(vrange_mode1), 3); mode2_w = 4 - min(CountLeadingBytes32(vrange_mode2), 3); } // Decide between mode1 & mode2 (also mode3 for length=2 repeat) if (vrange_mode2 == 0 && mode1_w > 1) { // Should only occur if literal_run==2 (otherwise would have resulted in repeat_run >= // 3) uint32_t bytecnt = 2; dst[0] = 0xC0 + ((literal_run - 1) >> 8); dst[1] = (literal_run - 1) & 0xff; bytecnt += StoreVarint(dst + 2, vrange_mode1); dst[bytecnt++] = 0; // Zero delta s->u.intrle.literal_mode = 3; s->u.intrle.literal_w = bytecnt; } else { uint32_t range, w; if (mode1_w > mode2_w && (literal_run - 1) * (mode1_w - mode2_w) > 4) { s->u.intrle.literal_mode = 2; w = mode2_w; range = (uint32_t)vrange_mode2; } else { s->u.intrle.literal_mode = 1; w = mode1_w; range = (uint32_t)vrange_mode1; } if (w == 1) w = (range >= 16) ? w << 3 : (range >= 4) ? 4 : (range >= 2) ? 
2 : 1; else w <<= 3; // bytes -> bits s->u.intrle.literal_w = w; } } __syncthreads(); vmin = static_cast<T>(block_vmin); literal_mode = s->u.intrle.literal_mode; literal_w = s->u.intrle.literal_w; if (literal_mode == 1) { // Direct mode if (!t) { dst[0] = 0x40 + ((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 + ((literal_run - 1) >> 8); dst[1] = (literal_run - 1) & 0xff; } dst += 2; typename std::make_unsigned<T>::type zzv0 = v0; if (t < literal_run) { zzv0 = zigzag(v0); } if (literal_w < 8) { StoreBitsBigEndian(dst, zzv0, literal_w, literal_run, t); } else if (t < literal_run) { StoreBytesBigEndian(dst + t * (literal_w >> 3), zzv0, (literal_w >> 3)); } } else if (literal_mode == 2) { // Patched base mode if (!t) { uint32_t bw, pw = 1, pll, pgw = 1, bv_scale = (is_signed) ? 0 : 1; vmax = (is_signed) ? ((vmin < 0) ? -vmin : vmin) * 2 : vmin; bw = (sizeof(T) > 4) ? (8 - min(CountLeadingBytes64(vmax << bv_scale), 7)) : (4 - min(CountLeadingBytes32(vmax << bv_scale), 3)); if (zero_pll_war) { // Insert a dummy zero patch pll = 1; dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 0] = 0; dst[4 + bw + ((literal_run * literal_w + 7) >> 3) + 1] = 0; } else { pll = 0; } dst[0] = 0x80 + ((literal_w < 8) ? literal_w - 1 : kByteLengthToRLEv2_W[literal_w >> 3]) * 2 + ((literal_run - 1) >> 8); dst[1] = (literal_run - 1) & 0xff; dst[2] = ((bw - 1) << 5) | kByteLengthToRLEv2_W[pw]; dst[3] = ((pgw - 1) << 5) | pll; if (is_signed) { vmax >>= 1; vmax |= vmin & ((T)1 << (bw * 8 - 1)); } StoreBytesBigEndian(dst + 4, vmax, bw); s->u.intrle.hdr_bytes = 4 + bw; s->u.intrle.pl_bytes = (pll * (pw * 8 + pgw) + 7) >> 3; } __syncthreads(); dst += s->u.intrle.hdr_bytes; v0 -= (t < literal_run) ? vmin : 0; if (literal_w < 8) StoreBitsBigEndian(dst, (uint32_t)v0, literal_w, literal_run, t); else if (t < literal_run) StoreBytesBigEndian(dst + t * (literal_w >> 3), v0, (literal_w >> 3)); dst += s->u.intrle.pl_bytes; } else { // Delta mode dst += literal_w; literal_w = 0; } dst += (literal_run * literal_w + 7) >> 3; numvals -= literal_run; inpos += literal_run; out_cnt += literal_run; __syncthreads(); } delta_run = s->u.intrle.delta_run; if (delta_run > 0) { if (t == literal_run) { int64_t delta = (int64_t)v1 - (int64_t)v0; uint64_t delta_base = zigzag(v0); if (delta == 0 && delta_run >= 3 && delta_run <= 10) { // Short repeat uint32_t delta_bw = 8 - min(CountLeadingBytes64(delta_base), 7); dst[0] = ((delta_bw - 1) << 3) + (delta_run - 3); for (uint32_t i = 0, b = delta_bw * 8; i < delta_bw; i++) { b -= 8; dst[1 + i] = static_cast<uint8_t>(delta_base >> b); } s->u.intrle.hdr_bytes = 1 + delta_bw; } else { // Delta uint64_t delta_u = zigzag(delta); uint32_t bytecnt = 2; dst[0] = 0xC0 + ((delta_run - 1) >> 8); dst[1] = (delta_run - 1) & 0xff; bytecnt += StoreVarint(dst + bytecnt, delta_base); bytecnt += StoreVarint(dst + bytecnt, delta_u); s->u.intrle.hdr_bytes = bytecnt; } } __syncthreads(); dst += s->u.intrle.hdr_bytes; numvals -= delta_run; inpos += delta_run; out_cnt += delta_run; } } if (!t) { s->strm_pos[cid] = static_cast<uint32_t>(dst - s->stream.data_ptrs[cid]); } __syncthreads(); return out_cnt; } /** * @brief Store a group of strings as a single concatenated string * * @param[in] dst destination buffer * @param[in] strenc string encoder state * @param[in] len(t) string length (per thread) * @param[in] t thread id */ static __device__ void StoreStringData(uint8_t* dst, strdata_enc_state_s* strenc, uint32_t len, int t) { // Start with summing up all the lengths uint32_t pos = len; 
uint32_t wt = t & 0x1f; for (uint32_t n = 1; n < 32; n <<= 1) { uint32_t tmp = shuffle(pos, (wt & ~n) | (n - 1)); pos += (wt & n) ? tmp : 0; } if (wt == 0x1f) { strenc->lengths_red[t >> 5] = pos; } dst += pos - len; __syncthreads(); if (t < 32) { uint32_t wlen = (wt < 16) ? strenc->lengths_red[wt] : 0; uint32_t wpos = wlen; for (uint32_t n = 1; n < 16; n <<= 1) { uint32_t tmp = shuffle(wpos, (wt & ~n) | (n - 1)); wpos += (wt & n) ? tmp : 0; } if (wt < 16) { strenc->lengths_red[wt] = wpos - wlen; } if (wt == 0xf) { strenc->char_count = wpos; // Update stream position } } __syncthreads(); // TBD: Might be more efficient to loop over 4 strings and copy 8 consecutive character at a time // rather than have each thread to a memcpy if (len > 0) { memcpy(dst + strenc->lengths_red[t >> 5], strenc->str_data[t], len); } } /** * @brief In-place conversion from lengths to positions * * @param[in] vals input values * @param[in] numvals number of values * @param[in] t thread id */ template <class T> inline __device__ void lengths_to_positions(volatile T* vals, uint32_t numvals, unsigned int t) { for (uint32_t n = 1; n < numvals; n <<= 1) { __syncthreads(); if ((t & n) && (t < numvals)) vals[t] += vals[(t & ~n) | (n - 1)]; } } template <int block_size, typename Storage> static __device__ void encode_null_mask(orcenc_state_s* s, bitmask_type const* pushdown_mask, Storage& scan_storage, int t) { if (s->stream.ids[CI_PRESENT] < 0) return; auto const column = *s->chunk.column; while (s->present_rows < s->chunk.null_mask_num_rows or s->numvals > 0) { // Number of rows read so far auto present_rows = s->present_rows; // valid_buf capacity is byte per thread in block auto const buf_available_bits = encode_block_size * 8 - s->numvals; // Number of rows for the block to process in this iteration auto const nrows = min(s->chunk.null_mask_num_rows - present_rows, buf_available_bits); // Number of rows for this thread to process in this iteration auto const t_nrows = min(max(static_cast<int32_t>(nrows) - t * 8, 0), 8); auto const row = s->chunk.null_mask_start_row + present_rows + t * 8; auto get_mask_byte = [&](bitmask_type const* mask, size_type offset) -> uint8_t { if (t_nrows == 0) return 0; if (mask == nullptr) return 0xff; auto const begin_offset = row + offset; auto const end_offset = min(begin_offset + 8, offset + column.size()); auto const mask_word = cudf::detail::get_mask_offset_word(mask, 0, begin_offset, end_offset); return mask_word & 0xff; }; uint8_t pd_byte = (1 << t_nrows) - 1; uint32_t pd_set_cnt = t_nrows; uint32_t offset = t_nrows != 0 ? 
t * 8 : nrows; if (pushdown_mask != nullptr) { pd_byte = get_mask_byte(pushdown_mask, 0) & ((1 << t_nrows) - 1); pd_set_cnt = __popc(pd_byte); // Scan the number of valid bits to get dst offset for each thread cub::BlockScan<uint32_t, block_size>(scan_storage).ExclusiveSum(pd_set_cnt, offset); } auto const mask_byte = get_mask_byte(column.null_mask(), column.offset()); auto dst_offset = offset + s->nnz; auto vbuf_bit_idx = [](int row) { // valid_buf is a circular buffer with validity of 8 rows in each element return row % (encode_block_size * 8); }; if (dst_offset % 8 == 0 and pd_set_cnt == 8) { s->valid_buf[vbuf_bit_idx(dst_offset) / 8] = mask_byte; } else { for (auto bit_idx = 0; bit_idx < t_nrows; ++bit_idx) { // skip bits where pushdown mask is not set if (not(pd_byte & (1 << bit_idx))) continue; if (mask_byte & (1 << bit_idx)) { set_bit(reinterpret_cast<uint32_t*>(s->valid_buf), vbuf_bit_idx(dst_offset++)); } else { clear_bit(reinterpret_cast<uint32_t*>(s->valid_buf), vbuf_bit_idx(dst_offset++)); } } } __syncthreads(); if (t == block_size - 1) { // Number of loaded rows, available for encode s->numvals += offset + pd_set_cnt; // Number of loaded rows (different from present_rows because of pushdown masks) s->nnz += offset + pd_set_cnt; } present_rows += nrows; if (!t) { s->present_rows = present_rows; } __syncthreads(); // RLE encode the present stream if (s->numvals > ((present_rows < s->chunk.null_mask_num_rows) ? 130 * 8 : 0)) { auto const flush = (present_rows < s->chunk.null_mask_num_rows) ? 0 : 7; auto const nbytes_out = (s->numvals + flush) / 8; auto const nrows_encoded = ByteRLE<CI_PRESENT, 0x1ff>(s, s->valid_buf, s->present_out / 8, nbytes_out, flush, t) * 8; if (!t) { // Number of rows encoded so far s->present_out += nrows_encoded; s->numvals -= min(s->numvals, nrows_encoded); } __syncthreads(); } } // reset shared state if (t == 0) { s->nnz = 0; s->numvals = 0; } } /** * @brief Encode column data * * @param[in] chunks encoder chunks device array [column][rowgroup] * @param[in, out] streams chunk streams device array [column][rowgroup] */ // blockDim {`encode_block_size`,1,1} template <int block_size> __global__ void __launch_bounds__(block_size) gpuEncodeOrcColumnData(device_2dspan<EncChunk const> chunks, device_2dspan<encoder_chunk_streams> streams) { __shared__ __align__(16) orcenc_state_s state_g; __shared__ union { typename cub::BlockScan<uint32_t, block_size>::TempStorage scan_u32; typename cub::BlockReduce<int32_t, block_size>::TempStorage i32; typename cub::BlockReduce<int64_t, block_size>::TempStorage i64; typename cub::BlockReduce<uint32_t, block_size>::TempStorage u32; typename cub::BlockReduce<uint64_t, block_size>::TempStorage u64; } temp_storage; orcenc_state_s* const s = &state_g; uint32_t col_id = blockIdx.x; uint32_t group_id = blockIdx.y; int t = threadIdx.x; if (t == 0) { s->chunk = chunks[col_id][group_id]; s->stream = streams[col_id][group_id]; s->cur_row = 0; s->present_rows = 0; s->present_out = 0; s->numvals = 0; s->numlengths = 0; s->nnz = 0; s->strm_pos[CI_DATA] = 0; s->strm_pos[CI_PRESENT] = 0; s->strm_pos[CI_INDEX] = 0; // Dictionary data is encoded in a separate kernel s->strm_pos[CI_DATA2] = s->chunk.encoding_kind == DICTIONARY_V2 ? s->stream.lengths[CI_DATA2] : 0; s->strm_pos[CI_DICTIONARY] = s->chunk.encoding_kind == DICTIONARY_V2 ? 
s->stream.lengths[CI_DICTIONARY] : 0; } __syncthreads(); auto const pushdown_mask = [&]() -> cudf::bitmask_type const* { auto const parent_index = s->chunk.column->parent_index; if (!parent_index.has_value()) return nullptr; return chunks[parent_index.value()][0].column->pushdown_mask; }(); encode_null_mask<block_size>(s, pushdown_mask, temp_storage.scan_u32, t); __syncthreads(); auto const column = *s->chunk.column; while (s->cur_row < s->chunk.num_rows || s->numvals + s->numlengths != 0) { // Fetch non-null values auto const length_stream_only = s->chunk.type_kind == LIST or s->chunk.type_kind == MAP; if (not length_stream_only && s->stream.data_ptrs[CI_DATA] == nullptr) { // Pass-through __syncthreads(); if (!t) { s->cur_row = s->chunk.num_rows; s->strm_pos[CI_DATA] = s->chunk.num_rows * s->chunk.dtype_len; } } else if (s->cur_row < s->chunk.num_rows) { uint32_t maxnumvals = (s->chunk.type_kind == BOOLEAN) ? 2048 : 1024; uint32_t nrows = min(min(s->chunk.num_rows - s->cur_row, maxnumvals - max(s->numvals, s->numlengths)), encode_block_size); auto const row = s->chunk.start_row + s->cur_row + t; auto const is_value_valid = [&]() { if (t >= nrows) return false; return bit_value_or(pushdown_mask, column.offset() + row, true) and bit_value_or(column.null_mask(), column.offset() + row, true); }(); s->buf.u32[t] = is_value_valid ? 1u : 0u; // TODO: Could use a faster reduction relying on _popc() for the initial phase lengths_to_positions(s->buf.u32, encode_block_size, t); __syncthreads(); if (is_value_valid) { int nz_idx = (s->nnz + s->buf.u32[t] - 1) & (maxnumvals - 1); switch (s->chunk.type_kind) { case INT: case DATE: case FLOAT: s->vals.u32[nz_idx] = column.element<uint32_t>(row); break; case DOUBLE: case LONG: s->vals.u64[nz_idx] = column.element<uint64_t>(row); break; case SHORT: s->vals.u32[nz_idx] = column.element<uint16_t>(row); break; case BOOLEAN: case BYTE: s->vals.u8[nz_idx] = column.element<uint8_t>(row); break; case TIMESTAMP: { int64_t ts = column.element<int64_t>(row); int32_t ts_scale = powers_of_ten[9 - min(s->chunk.scale, 9)]; int64_t seconds = ts / ts_scale; int64_t nanos = (ts - seconds * ts_scale); // There is a bug in the ORC spec such that for negative timestamps, it is understood // between the writer and reader that nanos will be adjusted to their positive component // but the negative seconds will be left alone. 
This means that -2.6 is encoded as // seconds = -2 and nanos = 1+(-0.6) = 0.4 // This leads to an error in decoding time where -1 < time (s) < 0 // Details: https://github.com/rapidsai/cudf/pull/5529#issuecomment-648768925 if (nanos < 0) { nanos += ts_scale; } s->vals.i64[nz_idx] = seconds - kORCTimeToUTC; if (nanos != 0) { // Trailing zeroes are encoded in the lower 3-bits uint32_t zeroes = 0; nanos *= powers_of_ten[min(s->chunk.scale, 9)]; if (!(nanos % 100)) { nanos /= 100; zeroes = 1; while (zeroes < 7 && !(nanos % 10)) { nanos /= 10; zeroes++; } } nanos = (nanos << 3) + zeroes; } s->lengths.u64[nz_idx] = nanos; break; } case STRING: if (s->chunk.encoding_kind == DICTIONARY_V2) { uint32_t dict_idx = s->chunk.dict_index[row]; if (dict_idx > 0x7fffffffu) { dict_idx = s->chunk.dict_index[dict_idx & 0x7fffffffu]; } s->vals.u32[nz_idx] = dict_idx; } else { string_view value = column.element<string_view>(row); s->u.strenc.str_data[s->buf.u32[t] - 1] = value.data(); s->lengths.u32[nz_idx] = value.size_bytes(); } break; // Reusing the lengths array for the scale stream // Note: can be written in a faster manner, given that all values are equal case DECIMAL: s->lengths.u32[nz_idx] = zigzag(s->chunk.scale); break; case LIST: case MAP: { auto const& offsets = column.child(lists_column_view::offsets_column_index); // Compute list length from the offsets s->lengths.u32[nz_idx] = offsets.element<size_type>(row + 1 + column.offset()) - offsets.element<size_type>(row + column.offset()); } break; default: break; } } __syncthreads(); if (s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2) { // Store string data uint32_t nz = s->buf.u32[511]; uint32_t nz_idx = (s->nnz + t) & 0x3ff; uint32_t len = (t < nz && s->u.strenc.str_data[t]) ? s->lengths.u32[nz_idx] : 0; StoreStringData(s->stream.data_ptrs[CI_DATA] + s->strm_pos[CI_DATA], &s->u.strenc, len, t); if (!t) { s->strm_pos[CI_DATA] += s->u.strenc.char_count; } __syncthreads(); } else if (s->chunk.type_kind == BOOLEAN) { // bool8 -> 8x bool1 uint32_t nz = s->buf.u32[511]; uint8_t n = ((s->nnz + nz) - (s->nnz & ~7) + 7) >> 3; if (t < n) { uint32_t idx8 = (s->nnz & ~7) + (t << 3); s->lengths.u8[((s->nnz >> 3) + t) & 0x1ff] = ((s->vals.u8[(idx8 + 0) & 0x7ff] & 1) << 7) | ((s->vals.u8[(idx8 + 1) & 0x7ff] & 1) << 6) | ((s->vals.u8[(idx8 + 2) & 0x7ff] & 1) << 5) | ((s->vals.u8[(idx8 + 3) & 0x7ff] & 1) << 4) | ((s->vals.u8[(idx8 + 4) & 0x7ff] & 1) << 3) | ((s->vals.u8[(idx8 + 5) & 0x7ff] & 1) << 2) | ((s->vals.u8[(idx8 + 6) & 0x7ff] & 1) << 1) | ((s->vals.u8[(idx8 + 7) & 0x7ff] & 1) << 0); } __syncthreads(); } if (!t) { uint32_t nz = s->buf.u32[511]; s->nnz += nz; s->numvals += nz; s->numlengths += (s->chunk.type_kind == TIMESTAMP || s->chunk.type_kind == DECIMAL || s->chunk.type_kind == LIST || s->chunk.type_kind == MAP || (s->chunk.type_kind == STRING && s->chunk.encoding_kind != DICTIONARY_V2)) ? nz : 0; s->cur_row += nrows; } __syncthreads(); // Encode values if (s->numvals > 0) { uint32_t flush = (s->cur_row == s->chunk.num_rows) ? 
7 : 0, n; switch (s->chunk.type_kind) { case SHORT: case INT: case DATE: n = IntegerRLE<CI_DATA, int32_t, true, 0x3ff, block_size>( s, s->vals.i32, s->nnz - s->numvals, s->numvals, t, temp_storage.i32); break; case LONG: case TIMESTAMP: n = IntegerRLE<CI_DATA, int64_t, true, 0x3ff, block_size>( s, s->vals.i64, s->nnz - s->numvals, s->numvals, t, temp_storage.i64); break; case BYTE: n = ByteRLE<CI_DATA, 0x3ff>(s, s->vals.u8, s->nnz - s->numvals, s->numvals, flush, t); break; case BOOLEAN: n = ByteRLE<CI_DATA, 0x1ff>(s, s->lengths.u8, (s->nnz - s->numvals + flush) >> 3, (s->numvals + flush) >> 3, flush, t) * 8; break; case FLOAT: StoreBytes<CI_DATA, 0xfff>(s, s->vals.u8, (s->nnz - s->numvals) * 4, s->numvals * 4, t); n = s->numvals; break; case DOUBLE: StoreBytes<CI_DATA, 0x1fff>( s, s->vals.u8, (s->nnz - s->numvals) * 8, s->numvals * 8, t); n = s->numvals; break; case STRING: if (s->chunk.encoding_kind == DICTIONARY_V2) { n = IntegerRLE<CI_DATA, uint32_t, false, 0x3ff, block_size>( s, s->vals.u32, s->nnz - s->numvals, s->numvals, t, temp_storage.u32); } else { n = s->numvals; } break; case DECIMAL: { if (is_value_valid) { auto const id = column.type().id(); __uint128_t const zz_val = id == type_id::DECIMAL32 ? zigzag(column.element<int32_t>(row)) : id == type_id::DECIMAL64 ? zigzag(column.element<int64_t>(row)) : zigzag(column.element<__int128_t>(row)); auto const offset = (row == s->chunk.start_row) ? 0 : s->chunk.decimal_offsets[row - 1]; StoreVarint(s->stream.data_ptrs[CI_DATA] + offset, zz_val); } n = s->numvals; } break; default: n = s->numvals; break; } __syncthreads(); if (!t) { s->numvals -= min(n, s->numvals); } } // Encode secondary stream values if (s->numlengths > 0) { uint32_t n; switch (s->chunk.type_kind) { case TIMESTAMP: n = IntegerRLE<CI_DATA2, uint64_t, false, 0x3ff, block_size>( s, s->lengths.u64, s->nnz - s->numlengths, s->numlengths, t, temp_storage.u64); break; case DECIMAL: case LIST: case MAP: case STRING: n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff, block_size>( s, s->lengths.u32, s->nnz - s->numlengths, s->numlengths, t, temp_storage.u32); break; default: n = s->numlengths; break; } __syncthreads(); if (!t) { s->numlengths -= min(n, s->numlengths); } } } __syncthreads(); } __syncthreads(); if (t <= CI_PRESENT && s->stream.ids[t] >= 0) { // Update actual compressed length // (not needed for decimal data, whose exact size is known before encode) if (!(t == CI_DATA && s->chunk.type_kind == DECIMAL)) streams[col_id][group_id].lengths[t] = s->strm_pos[t]; if (!s->stream.data_ptrs[t]) { streams[col_id][group_id].data_ptrs[t] = static_cast<uint8_t*>(const_cast<void*>(column.head())) + (column.offset() + s->chunk.start_row) * s->chunk.dtype_len; } } } /** * @brief Encode column dictionaries * * @param[in] stripes Stripe dictionaries device array [stripe][string_column] * @param[in] chunks EncChunk device array [rowgroup][column] * @param[in] num_columns Number of columns */ // blockDim {512,1,1} template <int block_size> __global__ void __launch_bounds__(block_size) gpuEncodeStringDictionaries(StripeDictionary const* stripes, device_2dspan<EncChunk const> chunks, device_2dspan<encoder_chunk_streams> streams) { __shared__ __align__(16) orcenc_state_s state_g; __shared__ typename cub::BlockReduce<uint32_t, block_size>::TempStorage temp_storage; orcenc_state_s* const s = &state_g; uint32_t stripe_id = blockIdx.x; uint32_t cid = (blockIdx.y) ? 
CI_DICTIONARY : CI_DATA2; int t = threadIdx.x; if (t == 0) s->u.dict_stripe = stripes[stripe_id]; __syncthreads(); auto const strm_ptr = &streams[s->u.dict_stripe.column_id][s->u.dict_stripe.start_chunk]; if (t == 0) { s->chunk = chunks[s->u.dict_stripe.column_id][s->u.dict_stripe.start_chunk]; s->stream = *strm_ptr; s->strm_pos[cid] = 0; s->numlengths = 0; s->nrows = s->u.dict_stripe.num_strings; s->cur_row = 0; } auto const string_column = s->u.dict_stripe.leaf_column; auto const dict_data = s->u.dict_stripe.dict_data; __syncthreads(); if (s->chunk.encoding_kind != DICTIONARY_V2) { return; // This column isn't using dictionary encoding -> bail out } while (s->cur_row < s->nrows || s->numlengths != 0) { uint32_t numvals = min(s->nrows - s->cur_row, min(1024 - s->numlengths, 512)); uint32_t string_idx = (t < numvals) ? dict_data[s->cur_row + t] : 0; if (cid == CI_DICTIONARY) { // Encoding string contents const char* ptr = 0; uint32_t count = 0; if (t < numvals) { auto string_val = string_column->element<string_view>(string_idx); ptr = string_val.data(); count = string_val.size_bytes(); } s->u.strenc.str_data[t] = ptr; StoreStringData(s->stream.data_ptrs[CI_DICTIONARY] + s->strm_pos[CI_DICTIONARY], &s->u.strenc, (ptr) ? count : 0, t); if (!t) { s->strm_pos[CI_DICTIONARY] += s->u.strenc.char_count; } } else { // Encoding string lengths uint32_t count = (t < numvals) ? static_cast<uint32_t>(string_column->element<string_view>(string_idx).size_bytes()) : 0; uint32_t nz_idx = (s->cur_row + t) & 0x3ff; if (t < numvals) s->lengths.u32[nz_idx] = count; __syncthreads(); if (s->numlengths + numvals > 0) { uint32_t n = IntegerRLE<CI_DATA2, uint32_t, false, 0x3ff, block_size>( s, s->lengths.u32, s->cur_row, s->numlengths + numvals, t, temp_storage); __syncthreads(); if (!t) { s->numlengths += numvals; s->numlengths -= min(n, s->numlengths); } } } if (t == 0) { s->cur_row += numvals; } __syncthreads(); } if (t == 0) { strm_ptr->lengths[cid] = s->strm_pos[cid]; } } /** * @brief Merge chunked column data into a single contiguous stream * * @param[in,out] strm_desc StripeStream device array [stripe][stream] * @param[in,out] streams List of encoder chunk streams [column][rowgroup] */ // blockDim {1024,1,1} __global__ void __launch_bounds__(1024) gpuCompactOrcDataStreams(device_2dspan<StripeStream> strm_desc, device_2dspan<encoder_chunk_streams> streams) { __shared__ __align__(16) StripeStream ss; __shared__ __align__(16) encoder_chunk_streams strm0; __shared__ uint8_t* volatile ck_curptr_g; __shared__ uint32_t volatile ck_curlen_g; auto const stripe_id = blockIdx.x; auto const stream_id = blockIdx.y; uint32_t t = threadIdx.x; if (t == 0) { ss = strm_desc[stripe_id][stream_id]; strm0 = streams[ss.column_id][ss.first_chunk_id]; } __syncthreads(); auto const cid = ss.stream_type; auto dst_ptr = strm0.data_ptrs[cid] + strm0.lengths[cid]; for (auto group = ss.first_chunk_id + 1; group < ss.first_chunk_id + ss.num_chunks; ++group) { uint8_t* src_ptr; uint32_t len; if (t == 0) { src_ptr = streams[ss.column_id][group].data_ptrs[cid]; len = streams[ss.column_id][group].lengths[cid]; if (src_ptr != dst_ptr) { streams[ss.column_id][group].data_ptrs[cid] = dst_ptr; } ck_curptr_g = src_ptr; ck_curlen_g = len; } __syncthreads(); src_ptr = ck_curptr_g; len = ck_curlen_g; if (len > 0 && src_ptr != dst_ptr) { for (uint32_t i = 0; i < len; i += 1024) { uint8_t v = (i + t < len) ? 
src_ptr[i + t] : 0; __syncthreads(); if (i + t < len) { dst_ptr[i + t] = v; } } } dst_ptr += len; __syncthreads(); } if (!t) { strm_desc[stripe_id][stream_id].stream_size = dst_ptr - strm0.data_ptrs[cid]; } } /** * @brief Initializes compression input/output structures * * @param[in] strm_desc StripeStream device array [stripe][stream] * @param[in] chunks EncChunk device array [rowgroup][column] * @param[out] comp_in Per-block compression input parameters * @param[out] comp_out Per-block compression status * @param[in] compressed_bfr Compression output buffer * @param[in] comp_blk_size Compression block size * @param[in] max_comp_blk_size Max size of any block after compression */ // blockDim {256,1,1} __global__ void __launch_bounds__(256) gpuInitCompressionBlocks(device_2dspan<StripeStream const> strm_desc, device_2dspan<encoder_chunk_streams> streams, // const? device_span<gpu_inflate_input_s> comp_in, device_span<gpu_inflate_status_s> comp_out, uint8_t* compressed_bfr, uint32_t comp_blk_size, uint32_t max_comp_blk_size) { __shared__ __align__(16) StripeStream ss; __shared__ uint8_t* volatile uncomp_base_g; auto const stripe_id = blockIdx.x; auto const stream_id = blockIdx.y; uint32_t t = threadIdx.x; uint32_t num_blocks; uint8_t *src, *dst; if (t == 0) { ss = strm_desc[stripe_id][stream_id]; uncomp_base_g = streams[ss.column_id][ss.first_chunk_id].data_ptrs[ss.stream_type]; } __syncthreads(); src = uncomp_base_g; dst = compressed_bfr + ss.bfr_offset; num_blocks = (ss.stream_size > 0) ? (ss.stream_size - 1) / comp_blk_size + 1 : 1; for (uint32_t b = t; b < num_blocks; b += 256) { gpu_inflate_input_s* blk_in = &comp_in[ss.first_block + b]; gpu_inflate_status_s* blk_out = &comp_out[ss.first_block + b]; uint32_t blk_size = min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size)); blk_in->srcDevice = src + b * comp_blk_size; blk_in->srcSize = blk_size; blk_in->dstDevice = dst + b * (BLOCK_HEADER_SIZE + max_comp_blk_size) + BLOCK_HEADER_SIZE; blk_in->dstSize = max_comp_blk_size; blk_out->bytes_written = blk_size; blk_out->status = 1; blk_out->reserved = 0; } } /** * @brief Compacts compressed blocks in a single contiguous stream, and update 3-byte block length *fields * * @param[in,out] strm_desc StripeStream device array [stripe][stream] * @param[in] chunks EncChunk device array [rowgroup][column] * @param[in] comp_in Per-block compression input parameters * @param[in] comp_out Per-block compression status * @param[in] compressed_bfr Compression output buffer * @param[in] comp_blk_size Compression block size * @param[in] max_comp_blk_size Max size of any block after compression */ // blockDim {1024,1,1} __global__ void __launch_bounds__(1024) gpuCompactCompressedBlocks(device_2dspan<StripeStream> strm_desc, device_span<gpu_inflate_input_s> comp_in, device_span<gpu_inflate_status_s> comp_out, uint8_t* compressed_bfr, uint32_t comp_blk_size, uint32_t max_comp_blk_size) { __shared__ __align__(16) StripeStream ss; __shared__ const uint8_t* volatile comp_src_g; __shared__ uint32_t volatile comp_len_g; auto const stripe_id = blockIdx.x; auto const stream_id = blockIdx.y; uint32_t t = threadIdx.x; uint32_t num_blocks, b, blk_size; const uint8_t* src; uint8_t* dst; if (t == 0) ss = strm_desc[stripe_id][stream_id]; __syncthreads(); num_blocks = (ss.stream_size > 0) ? 
(ss.stream_size - 1) / comp_blk_size + 1 : 0; dst = compressed_bfr + ss.bfr_offset; b = 0; do { if (t == 0) { gpu_inflate_input_s* blk_in = &comp_in[ss.first_block + b]; gpu_inflate_status_s* blk_out = &comp_out[ss.first_block + b]; uint32_t src_len = min(comp_blk_size, ss.stream_size - min(b * comp_blk_size, ss.stream_size)); uint32_t dst_len = (blk_out->status == 0) ? blk_out->bytes_written : src_len; uint32_t blk_size24; if (dst_len >= src_len) { // Copy from uncompressed source src = static_cast<const uint8_t*>(blk_in->srcDevice); blk_out->bytes_written = src_len; dst_len = src_len; blk_size24 = dst_len * 2 + 1; } else { // Compressed block src = static_cast<const uint8_t*>(blk_in->dstDevice); blk_size24 = dst_len * 2 + 0; } dst[0] = static_cast<uint8_t>(blk_size24 >> 0); dst[1] = static_cast<uint8_t>(blk_size24 >> 8); dst[2] = static_cast<uint8_t>(blk_size24 >> 16); comp_src_g = src; comp_len_g = dst_len; } __syncthreads(); src = comp_src_g; blk_size = comp_len_g; dst += 3; // skip over length written by thread0 if (src != dst) { for (uint32_t i = 0; i < blk_size; i += 1024) { uint8_t v = (i + t < blk_size) ? src[i + t] : 0; __syncthreads(); if (i + t < blk_size) { dst[i + t] = v; } } } dst += blk_size; __syncthreads(); } while (++b < num_blocks); // Update stripe stream with the compressed size if (t == 0) { strm_desc[stripe_id][stream_id].stream_size = static_cast<uint32_t>(dst - (compressed_bfr + ss.bfr_offset)); } } void EncodeOrcColumnData(device_2dspan<EncChunk const> chunks, device_2dspan<encoder_chunk_streams> streams, rmm::cuda_stream_view stream) { dim3 dim_block(encode_block_size, 1); // `encode_block_size` threads per chunk dim3 dim_grid(chunks.size().first, chunks.size().second); gpuEncodeOrcColumnData<encode_block_size> <<<dim_grid, dim_block, 0, stream.value()>>>(chunks, streams); } void EncodeStripeDictionaries(StripeDictionary const* stripes, device_2dspan<EncChunk const> chunks, uint32_t num_string_columns, uint32_t num_stripes, device_2dspan<encoder_chunk_streams> enc_streams, rmm::cuda_stream_view stream) { dim3 dim_block(512, 1); // 512 threads per dictionary dim3 dim_grid(num_string_columns * num_stripes, 2); gpuEncodeStringDictionaries<512> <<<dim_grid, dim_block, 0, stream.value()>>>(stripes, chunks, enc_streams); } void CompactOrcDataStreams(device_2dspan<StripeStream> strm_desc, device_2dspan<encoder_chunk_streams> enc_streams, rmm::cuda_stream_view stream) { dim3 dim_block(1024, 1); dim3 dim_grid(strm_desc.size().first, strm_desc.size().second); gpuCompactOrcDataStreams<<<dim_grid, dim_block, 0, stream.value()>>>(strm_desc, enc_streams); } void CompressOrcDataStreams(uint8_t* compressed_data, uint32_t num_compressed_blocks, CompressionKind compression, uint32_t comp_blk_size, uint32_t max_comp_blk_size, device_2dspan<StripeStream> strm_desc, device_2dspan<encoder_chunk_streams> enc_streams, device_span<gpu_inflate_input_s> comp_in, device_span<gpu_inflate_status_s> comp_out, rmm::cuda_stream_view stream) { dim3 dim_block_init(256, 1); dim3 dim_grid(strm_desc.size().first, strm_desc.size().second); gpuInitCompressionBlocks<<<dim_grid, dim_block_init, 0, stream.value()>>>( strm_desc, enc_streams, comp_in, comp_out, compressed_data, comp_blk_size, max_comp_blk_size); if (compression == SNAPPY) { if (detail::nvcomp_integration::is_stable_enabled()) { try { size_t temp_size; nvcompStatus_t nvcomp_status = nvcompBatchedSnappyCompressGetTempSize( num_compressed_blocks, comp_blk_size, nvcompBatchedSnappyDefaultOpts, &temp_size); CUDF_EXPECTS(nvcomp_status == 
nvcompStatus_t::nvcompSuccess, "Error in getting snappy compression scratch size"); rmm::device_buffer scratch(temp_size, stream); rmm::device_uvector<void const*> uncompressed_data_ptrs(num_compressed_blocks, stream); rmm::device_uvector<size_t> uncompressed_data_sizes(num_compressed_blocks, stream); rmm::device_uvector<void*> compressed_data_ptrs(num_compressed_blocks, stream); rmm::device_uvector<size_t> compressed_bytes_written(num_compressed_blocks, stream); auto comp_it = thrust::make_zip_iterator(uncompressed_data_ptrs.begin(), uncompressed_data_sizes.begin(), compressed_data_ptrs.begin()); thrust::transform(rmm::exec_policy(stream), comp_in.begin(), comp_in.end(), comp_it, [] __device__(gpu_inflate_input_s in) { return thrust::make_tuple(in.srcDevice, in.srcSize, in.dstDevice); }); nvcomp_status = nvcompBatchedSnappyCompressAsync(uncompressed_data_ptrs.data(), uncompressed_data_sizes.data(), max_comp_blk_size, num_compressed_blocks, scratch.data(), scratch.size(), compressed_data_ptrs.data(), compressed_bytes_written.data(), nvcompBatchedSnappyDefaultOpts, stream.value()); CUDF_EXPECTS(nvcomp_status == nvcompStatus_t::nvcompSuccess, "Error in snappy compression"); thrust::transform(rmm::exec_policy(stream), compressed_bytes_written.begin(), compressed_bytes_written.end(), comp_out.begin(), [] __device__(size_t size) { gpu_inflate_status_s status{}; status.bytes_written = size; return status; }); } catch (...) { // If we reach this then there was an error in compressing so set an error status for each // block thrust::for_each(rmm::exec_policy(stream), comp_out.begin(), comp_out.end(), [] __device__(gpu_inflate_status_s & stat) { stat.status = 1; }); }; } else { gpu_snap(comp_in.data(), comp_out.data(), num_compressed_blocks, stream); } } dim3 dim_block_compact(1024, 1); gpuCompactCompressedBlocks<<<dim_grid, dim_block_compact, 0, stream.value()>>>( strm_desc, comp_in, comp_out, compressed_data, comp_blk_size, max_comp_blk_size); } } // namespace gpu } // namespace orc } // namespace io } // namespace cudf
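/*
 * Illustrative sketch (not part of the .cu file above): the RLEv2 and varint paths above
 * first zigzag-encode signed values so that small negative numbers map to small unsigned
 * codes (0, -1, 1, -2, 2, ... -> 0, 1, 2, 3, 4, ...), matching the zigzag() overloads
 * defined earlier in the file. A host-side round trip with hypothetical helper names:
 */
#include <cassert>
#include <cstdint>
#include <initializer_list>

inline uint32_t zigzag_encode(int32_t v)
{
  int32_t s = v >> 31;  // all-ones for negative v, zero otherwise
  return static_cast<uint32_t>((v ^ s) * 2 - s);
}

inline int32_t zigzag_decode(uint32_t z)
{
  return static_cast<int32_t>(z >> 1) ^ -static_cast<int32_t>(z & 1);
}

int main()
{
  for (int32_t v : {0, 1, -1, 2, -2, 123456, -123456}) {
    assert(zigzag_decode(zigzag_encode(v)) == v);
  }
  return 0;
}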
d5f2f4b44d70330647bda8b37c659e728edb5c54.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument p . extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of l o g e ( 1 + x ) . extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the doubleing point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in doubleing-point. extern "C" // Round to nearest integer value in doubleing-point. extern "C" // Calculate the reciprocal of the square root of the input argument. 
extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. extern "C" // Calculate the sine of the input argument p . extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two doubleing point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the doubleing-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision doubleing-point value afer argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision doubleing-point remainder. extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //WARNING : device_sum size should be gridDim.x __global__ void vec_mulScalar (int n, double *result, double *x, double y) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; int id = idy * gridDim.x * blockDim.x + idx; if (id < n) { result[id] = x[id] * y; } }
d5f2f4b44d70330647bda8b37c659e728edb5c54.cu
#include "includes.h" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector arithmetic ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar arithmetic =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector comparison ====================================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector-and-scalar comparison =========================================== extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //=== Vector math (one argument) ============================================= // Calculate the arc cosine of the input argument. extern "C" // Calculate the nonnegative arc hyperbolic cosine of the input argument. extern "C" // Calculate the arc sine of the input argument. extern "C" // Calculate the arc hyperbolic sine of the input argument. extern "C" // Calculate the arc tangent of the input argument. extern "C" // Calculate the arc hyperbolic tangent of the input argument. extern "C" // Calculate the cube root of the input argument. extern "C" // Calculate ceiling of the input argument. extern "C" // Calculate the cosine of the input argument. extern "C" // Calculate the hyperbolic cosine of the input argument. extern "C" // Calculate the cosine of the input argument × π. extern "C" // Calculate the complementary error function of the input argument. extern "C" // Calculate the inverse complementary error function of the input argument. extern "C" // Calculate the scaled complementary error function of the input argument. extern "C" // Calculate the error function of the input argument. extern "C" // Calculate the inverse error function of the input argument. extern "C" // Calculate the base 10 exponential of the input argument. extern "C" // Calculate the base 2 exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument. extern "C" // Calculate the base e exponential of the input argument, minus 1. extern "C" // Calculate the absolute value of its argument. extern "C" // Calculate the largest integer less than or equal to x. extern "C" // Calculate the value of the Bessel function of the first kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the first kind of order 1 for the input argument. extern "C" // Calculate the natural logarithm of the absolute value of the gamma function of the input argument. extern "C" // Calculate the base 10 logarithm of the input argument. extern "C" // Calculate the value of log_e(1 + x). extern "C" // Calculate the base 2 logarithm of the input argument. extern "C" // Calculate the floating point representation of the exponent of the input argument. extern "C" // Calculate the natural logarithm of the input argument. extern "C" // Calculate the standard normal cumulative distribution function. extern "C" // Calculate the inverse of the standard normal cumulative distribution function. extern "C" // Calculate reciprocal cube root function. extern "C" // Round input to nearest integer value in floating-point. extern "C" // Round to nearest integer value in floating-point. extern "C" // Calculate the reciprocal of the square root of the input argument. extern "C" // Calculate the sine of the input argument. extern "C" // Calculate the hyperbolic sine of the input argument. 
extern "C" // Calculate the sine of the input argument × π. extern "C" // Calculate the square root of the input argument. extern "C" // Calculate the tangent of the input argument. extern "C" // Calculate the hyperbolic tangent of the input argument. extern "C" // Calculate the gamma function of the input argument. extern "C" // Truncate input argument to the integral part. extern "C" // Calculate the value of the Bessel function of the second kind of order 0 for the input argument. extern "C" // Calculate the value of the Bessel function of the second kind of order 1 for the input argument. extern "C" //=== Vector math (two arguments) ============================================ // Create value with given magnitude, copying sign of second value. extern "C" // Compute the positive difference between x and y. extern "C" // Divide two floating point values. extern "C" // Determine the maximum numeric value of the arguments. extern "C" // Determine the minimum numeric value of the arguments. extern "C" // Calculate the floating-point remainder of x / y. extern "C" // Calculate the square root of the sum of squares of two arguments. extern "C" // Return next representable single-precision floating-point value after argument. extern "C" // Calculate the value of first argument to the power of second argument. extern "C" // Compute single-precision floating-point remainder. extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" extern "C" //WARNING : device_sum size should be gridDim.x __global__ void vec_mulScalar (int n, double *result, double *x, double y) { int idx = threadIdx.x + blockIdx.x * blockDim.x; int idy = threadIdx.y + blockIdx.y * blockDim.y; int id = idy * gridDim.x * blockDim.x + idx; if (id < n) { result[id] = x[id] * y; } }
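The only surviving body in this file is the vec_mulScalar kernel; the other functions have been reduced to bare extern "C" stubs with their doc comments. As a reference point, below is a minimal, self-contained driver sketch for that kind of element-wise kernel. The 1-D launch shape, the problem size n, and the scale factor are illustrative assumptions rather than values from the original file, whose kernel flattens a 2-D grid into the same linear index.

// Minimal driver sketch for a vec_mulScalar-style kernel (sizes and the 1-D launch are assumed).
#include <cstdio>
#include <vector>
#include <cuda_runtime.h>

__global__ void vec_mulScalar_1d(int n, double *result, const double *x, double y) {
    int id = blockIdx.x * blockDim.x + threadIdx.x;   // linear thread index over the vector
    if (id < n) result[id] = x[id] * y;
}

int main() {
    const int n = 1 << 20;
    std::vector<double> h_x(n, 1.5);
    double *d_x = NULL, *d_r = NULL;
    cudaMalloc((void**)&d_x, n * sizeof(double));
    cudaMalloc((void**)&d_r, n * sizeof(double));
    cudaMemcpy(d_x, h_x.data(), n * sizeof(double), cudaMemcpyHostToDevice);

    const int block = 256;
    const int grid = (n + block - 1) / block;         // round up so every element is covered
    vec_mulScalar_1d<<<grid, block>>>(n, d_r, d_x, 2.0);
    cudaDeviceSynchronize();

    std::vector<double> h_r(n);
    cudaMemcpy(h_r.data(), d_r, n * sizeof(double), cudaMemcpyDeviceToHost);
    printf("h_r[0] = %f (expected 3.0)\n", h_r[0]);

    cudaFree(d_x);
    cudaFree(d_r);
    return 0;
}

With a 1-D launch, the original kernel's index expression idy * gridDim.x * blockDim.x + idx reduces to exactly the blockIdx.x * blockDim.x + threadIdx.x form used here.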
286c9f5df3a5fd7906dd949b81816cfc0520c0c4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * cache_kernels.cu: This file is part of the gpuroofperf-bench benchmark suite. * * Contact: Elias Konstantinidis <[email protected]> **/ #include <algorithm> #include "cache_kernels.h" namespace benchmark_kernels { #define TOTAL_ITERATIONS (8192) #define UNROLL_ITERATIONS (64) #define UNROLL_ITERATIONS_MEM (UNROLL_ITERATIONS/2) const int BLOCK_SIZE = 256; texture< int, 1, hipReadModeElementType> texdataI1; texture<int2, 1, hipReadModeElementType> texdataI2; texture<int4, 1, hipReadModeElementType> texdataI4; template<class T> class dev_fun{ public: // Pointer displacement operation __device__ unsigned int operator()(T v1, unsigned int v2); // Compute operation (#1) __device__ T operator()(const T &v1, const T &v2); // Compute operation (#2) __device__ T comp_per_element(const T &v1, const T &v2); // Value initialization __device__ T init(int v); // Element loading __device__ T load(volatile const T* p, unsigned int offset); // Element storing __device__ void store(volatile T* p, unsigned int offset, const T &value); // Get first element __device__ int first_element(const T &v); // Reduce elements (XOR operation) __device__ int reduce(const T &v); }; template<> __device__ unsigned int dev_fun<int>::operator()(int v1, unsigned int v2){ return v2+static_cast<unsigned int>(v1) ; } template<> __device__ int dev_fun<int>::operator()(const int &v1, const int &v2){ return v1 + v2; } template<> __device__ int dev_fun<int>::comp_per_element(const int &v1, const int &v2){ return v1 - v2; } template<> __device__ int dev_fun<int>::init(int v){ return v; } template<> __device__ int dev_fun<int>::load(volatile const int* p, unsigned int offset){ int retval; p += offset; // Global level caching (.cg Cache at global level (cache in L2 and below, not L1).) asm volatile ("ld.cg.u32 %0, [%1];" : "=r"(retval) : "l"(p)); return retval; } template<> __device__ void dev_fun<int>::store(volatile int* p, unsigned int offset, const int &value){ p += offset; // Streaming store (.cs Cache streaming, likely to be accessed once.) 
asm volatile ("st.cs.global.u32 [%0], %1;" :: "l"(p), "r"(value)); } template<> __device__ int dev_fun<int>::first_element(const int &v){ return v; } template<> __device__ int dev_fun<int>::reduce(const int &v){ return v; } template<> __device__ unsigned int dev_fun<int2>::operator()(int2 v1, unsigned int v2){ return v2+(unsigned int)(v1.x+v1.y) ; } template<> __device__ int2 dev_fun<int2>::operator()(const int2 &v1, const int2 &v2){ return make_int2(v1.x + v2.x, v1.y + v2.y); } template<> __device__ int2 dev_fun<int2>::comp_per_element(const int2 &v1, const int2 &v2){ return make_int2(v1.x - v2.x, v1.y - v2.y); } template<> __device__ int2 dev_fun<int2>::init(int v){ return make_int2(v, v); } template<> __device__ int2 dev_fun<int2>::load(volatile const int2* p, unsigned int offset){ union{ unsigned long long ll; int2 i2; } retval; p += offset; // Global level caching asm volatile ("ld.cg.u64 %0, [%1];" : "=l"(retval.ll) : "l"(p)); return retval.i2; } template<> __device__ void dev_fun<int2>::store(volatile int2* p, unsigned int offset, const int2 &value){ union{ unsigned long long ll; int2 i2; } retval; retval.i2 = value; p += offset; // Streaming store asm volatile ("st.cs.global.u64 [%0], %1;" :: "l"(p), "l"(retval.ll)); } template<> __device__ int dev_fun<int2>::first_element(const int2 &v){ return v.x; } template<> __device__ int dev_fun<int2>::reduce(const int2 &v){ return v.x ^ v.y; } template<> __device__ unsigned int dev_fun<int4>::operator()(int4 v1, unsigned int v2){ return v2+(unsigned int)(v1.x+v1.y+v1.z+v1.w) ; } template<> __device__ int4 dev_fun<int4>::operator()(const int4 &v1, const int4 &v2){ return make_int4(v1.x + v2.x, v1.y + v2.y, v1.z + v2.z, v1.w + v2.w); } template<> __device__ int4 dev_fun<int4>::comp_per_element(const int4 &v1, const int4 &v2){ return make_int4(v1.x - v2.x, v1.y - v2.y, v1.z - v2.z, v1.w - v2.w); } template<> __device__ int4 dev_fun<int4>::init(int v){ return make_int4(v, v, v, v); } template<> __device__ int4 dev_fun<int4>::load(volatile const int4* p, unsigned int offset){ int4 retval; p += offset; // Global level caching asm volatile ("ld.cg.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(retval.x), "=r"(retval.y), "=r"(retval.z), "=r"(retval.w) : "l"(p)); return retval; } template<> __device__ void dev_fun<int4>::store(volatile int4* p, unsigned int offset, const int4 &value){ p += offset; // Streaming store asm volatile ("st.cs.global.v4.u32 [%0], {%1,%2,%3,%4};" :: "l"(p), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) ); } template<> __device__ int dev_fun<int4>::first_element(const int4 &v){ return v.x; } template<> __device__ int dev_fun<int4>::reduce(const int4 &v){ return v.x ^ v.y ^ v.z ^ v.w; } template <class T, int blockdim, int stepwidth, int index_clamping> __global__ void benchmark_func(T * const g_data){ dev_fun<T> func; // Thread block-wise striding int index = stepwidth*blockIdx.x*blockdim + threadIdx.x; index = index_clamping==0 ? 
index : index % index_clamping; const int stride = blockdim; unsigned int offset = index; T temp = func.init(0); for(int j=0; j<TOTAL_ITERATIONS; j+=UNROLL_ITERATIONS){ // Pretend updating of offset in order to force repetitive loads offset = func(temp, offset); #ifndef TEX_LOADS union { const T *ptr; int2 i; } g_data_load_ptr = { g_data+offset }; #endif #pragma unroll for(int i=0; i<UNROLL_ITERATIONS; i++){ const unsigned int iteration_offset = i % stepwidth; const T v = func.load(g_data_load_ptr.ptr, iteration_offset*stride); // Pretend update of data pointer in order to force reloads g_data_load_ptr.i.x ^= func.reduce(v); temp = v; } } offset = func(temp, offset); if( offset != index ) // Does not actually occur *g_data = func.init(offset); } template<class datatype> void runbench_warmup(datatype *cd, long size){ const long reduced_grid_size = size/(UNROLL_ITERATIONS_MEM)/32; const int TOTAL_REDUCED_BLOCKS = reduced_grid_size/BLOCK_SIZE; dim3 dimBlock(BLOCK_SIZE, 1, 1); dim3 dimReducedGrid(TOTAL_REDUCED_BLOCKS, 1, 1); hipLaunchKernelGGL(( benchmark_func< datatype, BLOCK_SIZE, 1, 256 >), dim3(dimReducedGrid), dim3(dimBlock) , 0, 0, cd); CUDA_SAFE_CALL( hipGetLastError() ); CUDA_SAFE_CALL( hipDeviceSynchronize() ); } template<class datatype, int stepwidth, int index_clamping> double runbench(int total_blocks, datatype *cd, long size){ const long compute_grid_size = total_blocks*BLOCK_SIZE; const long data_size = ((index_clamping==0) ? compute_grid_size : min((int)compute_grid_size, (int)index_clamping))*stepwidth;//*(2-readonly); const long long total_iterations = (long long)(TOTAL_ITERATIONS)*compute_grid_size; const long long memoryoperations = total_iterations; // Set device memory CUDA_SAFE_CALL( hipMemset(cd, 0, size*sizeof(datatype)) ); // initialize to zeros dim3 dimBlock(BLOCK_SIZE, 1, 1); dim3 dimGrid(total_blocks, 1, 1); hipEvent_t start, stop; initializeEvents(&start, &stop); hipLaunchKernelGGL(( benchmark_func< datatype, BLOCK_SIZE, stepwidth, index_clamping >), dim3(dimGrid), dim3(dimBlock) , 0, 0, cd); float kernel_time = finalizeEvents(start, stop); double bandwidth = (static_cast<double>(memoryoperations)*sizeof(datatype))/kernel_time*1000./(1000.*1000.*1000.); int current_device; hipDeviceProp_t deviceProp; CUDA_SAFE_CALL( hipGetDevice(&current_device) ); CUDA_SAFE_CALL( hipGetDeviceProperties(&deviceProp, current_device) ); return bandwidth; } template<class datatype> double cachebenchGPU(long size){ // Construct grid size hipDeviceProp_t deviceProp; int current_device; CUDA_SAFE_CALL( hipGetDevice(&current_device) ); CUDA_SAFE_CALL( hipGetDeviceProperties(&deviceProp, current_device) ); const int SM_count = deviceProp.multiProcessorCount; const int Threads_per_SM = deviceProp.maxThreadsPerMultiProcessor; const int BLOCKS_PER_SM = Threads_per_SM/BLOCK_SIZE; const int TOTAL_BLOCKS = BLOCKS_PER_SM * SM_count; datatype *cd; CUDA_SAFE_CALL( hipMalloc((void**)&cd, size*sizeof(datatype)) ); // Set device memory CUDA_SAFE_CALL( hipMemset(cd, 0, size*sizeof(datatype)) ); // initialize to zeros // Bind textures to buffer hipBindTexture(0, texdataI1, cd, size*sizeof(datatype)); hipBindTexture(0, texdataI2, cd, size*sizeof(datatype)); hipBindTexture(0, texdataI4, cd, size*sizeof(datatype)); // Synchronize in order to wait for memory operations to finish CUDA_SAFE_CALL( hipDeviceSynchronize() ); runbench_warmup(cd, size); double peak_bw = 0.0; peak_bw = max( peak_bw, runbench<datatype, 1, 512>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 1, 
1024>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 1, 2048>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 1, 4096>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 1, 8192>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 1, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 2, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 3, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 4, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 5, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 6, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 7, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 8, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 9, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 10, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 11, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 12, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 13, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 14, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 15, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 16, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 18, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 20, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 22, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 24, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 28, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 32, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 40, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 48, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 56, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 64, 0>(TOTAL_BLOCKS, cd, size) ); // Copy results back to host memory (not needed) //CUDA_SAFE_CALL( hipMemcpy(c, cd, size*sizeof(datatype), hipMemcpyDeviceToHost) ); // Unbind textures hipUnbindTexture(texdataI1); hipUnbindTexture(texdataI2); hipUnbindTexture(texdataI4); CUDA_SAFE_CALL( hipFree(cd) ); return peak_bw; } generic_benchmark_result_list run_cache_benchmark(void) { constexpr unsigned int VECTOR_SIZE = 8*1024*1024; double peak_bw_ro_int1 = cachebenchGPU<int>(VECTOR_SIZE); double peak_bw_ro_int2 = cachebenchGPU<int2>(VECTOR_SIZE); double peak_bw_ro_int4 = cachebenchGPU<int4>(VECTOR_SIZE); generic_benchmark_result_list results; results["BW_L2"] = ::max( {peak_bw_ro_int1, peak_bw_ro_int2, peak_bw_ro_int4} ); return results; } }
286c9f5df3a5fd7906dd949b81816cfc0520c0c4.cu
/** * cache_kernels.cu: This file is part of the gpuroofperf-bench benchmark suite. * * Contact: Elias Konstantinidis <[email protected]> **/ #include <algorithm> #include "cache_kernels.h" namespace benchmark_kernels { #define TOTAL_ITERATIONS (8192) #define UNROLL_ITERATIONS (64) #define UNROLL_ITERATIONS_MEM (UNROLL_ITERATIONS/2) const int BLOCK_SIZE = 256; texture< int, 1, cudaReadModeElementType> texdataI1; texture<int2, 1, cudaReadModeElementType> texdataI2; texture<int4, 1, cudaReadModeElementType> texdataI4; template<class T> class dev_fun{ public: // Pointer displacement operation __device__ unsigned int operator()(T v1, unsigned int v2); // Compute operation (#1) __device__ T operator()(const T &v1, const T &v2); // Compute operation (#2) __device__ T comp_per_element(const T &v1, const T &v2); // Value initialization __device__ T init(int v); // Element loading __device__ T load(volatile const T* p, unsigned int offset); // Element storing __device__ void store(volatile T* p, unsigned int offset, const T &value); // Get first element __device__ int first_element(const T &v); // Reduce elements (XOR operation) __device__ int reduce(const T &v); }; template<> __device__ unsigned int dev_fun<int>::operator()(int v1, unsigned int v2){ return v2+static_cast<unsigned int>(v1) ; } template<> __device__ int dev_fun<int>::operator()(const int &v1, const int &v2){ return v1 + v2; } template<> __device__ int dev_fun<int>::comp_per_element(const int &v1, const int &v2){ return v1 - v2; } template<> __device__ int dev_fun<int>::init(int v){ return v; } template<> __device__ int dev_fun<int>::load(volatile const int* p, unsigned int offset){ int retval; p += offset; // Global level caching (.cg Cache at global level (cache in L2 and below, not L1).) asm volatile ("ld.cg.u32 %0, [%1];" : "=r"(retval) : "l"(p)); return retval; } template<> __device__ void dev_fun<int>::store(volatile int* p, unsigned int offset, const int &value){ p += offset; // Streaming store (.cs Cache streaming, likely to be accessed once.) 
asm volatile ("st.cs.global.u32 [%0], %1;" :: "l"(p), "r"(value)); } template<> __device__ int dev_fun<int>::first_element(const int &v){ return v; } template<> __device__ int dev_fun<int>::reduce(const int &v){ return v; } template<> __device__ unsigned int dev_fun<int2>::operator()(int2 v1, unsigned int v2){ return v2+(unsigned int)(v1.x+v1.y) ; } template<> __device__ int2 dev_fun<int2>::operator()(const int2 &v1, const int2 &v2){ return make_int2(v1.x + v2.x, v1.y + v2.y); } template<> __device__ int2 dev_fun<int2>::comp_per_element(const int2 &v1, const int2 &v2){ return make_int2(v1.x - v2.x, v1.y - v2.y); } template<> __device__ int2 dev_fun<int2>::init(int v){ return make_int2(v, v); } template<> __device__ int2 dev_fun<int2>::load(volatile const int2* p, unsigned int offset){ union{ unsigned long long ll; int2 i2; } retval; p += offset; // Global level caching asm volatile ("ld.cg.u64 %0, [%1];" : "=l"(retval.ll) : "l"(p)); return retval.i2; } template<> __device__ void dev_fun<int2>::store(volatile int2* p, unsigned int offset, const int2 &value){ union{ unsigned long long ll; int2 i2; } retval; retval.i2 = value; p += offset; // Streaming store asm volatile ("st.cs.global.u64 [%0], %1;" :: "l"(p), "l"(retval.ll)); } template<> __device__ int dev_fun<int2>::first_element(const int2 &v){ return v.x; } template<> __device__ int dev_fun<int2>::reduce(const int2 &v){ return v.x ^ v.y; } template<> __device__ unsigned int dev_fun<int4>::operator()(int4 v1, unsigned int v2){ return v2+(unsigned int)(v1.x+v1.y+v1.z+v1.w) ; } template<> __device__ int4 dev_fun<int4>::operator()(const int4 &v1, const int4 &v2){ return make_int4(v1.x + v2.x, v1.y + v2.y, v1.z + v2.z, v1.w + v2.w); } template<> __device__ int4 dev_fun<int4>::comp_per_element(const int4 &v1, const int4 &v2){ return make_int4(v1.x - v2.x, v1.y - v2.y, v1.z - v2.z, v1.w - v2.w); } template<> __device__ int4 dev_fun<int4>::init(int v){ return make_int4(v, v, v, v); } template<> __device__ int4 dev_fun<int4>::load(volatile const int4* p, unsigned int offset){ int4 retval; p += offset; // Global level caching asm volatile ("ld.cg.v4.u32 {%0,%1,%2,%3}, [%4];" : "=r"(retval.x), "=r"(retval.y), "=r"(retval.z), "=r"(retval.w) : "l"(p)); return retval; } template<> __device__ void dev_fun<int4>::store(volatile int4* p, unsigned int offset, const int4 &value){ p += offset; // Streaming store asm volatile ("st.cs.global.v4.u32 [%0], {%1,%2,%3,%4};" :: "l"(p), "r"(value.x), "r"(value.y), "r"(value.z), "r"(value.w) ); } template<> __device__ int dev_fun<int4>::first_element(const int4 &v){ return v.x; } template<> __device__ int dev_fun<int4>::reduce(const int4 &v){ return v.x ^ v.y ^ v.z ^ v.w; } template <class T, int blockdim, int stepwidth, int index_clamping> __global__ void benchmark_func(T * const g_data){ dev_fun<T> func; // Thread block-wise striding int index = stepwidth*blockIdx.x*blockdim + threadIdx.x; index = index_clamping==0 ? 
index : index % index_clamping; const int stride = blockdim; unsigned int offset = index; T temp = func.init(0); for(int j=0; j<TOTAL_ITERATIONS; j+=UNROLL_ITERATIONS){ // Pretend updating of offset in order to force repetitive loads offset = func(temp, offset); #ifndef TEX_LOADS union { const T *ptr; int2 i; } g_data_load_ptr = { g_data+offset }; #endif #pragma unroll for(int i=0; i<UNROLL_ITERATIONS; i++){ const unsigned int iteration_offset = i % stepwidth; const T v = func.load(g_data_load_ptr.ptr, iteration_offset*stride); // Pretend update of data pointer in order to force reloads g_data_load_ptr.i.x ^= func.reduce(v); temp = v; } } offset = func(temp, offset); if( offset != index ) // Does not actually occur *g_data = func.init(offset); } template<class datatype> void runbench_warmup(datatype *cd, long size){ const long reduced_grid_size = size/(UNROLL_ITERATIONS_MEM)/32; const int TOTAL_REDUCED_BLOCKS = reduced_grid_size/BLOCK_SIZE; dim3 dimBlock(BLOCK_SIZE, 1, 1); dim3 dimReducedGrid(TOTAL_REDUCED_BLOCKS, 1, 1); benchmark_func< datatype, BLOCK_SIZE, 1, 256 ><<< dimReducedGrid, dimBlock >>>(cd); CUDA_SAFE_CALL( cudaGetLastError() ); CUDA_SAFE_CALL( cudaDeviceSynchronize() ); } template<class datatype, int stepwidth, int index_clamping> double runbench(int total_blocks, datatype *cd, long size){ const long compute_grid_size = total_blocks*BLOCK_SIZE; const long data_size = ((index_clamping==0) ? compute_grid_size : min((int)compute_grid_size, (int)index_clamping))*stepwidth;//*(2-readonly); const long long total_iterations = (long long)(TOTAL_ITERATIONS)*compute_grid_size; const long long memoryoperations = total_iterations; // Set device memory CUDA_SAFE_CALL( cudaMemset(cd, 0, size*sizeof(datatype)) ); // initialize to zeros dim3 dimBlock(BLOCK_SIZE, 1, 1); dim3 dimGrid(total_blocks, 1, 1); cudaEvent_t start, stop; initializeEvents(&start, &stop); benchmark_func< datatype, BLOCK_SIZE, stepwidth, index_clamping ><<< dimGrid, dimBlock >>>(cd); float kernel_time = finalizeEvents(start, stop); double bandwidth = (static_cast<double>(memoryoperations)*sizeof(datatype))/kernel_time*1000./(1000.*1000.*1000.); int current_device; cudaDeviceProp deviceProp; CUDA_SAFE_CALL( cudaGetDevice(&current_device) ); CUDA_SAFE_CALL( cudaGetDeviceProperties(&deviceProp, current_device) ); return bandwidth; } template<class datatype> double cachebenchGPU(long size){ // Construct grid size cudaDeviceProp deviceProp; int current_device; CUDA_SAFE_CALL( cudaGetDevice(&current_device) ); CUDA_SAFE_CALL( cudaGetDeviceProperties(&deviceProp, current_device) ); const int SM_count = deviceProp.multiProcessorCount; const int Threads_per_SM = deviceProp.maxThreadsPerMultiProcessor; const int BLOCKS_PER_SM = Threads_per_SM/BLOCK_SIZE; const int TOTAL_BLOCKS = BLOCKS_PER_SM * SM_count; datatype *cd; CUDA_SAFE_CALL( cudaMalloc((void**)&cd, size*sizeof(datatype)) ); // Set device memory CUDA_SAFE_CALL( cudaMemset(cd, 0, size*sizeof(datatype)) ); // initialize to zeros // Bind textures to buffer cudaBindTexture(0, texdataI1, cd, size*sizeof(datatype)); cudaBindTexture(0, texdataI2, cd, size*sizeof(datatype)); cudaBindTexture(0, texdataI4, cd, size*sizeof(datatype)); // Synchronize in order to wait for memory operations to finish CUDA_SAFE_CALL( cudaDeviceSynchronize() ); runbench_warmup(cd, size); double peak_bw = 0.0; peak_bw = max( peak_bw, runbench<datatype, 1, 512>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 1, 1024>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 
1, 2048>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 1, 4096>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 1, 8192>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 1, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 2, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 3, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 4, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 5, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 6, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 7, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 8, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 9, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 10, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 11, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 12, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 13, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 14, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 15, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 16, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 18, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 20, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 22, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 24, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 28, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 32, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 40, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 48, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 56, 0>(TOTAL_BLOCKS, cd, size) ); peak_bw = max( peak_bw, runbench<datatype, 64, 0>(TOTAL_BLOCKS, cd, size) ); // Copy results back to host memory (not needed) //CUDA_SAFE_CALL( cudaMemcpy(c, cd, size*sizeof(datatype), cudaMemcpyDeviceToHost) ); // Unbind textures cudaUnbindTexture(texdataI1); cudaUnbindTexture(texdataI2); cudaUnbindTexture(texdataI4); CUDA_SAFE_CALL( cudaFree(cd) ); return peak_bw; } generic_benchmark_result_list run_cache_benchmark(void) { constexpr unsigned int VECTOR_SIZE = 8*1024*1024; double peak_bw_ro_int1 = cachebenchGPU<int>(VECTOR_SIZE); double peak_bw_ro_int2 = cachebenchGPU<int2>(VECTOR_SIZE); double peak_bw_ro_int4 = cachebenchGPU<int4>(VECTOR_SIZE); generic_benchmark_result_list results; results["BW_L2"] = std::max( {peak_bw_ro_int1, peak_bw_ro_int2, peak_bw_ro_int4} ); return results; } }
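This pair is a convenient reference for the mechanical rewrites hipify applies: cuda* runtime calls and types become their hip* equivalents, and every triple-chevron kernel launch becomes a hipLaunchKernelGGL macro call. A minimal sketch of that launch mapping, using a placeholder kernel rather than benchmark_func:

// Illustrative launch-syntax mapping; the kernel and launch shape are placeholders, not benchmark code.
#include <cuda_runtime.h>

__global__ void touch(int *p) { p[blockIdx.x * blockDim.x + threadIdx.x] = 1; }

void launch_example(int *d_p, int blocks, int threads, cudaStream_t stream) {
    // CUDA form, as written in the .cu file:
    touch<<<blocks, threads, 0, stream>>>(d_p);

    // hipify rewrites the same call site into the macro form seen in the .hip file:
    //   hipLaunchKernelGGL(touch, dim3(blocks), dim3(threads), 0, stream, d_p);
    // argument order: kernel, grid dim, block dim, dynamic shared memory bytes, stream, kernel arguments.
}

The texture<...> references bound with cudaBindTexture translate one-to-one to hipBindTexture here, but this legacy texture-reference API has been removed in recent CUDA releases, so newer ports typically move to texture objects.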
f06f3fb236468bc91b815c64af3f3dd6f2f062e3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void move_random_ai_kernel(int * to_coord, char * board, hiprandState_t* rand_states, char * valid_mv_map_internal){ int gm = blockIdx.x; int gm_offset = gm*MAP_SZ; COUNT_VALID // select random move int rand_ind = (hiprand(&rand_states[gm]) % (n_valid_mvs-1)) + 1; to_coord[gm] = valid_mv_inds[rand_ind]; DASSERT(to_coord[gm] >= 0 && to_coord[gm] < MAP_SZ && board[gm_offset + to_coord[gm]] == 0) } void move_random_ai_launcher(int * moving_player){ hipError_t err; REQ_INIT hipLaunchKernelGGL(( move_random_ai_kernel) , dim3(BATCH_SZ), dim3(1) , 0, 0, ai_to_coord, board, rand_states, valid_mv_map_internal); CHECK_CUDA_ERR move_unit_launcher(ai_to_coord, moving_player, moved_internal); VERIFY_BUFFER_INTEGRITY }
f06f3fb236468bc91b815c64af3f3dd6f2f062e3.cu
__global__ void move_random_ai_kernel(int * to_coord, char * board, curandState_t* rand_states, char * valid_mv_map_internal){ int gm = blockIdx.x; int gm_offset = gm*MAP_SZ; COUNT_VALID // select random move int rand_ind = (curand(&rand_states[gm]) % (n_valid_mvs-1)) + 1; to_coord[gm] = valid_mv_inds[rand_ind]; DASSERT(to_coord[gm] >= 0 && to_coord[gm] < MAP_SZ && board[gm_offset + to_coord[gm]] == 0) } void move_random_ai_launcher(int * moving_player){ cudaError_t err; REQ_INIT move_random_ai_kernel <<< BATCH_SZ, 1 >>> (ai_to_coord, board, rand_states, valid_mv_map_internal); CHECK_CUDA_ERR move_unit_launcher(ai_to_coord, moving_player, moved_internal); VERIFY_BUFFER_INTEGRITY }
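The random-move pair depends on project macros (REQ_INIT, COUNT_VALID, CHECK_CUDA_ERR) and on a rand_states buffer initialized elsewhere, so it is not self-contained. The device-side cuRAND pattern it relies on looks roughly like the sketch below, with the seed, the per-block state layout, and the counts/picks buffers as assumptions; hipify maps curandState_t and curand onto hiprandState_t and hiprand, which is the difference visible between the two files (state initialization happens elsewhere in that project).

// Sketch of the in-kernel cuRAND pattern behind move_random_ai_kernel (one state per game/block).
#include <curand_kernel.h>

__global__ void init_states(curandState_t *states, unsigned long long seed) {
    int id = blockIdx.x;                       // one state per block, matching the <<<BATCH_SZ, 1>>> launch
    curand_init(seed, id, 0, &states[id]);     // arguments: seed, subsequence, offset, state
}

__global__ void pick_random_index(curandState_t *states, const int *counts, int *picks) {
    int id = blockIdx.x;
    int n = counts[id];                        // number of valid choices for this game
    if (n > 1)
        picks[id] = 1 + (curand(&states[id]) % (n - 1));   // skip slot 0, as the original kernel does
    else
        picks[id] = 0;
}

The modulo mapping is slightly biased when the range does not divide 2^32 evenly, which is normally acceptable for random playouts.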
cbc06a172e20e9aa05e4b790b517685f79377138.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <iostream> #include "paddle/fluid/operators/center_loss_op.h" #include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/phi/backends/gpu/gpu_primitives.h" namespace paddle { namespace operators { using phi::PADDLE_CUDA_NUM_THREADS; template <typename T, int BlockDimX, int BlockDimY, int GridDimX> __global__ void ComputeDifferent(T *centers_diff, const T *X, const T *centers, const int64_t *ids, const int64_t N, const int64_t K, const int64_t D) { int idx = threadIdx.x; int idy = blockIdx.x + threadIdx.y * GridDimX; while (idy < K) { int64_t id = ids[idy]; PADDLE_ENFORCE(id >= 0, "Id should larger than 0 but received id: %d.", id); PADDLE_ENFORCE( id < N, "Id should smaller than %d but received id: %d.", N, id); T *out = centers_diff + idy * D; const T *x = X + idy * D; const T *cent = centers + id * D; for (int i = idx; i < D; i += BlockDimX) { out[i] = x[i] - cent[i]; } idy += BlockDimY * GridDimX; } } template <typename T, int BlockDimX, int BlockDimY, int GridDimX> __global__ void UpdateCenters(T *centers, T *centers_diff, const int64_t *ids, const int64_t N, const int64_t K, const int64_t D, const T *alpha) { int idx = threadIdx.x; int idy = blockIdx.x + threadIdx.y * GridDimX; int count; while (idy < K) { int count = 1; int64_t id = ids[idy]; PADDLE_ENFORCE(id >= 0, "Id should larger than 0 but received id: %d.", id); PADDLE_ENFORCE( id < N, "Id should smaller than %d but received id: %d.", N, id); for (int i = 0; i < K; i++) { if (ids[i] == id) { count++; } } const T *diff = centers_diff + idy * D; T *cent = centers + id * D; for (int i = idx; i < D; i += BlockDimX) { phi::CudaAtomicAdd(&cent[i], alpha[0] * diff[i] / count); } idy += BlockDimY * GridDimX; } } template <typename T, typename DeviceContext> class CenterLossCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { auto &device_context = ctx.template device_context<DeviceContext>(); auto stream = device_context.stream(); auto *X = ctx.Input<phi::DenseTensor>("X"); // deep feature auto *labels = ctx.Input<phi::DenseTensor>("Label"); auto *centers = ctx.Input<phi::DenseTensor>("Centers"); auto *update_rate = ctx.Input<phi::DenseTensor>("CenterUpdateRate"); int cluster_num = ctx.Attr<int>("cluster_num"); auto *lr_center = update_rate->data<T>(); bool need_update = static_cast<T>(ctx.Attr<bool>("need_update")); auto x_data = X->data<T>(); auto label_data = labels->data<int64_t>(); auto x_dims = X->dims(); int batch_size = x_dims[0]; const int deep_feat_dim = x_dims[1]; auto *centers_diff = ctx.Output<phi::DenseTensor>("SampleCenterDiff"); auto centers_diff_data = centers_diff->mutable_data<T>(ctx.GetPlace()); auto centers_data = centers->data<T>(); auto centers_dim = centers->dims(); auto *out_loss = ctx.Output<phi::DenseTensor>("Loss"); auto loss_data = 
out_loss->mutable_data<T>(ctx.GetPlace()); auto *centers_out = ctx.Output<phi::DenseTensor>("CentersOut"); auto *centers_out_data = centers_out->mutable_data<T>(ctx.GetPlace()); auto ctx_place = ctx.GetPlace(); if (centers != centers_out) { framework::TensorCopy( *static_cast<const phi::DenseTensor *>(centers), ctx_place, *platform::DeviceContextPool::Instance().Get(ctx_place), static_cast<phi::DenseTensor *>(centers_out)); } int64_t numel = X->numel(); size_t N = centers->dims()[0]; size_t D = centers->dims()[1]; size_t K = labels->numel(); dim3 threads(128, 8); dim3 grids(8, 1); hipLaunchKernelGGL(( ComputeDifferent<T, 128, 8, 8>), dim3(grids), dim3(threads), 0, stream, centers_diff_data, x_data, centers_data, label_data, N, K, D); auto &place = *ctx.template device_context<DeviceContext>().eigen_device(); auto sub_result = EigenMatrix<T>::From(*centers_diff); auto sub_res_pow2 = (sub_result * sub_result) / T(2.0); auto z = EigenVector<T>::Flatten(*out_loss); z.device(place) = sub_res_pow2.sum(Eigen::array<int, 1>({{1}})); if (need_update) { hipLaunchKernelGGL(( UpdateCenters<T, 128, 8, 8>), dim3(grids), dim3(threads), 0, stream, centers_out_data, centers_diff_data, label_data, N, K, D, lr_center); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; PD_REGISTER_STRUCT_KERNEL( center_loss, GPU, ALL_LAYOUT, ops::CenterLossCUDAKernel, float, double) {} PD_REGISTER_STRUCT_KERNEL(center_loss_grad, GPU, ALL_LAYOUT, ops::CenterLossGradKernel, float, double) {}
cbc06a172e20e9aa05e4b790b517685f79377138.cu
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <iostream> #include "paddle/fluid/operators/center_loss_op.h" #include "paddle/fluid/platform/device/gpu/gpu_info.h" #include "paddle/phi/backends/gpu/gpu_primitives.h" namespace paddle { namespace operators { using phi::PADDLE_CUDA_NUM_THREADS; template <typename T, int BlockDimX, int BlockDimY, int GridDimX> __global__ void ComputeDifferent(T *centers_diff, const T *X, const T *centers, const int64_t *ids, const int64_t N, const int64_t K, const int64_t D) { int idx = threadIdx.x; int idy = blockIdx.x + threadIdx.y * GridDimX; while (idy < K) { int64_t id = ids[idy]; PADDLE_ENFORCE(id >= 0, "Id should larger than 0 but received id: %d.", id); PADDLE_ENFORCE( id < N, "Id should smaller than %d but received id: %d.", N, id); T *out = centers_diff + idy * D; const T *x = X + idy * D; const T *cent = centers + id * D; for (int i = idx; i < D; i += BlockDimX) { out[i] = x[i] - cent[i]; } idy += BlockDimY * GridDimX; } } template <typename T, int BlockDimX, int BlockDimY, int GridDimX> __global__ void UpdateCenters(T *centers, T *centers_diff, const int64_t *ids, const int64_t N, const int64_t K, const int64_t D, const T *alpha) { int idx = threadIdx.x; int idy = blockIdx.x + threadIdx.y * GridDimX; int count; while (idy < K) { int count = 1; int64_t id = ids[idy]; PADDLE_ENFORCE(id >= 0, "Id should larger than 0 but received id: %d.", id); PADDLE_ENFORCE( id < N, "Id should smaller than %d but received id: %d.", N, id); for (int i = 0; i < K; i++) { if (ids[i] == id) { count++; } } const T *diff = centers_diff + idy * D; T *cent = centers + id * D; for (int i = idx; i < D; i += BlockDimX) { phi::CudaAtomicAdd(&cent[i], alpha[0] * diff[i] / count); } idy += BlockDimY * GridDimX; } } template <typename T, typename DeviceContext> class CenterLossCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { auto &device_context = ctx.template device_context<DeviceContext>(); auto stream = device_context.stream(); auto *X = ctx.Input<phi::DenseTensor>("X"); // deep feature auto *labels = ctx.Input<phi::DenseTensor>("Label"); auto *centers = ctx.Input<phi::DenseTensor>("Centers"); auto *update_rate = ctx.Input<phi::DenseTensor>("CenterUpdateRate"); int cluster_num = ctx.Attr<int>("cluster_num"); auto *lr_center = update_rate->data<T>(); bool need_update = static_cast<T>(ctx.Attr<bool>("need_update")); auto x_data = X->data<T>(); auto label_data = labels->data<int64_t>(); auto x_dims = X->dims(); int batch_size = x_dims[0]; const int deep_feat_dim = x_dims[1]; auto *centers_diff = ctx.Output<phi::DenseTensor>("SampleCenterDiff"); auto centers_diff_data = centers_diff->mutable_data<T>(ctx.GetPlace()); auto centers_data = centers->data<T>(); auto centers_dim = centers->dims(); auto *out_loss = ctx.Output<phi::DenseTensor>("Loss"); auto loss_data = out_loss->mutable_data<T>(ctx.GetPlace()); auto *centers_out = ctx.Output<phi::DenseTensor>("CentersOut"); auto 
*centers_out_data = centers_out->mutable_data<T>(ctx.GetPlace()); auto ctx_place = ctx.GetPlace(); if (centers != centers_out) { framework::TensorCopy( *static_cast<const phi::DenseTensor *>(centers), ctx_place, *platform::DeviceContextPool::Instance().Get(ctx_place), static_cast<phi::DenseTensor *>(centers_out)); } int64_t numel = X->numel(); size_t N = centers->dims()[0]; size_t D = centers->dims()[1]; size_t K = labels->numel(); dim3 threads(128, 8); dim3 grids(8, 1); ComputeDifferent<T, 128, 8, 8><<<grids, threads, 0, stream>>>( centers_diff_data, x_data, centers_data, label_data, N, K, D); auto &place = *ctx.template device_context<DeviceContext>().eigen_device(); auto sub_result = EigenMatrix<T>::From(*centers_diff); auto sub_res_pow2 = (sub_result * sub_result) / T(2.0); auto z = EigenVector<T>::Flatten(*out_loss); z.device(place) = sub_res_pow2.sum(Eigen::array<int, 1>({{1}})); if (need_update) { UpdateCenters<T, 128, 8, 8><<<grids, threads, 0, stream>>>( centers_out_data, centers_diff_data, label_data, N, K, D, lr_center); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; PD_REGISTER_STRUCT_KERNEL( center_loss, GPU, ALL_LAYOUT, ops::CenterLossCUDAKernel, float, double) {} PD_REGISTER_STRUCT_KERNEL(center_loss_grad, GPU, ALL_LAYOUT, ops::CenterLossGradKernel, float, double) {}
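After ComputeDifferent fills SampleCenterDiff with x_i - c_{y_i}, the Eigen expression on the host reduces it to the per-sample loss loss_i = 0.5 * sum_d diff[i][d]^2. As a sketch of what that expression evaluates (not code from Paddle), the same reduction written as a plain kernel is just a row-wise squared norm:

// Sketch: per-sample center loss from the precomputed differences (row-major diff of shape batch x dim).
__global__ void center_loss_from_diff(const float *diff, float *loss, int batch, int dim) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= batch) return;
    float acc = 0.0f;
    for (int d = 0; d < dim; ++d) {
        float v = diff[i * dim + d];
        acc += v * v;                 // squared distance of sample i to its class center
    }
    loss[i] = 0.5f * acc;             // matches the (sub_result * sub_result) / 2 reduction above
}

UpdateCenters then nudges each referenced center by lr_center * diff / count through phi::CudaAtomicAdd, dividing by a per-label count so that frequently occurring labels are not over-updated.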
642e899c117b1bd2115a34cdfee14eeb77001d82.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * gpu_cuckoo.cu * * Created on: 21-05-2015 * Author: Karol Dzitkowski * * This code was created as my implementation of CUDPP algorithm * of cuckoo hashing found on: https://github.com/cudpp/cudpp * which I used as a model for this implementation */ #include "macros.h" #include "constants.h" #include "hash_function.cuh" #include "cuckoo_hash.hpp" #include "common_cuckoo_hash.cuh" template<unsigned N> __device__ int2 devRetrieveKey( const int2* hashMap, const int hashMap_size, const Constants<N> constants, const int stash_size, const int key ) { unsigned idx = hashFunction(constants.values[0], key, hashMap_size); int2 entry = hashMap[idx]; #pragma unroll for(unsigned i=1; i<N; ++i) { if(entry.x != key && entry.x != EMPTY_BUCKET_KEY) { idx = hashFunction(constants.values[i], key, hashMap_size); entry = hashMap[idx]; } } if(stash_size && entry.x != key) { const int2* stash = hashMap + hashMap_size; idx = hashFunction(constants.values[0], key, stash_size); entry = stash[idx]; } if(entry.x != key) { entry.x = EMPTY_BUCKET_KEY; entry.y = EMPTY_BUCKET_KEY; } return entry; } template<unsigned N> __global__ void retrieve( const int* keys, const int count, const int2* hashMap, const int hashMap_size, const Constants<N> constants, const int stash_size, int2* result) { unsigned long long int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if(idx >= count) return; result[idx] = devRetrieveKey<N>( hashMap, hashMap_size, constants, stash_size, keys[idx]); } template<unsigned N> __device__ unsigned next_loc_cuckoo( const Constants<N> constants, const int hashMap_size, const int key_value, const int last_loc) { unsigned locations[N]; #pragma unroll for (int i=0; i<N; ++i) locations[i] = hashFunction(constants.values[i], key_value, hashMap_size); unsigned next_location = locations[0]; #pragma unroll for (int i=N-2; i>=0; --i) { next_location = (last_loc == locations[i] ? 
locations[i+1] : next_location); } return next_location; } union entry { int2 value; unsigned long long hidden; }; template<unsigned N> __device__ bool devInsertElem( int2* hashMap, const int hashMap_size, const Constants<N> constants, const int stash_size, const int max_iters, int2 value) { unsigned idx = hashFunction(constants.values[0], value.x, hashMap_size); entry e; e.value = value; unsigned long long int* slot; for(unsigned i = 1; i <= max_iters; i++) { slot = reinterpret_cast<unsigned long long int*>(hashMap + idx); e.hidden = atomicExch(slot, e.hidden); if(e.value.x == EMPTY_BUCKET_KEY) break; idx = next_loc_cuckoo(constants, hashMap_size, e.value.x, idx); } if (e.value.x != EMPTY_BUCKET_KEY) { idx = hashFunction(constants.values[0], e.value.x, stash_size); slot = (unsigned long long int*)(hashMap + (hashMap_size + idx)); auto replaced = atomicCAS(slot, EMPTY_BUCKET, e.hidden); if (replaced != EMPTY_BUCKET) return true; } return false; } template<unsigned N> __global__ void insert( const int2* keys, const int count, int2* hashMap, const int hashMap_size, const Constants<N> constants, const int stash_size, const int max_iters, bool* failure) { unsigned long long int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if(idx >= count) return; bool result = devInsertElem<N>( hashMap, hashMap_size, constants, stash_size, max_iters, keys[idx]); if(result) *failure = true; } template<unsigned N> bool common_cuckooHash( int2* values, int in_size, int2* hashMap, int hashMap_size, Constants<N> constants, int stash_size) { auto grid = CuckooHash<N>::GetGrid(in_size); bool* d_result; bool h_result; CUDA_CALL( hipMalloc((void**)&d_result, sizeof(bool)) ); CUDA_CALL( hipMemset(d_result, 0, sizeof(bool)) ); int blockSize = CuckooHash<N>::DEFAULT_BLOCK_SIZE; int maxIters = MAX_RETRIES * N; hipLaunchKernelGGL(( insert<N>), dim3(grid), dim3(blockSize), 0, 0, values, in_size, hashMap, hashMap_size, constants, stash_size, maxIters, d_result); CUDA_CALL( hipMemcpy(&h_result, d_result, sizeof(bool), hipMemcpyDeviceToHost) ); CUDA_CALL( hipFree(d_result) ); return h_result; } template<unsigned N> int2* common_cuckooRetrieve( int* keys, int size, int2* hashMap, int hashMap_size, Constants<N> constants, int stash_size) { auto grid = CuckooHash<N>::GetGrid(size); int2* d_result; CUDA_CALL( hipMalloc((void**)&d_result, size*sizeof(int2)) ); int blockSize = CuckooHash<N>::DEFAULT_BLOCK_SIZE; hipLaunchKernelGGL(( retrieve<N>), dim3(grid), dim3(blockSize), 0, 0, keys, size, hashMap, hashMap_size, constants, stash_size, d_result); return d_result; } template bool common_cuckooHash<2>(int2*, int, int2*, int, Constants<2>, int); template bool common_cuckooHash<3>(int2*, int, int2*, int, Constants<3>, int); template bool common_cuckooHash<4>(int2*, int, int2*, int, Constants<4>, int); template bool common_cuckooHash<5>(int2*, int, int2*, int, Constants<5>, int); template int2* common_cuckooRetrieve<2>(int*, int, int2*, int, Constants<2>, int); template int2* common_cuckooRetrieve<3>(int*, int, int2*, int, Constants<3>, int); template int2* common_cuckooRetrieve<4>(int*, int, int2*, int, Constants<4>, int); template int2* common_cuckooRetrieve<5>(int*, int, int2*, int, Constants<5>, int);
642e899c117b1bd2115a34cdfee14eeb77001d82.cu
/* * gpu_cuckoo.cu * * Created on: 21-05-2015 * Author: Karol Dzitkowski * * This code was created as my implementation of CUDPP algorithm * of cuckoo hashing found on: https://github.com/cudpp/cudpp * which I used as a model for this implementation */ #include "macros.h" #include "constants.h" #include "hash_function.cuh" #include "cuckoo_hash.hpp" #include "common_cuckoo_hash.cuh" template<unsigned N> __device__ int2 devRetrieveKey( const int2* hashMap, const int hashMap_size, const Constants<N> constants, const int stash_size, const int key ) { unsigned idx = hashFunction(constants.values[0], key, hashMap_size); int2 entry = hashMap[idx]; #pragma unroll for(unsigned i=1; i<N; ++i) { if(entry.x != key && entry.x != EMPTY_BUCKET_KEY) { idx = hashFunction(constants.values[i], key, hashMap_size); entry = hashMap[idx]; } } if(stash_size && entry.x != key) { const int2* stash = hashMap + hashMap_size; idx = hashFunction(constants.values[0], key, stash_size); entry = stash[idx]; } if(entry.x != key) { entry.x = EMPTY_BUCKET_KEY; entry.y = EMPTY_BUCKET_KEY; } return entry; } template<unsigned N> __global__ void retrieve( const int* keys, const int count, const int2* hashMap, const int hashMap_size, const Constants<N> constants, const int stash_size, int2* result) { unsigned long long int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if(idx >= count) return; result[idx] = devRetrieveKey<N>( hashMap, hashMap_size, constants, stash_size, keys[idx]); } template<unsigned N> __device__ unsigned next_loc_cuckoo( const Constants<N> constants, const int hashMap_size, const int key_value, const int last_loc) { unsigned locations[N]; #pragma unroll for (int i=0; i<N; ++i) locations[i] = hashFunction(constants.values[i], key_value, hashMap_size); unsigned next_location = locations[0]; #pragma unroll for (int i=N-2; i>=0; --i) { next_location = (last_loc == locations[i] ? 
locations[i+1] : next_location); } return next_location; } union entry { int2 value; unsigned long long hidden; }; template<unsigned N> __device__ bool devInsertElem( int2* hashMap, const int hashMap_size, const Constants<N> constants, const int stash_size, const int max_iters, int2 value) { unsigned idx = hashFunction(constants.values[0], value.x, hashMap_size); entry e; e.value = value; unsigned long long int* slot; for(unsigned i = 1; i <= max_iters; i++) { slot = reinterpret_cast<unsigned long long int*>(hashMap + idx); e.hidden = atomicExch(slot, e.hidden); if(e.value.x == EMPTY_BUCKET_KEY) break; idx = next_loc_cuckoo(constants, hashMap_size, e.value.x, idx); } if (e.value.x != EMPTY_BUCKET_KEY) { idx = hashFunction(constants.values[0], e.value.x, stash_size); slot = (unsigned long long int*)(hashMap + (hashMap_size + idx)); auto replaced = atomicCAS(slot, EMPTY_BUCKET, e.hidden); if (replaced != EMPTY_BUCKET) return true; } return false; } template<unsigned N> __global__ void insert( const int2* keys, const int count, int2* hashMap, const int hashMap_size, const Constants<N> constants, const int stash_size, const int max_iters, bool* failure) { unsigned long long int idx = threadIdx.x + blockIdx.x * blockDim.x + blockIdx.y * blockDim.x * gridDim.x; if(idx >= count) return; bool result = devInsertElem<N>( hashMap, hashMap_size, constants, stash_size, max_iters, keys[idx]); if(result) *failure = true; } template<unsigned N> bool common_cuckooHash( int2* values, int in_size, int2* hashMap, int hashMap_size, Constants<N> constants, int stash_size) { auto grid = CuckooHash<N>::GetGrid(in_size); bool* d_result; bool h_result; CUDA_CALL( cudaMalloc((void**)&d_result, sizeof(bool)) ); CUDA_CALL( cudaMemset(d_result, 0, sizeof(bool)) ); int blockSize = CuckooHash<N>::DEFAULT_BLOCK_SIZE; int maxIters = MAX_RETRIES * N; insert<N><<<grid, blockSize>>>( values, in_size, hashMap, hashMap_size, constants, stash_size, maxIters, d_result); CUDA_CALL( cudaMemcpy(&h_result, d_result, sizeof(bool), cudaMemcpyDeviceToHost) ); CUDA_CALL( cudaFree(d_result) ); return h_result; } template<unsigned N> int2* common_cuckooRetrieve( int* keys, int size, int2* hashMap, int hashMap_size, Constants<N> constants, int stash_size) { auto grid = CuckooHash<N>::GetGrid(size); int2* d_result; CUDA_CALL( cudaMalloc((void**)&d_result, size*sizeof(int2)) ); int blockSize = CuckooHash<N>::DEFAULT_BLOCK_SIZE; retrieve<N><<<grid, blockSize>>>( keys, size, hashMap, hashMap_size, constants, stash_size, d_result); return d_result; } template bool common_cuckooHash<2>(int2*, int, int2*, int, Constants<2>, int); template bool common_cuckooHash<3>(int2*, int, int2*, int, Constants<3>, int); template bool common_cuckooHash<4>(int2*, int, int2*, int, Constants<4>, int); template bool common_cuckooHash<5>(int2*, int, int2*, int, Constants<5>, int); template int2* common_cuckooRetrieve<2>(int*, int, int2*, int, Constants<2>, int); template int2* common_cuckooRetrieve<3>(int*, int, int2*, int, Constants<3>, int); template int2* common_cuckooRetrieve<4>(int*, int, int2*, int, Constants<4>, int); template int2* common_cuckooRetrieve<5>(int*, int, int2*, int, Constants<5>, int);
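The insertion path above packs each int2 key/value entry into a single 64-bit word so that one atomicExch both claims a slot and returns the evicted occupant, then bounds the eviction chain by max_iters before falling back to the stash. A stripped-down sketch of that packed exchange, where the empty-key value of -1 is an assumption standing in for EMPTY_BUCKET_KEY:

// Sketch of the packed-entry atomic exchange used by devInsertElem; the empty-key constant is assumed.
constexpr int kEmptyKey = -1;           // placeholder for EMPTY_BUCKET_KEY

union packed_entry { int2 kv; unsigned long long word; };

__device__ bool place_or_evict(int2 *table, unsigned slot, int2 in, int2 *evicted) {
    packed_entry e;
    e.kv = in;
    unsigned long long *cell = reinterpret_cast<unsigned long long *>(table + slot);
    e.word = atomicExch(cell, e.word);  // new entry goes in, previous occupant comes back atomically
    *evicted = e.kv;
    return e.kv.x == kEmptyKey;         // true means the slot was free and insertion is complete
}

If the returned occupant holds a real key, the caller rehashes it with the next hash function (next_loc_cuckoo) and repeats; after max_iters evictions the leftover entry is parked in the stash with a single atomicCAS against an empty slot.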
260f6147c3cb85ebe4a91477761f847ee5e6cee9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @precisions normal z -> s d c */ #include <stdio.h> #include "common_magma.h" #define zgemv_bs 32 extern __shared__ magmaDoubleComplex shared_data[]; __global__ void kernel_zgemvn_batched( int m, int n, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy) { magmaDoubleComplex *A = A_array[blockIdx.x]; magmaDoubleComplex *x = x_array[blockIdx.x]; magmaDoubleComplex *y = y_array[blockIdx.x]; int tx = threadIdx.x; magmaDoubleComplex res = MAGMA_Z_ZERO; magmaDoubleComplex *buff = (magmaDoubleComplex*)shared_data; if(tx < n) { buff[tx] = x[tx*incx]; } __syncthreads(); if(tx < m ) { for(int j=0; j < n ; j++) { res += A[tx]*buff[j]; A += lda; } y[tx*incy] = alpha * res + y[tx*incy] * beta; } } /* Matrix Non-transpose Vector Multiplication y := alpha*A*x + beta*y, */ extern "C" void magmablas_zgemvn_batched( int m, int n, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy, int batchCount) { if( m > 512 || n > 512) { fprintf( stderr, "m=%d, n=%d, zgemv_batched nontranspose assume row && column lower than %d. Plz call magmablas_zgemv instead", m, n, 512); return ; } dim3 grid(batchCount, 1, 1); dim3 threads(max(m,n), 1, 1); hipLaunchKernelGGL(( kernel_zgemvn_batched), dim3(grid), dim3(threads), n * sizeof(magmaDoubleComplex) , 0, m, n, alpha, A_array, lda, x_array, incx, beta, y_array, incy); } __global__ void kernel_zgemvt_batched( int m, int n, int m1, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy) { magmaDoubleComplex *A_ptr = A_array[blockIdx.x]; magmaDoubleComplex *x_ptr = x_array[blockIdx.x]; magmaDoubleComplex *y_ptr = y_array[blockIdx.x]; int tx = threadIdx.x; magmaDoubleComplex res = MAGMA_Z_ZERO; if(tx<m) { A_ptr += lda * blockIdx.y + tx; x_ptr += tx * incx; } __shared__ magmaDoubleComplex sdata[zgemv_bs]; for(int i=0; i<m1; i+= zgemv_bs) { res += A_ptr[i] * x_ptr[i*incx]; } if(m > m1) { if( tx + m1 < m ) { res += A_ptr[m1] * x_ptr[m1*incx]; } else { res = res; } } sdata[tx] = res; __syncthreads(); for(int s=blockDim.x/2; s>32;s>>=1) { if(tx<s) { sdata[tx] += sdata[tx+s]; } __syncthreads(); } if(zgemv_bs > 32) { if(tx<32) { sdata[tx] += sdata[tx+32]; } } if(tx == 0) { for(int i=1;i<32;i++) { sdata[tx] += sdata[tx + i]; } y_ptr[blockIdx.y * incy] = sdata[0] * alpha + beta * y_ptr[blockIdx.y*incy]; } } /* Matrix Transpose Vector Multiplication y := alpha*A'*x + beta*y, */ extern "C" void magmablas_zgemvt_batched( int m, int n, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy, int batchCount) { dim3 grid(batchCount, n, 1); dim3 threads(zgemv_bs, 1, 1); int m1 = (m / zgemv_bs) * zgemv_bs; hipLaunchKernelGGL(( kernel_zgemvt_batched) , dim3(grid), dim3(threads) , 0, 0, m, n, m1, alpha, A_array, lda, x_array, incx, beta, y_array, incy); } #if defined(PRECISION_z) || defined (PRECISION_c) __global__ void kernel_zgemvc_batched( int m, int n, int m1, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, 
int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy) { magmaDoubleComplex *A_ptr = A_array[blockIdx.x]; magmaDoubleComplex *x_ptr = x_array[blockIdx.x]; magmaDoubleComplex *y_ptr = y_array[blockIdx.x]; int tx = threadIdx.x; magmaDoubleComplex res = MAGMA_Z_ZERO; if(tx<m) { A_ptr += lda * blockIdx.y + tx; x_ptr += tx * incx; } __shared__ magmaDoubleComplex sdata[zgemv_bs]; for(int i=0; i<m1; i+= zgemv_bs) { res += MAGMA_Z_CNJG (A_ptr[i]) * x_ptr[i*incx]; } if(m > m1) { if( tx + m1 < m ) { res += MAGMA_Z_CNJG(A_ptr[m1]) * x_ptr[m1*incx]; } else { res = res; } } sdata[tx] = res; __syncthreads(); for(int s=blockDim.x/2; s>32;s>>=1) { if(tx<s) { sdata[tx] += sdata[tx+s]; } __syncthreads(); } if(zgemv_bs > 32) { if(tx<32) { sdata[tx] += sdata[tx+32]; } } if(tx == 0) { for(int i=1;i<32;i++) { sdata[tx] += sdata[tx + i]; } y_ptr[blockIdx.y * incy] = sdata[0] * alpha + beta * y_ptr[blockIdx.y*incy]; } } /* Matrix Conjugate Transpose Vector Multiplication y := alpha*conjg(A')*x + beta*y, */ extern "C" void magmablas_zgemvc_batched( int m, int n, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy, int batchCount) { dim3 grid(batchCount, n, 1); dim3 threads(zgemv_bs, 1, 1); int m1 = (m / zgemv_bs) * zgemv_bs; hipLaunchKernelGGL(( kernel_zgemvc_batched) , dim3(grid), dim3(threads) , 0, 0, m, n, m1, alpha, A_array, lda, x_array, incx, beta, y_array, incy); } #endif // defined(PRECISION_z) || defined (PRECISION_c) /** Purpose ------- This routine computes Y = alpha opt(A) x + beta y, on the GPU, where A = A_array[i],x = x_array[i] and y = y_array[i], i=[0,batchCount-1]. This is a batched version. @param[in] trans CHARACTER*1. On entry, TRANS specifies the form of op( A ) to be used in the matrix multiplication as follows: = 'N': op( A ) = A. = 'T': op( A ) = A**T. = 'C': op( A ) = A**H. @param[in] m INTEGER. On entry, M specifies the number of rows of the matrix opt(A). @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix opt(A) @param[in] alpha COMPLEX*16. On entry, ALPHA specifies the scalar alpha. @param[in] A_array A = A_array[i] A: COMPLEX*16 array of dimension ( LDA, n ) on the GPU. @param[in] lda INTEGER. LDA specifies the leading dimension of A. @param[in] x_array x = x_array[i] x: COMPLEX*16 array of dimension n. @param[in] beta DOUBLE PRECISION. On entry, BETA specifies the scalar beta. @param[out] y_array y = y_array[i]: y: COMPLEX*16 array of dimension n. On exit y = alpha opt(A) x + beta y. @param[in] batchCount INTEGER number of pointers contained in A_array, x_array and y_array. 
@ingroup magma_zblas2 ******************************************************************* */ extern "C" void magmablas_zgemv_batched( magma_trans_t trans, magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, magma_int_t lda, magmaDoubleComplex **x_array, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, magma_int_t incy, magma_int_t batchCount) { if ( trans == MagmaNoTrans ) { magmablas_zgemvn_batched(m, n, alpha, A_array, lda, x_array, incx, beta, y_array, incy, batchCount); } else if ( trans == MagmaTrans ) { magmablas_zgemvt_batched(m, n, alpha, A_array, lda, x_array, incx, beta, y_array, incy, batchCount); } else if ( trans == MagmaConjTrans ) { #if defined(PRECISION_z) || defined (PRECISION_c) magmablas_zgemvc_batched(m, n, alpha, A_array, lda, x_array, incx, beta, y_array, incy, batchCount); #else magmablas_zgemvt_batched(m, n, alpha, A_array, lda, x_array, incx, beta, y_array, incy, batchCount); #endif } else { fprintf( stderr, "trans = %c is invalid\n", lapacke_trans_const(trans) ); } } #undef zgemv_bs
260f6147c3cb85ebe4a91477761f847ee5e6cee9.cu
/* -- MAGMA (version 1.5.0-beta3) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date July 2014 @precisions normal z -> s d c */ #include <stdio.h> #include "common_magma.h" #define zgemv_bs 32 extern __shared__ magmaDoubleComplex shared_data[]; __global__ void kernel_zgemvn_batched( int m, int n, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy) { magmaDoubleComplex *A = A_array[blockIdx.x]; magmaDoubleComplex *x = x_array[blockIdx.x]; magmaDoubleComplex *y = y_array[blockIdx.x]; int tx = threadIdx.x; magmaDoubleComplex res = MAGMA_Z_ZERO; magmaDoubleComplex *buff = (magmaDoubleComplex*)shared_data; if(tx < n) { buff[tx] = x[tx*incx]; } __syncthreads(); if(tx < m ) { for(int j=0; j < n ; j++) { res += A[tx]*buff[j]; A += lda; } y[tx*incy] = alpha * res + y[tx*incy] * beta; } } /* Matrix Non-transpose Vector Multiplication y := alpha*A*x + beta*y, */ extern "C" void magmablas_zgemvn_batched( int m, int n, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy, int batchCount) { if( m > 512 || n > 512) { fprintf( stderr, "m=%d, n=%d, zgemv_batched nontranspose assume row && column lower than %d. Plz call magmablas_zgemv instead", m, n, 512); return ; } dim3 grid(batchCount, 1, 1); dim3 threads(max(m,n), 1, 1); kernel_zgemvn_batched<<< grid, threads, n * sizeof(magmaDoubleComplex) >>>( m, n, alpha, A_array, lda, x_array, incx, beta, y_array, incy); } __global__ void kernel_zgemvt_batched( int m, int n, int m1, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy) { magmaDoubleComplex *A_ptr = A_array[blockIdx.x]; magmaDoubleComplex *x_ptr = x_array[blockIdx.x]; magmaDoubleComplex *y_ptr = y_array[blockIdx.x]; int tx = threadIdx.x; magmaDoubleComplex res = MAGMA_Z_ZERO; if(tx<m) { A_ptr += lda * blockIdx.y + tx; x_ptr += tx * incx; } __shared__ magmaDoubleComplex sdata[zgemv_bs]; for(int i=0; i<m1; i+= zgemv_bs) { res += A_ptr[i] * x_ptr[i*incx]; } if(m > m1) { if( tx + m1 < m ) { res += A_ptr[m1] * x_ptr[m1*incx]; } else { res = res; } } sdata[tx] = res; __syncthreads(); for(int s=blockDim.x/2; s>32;s>>=1) { if(tx<s) { sdata[tx] += sdata[tx+s]; } __syncthreads(); } if(zgemv_bs > 32) { if(tx<32) { sdata[tx] += sdata[tx+32]; } } if(tx == 0) { for(int i=1;i<32;i++) { sdata[tx] += sdata[tx + i]; } y_ptr[blockIdx.y * incy] = sdata[0] * alpha + beta * y_ptr[blockIdx.y*incy]; } } /* Matrix Transpose Vector Multiplication y := alpha*A'*x + beta*y, */ extern "C" void magmablas_zgemvt_batched( int m, int n, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy, int batchCount) { dim3 grid(batchCount, n, 1); dim3 threads(zgemv_bs, 1, 1); int m1 = (m / zgemv_bs) * zgemv_bs; kernel_zgemvt_batched <<< grid, threads >>>(m, n, m1, alpha, A_array, lda, x_array, incx, beta, y_array, incy); } #if defined(PRECISION_z) || defined (PRECISION_c) __global__ void kernel_zgemvc_batched( int m, int n, int m1, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy) { magmaDoubleComplex *A_ptr = 
A_array[blockIdx.x]; magmaDoubleComplex *x_ptr = x_array[blockIdx.x]; magmaDoubleComplex *y_ptr = y_array[blockIdx.x]; int tx = threadIdx.x; magmaDoubleComplex res = MAGMA_Z_ZERO; if(tx<m) { A_ptr += lda * blockIdx.y + tx; x_ptr += tx * incx; } __shared__ magmaDoubleComplex sdata[zgemv_bs]; for(int i=0; i<m1; i+= zgemv_bs) { res += MAGMA_Z_CNJG (A_ptr[i]) * x_ptr[i*incx]; } if(m > m1) { if( tx + m1 < m ) { res += MAGMA_Z_CNJG(A_ptr[m1]) * x_ptr[m1*incx]; } else { res = res; } } sdata[tx] = res; __syncthreads(); for(int s=blockDim.x/2; s>32;s>>=1) { if(tx<s) { sdata[tx] += sdata[tx+s]; } __syncthreads(); } if(zgemv_bs > 32) { if(tx<32) { sdata[tx] += sdata[tx+32]; } } if(tx == 0) { for(int i=1;i<32;i++) { sdata[tx] += sdata[tx + i]; } y_ptr[blockIdx.y * incy] = sdata[0] * alpha + beta * y_ptr[blockIdx.y*incy]; } } /* Matrix Conjugate Transpose Vector Multiplication y := alpha*conjg(A')*x + beta*y, */ extern "C" void magmablas_zgemvc_batched( int m, int n, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, int lda, magmaDoubleComplex **x_array, int incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, int incy, int batchCount) { dim3 grid(batchCount, n, 1); dim3 threads(zgemv_bs, 1, 1); int m1 = (m / zgemv_bs) * zgemv_bs; kernel_zgemvc_batched <<< grid, threads >>>(m, n, m1, alpha, A_array, lda, x_array, incx, beta, y_array, incy); } #endif // defined(PRECISION_z) || defined (PRECISION_c) /** Purpose ------- This routine computes Y = alpha opt(A) x + beta y, on the GPU, where A = A_array[i],x = x_array[i] and y = y_array[i], i=[0,batchCount-1]. This is a batched version. @param[in] trans CHARACTER*1. On entry, TRANS specifies the form of op( A ) to be used in the matrix multiplication as follows: = 'N': op( A ) = A. = 'T': op( A ) = A**T. = 'C': op( A ) = A**H. @param[in] m INTEGER. On entry, M specifies the number of rows of the matrix opt(A). @param[in] n INTEGER. On entry, N specifies the number of columns of the matrix opt(A) @param[in] alpha COMPLEX*16. On entry, ALPHA specifies the scalar alpha. @param[in] A_array A = A_array[i] A: COMPLEX*16 array of dimension ( LDA, n ) on the GPU. @param[in] lda INTEGER. LDA specifies the leading dimension of A. @param[in] x_array x = x_array[i] x: COMPLEX*16 array of dimension n. @param[in] beta DOUBLE PRECISION. On entry, BETA specifies the scalar beta. @param[out] y_array y = y_array[i]: y: COMPLEX*16 array of dimension n. On exit y = alpha opt(A) x + beta y. @param[in] batchCount INTEGER number of pointers contained in A_array, x_array and y_array. 
@ingroup magma_zblas2 ******************************************************************* */ extern "C" void magmablas_zgemv_batched( magma_trans_t trans, magma_int_t m, magma_int_t n, magmaDoubleComplex alpha, magmaDoubleComplex **A_array, magma_int_t lda, magmaDoubleComplex **x_array, magma_int_t incx, magmaDoubleComplex beta, magmaDoubleComplex **y_array, magma_int_t incy, magma_int_t batchCount) { if ( trans == MagmaNoTrans ) { magmablas_zgemvn_batched(m, n, alpha, A_array, lda, x_array, incx, beta, y_array, incy, batchCount); } else if ( trans == MagmaTrans ) { magmablas_zgemvt_batched(m, n, alpha, A_array, lda, x_array, incx, beta, y_array, incy, batchCount); } else if ( trans == MagmaConjTrans ) { #if defined(PRECISION_z) || defined (PRECISION_c) magmablas_zgemvc_batched(m, n, alpha, A_array, lda, x_array, incx, beta, y_array, incy, batchCount); #else magmablas_zgemvt_batched(m, n, alpha, A_array, lda, x_array, incx, beta, y_array, incy, batchCount); #endif } else { fprintf( stderr, "trans = %c is invalid\n", lapacke_trans_const(trans) ); } } #undef zgemv_bs
3eae6a34c416cf3547aebb6703217bd2985bdc67.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <assert.h> #include <hipcub/hipcub.hpp> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/cross_entropy_op.h" #include "caffe2/operators/operator_fallback_gpu.h" namespace caffe2 { namespace { __global__ void LabelCrossEntropyKernel( const int N, const int D, const float* Xdata, const int* labeldata, const float log_threshold, float* Ydata) { CUDA_1D_KERNEL_LOOP(i, N) { CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D); Ydata[i] = -logf(max(Xdata[i * D + labeldata[i]], log_threshold)); } } __global__ void LabelCrossEntropyGradientKernel( const int N, const int D, const float* Xdata, const int* labeldata, const float* dYdata, const float log_threshold, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, N) { int idx = i * D + labeldata[i]; dXdata[idx] = - dYdata[i] / max(Xdata[idx], log_threshold); } } } // namespace template <> bool LabelCrossEntropyOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& label = Input(1); auto* Y = Output(0); int N, D; if (X.ndim() > 1) { N = X.dim32(0); D = X.size_from_dim(1); } else { N = 1; D = X.dim32(0); } CAFFE_ENFORCE( (label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1)); CAFFE_ENFORCE_EQ(label.dim32(0), N); Y->Resize(vector<TIndex>(size_t(1), N)); hipLaunchKernelGGL(( LabelCrossEntropyKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, X.data<float>(), label.data<int>(), kLOG_THRESHOLD(), Y->mutable_data<float>()); return true; } template <> bool LabelCrossEntropyGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& label = Input(1); auto& dY = Input(2); auto* dX = Output(0); int N, D; if (X.ndim() > 1) { N = X.dim32(0); D = X.size_from_dim(1); } else { N = 1; D = X.dim32(0); } CAFFE_ENFORCE( (label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1)); CAFFE_ENFORCE_EQ(label.dim32(0), N); CAFFE_ENFORCE_EQ(dY.ndim(), 1); CAFFE_ENFORCE_EQ(dY.dim32(0), N); dX->ResizeLike(X); math::Set<float, CUDAContext>( dX->size(), 0.f, dX->mutable_data<float>(), &context_); hipLaunchKernelGGL(( LabelCrossEntropyGradientKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, D, X.data<float>(), label.data<int>(), dY.data<float>(), kLOG_THRESHOLD(), dX->mutable_data<float>()); return true; } namespace { __global__ void MakeTwoClassKernel( const int N, const float* Xdata, float* Ydata) { CUDA_1D_KERNEL_LOOP(i, N) { Ydata[i * 2] = 1.0 - Xdata[i]; Ydata[i * 2 + 1] = Xdata[i]; } } __global__ void MakeTwoClassGradientKernel( const int N, const float* dYdata, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, N) { dXdata[i] = dYdata[i * 2 + 1] - dYdata[i * 2]; } } } // namespace template <> bool MakeTwoClassOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* Y = Output(0); auto shape = X.dims(); shape.push_back(2); CAFFE_ENFORCE_LT(X.size(), std::numeric_limits<int>::max() / 2); Y->Resize(shape); int N = X.size(); hipLaunchKernelGGL(( MakeTwoClassKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, X.data<float>(), Y->mutable_data<float>()); return true; } template <> bool MakeTwoClassGradientOp<float, CUDAContext>::RunOnDevice() { auto& dY = Input(0); auto* dX = Output(0); auto shape = dY.dims(); CAFFE_ENFORCE_GE(shape.size(), 1); CAFFE_ENFORCE_EQ(shape.back(), 2); shape.pop_back(); CAFFE_ENFORCE_LT(dY.size(), std::numeric_limits<int>::max()); dX->Resize(shape); int N = dX->size(); 
hipLaunchKernelGGL(( MakeTwoClassGradientKernel), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, dY.data<float>(), dX->mutable_data<float>()); return true; } namespace { __device__ float sigmoid_xent_forward(float lgt, float tgt) { return lgt * (tgt - (lgt >= 0)) - log(1 + exp(lgt - 2 * lgt * (lgt >= 0))); } __device__ float sigmoid_xent_backward(float lgt, float tgt) { return tgt - 1. / (1. + exp(-lgt)); } __global__ void SigmoidCrossEntropyWithLogitsKernel( const int outer_size, const int inner_size, const float* logits_ptr, const float* targets_ptr, float* out_ptr) { int i = blockIdx.x; int last_idx = (i + 1) * inner_size; float value = 0; for (int in_idx = i * inner_size + threadIdx.x; in_idx < last_idx; in_idx += blockDim.x) { value += sigmoid_xent_forward(logits_ptr[in_idx], targets_ptr[in_idx]); } typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; float sum = BlockReduce(temp_storage).Sum(value); if (threadIdx.x == 0) { out_ptr[i] = -sum / inner_size; } } __global__ void SigmoidCrossEntropyGradientWithLogitsKernel( const int outer_size, const int inner_size, const float* g_ptr, const float* logits_ptr, const float* targets_ptr, float* out_ptr) { CUDA_1D_KERNEL_LOOP(in_idx, outer_size * inner_size) { int i = in_idx / inner_size; auto g_factor = -g_ptr[i] / inner_size; out_ptr[in_idx] = g_factor * sigmoid_xent_backward(logits_ptr[in_idx], targets_ptr[in_idx]); } } } // namespace template <> bool SigmoidCrossEntropyWithLogitsOp<float, CUDAContext>::RunOnDevice() { auto& logits = Input(0); auto& targets = Input(1); CAFFE_ENFORCE(logits.dims() == targets.dims()); const auto inner_size = logits.ndim() > 0 ? logits.dims().back() : 1; const auto outer_size = logits.size() / inner_size; auto* out = Output(0); if (logits.ndim() == 0) { out->Resize(std::vector<TIndex>{}); } else { std::vector<TIndex> dims(logits.dims().begin(), logits.dims().end() - 1); out->Resize(dims); } auto* out_ptr = out->mutable_data<float>(); auto* logits_ptr = logits.data<float>(); auto* targets_ptr = targets.data<float>(); hipLaunchKernelGGL(( SigmoidCrossEntropyWithLogitsKernel), dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), outer_size, inner_size, logits_ptr, targets_ptr, out_ptr); return true; } template <> bool SigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>:: RunOnDevice() { auto& g = Input(0); auto& logits = Input(1); auto& targets = Input(2); CAFFE_ENFORCE(logits.dims() == targets.dims()); const auto inner_size = logits.ndim() > 0 ? 
logits.dims().back() : 1; const auto outer_size = logits.size() / inner_size; CAFFE_ENFORCE(g.size() == outer_size); auto* out = Output(0); out->ResizeLike(logits); auto* out_ptr = out->mutable_data<float>(); auto* logits_ptr = logits.data<float>(); auto* targets_ptr = targets.data<float>(); auto* g_ptr = g.data<float>(); hipLaunchKernelGGL(( SigmoidCrossEntropyGradientWithLogitsKernel), dim3(CAFFE_GET_BLOCKS(outer_size * inner_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), outer_size, inner_size, g_ptr, logits_ptr, targets_ptr, out_ptr); return true; } namespace { __global__ void WeightedSigmoidCrossEntropyWithLogitsKernel( const int outer_size, const int inner_size, const float* logits_ptr, const float* targets_ptr, const float* weights_ptr, float* out_ptr) { int i = blockIdx.x; int last_idx = (i + 1) * inner_size; float value = 0; for (int in_idx = i * inner_size + threadIdx.x; in_idx < last_idx; in_idx += blockDim.x) { value += sigmoid_xent_forward(logits_ptr[in_idx], targets_ptr[in_idx]) * weights_ptr[in_idx]; } typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; float sum = BlockReduce(temp_storage).Sum(value); if (threadIdx.x == 0) { out_ptr[i] = -sum / inner_size; } } __global__ void WeightedSigmoidCrossEntropyGradientWithLogitsKernel( const int outer_size, const int inner_size, const float* g_ptr, const float* logits_ptr, const float* targets_ptr, const float* weights_ptr, float* out_ptr) { CUDA_1D_KERNEL_LOOP(in_idx, outer_size * inner_size) { int i = in_idx / inner_size; auto g_factor = -g_ptr[i] / inner_size; out_ptr[in_idx] = g_factor * sigmoid_xent_backward(logits_ptr[in_idx], targets_ptr[in_idx]) * weights_ptr[in_idx]; } } } // namespace template <> bool WeightedSigmoidCrossEntropyWithLogitsOp<float, CUDAContext>:: RunOnDevice() { auto& logits = Input(0); auto& targets = Input(1); auto& weights = Input(2); CAFFE_ENFORCE(logits.dims() == targets.dims()); CAFFE_ENFORCE(weights.dims() == targets.dims()); const auto inner_size = logits.ndim() > 0 ? logits.dims().back() : 1; const auto outer_size = logits.size() / inner_size; auto* out = Output(0); if (logits.ndim() == 0) { out->Resize(std::vector<TIndex>{}); } else { std::vector<TIndex> dims(logits.dims().begin(), logits.dims().end() - 1); out->Resize(dims); } auto* out_ptr = out->mutable_data<float>(); auto* logits_ptr = logits.data<float>(); auto* targets_ptr = targets.data<float>(); auto* weights_ptr = weights.data<float>(); hipLaunchKernelGGL(( WeightedSigmoidCrossEntropyWithLogitsKernel), dim3(outer_size), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), outer_size, inner_size, logits_ptr, targets_ptr, weights_ptr, out_ptr); return true; } template <> bool WeightedSigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>:: RunOnDevice() { auto& g = Input(0); auto& logits = Input(1); auto& targets = Input(2); auto& weights = Input(3); CAFFE_ENFORCE(logits.dims() == targets.dims()); CAFFE_ENFORCE(weights.dims() == targets.dims()); const auto inner_size = logits.ndim() > 0 ? 
logits.dims().back() : 1; const auto outer_size = logits.size() / inner_size; CAFFE_ENFORCE(g.size() == outer_size); auto* out = Output(0); out->ResizeLike(logits); auto* out_ptr = out->mutable_data<float>(); auto* logits_ptr = logits.data<float>(); auto* targets_ptr = targets.data<float>(); auto* weights_ptr = weights.data<float>(); auto* g_ptr = g.data<float>(); hipLaunchKernelGGL(( WeightedSigmoidCrossEntropyGradientWithLogitsKernel), dim3(CAFFE_GET_BLOCKS(outer_size * inner_size)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), outer_size, inner_size, g_ptr, logits_ptr, targets_ptr, weights_ptr, out_ptr); return true; } REGISTER_CUDA_OPERATOR(LabelCrossEntropy, LabelCrossEntropyOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(LabelCrossEntropyGradient, LabelCrossEntropyGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SigmoidCrossEntropyWithLogits, SigmoidCrossEntropyWithLogitsOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SigmoidCrossEntropyWithLogitsGradient, SigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( WeightedSigmoidCrossEntropyWithLogits, WeightedSigmoidCrossEntropyWithLogitsOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( WeightedSigmoidCrossEntropyWithLogitsGradient, WeightedSigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MakeTwoClass, MakeTwoClassOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MakeTwoClassGradient, MakeTwoClassGradientOp<float, CUDAContext>); //TODO(surya) Add full GPU/CUDA support for the CrossEntropyOp REGISTER_CUDA_OPERATOR(CrossEntropy, GPUFallbackOp<CrossEntropyOp<float, CPUContext>>); REGISTER_CUDA_OPERATOR(CrossEntropyGradient, GPUFallbackOp<CrossEntropyGradientOp<float, CPUContext>>); } // namespace caffe2
3eae6a34c416cf3547aebb6703217bd2985bdc67.cu
#include <assert.h> #include <cub/block/block_reduce.cuh> #include "caffe2/core/context_gpu.h" #include "caffe2/operators/cross_entropy_op.h" #include "caffe2/operators/operator_fallback_gpu.h" namespace caffe2 { namespace { __global__ void LabelCrossEntropyKernel( const int N, const int D, const float* Xdata, const int* labeldata, const float log_threshold, float* Ydata) { CUDA_1D_KERNEL_LOOP(i, N) { CUDA_KERNEL_ASSERT(labeldata[i] >= 0 && labeldata[i] < D); Ydata[i] = -logf(max(Xdata[i * D + labeldata[i]], log_threshold)); } } __global__ void LabelCrossEntropyGradientKernel( const int N, const int D, const float* Xdata, const int* labeldata, const float* dYdata, const float log_threshold, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, N) { int idx = i * D + labeldata[i]; dXdata[idx] = - dYdata[i] / max(Xdata[idx], log_threshold); } } } // namespace template <> bool LabelCrossEntropyOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& label = Input(1); auto* Y = Output(0); int N, D; if (X.ndim() > 1) { N = X.dim32(0); D = X.size_from_dim(1); } else { N = 1; D = X.dim32(0); } CAFFE_ENFORCE( (label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1)); CAFFE_ENFORCE_EQ(label.dim32(0), N); Y->Resize(vector<TIndex>(size_t(1), N)); LabelCrossEntropyKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, X.data<float>(), label.data<int>(), kLOG_THRESHOLD(), Y->mutable_data<float>()); return true; } template <> bool LabelCrossEntropyGradientOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto& label = Input(1); auto& dY = Input(2); auto* dX = Output(0); int N, D; if (X.ndim() > 1) { N = X.dim32(0); D = X.size_from_dim(1); } else { N = 1; D = X.dim32(0); } CAFFE_ENFORCE( (label.ndim() == 1) || (label.ndim() == 2 && label.dim32(1) == 1)); CAFFE_ENFORCE_EQ(label.dim32(0), N); CAFFE_ENFORCE_EQ(dY.ndim(), 1); CAFFE_ENFORCE_EQ(dY.dim32(0), N); dX->ResizeLike(X); math::Set<float, CUDAContext>( dX->size(), 0.f, dX->mutable_data<float>(), &context_); LabelCrossEntropyGradientKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, D, X.data<float>(), label.data<int>(), dY.data<float>(), kLOG_THRESHOLD(), dX->mutable_data<float>()); return true; } namespace { __global__ void MakeTwoClassKernel( const int N, const float* Xdata, float* Ydata) { CUDA_1D_KERNEL_LOOP(i, N) { Ydata[i * 2] = 1.0 - Xdata[i]; Ydata[i * 2 + 1] = Xdata[i]; } } __global__ void MakeTwoClassGradientKernel( const int N, const float* dYdata, float* dXdata) { CUDA_1D_KERNEL_LOOP(i, N) { dXdata[i] = dYdata[i * 2 + 1] - dYdata[i * 2]; } } } // namespace template <> bool MakeTwoClassOp<float, CUDAContext>::RunOnDevice() { auto& X = Input(0); auto* Y = Output(0); auto shape = X.dims(); shape.push_back(2); CAFFE_ENFORCE_LT(X.size(), std::numeric_limits<int>::max() / 2); Y->Resize(shape); int N = X.size(); MakeTwoClassKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, X.data<float>(), Y->mutable_data<float>()); return true; } template <> bool MakeTwoClassGradientOp<float, CUDAContext>::RunOnDevice() { auto& dY = Input(0); auto* dX = Output(0); auto shape = dY.dims(); CAFFE_ENFORCE_GE(shape.size(), 1); CAFFE_ENFORCE_EQ(shape.back(), 2); shape.pop_back(); CAFFE_ENFORCE_LT(dY.size(), std::numeric_limits<int>::max()); dX->Resize(shape); int N = dX->size(); MakeTwoClassGradientKernel<<<CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, dY.data<float>(), dX->mutable_data<float>()); return true; } 
namespace { __device__ float sigmoid_xent_forward(float lgt, float tgt) { return lgt * (tgt - (lgt >= 0)) - log(1 + exp(lgt - 2 * lgt * (lgt >= 0))); } __device__ float sigmoid_xent_backward(float lgt, float tgt) { return tgt - 1. / (1. + exp(-lgt)); } __global__ void SigmoidCrossEntropyWithLogitsKernel( const int outer_size, const int inner_size, const float* logits_ptr, const float* targets_ptr, float* out_ptr) { int i = blockIdx.x; int last_idx = (i + 1) * inner_size; float value = 0; for (int in_idx = i * inner_size + threadIdx.x; in_idx < last_idx; in_idx += blockDim.x) { value += sigmoid_xent_forward(logits_ptr[in_idx], targets_ptr[in_idx]); } typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; float sum = BlockReduce(temp_storage).Sum(value); if (threadIdx.x == 0) { out_ptr[i] = -sum / inner_size; } } __global__ void SigmoidCrossEntropyGradientWithLogitsKernel( const int outer_size, const int inner_size, const float* g_ptr, const float* logits_ptr, const float* targets_ptr, float* out_ptr) { CUDA_1D_KERNEL_LOOP(in_idx, outer_size * inner_size) { int i = in_idx / inner_size; auto g_factor = -g_ptr[i] / inner_size; out_ptr[in_idx] = g_factor * sigmoid_xent_backward(logits_ptr[in_idx], targets_ptr[in_idx]); } } } // namespace template <> bool SigmoidCrossEntropyWithLogitsOp<float, CUDAContext>::RunOnDevice() { auto& logits = Input(0); auto& targets = Input(1); CAFFE_ENFORCE(logits.dims() == targets.dims()); const auto inner_size = logits.ndim() > 0 ? logits.dims().back() : 1; const auto outer_size = logits.size() / inner_size; auto* out = Output(0); if (logits.ndim() == 0) { out->Resize(std::vector<TIndex>{}); } else { std::vector<TIndex> dims(logits.dims().begin(), logits.dims().end() - 1); out->Resize(dims); } auto* out_ptr = out->mutable_data<float>(); auto* logits_ptr = logits.data<float>(); auto* targets_ptr = targets.data<float>(); SigmoidCrossEntropyWithLogitsKernel<<< outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( outer_size, inner_size, logits_ptr, targets_ptr, out_ptr); return true; } template <> bool SigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>:: RunOnDevice() { auto& g = Input(0); auto& logits = Input(1); auto& targets = Input(2); CAFFE_ENFORCE(logits.dims() == targets.dims()); const auto inner_size = logits.ndim() > 0 ? 
logits.dims().back() : 1; const auto outer_size = logits.size() / inner_size; CAFFE_ENFORCE(g.size() == outer_size); auto* out = Output(0); out->ResizeLike(logits); auto* out_ptr = out->mutable_data<float>(); auto* logits_ptr = logits.data<float>(); auto* targets_ptr = targets.data<float>(); auto* g_ptr = g.data<float>(); SigmoidCrossEntropyGradientWithLogitsKernel<<< CAFFE_GET_BLOCKS(outer_size * inner_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( outer_size, inner_size, g_ptr, logits_ptr, targets_ptr, out_ptr); return true; } namespace { __global__ void WeightedSigmoidCrossEntropyWithLogitsKernel( const int outer_size, const int inner_size, const float* logits_ptr, const float* targets_ptr, const float* weights_ptr, float* out_ptr) { int i = blockIdx.x; int last_idx = (i + 1) * inner_size; float value = 0; for (int in_idx = i * inner_size + threadIdx.x; in_idx < last_idx; in_idx += blockDim.x) { value += sigmoid_xent_forward(logits_ptr[in_idx], targets_ptr[in_idx]) * weights_ptr[in_idx]; } typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ typename BlockReduce::TempStorage temp_storage; float sum = BlockReduce(temp_storage).Sum(value); if (threadIdx.x == 0) { out_ptr[i] = -sum / inner_size; } } __global__ void WeightedSigmoidCrossEntropyGradientWithLogitsKernel( const int outer_size, const int inner_size, const float* g_ptr, const float* logits_ptr, const float* targets_ptr, const float* weights_ptr, float* out_ptr) { CUDA_1D_KERNEL_LOOP(in_idx, outer_size * inner_size) { int i = in_idx / inner_size; auto g_factor = -g_ptr[i] / inner_size; out_ptr[in_idx] = g_factor * sigmoid_xent_backward(logits_ptr[in_idx], targets_ptr[in_idx]) * weights_ptr[in_idx]; } } } // namespace template <> bool WeightedSigmoidCrossEntropyWithLogitsOp<float, CUDAContext>:: RunOnDevice() { auto& logits = Input(0); auto& targets = Input(1); auto& weights = Input(2); CAFFE_ENFORCE(logits.dims() == targets.dims()); CAFFE_ENFORCE(weights.dims() == targets.dims()); const auto inner_size = logits.ndim() > 0 ? logits.dims().back() : 1; const auto outer_size = logits.size() / inner_size; auto* out = Output(0); if (logits.ndim() == 0) { out->Resize(std::vector<TIndex>{}); } else { std::vector<TIndex> dims(logits.dims().begin(), logits.dims().end() - 1); out->Resize(dims); } auto* out_ptr = out->mutable_data<float>(); auto* logits_ptr = logits.data<float>(); auto* targets_ptr = targets.data<float>(); auto* weights_ptr = weights.data<float>(); WeightedSigmoidCrossEntropyWithLogitsKernel<<< outer_size, CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( outer_size, inner_size, logits_ptr, targets_ptr, weights_ptr, out_ptr); return true; } template <> bool WeightedSigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>:: RunOnDevice() { auto& g = Input(0); auto& logits = Input(1); auto& targets = Input(2); auto& weights = Input(3); CAFFE_ENFORCE(logits.dims() == targets.dims()); CAFFE_ENFORCE(weights.dims() == targets.dims()); const auto inner_size = logits.ndim() > 0 ? 
logits.dims().back() : 1; const auto outer_size = logits.size() / inner_size; CAFFE_ENFORCE(g.size() == outer_size); auto* out = Output(0); out->ResizeLike(logits); auto* out_ptr = out->mutable_data<float>(); auto* logits_ptr = logits.data<float>(); auto* targets_ptr = targets.data<float>(); auto* weights_ptr = weights.data<float>(); auto* g_ptr = g.data<float>(); WeightedSigmoidCrossEntropyGradientWithLogitsKernel<<< CAFFE_GET_BLOCKS(outer_size * inner_size), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( outer_size, inner_size, g_ptr, logits_ptr, targets_ptr, weights_ptr, out_ptr); return true; } REGISTER_CUDA_OPERATOR(LabelCrossEntropy, LabelCrossEntropyOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(LabelCrossEntropyGradient, LabelCrossEntropyGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SigmoidCrossEntropyWithLogits, SigmoidCrossEntropyWithLogitsOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( SigmoidCrossEntropyWithLogitsGradient, SigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( WeightedSigmoidCrossEntropyWithLogits, WeightedSigmoidCrossEntropyWithLogitsOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( WeightedSigmoidCrossEntropyWithLogitsGradient, WeightedSigmoidCrossEntropyWithLogitsGradientOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MakeTwoClass, MakeTwoClassOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(MakeTwoClassGradient, MakeTwoClassGradientOp<float, CUDAContext>); //TODO(surya) Add full GPU/CUDA support for the CrossEntropyOp REGISTER_CUDA_OPERATOR(CrossEntropy, GPUFallbackOp<CrossEntropyOp<float, CPUContext>>); REGISTER_CUDA_OPERATOR(CrossEntropyGradient, GPUFallbackOp<CrossEntropyGradientOp<float, CPUContext>>); } // namespace caffe2
8dd16ad492a16886d5aa4530924dda909dcd6f8c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //---------------------------------*-CUDA-*----------------------------------// // Copyright 2020-2023 UT-Battelle, LLC, and other Celeritas developers. // See the top-level COPYRIGHT file for details. // SPDX-License-Identifier: (Apache-2.0 OR MIT) //---------------------------------------------------------------------------// //! \file celeritas/phys/Particle.test.cu //---------------------------------------------------------------------------// #include "Particle.test.hh" #include <thrust/device_vector.h> #include "corecel/device_runtime_api.h" #include "corecel/sys/Device.hh" #include "corecel/sys/KernelParamCalculator.device.hh" #include "celeritas/phys/ParticleTrackView.hh" using thrust::raw_pointer_cast; namespace celeritas { namespace test { namespace { //---------------------------------------------------------------------------// // KERNELS //---------------------------------------------------------------------------// __global__ void ptv_test_kernel(unsigned int size, DeviceCRef<ParticleParamsData> params, DeviceRef<ParticleStateData> states, ParticleTrackInitializer const* init, double* result) { auto local_tid = TrackSlotId{KernelParamCalculator::thread_id().unchecked_get()}; if (!(local_tid < size)) return; // Initialize particle ParticleTrackView p(params, states, local_tid); p = init[local_tid.get()]; // Skip result to the start for this thread result += local_tid.get() * PTVTestOutput::props_per_thread(); // Calculate/write values from the track view CELER_ASSERT(p.particle_id() == init[local_tid.get()].particle_id); *result++ = p.energy().value(); *result++ = p.mass().value(); *result++ = p.charge().value(); *result++ = p.decay_constant(); *result++ = p.speed().value(); *result++ = (p.mass() > zero_quantity() ? p.lorentz_factor() : -1); *result++ = p.momentum().value(); *result++ = p.momentum_sq().value(); } //---------------------------------------------------------------------------// } // namespace //---------------------------------------------------------------------------// //! Run on device and return results PTVTestOutput ptv_test(PTVTestInput input) { thrust::device_vector<ParticleTrackInitializer> init = input.init; thrust::device_vector<double> result(init.size() * PTVTestOutput::props_per_thread()); CELER_LAUNCH_KERNEL(ptv_test, device().default_block_size(), init.size(), 0, init.size(), input.params, input.states, raw_pointer_cast(init.data()), raw_pointer_cast(result.data())); CELER_DEVICE_CALL_PREFIX(DeviceSynchronize()); PTVTestOutput output; output.props.resize(result.size()); thrust::copy(result.begin(), result.end(), output.props.begin()); return output; } //---------------------------------------------------------------------------// } // namespace test } // namespace celeritas
8dd16ad492a16886d5aa4530924dda909dcd6f8c.cu
//---------------------------------*-CUDA-*----------------------------------// // Copyright 2020-2023 UT-Battelle, LLC, and other Celeritas developers. // See the top-level COPYRIGHT file for details. // SPDX-License-Identifier: (Apache-2.0 OR MIT) //---------------------------------------------------------------------------// //! \file celeritas/phys/Particle.test.cu //---------------------------------------------------------------------------// #include "Particle.test.hh" #include <thrust/device_vector.h> #include "corecel/device_runtime_api.h" #include "corecel/sys/Device.hh" #include "corecel/sys/KernelParamCalculator.device.hh" #include "celeritas/phys/ParticleTrackView.hh" using thrust::raw_pointer_cast; namespace celeritas { namespace test { namespace { //---------------------------------------------------------------------------// // KERNELS //---------------------------------------------------------------------------// __global__ void ptv_test_kernel(unsigned int size, DeviceCRef<ParticleParamsData> params, DeviceRef<ParticleStateData> states, ParticleTrackInitializer const* init, double* result) { auto local_tid = TrackSlotId{KernelParamCalculator::thread_id().unchecked_get()}; if (!(local_tid < size)) return; // Initialize particle ParticleTrackView p(params, states, local_tid); p = init[local_tid.get()]; // Skip result to the start for this thread result += local_tid.get() * PTVTestOutput::props_per_thread(); // Calculate/write values from the track view CELER_ASSERT(p.particle_id() == init[local_tid.get()].particle_id); *result++ = p.energy().value(); *result++ = p.mass().value(); *result++ = p.charge().value(); *result++ = p.decay_constant(); *result++ = p.speed().value(); *result++ = (p.mass() > zero_quantity() ? p.lorentz_factor() : -1); *result++ = p.momentum().value(); *result++ = p.momentum_sq().value(); } //---------------------------------------------------------------------------// } // namespace //---------------------------------------------------------------------------// //! Run on device and return results PTVTestOutput ptv_test(PTVTestInput input) { thrust::device_vector<ParticleTrackInitializer> init = input.init; thrust::device_vector<double> result(init.size() * PTVTestOutput::props_per_thread()); CELER_LAUNCH_KERNEL(ptv_test, device().default_block_size(), init.size(), 0, init.size(), input.params, input.states, raw_pointer_cast(init.data()), raw_pointer_cast(result.data())); CELER_DEVICE_CALL_PREFIX(DeviceSynchronize()); PTVTestOutput output; output.props.resize(result.size()); thrust::copy(result.begin(), result.end(), output.props.begin()); return output; } //---------------------------------------------------------------------------// } // namespace test } // namespace celeritas
7545bfd13434d506b1e8f066f713942b70a2d52a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util/cuPrintf.cu"
#include <stdio.h>

__global__ void device_greetings(void){
  cuPrintf("Hello world from the device!\n");
}

int main(void){
  printf("Hello from the host!\n");

  cudaPrintfInit();
  hipLaunchKernelGGL(( device_greetings), dim3(2),dim3(3), 0, 0, );
  cudaPrintfDisplay();
  cudaPrintfEnd();

  return 0;
}
7545bfd13434d506b1e8f066f713942b70a2d52a.cu
#include "util/cuPrintf.cu" #include <stdio.h> __global__ void device_greetings(void){ cuPrintf("Hello world from the device!\n"); } int main(void){ printf("Hello from the host!\n"); cudaPrintfInit(); device_greetings<<<2,3>>>(); cudaPrintfDisplay(); cudaPrintfEnd(); return 0; }
4a8431d995297f38d260239cdf015231540a758b.hip
// !!! This is a file automatically generated by hipify!!! #include "StiffnessMatrixFirstOrder.h" void StiffnessMatrixFirstOrder::constantCreator(unsigned int numberElement, float* c, float* x, float* y, unsigned int* mesh) { unsigned int i = numberElement*6; c[i++] = (x[mesh[numberElement*4+0]] - x[mesh[numberElement*4+1]] + x[mesh[numberElement*4+2]] - x[mesh[numberElement*4+3]])/4; c[i++] = (x[mesh[numberElement*4+0]] - x[mesh[numberElement*4+1]] - x[mesh[numberElement*4+2]] + x[mesh[numberElement*4+3]])/4; c[i++] = (x[mesh[numberElement*4+0]] - x[mesh[numberElement*4+3]] - x[mesh[numberElement*4+2]] + x[mesh[numberElement*4+1]])/4; c[i++] = (y[mesh[numberElement*4+0]] - y[mesh[numberElement*4+1]] + y[mesh[numberElement*4+2]] - y[mesh[numberElement*4+3]])/4; c[i++] = (y[mesh[numberElement*4+0]] - y[mesh[numberElement*4+1]] - y[mesh[numberElement*4+2]] + y[mesh[numberElement*4+3]])/4; c[i++] = (y[mesh[numberElement*4+0]] - y[mesh[numberElement*4+3]] - y[mesh[numberElement*4+2]] + y[mesh[numberElement*4+1]])/4; // defined the constants c1x to c3y }; void StiffnessMatrixFirstOrder::stiffnessMatrixCalculation(unsigned int numberElement, unsigned int nip ,float* in, unsigned int* ip, float* iw, float* c, float* D, unsigned int* mesh, float* k, unsigned int* i_index, unsigned int* j_index) // numberElement -> the element number needed to be calculated // nip is the number of integration point squared. // in is the integrationNode // ip -> integrationPos // iw -> integrationWeight // c -> constants // D -> material matrix // k -> stiffness matrix { unsigned int counter = 36*(numberElement); for (unsigned int noIP = 0; noIP < nip; noIP++) { // noIP -> the integration point number needed to be calculated double XI = in[ip[2*noIP]]; double YI = in[ip[2*noIP+1]]; // Jacobian double J11 = c[numberElement*6+0]*YI-c[numberElement*6+1]; double J12 = c[numberElement*6+3]*YI-c[numberElement*6+4]; double J21 = c[numberElement*6+0]*XI-c[numberElement*6+2]; double J22 = c[numberElement*6+3]*XI-c[numberElement*6+5]; double detJ = J11*J22-J12*J21; double WeightPerDetJ = (iw[ip[2*noIP]]*iw[ip[2*noIP+1]])/detJ; // derveativs of the shape function N1x N2x ... N1y N2y ... double Ni[8] = {J22*( YI-1)/4 - J12*( XI-1)/4, J22*(-YI+1)/4 - J12*(-XI-1)/4, \ J22*( YI+1)/4 - J12*( XI+1)/4, J22*(-YI-1)/4 - J12*(-XI+1)/4, \ J11*( XI-1)/4 - J21*( YI-1)/4, J11*(-XI-1)/4 - J21*(-YI+1)/4, \ J11*( XI+1)/4 - J21*( YI+1)/4, J11*(-XI+1)/4 - J21*(-YI-1)/4}; // multiplication of shape functions N1x^2 N1x*N2x .... 
double N[36]; unsigned int counterN = 0; for (unsigned int i = 0; i < 8; i++) { for (unsigned int j = i; j < 8 ; j++) N[counterN++] = Ni[i]*Ni[j]; }; // find the position to start filling the stiffness matrix // writes all 36 components of the 8 by 8 stiffness Matrix considering symmetry k[counter+0] = k[counter+0] + WeightPerDetJ*(D[0]*N[0] + 2*D[4]*N[4] + D[2]*N[26]); k[counter+1] = k[counter+1] + WeightPerDetJ*(D[4]*N[0] + D[5]*N[26] + D[3]*N[4] + D[2]*N[4]); k[counter+2] = k[counter+2] + WeightPerDetJ*(D[2]*N[0] + 2*D[5]*N[4] + D[1]*N[26]); k[counter+3] = k[counter+3] + WeightPerDetJ*(D[0]*N[1] + D[4]*N[5] + D[4]*N[11] + D[2]*N[27]); k[counter+4] = k[counter+4] + WeightPerDetJ*(D[4]*N[1] + D[3]*N[11] + D[2]*N[5] + D[5]*N[27]); k[counter+5] = k[counter+5] + WeightPerDetJ*(D[0]*N[8] + 2*D[4]*N[12] + D[2]*N[30]); k[counter+6] = k[counter+6] + WeightPerDetJ*(D[4]*N[1] + D[3]*N[5] + D[2]*N[11] + D[5]*N[27]); k[counter+7] = k[counter+7] + WeightPerDetJ*(D[2]*N[1] + D[5]*N[5] + D[5]*N[11] + D[1]*N[27]); k[counter+8] = k[counter+8] + WeightPerDetJ*(D[4]*N[8] + D[5]*N[30] + D[3]*N[12] + D[2]*N[12]); k[counter+9] = k[counter+9] + WeightPerDetJ*(D[2]*N[8] + 2*D[5]*N[12] + D[1]*N[30]); k[counter+10] = k[counter+10] + WeightPerDetJ*(D[0]*N[2] + D[4]*N[6] + D[4]*N[17] + D[2]*N[28]); k[counter+11] = k[counter+11] + WeightPerDetJ*(D[4]*N[2] + D[3]*N[17] + D[2]*N[6] + D[5]*N[28]); k[counter+12] = k[counter+12] + WeightPerDetJ*(D[0]*N[9] + D[4]*N[13] + D[4]*N[18] + D[2]*N[31]); k[counter+13] = k[counter+13] + WeightPerDetJ*(D[4]*N[9] + D[3]*N[18] + D[2]*N[13] + D[5]*N[31]); k[counter+14] = k[counter+14] + WeightPerDetJ*(D[0]*N[15] + 2*D[4]*N[19] + D[2]*N[33]); k[counter+15] = k[counter+15] + WeightPerDetJ*(D[4]*N[2] + D[3]*N[6] + D[2]*N[17] + D[5]*N[28]); k[counter+16] = k[counter+16] + WeightPerDetJ*(D[2]*N[2] + D[5]*N[6] + D[5]*N[17] + D[1]*N[28]); k[counter+17] = k[counter+17] + WeightPerDetJ*(D[4]*N[9] + D[3]*N[13] + D[2]*N[18] + D[5]*N[31]); k[counter+18] = k[counter+18] + WeightPerDetJ*(D[2]*N[9] + D[5]*N[13] + D[5]*N[18] + D[1]*N[31]); k[counter+19] = k[counter+19] + WeightPerDetJ*(D[4]*N[15] + D[5]*N[33] + D[3]*N[19] + D[2]*N[19]); k[counter+20] = k[counter+20] + WeightPerDetJ*(D[2]*N[15] + 2*D[5]*N[19] + D[1]*N[33]); k[counter+21] = k[counter+21] + WeightPerDetJ*(D[0]*N[3] + D[4]*N[7] + D[4]*N[22] + D[2]*N[29]); k[counter+22] = k[counter+22] + WeightPerDetJ*(D[4]*N[3] + D[3]*N[22] + D[2]*N[7] + D[5]*N[29]); k[counter+23] = k[counter+23] + WeightPerDetJ*(D[0]*N[10] + D[4]*N[14] + D[4]*N[23] + D[2]*N[32]); k[counter+24] = k[counter+24] + WeightPerDetJ*(D[4]*N[10] + D[3]*N[23] + D[2]*N[14] + D[5]*N[32]); k[counter+25] = k[counter+25] + WeightPerDetJ*(D[0]*N[16] + D[4]*N[20] + D[4]*N[24] + D[2]*N[34]); k[counter+26] = k[counter+26] + WeightPerDetJ*(D[4]*N[16] + D[3]*N[24] + D[2]*N[20] + D[5]*N[34]); k[counter+27] = k[counter+27] + WeightPerDetJ*(D[0]*N[21] + 2*D[4]*N[25] + D[2]*N[35]); k[counter+28] = k[counter+28] + WeightPerDetJ*(D[4]*N[3] + D[3]*N[7] + D[2]*N[22] + D[5]*N[29]); k[counter+29] = k[counter+29] + WeightPerDetJ*(D[2]*N[3] + D[5]*N[7] + D[5]*N[22] + D[1]*N[29]); k[counter+30] = k[counter+30] + WeightPerDetJ*(D[4]*N[10] + D[3]*N[14] + D[2]*N[23] + D[5]*N[32]); k[counter+31] = k[counter+31] + WeightPerDetJ*(D[2]*N[10] + D[5]*N[14] + D[5]*N[23] + D[1]*N[32]); k[counter+32] = k[counter+32] + WeightPerDetJ*(D[4]*N[16] + D[3]*N[20] + D[2]*N[24] + D[5]*N[34]); k[counter+33] = k[counter+33] + WeightPerDetJ*(D[2]*N[16] + D[5]*N[20] + D[5]*N[24] + D[1]*N[34]); k[counter+34] = k[counter+34] + 
WeightPerDetJ*(D[4]*N[21] + D[5]*N[35] + D[3]*N[25] + D[2]*N[25]); k[counter+35] = k[counter+35] + WeightPerDetJ*(D[2]*N[21] + 2*D[5]*N[25] + D[1]*N[35]); } unsigned int count = counter; unsigned int xi, xj, yi, yj; for (unsigned int i = 0; i<8; i++) for (unsigned int j = 0; j<i+1; j++) { xi = i/2; yi = i%2-1; xj = j/2; yj = j%2-1; i_index[count] = (mesh[numberElement*4+xi]+1)*2+yi; j_index[count++] = (mesh[numberElement*4+xj]+1)*2+yj; } } StiffnessMatrixFirstOrder::StiffnessMatrixFirstOrder(Material& mat, Geometry& geo, unsigned int n) :StiffnessMatrix(mat,geo,n) { Log::Logger().Info("StiffnessMatrixFirstOrder Created by CPU"); sizeStiffMatPerEle = 36; stiffMatSize = numberOfElements*sizeStiffMatPerEle; simulationSize = numberOfElements; stiffMat = new Sparse(stiffMatSize,geometry->get_x_y_size()*2); for (unsigned int i = 0; i<stiffMat->valueSize; i++) stiffMat->value[i] = 0; hipMallocManaged(&c,numberOfElements*6*sizeof(float)); }; StiffnessMatrixFirstOrder::~StiffnessMatrixFirstOrder() { Log::Logger().Info("StiffnessMatrixFirstOrder Deleted by CPU"); hipFree(c); } int StiffnessMatrixFirstOrder::GetStiffnessMatrixSize() { return stiffMatSize; }
4a8431d995297f38d260239cdf015231540a758b.cu
#include "StiffnessMatrixFirstOrder.h" void StiffnessMatrixFirstOrder::constantCreator(unsigned int numberElement, float* c, float* x, float* y, unsigned int* mesh) { unsigned int i = numberElement*6; c[i++] = (x[mesh[numberElement*4+0]] - x[mesh[numberElement*4+1]] + x[mesh[numberElement*4+2]] - x[mesh[numberElement*4+3]])/4; c[i++] = (x[mesh[numberElement*4+0]] - x[mesh[numberElement*4+1]] - x[mesh[numberElement*4+2]] + x[mesh[numberElement*4+3]])/4; c[i++] = (x[mesh[numberElement*4+0]] - x[mesh[numberElement*4+3]] - x[mesh[numberElement*4+2]] + x[mesh[numberElement*4+1]])/4; c[i++] = (y[mesh[numberElement*4+0]] - y[mesh[numberElement*4+1]] + y[mesh[numberElement*4+2]] - y[mesh[numberElement*4+3]])/4; c[i++] = (y[mesh[numberElement*4+0]] - y[mesh[numberElement*4+1]] - y[mesh[numberElement*4+2]] + y[mesh[numberElement*4+3]])/4; c[i++] = (y[mesh[numberElement*4+0]] - y[mesh[numberElement*4+3]] - y[mesh[numberElement*4+2]] + y[mesh[numberElement*4+1]])/4; // defined the constants c1x to c3y }; void StiffnessMatrixFirstOrder::stiffnessMatrixCalculation(unsigned int numberElement, unsigned int nip ,float* in, unsigned int* ip, float* iw, float* c, float* D, unsigned int* mesh, float* k, unsigned int* i_index, unsigned int* j_index) // numberElement -> the element number needed to be calculated // nip is the number of integration point squared. // in is the integrationNode // ip -> integrationPos // iw -> integrationWeight // c -> constants // D -> material matrix // k -> stiffness matrix { unsigned int counter = 36*(numberElement); for (unsigned int noIP = 0; noIP < nip; noIP++) { // noIP -> the integration point number needed to be calculated double XI = in[ip[2*noIP]]; double YI = in[ip[2*noIP+1]]; // Jacobian double J11 = c[numberElement*6+0]*YI-c[numberElement*6+1]; double J12 = c[numberElement*6+3]*YI-c[numberElement*6+4]; double J21 = c[numberElement*6+0]*XI-c[numberElement*6+2]; double J22 = c[numberElement*6+3]*XI-c[numberElement*6+5]; double detJ = J11*J22-J12*J21; double WeightPerDetJ = (iw[ip[2*noIP]]*iw[ip[2*noIP+1]])/detJ; // derveativs of the shape function N1x N2x ... N1y N2y ... double Ni[8] = {J22*( YI-1)/4 - J12*( XI-1)/4, J22*(-YI+1)/4 - J12*(-XI-1)/4, \ J22*( YI+1)/4 - J12*( XI+1)/4, J22*(-YI-1)/4 - J12*(-XI+1)/4, \ J11*( XI-1)/4 - J21*( YI-1)/4, J11*(-XI-1)/4 - J21*(-YI+1)/4, \ J11*( XI+1)/4 - J21*( YI+1)/4, J11*(-XI+1)/4 - J21*(-YI-1)/4}; // multiplication of shape functions N1x^2 N1x*N2x .... 
double N[36]; unsigned int counterN = 0; for (unsigned int i = 0; i < 8; i++) { for (unsigned int j = i; j < 8 ; j++) N[counterN++] = Ni[i]*Ni[j]; }; // find the position to start filling the stiffness matrix // writes all 36 components of the 8 by 8 stiffness Matrix considering symmetry k[counter+0] = k[counter+0] + WeightPerDetJ*(D[0]*N[0] + 2*D[4]*N[4] + D[2]*N[26]); k[counter+1] = k[counter+1] + WeightPerDetJ*(D[4]*N[0] + D[5]*N[26] + D[3]*N[4] + D[2]*N[4]); k[counter+2] = k[counter+2] + WeightPerDetJ*(D[2]*N[0] + 2*D[5]*N[4] + D[1]*N[26]); k[counter+3] = k[counter+3] + WeightPerDetJ*(D[0]*N[1] + D[4]*N[5] + D[4]*N[11] + D[2]*N[27]); k[counter+4] = k[counter+4] + WeightPerDetJ*(D[4]*N[1] + D[3]*N[11] + D[2]*N[5] + D[5]*N[27]); k[counter+5] = k[counter+5] + WeightPerDetJ*(D[0]*N[8] + 2*D[4]*N[12] + D[2]*N[30]); k[counter+6] = k[counter+6] + WeightPerDetJ*(D[4]*N[1] + D[3]*N[5] + D[2]*N[11] + D[5]*N[27]); k[counter+7] = k[counter+7] + WeightPerDetJ*(D[2]*N[1] + D[5]*N[5] + D[5]*N[11] + D[1]*N[27]); k[counter+8] = k[counter+8] + WeightPerDetJ*(D[4]*N[8] + D[5]*N[30] + D[3]*N[12] + D[2]*N[12]); k[counter+9] = k[counter+9] + WeightPerDetJ*(D[2]*N[8] + 2*D[5]*N[12] + D[1]*N[30]); k[counter+10] = k[counter+10] + WeightPerDetJ*(D[0]*N[2] + D[4]*N[6] + D[4]*N[17] + D[2]*N[28]); k[counter+11] = k[counter+11] + WeightPerDetJ*(D[4]*N[2] + D[3]*N[17] + D[2]*N[6] + D[5]*N[28]); k[counter+12] = k[counter+12] + WeightPerDetJ*(D[0]*N[9] + D[4]*N[13] + D[4]*N[18] + D[2]*N[31]); k[counter+13] = k[counter+13] + WeightPerDetJ*(D[4]*N[9] + D[3]*N[18] + D[2]*N[13] + D[5]*N[31]); k[counter+14] = k[counter+14] + WeightPerDetJ*(D[0]*N[15] + 2*D[4]*N[19] + D[2]*N[33]); k[counter+15] = k[counter+15] + WeightPerDetJ*(D[4]*N[2] + D[3]*N[6] + D[2]*N[17] + D[5]*N[28]); k[counter+16] = k[counter+16] + WeightPerDetJ*(D[2]*N[2] + D[5]*N[6] + D[5]*N[17] + D[1]*N[28]); k[counter+17] = k[counter+17] + WeightPerDetJ*(D[4]*N[9] + D[3]*N[13] + D[2]*N[18] + D[5]*N[31]); k[counter+18] = k[counter+18] + WeightPerDetJ*(D[2]*N[9] + D[5]*N[13] + D[5]*N[18] + D[1]*N[31]); k[counter+19] = k[counter+19] + WeightPerDetJ*(D[4]*N[15] + D[5]*N[33] + D[3]*N[19] + D[2]*N[19]); k[counter+20] = k[counter+20] + WeightPerDetJ*(D[2]*N[15] + 2*D[5]*N[19] + D[1]*N[33]); k[counter+21] = k[counter+21] + WeightPerDetJ*(D[0]*N[3] + D[4]*N[7] + D[4]*N[22] + D[2]*N[29]); k[counter+22] = k[counter+22] + WeightPerDetJ*(D[4]*N[3] + D[3]*N[22] + D[2]*N[7] + D[5]*N[29]); k[counter+23] = k[counter+23] + WeightPerDetJ*(D[0]*N[10] + D[4]*N[14] + D[4]*N[23] + D[2]*N[32]); k[counter+24] = k[counter+24] + WeightPerDetJ*(D[4]*N[10] + D[3]*N[23] + D[2]*N[14] + D[5]*N[32]); k[counter+25] = k[counter+25] + WeightPerDetJ*(D[0]*N[16] + D[4]*N[20] + D[4]*N[24] + D[2]*N[34]); k[counter+26] = k[counter+26] + WeightPerDetJ*(D[4]*N[16] + D[3]*N[24] + D[2]*N[20] + D[5]*N[34]); k[counter+27] = k[counter+27] + WeightPerDetJ*(D[0]*N[21] + 2*D[4]*N[25] + D[2]*N[35]); k[counter+28] = k[counter+28] + WeightPerDetJ*(D[4]*N[3] + D[3]*N[7] + D[2]*N[22] + D[5]*N[29]); k[counter+29] = k[counter+29] + WeightPerDetJ*(D[2]*N[3] + D[5]*N[7] + D[5]*N[22] + D[1]*N[29]); k[counter+30] = k[counter+30] + WeightPerDetJ*(D[4]*N[10] + D[3]*N[14] + D[2]*N[23] + D[5]*N[32]); k[counter+31] = k[counter+31] + WeightPerDetJ*(D[2]*N[10] + D[5]*N[14] + D[5]*N[23] + D[1]*N[32]); k[counter+32] = k[counter+32] + WeightPerDetJ*(D[4]*N[16] + D[3]*N[20] + D[2]*N[24] + D[5]*N[34]); k[counter+33] = k[counter+33] + WeightPerDetJ*(D[2]*N[16] + D[5]*N[20] + D[5]*N[24] + D[1]*N[34]); k[counter+34] = k[counter+34] + 
WeightPerDetJ*(D[4]*N[21] + D[5]*N[35] + D[3]*N[25] + D[2]*N[25]); k[counter+35] = k[counter+35] + WeightPerDetJ*(D[2]*N[21] + 2*D[5]*N[25] + D[1]*N[35]); } unsigned int count = counter; unsigned int xi, xj, yi, yj; for (unsigned int i = 0; i<8; i++) for (unsigned int j = 0; j<i+1; j++) { xi = i/2; yi = i%2-1; xj = j/2; yj = j%2-1; i_index[count] = (mesh[numberElement*4+xi]+1)*2+yi; j_index[count++] = (mesh[numberElement*4+xj]+1)*2+yj; } } StiffnessMatrixFirstOrder::StiffnessMatrixFirstOrder(Material& mat, Geometry& geo, unsigned int n) :StiffnessMatrix(mat,geo,n) { Log::Logger().Info("StiffnessMatrixFirstOrder Created by CPU"); sizeStiffMatPerEle = 36; stiffMatSize = numberOfElements*sizeStiffMatPerEle; simulationSize = numberOfElements; stiffMat = new Sparse(stiffMatSize,geometry->get_x_y_size()*2); for (unsigned int i = 0; i<stiffMat->valueSize; i++) stiffMat->value[i] = 0; cudaMallocManaged(&c,numberOfElements*6*sizeof(float)); }; StiffnessMatrixFirstOrder::~StiffnessMatrixFirstOrder() { Log::Logger().Info("StiffnessMatrixFirstOrder Deleted by CPU"); cudaFree(c); } int StiffnessMatrixFirstOrder::GetStiffnessMatrixSize() { return stiffMatSize; }
b8ff806186f0239825a16fd6dbb7eb6822991e14.hip
// !!! This is a file automatically generated by hipify!!!
/* count the number of tuple matching criteria for join */

#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "tuple.h"

/**
   execution time in case jt[x]=lt[0] is faster than in case jt[x]=lt[x]
**/

extern "C" {

__global__
void count(
    int *lt,
    int *jt,
    int left
    )
{
  uint x = blockIdx.x*blockDim.x + threadIdx.x;

  jt[x] = lt[x];
  //jt[x] = lt[0];
}

}
b8ff806186f0239825a16fd6dbb7eb6822991e14.cu
/* count the number of tuple matching criteria for join */

#include <stdio.h>
#include <stdint.h>
#include <cuda.h>
#include <sys/time.h>
#include "tuple.h"

/**
   execution time in case jt[x]=lt[0] is faster than in case jt[x]=lt[x]
**/

extern "C" {

__global__
void count(
    int *lt,
    int *jt,
    int left
    )
{
  uint x = blockIdx.x*blockDim.x + threadIdx.x;

  jt[x] = lt[x];
  //jt[x] = lt[0];
}

}
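The comment in both versions of this file notes that the broadcast store jt[x] = lt[0] runs faster than the gather jt[x] = lt[x]. A hedged sketch of how that difference could be measured with HIP events; the pointer names, the 256-thread block size, and the assumption that `left` is a multiple of the block size are illustrative, not part of the original file:

// Time one launch of count() using HIP events (assumes lt and jt are
// device buffers of `left` ints and left % 256 == 0)
void time_count(int *lt, int *jt, int left)
{
  hipEvent_t start, stop;
  hipEventCreate(&start);
  hipEventCreate(&stop);

  hipEventRecord(start, 0);
  hipLaunchKernelGGL(count, dim3(left / 256), dim3(256), 0, 0, lt, jt, left);
  hipEventRecord(stop, 0);
  hipEventSynchronize(stop);

  float msec = 0.0f;
  hipEventElapsedTime(&msec, start, stop);
  printf("count: %f ms\n", msec);

  hipEventDestroy(start);
  hipEventDestroy(stop);
}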
59bd269911fc710395564b7577154a735d45e522.hip
// !!! This is a file automatically generated by hipify!!!
#include <wb.h>

int main(int argc, char ** argv) {
  int deviceCount;

  wbArg_read(argc, argv);

  hipGetDeviceCount(&deviceCount);

  wbTime_start(GPU, "Getting GPU Data."); //@@ start a timer

  for (int dev = 0; dev < deviceCount; dev++) {
    hipDeviceProp_t deviceProp;

    hipGetDeviceProperties(&deviceProp, dev);

    if (dev == 0) {
      if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
        wbLog(TRACE, "No CUDA GPU has been detected");
        return -1;
      } else if (deviceCount == 1) {
        wbLog(TRACE, "There is 1 device supporting CUDA");
      } else {
        wbLog(TRACE, "There are ", deviceCount, " devices supporting CUDA");
      }
    }

    wbLog(TRACE, "Device ", dev, " name: ", deviceProp.name);
    wbLog(TRACE, " Computational Capabilities: ", deviceProp.major, ".", deviceProp.minor);
    wbLog(TRACE, " Maximum global memory size: ", deviceProp.totalGlobalMem);
    wbLog(TRACE, " Maximum constant memory size: ", deviceProp.totalConstMem);
    wbLog(TRACE, " Maximum shared memory size per block: ", deviceProp.sharedMemPerBlock);
    wbLog(TRACE, " Maximum block dimensions: ", deviceProp.maxThreadsDim[0], " x ", deviceProp.maxThreadsDim[1], " x ", deviceProp.maxThreadsDim[2]);
    wbLog(TRACE, " Maximum grid dimensions: ", deviceProp.maxGridSize[0], " x ", deviceProp.maxGridSize[1], " x ", deviceProp.maxGridSize[2]);
    wbLog(TRACE, " Warp size: ", deviceProp.warpSize);
  }

  wbTime_stop(GPU, "Getting GPU Data.");

  return 0;
}
59bd269911fc710395564b7577154a735d45e522.cu
#include <wb.h>

int main(int argc, char ** argv) {
  int deviceCount;

  wbArg_read(argc, argv);

  cudaGetDeviceCount(&deviceCount);

  wbTime_start(GPU, "Getting GPU Data."); //@@ start a timer

  for (int dev = 0; dev < deviceCount; dev++) {
    cudaDeviceProp deviceProp;

    cudaGetDeviceProperties(&deviceProp, dev);

    if (dev == 0) {
      if (deviceProp.major == 9999 && deviceProp.minor == 9999) {
        wbLog(TRACE, "No CUDA GPU has been detected");
        return -1;
      } else if (deviceCount == 1) {
        wbLog(TRACE, "There is 1 device supporting CUDA");
      } else {
        wbLog(TRACE, "There are ", deviceCount, " devices supporting CUDA");
      }
    }

    wbLog(TRACE, "Device ", dev, " name: ", deviceProp.name);
    wbLog(TRACE, " Computational Capabilities: ", deviceProp.major, ".", deviceProp.minor);
    wbLog(TRACE, " Maximum global memory size: ", deviceProp.totalGlobalMem);
    wbLog(TRACE, " Maximum constant memory size: ", deviceProp.totalConstMem);
    wbLog(TRACE, " Maximum shared memory size per block: ", deviceProp.sharedMemPerBlock);
    wbLog(TRACE, " Maximum block dimensions: ", deviceProp.maxThreadsDim[0], " x ", deviceProp.maxThreadsDim[1], " x ", deviceProp.maxThreadsDim[2]);
    wbLog(TRACE, " Maximum grid dimensions: ", deviceProp.maxGridSize[0], " x ", deviceProp.maxGridSize[1], " x ", deviceProp.maxGridSize[2]);
    wbLog(TRACE, " Warp size: ", deviceProp.warpSize);
  }

  wbTime_stop(GPU, "Getting GPU Data.");

  return 0;
}
ec0e02ad61933d3e9ca50b5a16e617ff68a274da.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>

#define N 1000000

__global__ void vector_add(float *out, float *a, float *b, int n) {
    for (int i = 0; i < n; i++) {
        out[i] = a[i] + b[i];
    }
}

int main() {
    float *a, *b, *out;
    float *d_a, *d_b, *d_out;

    a = (float *) malloc(sizeof(float) * N);
    b = (float *) malloc(sizeof(float) * N);
    out = (float *) malloc(sizeof(float) * N);

    for (int i = 0; i < N; i++) {
        a[i] = 1.0f;
        b[i] = 2.0f;
    }

    hipMalloc((void **) &d_a, sizeof(float) * N);
    hipMalloc((void **) &d_b, sizeof(float) * N);
    hipMalloc((void **) &d_out, sizeof(float) * N);

    hipMemcpy(d_a, a, sizeof(float) * N, hipMemcpyHostToDevice);
    hipMemcpy(d_b, b, sizeof(float) * N, hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( vector_add), dim3(1), dim3(1), 0, 0, d_out, d_a, d_b, N);

    hipMemcpy(out, d_out, sizeof(float) * N, hipMemcpyDeviceToHost);

    // for (int i = 0; i < N; i++) {
    //     printf("%0.1f ", out[i]);
    // }

    hipFree(d_a);
    hipFree(d_b);
    hipFree(d_out);

    free(a);
    free(b);
    free(out);

    return 0;
}
ec0e02ad61933d3e9ca50b5a16e617ff68a274da.cu
#include <stdio.h>
#include <stdlib.h>

#define N 1000000

__global__ void vector_add(float *out, float *a, float *b, int n) {
    for (int i = 0; i < n; i++) {
        out[i] = a[i] + b[i];
    }
}

int main() {
    float *a, *b, *out;
    float *d_a, *d_b, *d_out;

    a = (float *) malloc(sizeof(float) * N);
    b = (float *) malloc(sizeof(float) * N);
    out = (float *) malloc(sizeof(float) * N);

    for (int i = 0; i < N; i++) {
        a[i] = 1.0f;
        b[i] = 2.0f;
    }

    cudaMalloc((void **) &d_a, sizeof(float) * N);
    cudaMalloc((void **) &d_b, sizeof(float) * N);
    cudaMalloc((void **) &d_out, sizeof(float) * N);

    cudaMemcpy(d_a, a, sizeof(float) * N, cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, b, sizeof(float) * N, cudaMemcpyHostToDevice);

    vector_add<<<1, 1>>>(d_out, d_a, d_b, N);

    cudaMemcpy(out, d_out, sizeof(float) * N, cudaMemcpyDeviceToHost);

    // for (int i = 0; i < N; i++) {
    //     printf("%0.1f ", out[i]);
    // }

    cudaFree(d_a);
    cudaFree(d_b);
    cudaFree(d_out);

    free(a);
    free(b);
    free(out);

    return 0;
}
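Both versions above launch vector_add with a single thread (<<<1, 1>>> / dim3(1), dim3(1)), so one thread walks the whole array serially. A hedged sketch of a parallel variant using the same one-thread-per-element indexing as the count kernel earlier in this collection; the kernel name and block size are illustrative, not part of the original file:

// Parallel form: each thread adds one element, guarded against overrun
__global__ void vector_add_parallel(float *out, float *a, float *b, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        out[i] = a[i] + b[i];
    }
}

// Possible launch covering all N elements with 256-thread blocks:
//   int threads = 256;
//   int blocks = (N + threads - 1) / threads;
//   vector_add_parallel<<<blocks, threads>>>(d_out, d_a, d_b, N);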
dd99819433cc98858aaee236acc7fa71eb13ce75.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_complex.h>
#include <complex.h>
#include <stdio.h>
#include <omp.h>

#define type double
#define STR1(X) #X
#define STR(X) STR1(X)
#define STRINGIFY(X,Y) X ## Y
#define CON(X,Y) STRINGIFY(X,Y)
#define KDir kernels

#include "includes/ourmacros.h"

#define FNAME fvimatchg32.h
#include "includes/macro.h"
#undef FNAME

void fvimatchg32CallerWrapper(int ndim, const type * A, type * B, const int size0, const int numblocks, const int numthreads, const int * __restrict__ lda_s, const int* __restrict__ ldb_s, const int* __restrict__ idx_s, type alpha, type beta)
{
    // printf("\n***%d %d %d \n", idx_ss[1],idx_ss[2], numblocks/(idx_ss[1]*idx_ss[2]));
    //dim3 param3(idx_ss[1],idx_ss[2], numblocks/(idx_ss[1]*idx_ss[2]));
    dim3 thread_blocks(numblocks/1, 1, 1);
    switch(ndim)
    {
        EXPANDDIMS(fvimg32_kernel_, thread_blocks, numthreads, 0, ( A, B, size0, lda_s,ldb_s,idx_s, alpha, beta))
        default:
        {
        }
    }
}

extern "C"
void fvimatchg32_transpose_kernel(int ndim, const type *A, type *B, const int *lda, const int *ldb, const int* params, const int * perm, type alpha, type beta)
{
    // int numBlocks = computeNumBlocksCode ;
#ifdef printd
    printf("\nA Dims: %d \t %d \t %d\t %d\t %d\n", lda[0], lda[1], lda[2], lda[3], lda[4]);
    printf("\nParams: %d \t %d \t %d\t %d\t %d\t %d\t %d\n", params[0], params[1], params[2], params[3], params[4], params[5], params[6]);
    printf("\nB Dims: %d \t %d \t %d\t %d\t %d\n", ldb[0], ldb[1], ldb[2], ldb[3], ldb[4]);
    printf("\n Perm: %d \t %d \t %d\t %d\t %d\n", perm[0], perm[1], perm[2], perm[3], perm[4]);
#endif
    int numBlocks = params[6]; //((size[1] + 8 -1)/8) * size[2] * ((size[3] + 8 -1)/8) * size[4] ;

    int *d_lda_s, *d_ldb_s, *d_idx_s;
    int lda_s[20], ldb_s[20], idx_s[20], temp[20];

    lda_s[0] = 1;
    ldb_s[0] = 1;
    int i;
    lda_s[1] = lda_s[0] * lda[0];
    ldb_s[1] = ldb_s[0] * ldb[0];
    for(i = 1; i < ndim; i++)
    {
        idx_s[i] = ldb[i];
        lda_s[i] = lda_s[i-1] * lda[i-1];
        ldb_s[i] = ldb_s[i-1] * ldb[i-1];
    }
    for(i = 1; i < ndim; i++)
    {
#ifdef printd
        printf("%d ", idx_s[i]);
#endif
        temp[i] = lda_s[perm[i]];
    }
#ifdef printd
    printf("\n");
#endif

    SAFECUDAMALLOC(&d_lda_s,ndim*sizeof(int));
    SAFECUDAMALLOC(&d_ldb_s,ndim*sizeof(int));
    SAFECUDAMALLOC(&d_idx_s,ndim*sizeof(int));
    SAFECUDAMEMCPY(d_idx_s, idx_s,ndim*sizeof(int), hipMemcpyHostToDevice);
    SAFECUDAMEMCPY(d_lda_s, temp,ndim*sizeof(int), hipMemcpyHostToDevice);
    SAFECUDAMEMCPY(d_ldb_s, ldb_s,ndim*sizeof(int), hipMemcpyHostToDevice);

#ifdef NOHTIME
#include "includes/nohtimestart.h"
#endif

    fvimatchg32CallerWrapper(ndim, A, B, lda[0], numBlocks, params[2], d_lda_s, d_ldb_s, d_idx_s, alpha, beta);

#ifdef NOHTIME
#include "includes/nohtimestop.h"
#endif

    hipDeviceSynchronize();
    {
        hipError_t err = hipGetLastError();
        if(err != hipSuccess){
            printf("\nKernel ERROR in dCuKernel %s (line: %d)\n", hipGetErrorString(err), __LINE__);
            //exit(-1);
        }
    }

    hipFree(d_lda_s);
    hipFree(d_ldb_s);
    hipFree(d_idx_s);
}
dd99819433cc98858aaee236acc7fa71eb13ce75.cu
#include <cuda_runtime.h>
#include <cuComplex.h>
#include <complex.h>
#include <stdio.h>
#include <omp.h>

#define type double
#define STR1(X) #X
#define STR(X) STR1(X)
#define STRINGIFY(X,Y) X ## Y
#define CON(X,Y) STRINGIFY(X,Y)
#define KDir kernels

#include "includes/ourmacros.h"

#define FNAME fvimatchg32.h
#include "includes/macro.h"
#undef FNAME

void fvimatchg32CallerWrapper(int ndim, const type * A, type * B, const int size0, const int numblocks, const int numthreads, const int * __restrict__ lda_s, const int* __restrict__ ldb_s, const int* __restrict__ idx_s, type alpha, type beta)
{
    // printf("\n***%d %d %d \n", idx_ss[1],idx_ss[2], numblocks/(idx_ss[1]*idx_ss[2]));
    //dim3 param3(idx_ss[1],idx_ss[2], numblocks/(idx_ss[1]*idx_ss[2]));
    dim3 thread_blocks(numblocks/1, 1, 1);
    switch(ndim)
    {
        EXPANDDIMS(fvimg32_kernel_, thread_blocks, numthreads, 0, ( A, B, size0, lda_s,ldb_s,idx_s, alpha, beta))
        default:
        {
        }
    }
}

extern "C"
void fvimatchg32_transpose_kernel(int ndim, const type *A, type *B, const int *lda, const int *ldb, const int* params, const int * perm, type alpha, type beta)
{
    // int numBlocks = computeNumBlocksCode ;
#ifdef printd
    printf("\nA Dims: %d \t %d \t %d\t %d\t %d\n", lda[0], lda[1], lda[2], lda[3], lda[4]);
    printf("\nParams: %d \t %d \t %d\t %d\t %d\t %d\t %d\n", params[0], params[1], params[2], params[3], params[4], params[5], params[6]);
    printf("\nB Dims: %d \t %d \t %d\t %d\t %d\n", ldb[0], ldb[1], ldb[2], ldb[3], ldb[4]);
    printf("\n Perm: %d \t %d \t %d\t %d\t %d\n", perm[0], perm[1], perm[2], perm[3], perm[4]);
#endif
    int numBlocks = params[6]; //((size[1] + 8 -1)/8) * size[2] * ((size[3] + 8 -1)/8) * size[4] ;

    int *d_lda_s, *d_ldb_s, *d_idx_s;
    int lda_s[20], ldb_s[20], idx_s[20], temp[20];

    lda_s[0] = 1;
    ldb_s[0] = 1;
    int i;
    lda_s[1] = lda_s[0] * lda[0];
    ldb_s[1] = ldb_s[0] * ldb[0];
    for(i = 1; i < ndim; i++)
    {
        idx_s[i] = ldb[i];
        lda_s[i] = lda_s[i-1] * lda[i-1];
        ldb_s[i] = ldb_s[i-1] * ldb[i-1];
    }
    for(i = 1; i < ndim; i++)
    {
#ifdef printd
        printf("%d ", idx_s[i]);
#endif
        temp[i] = lda_s[perm[i]];
    }
#ifdef printd
    printf("\n");
#endif

    SAFECUDAMALLOC(&d_lda_s,ndim*sizeof(int));
    SAFECUDAMALLOC(&d_ldb_s,ndim*sizeof(int));
    SAFECUDAMALLOC(&d_idx_s,ndim*sizeof(int));
    SAFECUDAMEMCPY(d_idx_s, idx_s,ndim*sizeof(int), cudaMemcpyHostToDevice);
    SAFECUDAMEMCPY(d_lda_s, temp,ndim*sizeof(int), cudaMemcpyHostToDevice);
    SAFECUDAMEMCPY(d_ldb_s, ldb_s,ndim*sizeof(int), cudaMemcpyHostToDevice);

#ifdef NOHTIME
#include "includes/nohtimestart.h"
#endif

    fvimatchg32CallerWrapper(ndim, A, B, lda[0], numBlocks, params[2], d_lda_s, d_ldb_s, d_idx_s, alpha, beta);

#ifdef NOHTIME
#include "includes/nohtimestop.h"
#endif

    cudaDeviceSynchronize();
    {
        cudaError_t err = cudaGetLastError();
        if(err != cudaSuccess){
            printf("\nKernel ERROR in dCuKernel %s (line: %d)\n", cudaGetErrorString(err), __LINE__);
            //exit(-1);
        }
    }

    cudaFree(d_lda_s);
    cudaFree(d_ldb_s);
    cudaFree(d_idx_s);
}
4173346cb893037e1d9781e7fdfaf446db2fcb73.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>

// Calculate the multiplication of two 32*32 matrices A and B on gpu and store the result in C.
// Each block calculate one element of C.
__global__ void Mul(int* d_A, int* d_B, int* d_C) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    int num_threads = blockDim.x * gridDim.x;
    for (int i = tid; i < 32 * 32; i += num_threads) {
        int row = i / 32;
        int col = i % 32;
        d_C[row * 32 + col] = 0;
        // sum of d_A(row, i) * d_B(i, col)
        for (int j = 0; j < 32; j++) {
            d_C[row * 32 + col] += d_A[row * 32 + j] * d_B[j * 32 + col];
        }
    }
}

int main() {
    int *A = (int*)malloc(32 * 32 * sizeof(int));
    int *B = (int*)malloc(32 * 32 * sizeof(int));
    for (int i = 0; i < 32 * 32; i++) {
        A[i] = 1;
        B[i] = 1;
    }

    // Allocate the memory in GPU to store the content of A,B,C
    int *d_A, *d_B, *d_C;
    hipMalloc((void **)&d_A, 32 * 32 * sizeof(int));
    hipMalloc((void **)&d_B, 32 * 32 * sizeof(int));
    hipMalloc((void **)&d_C, 32 * 32 * sizeof(int));

    // Copy A, B to d_A, d_B
    hipMemcpy(d_A, A, 32 * 32 * sizeof(int), hipMemcpyHostToDevice);
    hipMemcpy(d_B, B, 32 * 32 * sizeof(int), hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( Mul), dim3(4),dim3(32), 0, 0, d_A, d_B, d_C);

    int *C = (int*)malloc(32 * 32 * sizeof(int));
    hipMemcpy(C, d_C, 32 * 32 * sizeof(int), hipMemcpyDeviceToHost);

    // print the result
    for (int i = 0; i < 32; i++) {
        for (int j = 0; j < 32; j++) {
            std::cout << C[i * 32 + j] << " ";
        }
        std::cout << std::endl;
    }

    hipFree(d_A);
    hipFree(d_B);
    hipFree(d_C);
    free(A);
    free(B);
    free(C);
    return 0;
}
4173346cb893037e1d9781e7fdfaf446db2fcb73.cu
#include <iostream>

// Calculate the multiplication of two 32*32 matrices A and B on gpu and store the result in C.
// Each block calculate one element of C.
__global__ void Mul(int* d_A, int* d_B, int* d_C) {
    int tid = blockDim.x * blockIdx.x + threadIdx.x;
    int num_threads = blockDim.x * gridDim.x;
    for (int i = tid; i < 32 * 32; i += num_threads) {
        int row = i / 32;
        int col = i % 32;
        d_C[row * 32 + col] = 0;
        // sum of d_A(row, i) * d_B(i, col)
        for (int j = 0; j < 32; j++) {
            d_C[row * 32 + col] += d_A[row * 32 + j] * d_B[j * 32 + col];
        }
    }
}

int main() {
    int *A = (int*)malloc(32 * 32 * sizeof(int));
    int *B = (int*)malloc(32 * 32 * sizeof(int));
    for (int i = 0; i < 32 * 32; i++) {
        A[i] = 1;
        B[i] = 1;
    }

    // Allocate the memory in GPU to store the content of A,B,C
    int *d_A, *d_B, *d_C;
    cudaMalloc((void **)&d_A, 32 * 32 * sizeof(int));
    cudaMalloc((void **)&d_B, 32 * 32 * sizeof(int));
    cudaMalloc((void **)&d_C, 32 * 32 * sizeof(int));

    // Copy A, B to d_A, d_B
    cudaMemcpy(d_A, A, 32 * 32 * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(d_B, B, 32 * 32 * sizeof(int), cudaMemcpyHostToDevice);

    Mul<<<4,32>>>(d_A, d_B, d_C);

    int *C = (int*)malloc(32 * 32 * sizeof(int));
    cudaMemcpy(C, d_C, 32 * 32 * sizeof(int), cudaMemcpyDeviceToHost);

    // print the result
    for (int i = 0; i < 32; i++) {
        for (int j = 0; j < 32; j++) {
            std::cout << C[i * 32 + j] << " ";
        }
        std::cout << std::endl;
    }

    cudaFree(d_A);
    cudaFree(d_B);
    cudaFree(d_C);
    free(A);
    free(B);
    free(C);
    return 0;
}
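Apart from the hipify banner and the hip_runtime.h include, the only difference between the two files in this pair is how the kernel is launched: hipify rewrites CUDA's triple-chevron syntax into an explicit hipLaunchKernelGGL call whose leading arguments are the kernel, grid dimensions, block dimensions, dynamic shared-memory bytes, and stream. Shown as bare launch statements rather than a complete program:

// CUDA form, from the .cu file in this pair
Mul<<<4,32>>>(d_A, d_B, d_C);

// HIP form emitted by hipify, from the .hip file in this pair
// (kernel, grid dim, block dim, shared memory bytes, stream, kernel arguments...)
hipLaunchKernelGGL(( Mul), dim3(4),dim3(32), 0, 0, d_A, d_B, d_C);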
949c3567405ff9fc9d9b20371ff1e43079389c36.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "cpy.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *a = NULL; hipMalloc(&a, XSIZE*YSIZE); int *b = NULL; hipMalloc(&b, XSIZE*YSIZE); int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( cpy), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,n); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( cpy), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( cpy), dim3(gridBlock),dim3(threadBlock), 0, 0, a,b,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
949c3567405ff9fc9d9b20371ff1e43079389c36.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "cpy.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; int *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); int *b = NULL; cudaMalloc(&b, XSIZE*YSIZE); int n = XSIZE*YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); cpy<<<gridBlock,threadBlock>>>(a,b,n); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { cpy<<<gridBlock,threadBlock>>>(a,b,n); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { cpy<<<gridBlock,threadBlock>>>(a,b,n); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
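The benchmark above times 1000 launches of cpy with std::chrono::steady_clock but does not synchronize after the timed loop, so the measured interval mostly reflects how quickly launches are enqueued rather than how long the kernels run. A minimal sketch of the same measurement done with CUDA events instead; the cpy kernel body below is a stand-in, since cpy.cu is not part of this dump:

#include <cstdio>
#include <cuda_runtime.h>

// Stand-in for the cpy kernel from cpy.cu (not shown); the signature matches the
// call site cpy<<<grid, block>>>(a, b, n) used by the benchmark above.
__global__ void cpy(int *a, int *b, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) b[i] = a[i];
}

int main() {
    const int n = 1 << 20;
    int *a, *b;
    cudaMalloc(&a, n * sizeof(int));
    cudaMalloc(&b, n * sizeof(int));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    dim3 block(256), grid((n + 255) / 256);
    cpy<<<grid, block>>>(a, b, n);   // warmup launch
    cudaDeviceSynchronize();

    cudaEventRecord(start);
    for (int i = 0; i < 1000; i++) cpy<<<grid, block>>>(a, b, n);
    cudaEventRecord(stop);
    cudaEventSynchronize(stop);      // wait until the last kernel has finished

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("total: %.3f ms for 1000 launches\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(a);
    cudaFree(b);
    return 0;
}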
579ee7b0de2929d64271d05dde2e7005ae3fbe05.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/types.hpp> #include <cudf/utilities/traits.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <cudf/column/column_view.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/null_mask.hpp> #include <cudf/detail/valid_if.cuh> #include <cudf/detail/transform.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/nvtx/ranges.hpp> namespace cudf { namespace experimental { namespace detail { std::pair<std::unique_ptr<rmm::device_buffer>, cudf::size_type> bools_to_mask(column_view const& input, rmm::mr::device_memory_resource * mr, hipStream_t stream) { CUDF_EXPECTS(input.type().id() == BOOL8, "Input is not of type bool"); if(input.size() == 0){ return std::make_pair(std::make_unique<rmm::device_buffer>(), 0); } auto input_device_view_ptr = column_device_view::create(input, stream); auto input_device_view = *input_device_view_ptr; auto pred = [] __device__ (experimental::bool8 element) { return element == true_v; }; if(input.nullable()) { // Nulls are considered false auto input_begin = make_null_replacement_iterator<experimental::bool8>(input_device_view, false_v); auto mask = detail::valid_if(input_begin, input_begin + input.size(), pred, stream, mr); return std::make_pair(std::make_unique<rmm::device_buffer>(std::move(mask.first)), mask.second); } else { auto mask = detail::valid_if(input_device_view.begin<experimental::bool8>(), input_device_view.end<experimental::bool8>(), pred, stream, mr); return std::make_pair(std::make_unique<rmm::device_buffer>(std::move(mask.first)), mask.second); } } }// namespace detail std::pair<std::unique_ptr<rmm::device_buffer>, cudf::size_type> bools_to_mask(column_view const& input, rmm::mr::device_memory_resource * mr) { CUDF_FUNC_RANGE(); return detail::bools_to_mask(input, mr); } }// namespace experimental }// namespace cudf
579ee7b0de2929d64271d05dde2e7005ae3fbe05.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/types.hpp> #include <cudf/utilities/traits.hpp> #include <cudf/utilities/type_dispatcher.hpp> #include <cudf/column/column_view.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/null_mask.hpp> #include <cudf/detail/valid_if.cuh> #include <cudf/detail/transform.hpp> #include <cudf/detail/iterator.cuh> #include <cudf/detail/nvtx/ranges.hpp> namespace cudf { namespace experimental { namespace detail { std::pair<std::unique_ptr<rmm::device_buffer>, cudf::size_type> bools_to_mask(column_view const& input, rmm::mr::device_memory_resource * mr, cudaStream_t stream) { CUDF_EXPECTS(input.type().id() == BOOL8, "Input is not of type bool"); if(input.size() == 0){ return std::make_pair(std::make_unique<rmm::device_buffer>(), 0); } auto input_device_view_ptr = column_device_view::create(input, stream); auto input_device_view = *input_device_view_ptr; auto pred = [] __device__ (experimental::bool8 element) { return element == true_v; }; if(input.nullable()) { // Nulls are considered false auto input_begin = make_null_replacement_iterator<experimental::bool8>(input_device_view, false_v); auto mask = detail::valid_if(input_begin, input_begin + input.size(), pred, stream, mr); return std::make_pair(std::make_unique<rmm::device_buffer>(std::move(mask.first)), mask.second); } else { auto mask = detail::valid_if(input_device_view.begin<experimental::bool8>(), input_device_view.end<experimental::bool8>(), pred, stream, mr); return std::make_pair(std::make_unique<rmm::device_buffer>(std::move(mask.first)), mask.second); } } }// namespace detail std::pair<std::unique_ptr<rmm::device_buffer>, cudf::size_type> bools_to_mask(column_view const& input, rmm::mr::device_memory_resource * mr) { CUDF_FUNC_RANGE(); return detail::bools_to_mask(input, mr); } }// namespace experimental }// namespace cudf
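bools_to_mask above builds a validity bitmask by running detail::valid_if with a predicate over the boolean column, after replacing nulls with false. As a rough illustration of the underlying idea only, without cudf's iterators or its exact bit layout, a warp-ballot kernel that packs one predicate bit per element might look like this (a sketch under those assumptions, not cudf's implementation):

#include <cstdint>
#include <cuda_runtime.h>

// Bit i of the output mask is set when element i is non-zero; launch with a block
// size that is a multiple of 32 and enough blocks to cover n elements.
__global__ void bools_to_bitmask(const int8_t *in, uint32_t *mask, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    int active = (i < n) && (in[i] != 0);
    unsigned int word = __ballot_sync(0xffffffffu, active);  // one bit per warp lane
    if ((threadIdx.x & 31) == 0 && i < n)
        mask[i / 32] = word;  // lane 0 writes the packed word for its warp
}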
cdd989dc2606d5fafc4e32bf498831edf765f99a.hip
// !!! This is a file automatically generated by hipify!!! #include <atomic> #include <chrono> #include <CLI/CLI11.hpp> #include <hip/hip_runtime.h> #include <random> #include "common.hpp" using namespace std; using HistType = uint32_t; enum class Mode { CPU, OMP, OMP_NOATOMIC, CUDA, CUDA_NOATOMIC, CUDA_SHARED, }; enum class AtomicTypeCuda { NONE, STANDARD, SHARED, }; void computeHistogramCpu(const vector<uint8_t> &bytes, array<HistType, 256> &histogram) { for (uint8_t i : bytes) { histogram[i]++; } } void computeHistogramOmp(const vector<uint8_t> &bytes, array<HistType, 256> &histogram) { array<atomic<HistType>, 256> atomicHistogram; #pragma omp parallel for for (size_t i = 0; i < 256; ++i) { atomicHistogram[i] = 0u; } size_t len = bytes.size(); #pragma omp parallel for for (size_t i = 0; i < len; ++i) { atomicHistogram[bytes[i]]++; } #pragma omp parallel for for (size_t i = 0; i < 256; ++i) { histogram[i] = atomicHistogram[i]; } } void computeHistogramOmpNoAtomic(const vector<uint8_t> &bytes, array<HistType, 256> &histogram) { size_t len = bytes.size(); #pragma omp parallel for for (size_t i = 0; i < len; ++i) { histogram[bytes[i]]++; } } __global__ void _computeHistogramCudaNoAtomic(const uint8_t *__restrict__ bytes, size_t length, HistType *__restrict__ histogram) { size_t stride = blockDim.x * gridDim.x; for (size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < length; i += stride) { histogram[bytes[i]]++; } } __global__ void _computeHistogramCudaStandardAtomic(const uint8_t *__restrict__ bytes, size_t length, HistType *__restrict__ histogram) { size_t stride = blockDim.x * gridDim.x; for (size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < length; i += stride) { atomicAdd(&(histogram[bytes[i]]), 1u); } } __global__ void _computeHistogramCudaSharedAtomic(const uint8_t *__restrict__ bytes, size_t length, HistType *__restrict__ histogram) { __shared__ HistType temp[256]; temp[threadIdx.x] = 0; __syncthreads(); // Zero this block's temporary array size_t stride = blockDim.x * gridDim.x; for (size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < length; i += stride) { atomicAdd(&(temp[bytes[i]]), 1u); // Make a histogram for a fraction of the bytes } __syncthreads(); // Now add up the histograms atomicAdd(&(histogram[threadIdx.x]), temp[threadIdx.x]); // Lesson: Don't let too many threads touch the same memory addresses at once } float computeHistogramCuda(const vector<uint8_t> &bytes, array<HistType, 256> &histogram, AtomicTypeCuda atomic) { CudaEvent start; CudaEvent finish; hipDeviceProp_t deviceProperties; int device = 0; handle(hipEventRecord(start.event, 0)); handle(hipGetDevice(&device)); handle(hipGetDeviceProperties(&deviceProperties, device)); int blocks = deviceProperties.multiProcessorCount * 2; CudaMemory<uint8_t> cudaBytes(bytes.size()); CudaMemory<HistType> cudaHistogram(256); handle(hipMemcpy(cudaBytes.ptr, bytes.data(), bytes.size(), hipMemcpyHostToDevice)); handle(hipMemset(cudaHistogram.ptr, 0, 256 * sizeof(histogram[0]))); switch (atomic) { case AtomicTypeCuda::NONE: hipLaunchKernelGGL(( _computeHistogramCudaNoAtomic), dim3(blocks), dim3(256), 0, 0, cudaBytes.ptr, bytes.size(), cudaHistogram.ptr); break; case AtomicTypeCuda::STANDARD: hipLaunchKernelGGL(( _computeHistogramCudaStandardAtomic), dim3(blocks), dim3(256), 0, 0, cudaBytes.ptr, bytes.size(), cudaHistogram.ptr); break; case AtomicTypeCuda::SHARED: hipLaunchKernelGGL(( _computeHistogramCudaSharedAtomic), dim3(blocks), dim3(256), 0, 0, cudaBytes.ptr, bytes.size(), cudaHistogram.ptr); break; } 
handle(hipGetLastError()); handle(hipMemcpy(histogram.data(), cudaHistogram.ptr, 256 * sizeof(histogram[0]), hipMemcpyDeviceToHost)); float duration = 0.0; handle(hipEventRecord(finish.event, 0)); handle(hipEventSynchronize(finish.event)); handle(hipEventElapsedTime(&duration, start.event, finish.event)); return duration; } using random_type = minstd_rand; int main(int argc, char *argv[]) { CLI::App app("Example program that computes a histogram"); random_type::result_type seed = 0; size_t size = 4096; Mode mode = Mode::CPU; string modeString = "CPU"; try { app.add_option("--size", size, "Number of bytes to generate", true); app.add_option("--seed", seed, "Random seed", true); app.add_option("--mode", modeString, "Running mode (CPU, OMP, " "OMP_NOATOMIC, CUDA, CUDA_NOATOMIC, " "CUDA_SHARED)", true) ->check([](const string &m) { return (m == "CPU" || m == "OMP" || m == "OMP_NOATOMIC" || m == "CUDA" || m == "CUDA_NOATOMIC" || m == "CUDA_SHARED"); }); app.parse(argc, argv); if (modeString == "CPU") { mode = Mode::CPU; } else if (modeString == "OMP") { mode = Mode::OMP; } else if (modeString == "OMP_NOATOMIC") { mode = Mode::OMP_NOATOMIC; } else if (modeString == "CUDA") { mode = Mode::CUDA; } else if (modeString == "CUDA_NOATOMIC") { mode = Mode::CUDA_NOATOMIC; } else if (modeString == "CUDA_SHARED") { mode = Mode::CUDA_SHARED; } } catch (CLI::Error &e) { return app.exit(e); } array<HistType, 256> histogram; vector<uint8_t> bytes; uniform_int_distribution<int> distribution(0, 255); random_type randomEngine(seed); bytes.resize(size, 0); for (size_t i = 0; i < size; ++i) { bytes[i] = static_cast<uint8_t>(distribution(randomEngine)); } fill(histogram.begin(), histogram.end(), 0u); // Initialization float duration = 0.0f; auto cpuStart = chrono::high_resolution_clock::now(); decltype(cpuStart) cpuFinish; switch (mode) { case Mode::CPU: computeHistogramCpu(bytes, histogram); cpuFinish = chrono::high_resolution_clock::now(); duration = chrono::duration_cast<chrono::microseconds>(cpuFinish - cpuStart) .count() / 1000.0f; break; case Mode::OMP: computeHistogramOmp(bytes, histogram); cpuFinish = chrono::high_resolution_clock::now(); duration = chrono::duration_cast<chrono::microseconds>(cpuFinish - cpuStart) .count() / 1000.0f; break; case Mode::OMP_NOATOMIC: computeHistogramOmpNoAtomic(bytes, histogram); cpuFinish = chrono::high_resolution_clock::now(); duration = chrono::duration_cast<chrono::microseconds>(cpuFinish - cpuStart) .count() / 1000.0f; break; case Mode::CUDA: duration = computeHistogramCuda(bytes, histogram, AtomicTypeCuda::STANDARD); break; case Mode::CUDA_NOATOMIC: duration = computeHistogramCuda(bytes, histogram, AtomicTypeCuda::NONE); break; case Mode::CUDA_SHARED: duration = computeHistogramCuda(bytes, histogram, AtomicTypeCuda::SHARED); break; } size_t sum = accumulate(histogram.begin(), histogram.end(), 0u); for (int i = 0; i < 256; ++i) { cout << i << '\t' << histogram[i] << endl; } cout << duration << "ms" << endl; cout << "Total elements: " << sum << endl; if (sum != size) { cout << "WARNING: RACE CONDITIONS ENCOUNTERED!" << endl; } return 0; }
cdd989dc2606d5fafc4e32bf498831edf765f99a.cu
#include <atomic> #include <chrono> #include <CLI/CLI11.hpp> #include <cuda_runtime.h> #include <random> #include "common.hpp" using namespace std; using HistType = uint32_t; enum class Mode { CPU, OMP, OMP_NOATOMIC, CUDA, CUDA_NOATOMIC, CUDA_SHARED, }; enum class AtomicTypeCuda { NONE, STANDARD, SHARED, }; void computeHistogramCpu(const vector<uint8_t> &bytes, array<HistType, 256> &histogram) { for (uint8_t i : bytes) { histogram[i]++; } } void computeHistogramOmp(const vector<uint8_t> &bytes, array<HistType, 256> &histogram) { array<atomic<HistType>, 256> atomicHistogram; #pragma omp parallel for for (size_t i = 0; i < 256; ++i) { atomicHistogram[i] = 0u; } size_t len = bytes.size(); #pragma omp parallel for for (size_t i = 0; i < len; ++i) { atomicHistogram[bytes[i]]++; } #pragma omp parallel for for (size_t i = 0; i < 256; ++i) { histogram[i] = atomicHistogram[i]; } } void computeHistogramOmpNoAtomic(const vector<uint8_t> &bytes, array<HistType, 256> &histogram) { size_t len = bytes.size(); #pragma omp parallel for for (size_t i = 0; i < len; ++i) { histogram[bytes[i]]++; } } __global__ void _computeHistogramCudaNoAtomic(const uint8_t *__restrict__ bytes, size_t length, HistType *__restrict__ histogram) { size_t stride = blockDim.x * gridDim.x; for (size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < length; i += stride) { histogram[bytes[i]]++; } } __global__ void _computeHistogramCudaStandardAtomic(const uint8_t *__restrict__ bytes, size_t length, HistType *__restrict__ histogram) { size_t stride = blockDim.x * gridDim.x; for (size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < length; i += stride) { atomicAdd(&(histogram[bytes[i]]), 1u); } } __global__ void _computeHistogramCudaSharedAtomic(const uint8_t *__restrict__ bytes, size_t length, HistType *__restrict__ histogram) { __shared__ HistType temp[256]; temp[threadIdx.x] = 0; __syncthreads(); // Zero this block's temporary array size_t stride = blockDim.x * gridDim.x; for (size_t i = threadIdx.x + blockIdx.x * blockDim.x; i < length; i += stride) { atomicAdd(&(temp[bytes[i]]), 1u); // Make a histogram for a fraction of the bytes } __syncthreads(); // Now add up the histograms atomicAdd(&(histogram[threadIdx.x]), temp[threadIdx.x]); // Lesson: Don't let too many threads touch the same memory addresses at once } float computeHistogramCuda(const vector<uint8_t> &bytes, array<HistType, 256> &histogram, AtomicTypeCuda atomic) { CudaEvent start; CudaEvent finish; cudaDeviceProp deviceProperties; int device = 0; handle(cudaEventRecord(start.event, 0)); handle(cudaGetDevice(&device)); handle(cudaGetDeviceProperties(&deviceProperties, device)); int blocks = deviceProperties.multiProcessorCount * 2; CudaMemory<uint8_t> cudaBytes(bytes.size()); CudaMemory<HistType> cudaHistogram(256); handle(cudaMemcpy(cudaBytes.ptr, bytes.data(), bytes.size(), cudaMemcpyHostToDevice)); handle(cudaMemset(cudaHistogram.ptr, 0, 256 * sizeof(histogram[0]))); switch (atomic) { case AtomicTypeCuda::NONE: _computeHistogramCudaNoAtomic<<<blocks, 256>>>(cudaBytes.ptr, bytes.size(), cudaHistogram.ptr); break; case AtomicTypeCuda::STANDARD: _computeHistogramCudaStandardAtomic<<<blocks, 256>>>( cudaBytes.ptr, bytes.size(), cudaHistogram.ptr); break; case AtomicTypeCuda::SHARED: _computeHistogramCudaSharedAtomic<<<blocks, 256>>>( cudaBytes.ptr, bytes.size(), cudaHistogram.ptr); break; } handle(cudaGetLastError()); handle(cudaMemcpy(histogram.data(), cudaHistogram.ptr, 256 * sizeof(histogram[0]), cudaMemcpyDeviceToHost)); float duration = 0.0; 
handle(cudaEventRecord(finish.event, 0)); handle(cudaEventSynchronize(finish.event)); handle(cudaEventElapsedTime(&duration, start.event, finish.event)); return duration; } using random_type = minstd_rand; int main(int argc, char *argv[]) { CLI::App app("Example program that computes a histogram"); random_type::result_type seed = 0; size_t size = 4096; Mode mode = Mode::CPU; string modeString = "CPU"; try { app.add_option("--size", size, "Number of bytes to generate", true); app.add_option("--seed", seed, "Random seed", true); app.add_option("--mode", modeString, "Running mode (CPU, OMP, " "OMP_NOATOMIC, CUDA, CUDA_NOATOMIC, " "CUDA_SHARED)", true) ->check([](const string &m) { return (m == "CPU" || m == "OMP" || m == "OMP_NOATOMIC" || m == "CUDA" || m == "CUDA_NOATOMIC" || m == "CUDA_SHARED"); }); app.parse(argc, argv); if (modeString == "CPU") { mode = Mode::CPU; } else if (modeString == "OMP") { mode = Mode::OMP; } else if (modeString == "OMP_NOATOMIC") { mode = Mode::OMP_NOATOMIC; } else if (modeString == "CUDA") { mode = Mode::CUDA; } else if (modeString == "CUDA_NOATOMIC") { mode = Mode::CUDA_NOATOMIC; } else if (modeString == "CUDA_SHARED") { mode = Mode::CUDA_SHARED; } } catch (CLI::Error &e) { return app.exit(e); } array<HistType, 256> histogram; vector<uint8_t> bytes; uniform_int_distribution<int> distribution(0, 255); random_type randomEngine(seed); bytes.resize(size, 0); for (size_t i = 0; i < size; ++i) { bytes[i] = static_cast<uint8_t>(distribution(randomEngine)); } fill(histogram.begin(), histogram.end(), 0u); // Initialization float duration = 0.0f; auto cpuStart = chrono::high_resolution_clock::now(); decltype(cpuStart) cpuFinish; switch (mode) { case Mode::CPU: computeHistogramCpu(bytes, histogram); cpuFinish = chrono::high_resolution_clock::now(); duration = chrono::duration_cast<chrono::microseconds>(cpuFinish - cpuStart) .count() / 1000.0f; break; case Mode::OMP: computeHistogramOmp(bytes, histogram); cpuFinish = chrono::high_resolution_clock::now(); duration = chrono::duration_cast<chrono::microseconds>(cpuFinish - cpuStart) .count() / 1000.0f; break; case Mode::OMP_NOATOMIC: computeHistogramOmpNoAtomic(bytes, histogram); cpuFinish = chrono::high_resolution_clock::now(); duration = chrono::duration_cast<chrono::microseconds>(cpuFinish - cpuStart) .count() / 1000.0f; break; case Mode::CUDA: duration = computeHistogramCuda(bytes, histogram, AtomicTypeCuda::STANDARD); break; case Mode::CUDA_NOATOMIC: duration = computeHistogramCuda(bytes, histogram, AtomicTypeCuda::NONE); break; case Mode::CUDA_SHARED: duration = computeHistogramCuda(bytes, histogram, AtomicTypeCuda::SHARED); break; } size_t sum = accumulate(histogram.begin(), histogram.end(), 0u); for (int i = 0; i < 256; ++i) { cout << i << '\t' << histogram[i] << endl; } cout << duration << "ms" << endl; cout << "Total elements: " << sum << endl; if (sum != size) { cout << "WARNING: RACE CONDITIONS ENCOUNTERED!" << endl; } return 0; }
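The histogram program relies on handle(), CudaEvent, and CudaMemory<T> from common.hpp, which is not included in this dump. A sketch of helpers with the shapes this file appears to assume; the names exist in the code above, but their bodies here are guesses consistent with the call sites, not the actual header:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Abort on any CUDA runtime error; the real handle() may behave differently.
inline void handle(cudaError_t err) {
    if (err != cudaSuccess) {
        std::fprintf(stderr, "CUDA error: %s\n", cudaGetErrorString(err));
        std::exit(EXIT_FAILURE);
    }
}

// RAII wrapper for a cudaEvent_t, matching the start.event / finish.event usage above.
struct CudaEvent {
    cudaEvent_t event;
    CudaEvent() { handle(cudaEventCreate(&event)); }
    ~CudaEvent() { cudaEventDestroy(event); }
};

// RAII wrapper for device memory, matching the CudaMemory<T>(count) / .ptr usage above.
template <typename T>
struct CudaMemory {
    T *ptr = nullptr;
    explicit CudaMemory(size_t count) {
        handle(cudaMalloc(reinterpret_cast<void **>(&ptr), count * sizeof(T)));
    }
    ~CudaMemory() { cudaFree(ptr); }
};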
036d6ac556b8d715c3400f6f57b444f93fdab8a5.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Matrix multiplication: C = A * B. * Host code. * * This sample implements matrix multiplication as described in Chapter 3 * of the programming guide. * It has been written for clarity of exposition to illustrate various CUDA * programming principles, not with the goal of providing the most * performant generic kernel for matrix multiplication. * * See also: * V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra," * in Proc. 2008 ACM/IEEE Conf. on Superconducting (SC '08), * Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11. */ // System includes #include <stdio.h> #include <assert.h> // CUDA runtime #include <hip/hip_runtime.h> // Helper functions and utilities to work with CUDA #include <helper_functions.h> /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B * wA is A's width and wB is B's width */ template <int BLOCK_SIZE> __global__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[ty][k] * Bs[k][tx]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } void constantInit(float *data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val; } } /** * Run a simple test of matrix multiplication using CUDA */ int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB) { // Allocate host memory for matrices A and B 
unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = sizeof(float) * size_A; float *h_A = (float *)malloc(mem_size_A); unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B = (float *)malloc(mem_size_B); // Initialize host memory const float valB = 0.01f; constantInit(h_A, size_A, 1.0f); constantInit(h_B, size_B, valB); // Allocate device memory float *d_A, *d_B, *d_C; // Allocate host matrix C dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float); float *h_C = (float *) malloc(mem_size_C); unsigned int size_C = dimsC.x * dimsC.y; constantInit(h_C, size_C, 0); if (h_C == NULL) { fprintf(stderr, "Failed to allocate host matrix C!\n"); exit(EXIT_FAILURE); } hipError_t error; error = hipMalloc((void **) &d_A, mem_size_A); if (error != hipSuccess) { printf("hipMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void **) &d_B, mem_size_B); if (error != hipSuccess) { printf("hipMalloc d_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMalloc((void **) &d_C, mem_size_C); if (error != hipSuccess) { printf("hipMalloc d_C returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // copy host memory to device error = hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice); if (error != hipSuccess) { printf("hipMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // Setup execution parameters dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); // Create and start timer printf("Computing result using CUDA Kernel...\n"); // Performs warmup operation using matrixMul CUDA kernel //if (block_size == 16) //{ // hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x); //} //else //{ // hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x); //} printf("done\n"); hipDeviceSynchronize(); // Allocate CUDA events that we'll use for timing hipEvent_t start; error = hipEventCreate(&start); if (error != hipSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } hipEvent_t stop; error = hipEventCreate(&stop); if (error != hipSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = hipEventRecord(start, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Execute the kernel int nIter = 1;//300; for (int j = 0; j < nIter; j++) { if (block_size == 16) { hipLaunchKernelGGL(( matrixMulCUDA<16>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x); } else { hipLaunchKernelGGL(( matrixMulCUDA<32>), dim3(grid), dim3(threads) , 0, 0, d_C, d_A, d_B, dimsA.x, dimsB.x); } } // Record the stop event error = hipEventRecord(stop, NULL); if (error != hipSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = hipEventSynchronize(stop); if (error != 
hipSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = hipEventElapsedTime(&msecTotal, start, stop); if (error != hipSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", hipGetErrorString(error)); exit(EXIT_FAILURE); } // Compute and print the performance float msecPerMatrixMul = msecTotal / nIter; double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf( "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y); // Copy result from device to host error = hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost); if (error != hipSuccess) { printf("hipMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } printf("Checking computed result for correctness: "); bool correct = true; // test relative error by the formula // |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps double eps = 1.e-6 ; // machine zero for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++) { double abs_err = fabs(h_C[i] - (dimsA.x * valB)); double dot_length = dimsA.x; double abs_val = fabs(h_C[i]); double rel_err = abs_err/abs_val/dot_length ; if (rel_err > eps) { printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps); correct = false; } } printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); // Clean up memory free(h_A); free(h_B); free(h_C); hipFree(d_A); hipFree(d_B); hipFree(d_C); printf("\nNote: For peak performance, please refer to the matrixMulCUBLAS example.\n"); // hipDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. 
Calling hipDeviceReset causes all profile data to be // flushed before the application exits hipDeviceReset(); if (correct) { return EXIT_SUCCESS; } else { return EXIT_FAILURE; } } /** * Program main */ int main(int argc, char **argv) { printf("[Matrix Multiply Using CUDA] - Starting...\n"); if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "?")) { printf("Usage -device=n (n >= 0 for deviceID)\n"); printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n"); printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n"); printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n"); exit(EXIT_SUCCESS); } // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line int devID = 0; if (checkCmdLineFlag(argc, (const char **)argv, "device")) { devID = getCmdLineArgumentInt(argc, (const char **)argv, "device"); hipSetDevice(devID); } hipError_t error; hipDeviceProp_t deviceProp; error = hipGetDevice(&devID); if (error != hipSuccess) { printf("hipGetDevice returned error code %d, line(%d)\n", error, __LINE__); } error = hipGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == hipComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != hipSuccess) { printf("hipGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); } else { printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } // Use a larger block size for Fermi and above int block_size = (deviceProp.major < 2) ? 16 : 32; dim3 dimsA(5*2*block_size, 5*2*block_size, 1); dim3 dimsB(5*4*block_size, 5*2*block_size, 1); // width of Matrix A if (checkCmdLineFlag(argc, (const char **)argv, "wA")) { dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA"); } // height of Matrix A if (checkCmdLineFlag(argc, (const char **)argv, "hA")) { dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA"); } // width of Matrix B if (checkCmdLineFlag(argc, (const char **)argv, "wB")) { dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB"); } // height of Matrix B if (checkCmdLineFlag(argc, (const char **)argv, "hB")) { dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB"); } if (dimsA.x != dimsB.y) { printf("Error: outer matrix dimensions must be equal. (%d != %d)\n", dimsA.x, dimsB.y); exit(EXIT_FAILURE); } printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y); int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB); exit(matrix_result); }
036d6ac556b8d715c3400f6f57b444f93fdab8a5.cu
/** * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /** * Matrix multiplication: C = A * B. * Host code. * * This sample implements matrix multiplication as described in Chapter 3 * of the programming guide. * It has been written for clarity of exposition to illustrate various CUDA * programming principles, not with the goal of providing the most * performant generic kernel for matrix multiplication. * * See also: * V. Volkov and J. Demmel, "Benchmarking GPUs to tune dense linear algebra," * in Proc. 2008 ACM/IEEE Conf. on Superconducting (SC '08), * Piscataway, NJ: IEEE Press, 2008, pp. Art. 31:1-11. */ // System includes #include <stdio.h> #include <assert.h> // CUDA runtime #include <cuda_runtime.h> // Helper functions and utilities to work with CUDA #include <helper_functions.h> /** * Matrix multiplication (CUDA Kernel) on the device: C = A * B * wA is A's width and wB is B's width */ template <int BLOCK_SIZE> __global__ void matrixMulCUDA(float *C, float *A, float *B, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix As[ty][tx] = A[a + wA * ty + tx]; Bs[ty][tx] = B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < BLOCK_SIZE; ++k) { Csub += As[ty][k] * Bs[k][tx]; } // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; C[c + wB * ty + tx] = Csub; } void constantInit(float *data, int size, float val) { for (int i = 0; i < size; ++i) { data[i] = val; } } /** * Run a simple test of matrix multiplication using CUDA */ int matrixMultiply(int argc, char **argv, int block_size, dim3 &dimsA, dim3 &dimsB) { // Allocate host memory for matrices A and B unsigned int size_A = dimsA.x * dimsA.y; unsigned int mem_size_A = 
sizeof(float) * size_A; float *h_A = (float *)malloc(mem_size_A); unsigned int size_B = dimsB.x * dimsB.y; unsigned int mem_size_B = sizeof(float) * size_B; float *h_B = (float *)malloc(mem_size_B); // Initialize host memory const float valB = 0.01f; constantInit(h_A, size_A, 1.0f); constantInit(h_B, size_B, valB); // Allocate device memory float *d_A, *d_B, *d_C; // Allocate host matrix C dim3 dimsC(dimsB.x, dimsA.y, 1); unsigned int mem_size_C = dimsC.x * dimsC.y * sizeof(float); float *h_C = (float *) malloc(mem_size_C); unsigned int size_C = dimsC.x * dimsC.y; constantInit(h_C, size_C, 0); if (h_C == NULL) { fprintf(stderr, "Failed to allocate host matrix C!\n"); exit(EXIT_FAILURE); } cudaError_t error; error = cudaMalloc((void **) &d_A, mem_size_A); if (error != cudaSuccess) { printf("cudaMalloc d_A returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &d_B, mem_size_B); if (error != cudaSuccess) { printf("cudaMalloc d_B returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMalloc((void **) &d_C, mem_size_C); if (error != cudaSuccess) { printf("cudaMalloc d_C returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // copy host memory to device error = cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_A,h_A) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } error = cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice); if (error != cudaSuccess) { printf("cudaMemcpy (d_B,h_B) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } // Setup execution parameters dim3 threads(block_size, block_size); dim3 grid(dimsB.x / threads.x, dimsA.y / threads.y); // Create and start timer printf("Computing result using CUDA Kernel...\n"); // Performs warmup operation using matrixMul CUDA kernel //if (block_size == 16) //{ // matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); //} //else //{ // matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); //} printf("done\n"); cudaDeviceSynchronize(); // Allocate CUDA events that we'll use for timing cudaEvent_t start; error = cudaEventCreate(&start); if (error != cudaSuccess) { fprintf(stderr, "Failed to create start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } cudaEvent_t stop; error = cudaEventCreate(&stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to create stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Record the start event error = cudaEventRecord(start, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record start event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Execute the kernel int nIter = 1;//300; for (int j = 0; j < nIter; j++) { if (block_size == 16) { matrixMulCUDA<16><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); } else { matrixMulCUDA<32><<< grid, threads >>>(d_C, d_A, d_B, dimsA.x, dimsB.x); } } // Record the stop event error = cudaEventRecord(stop, NULL); if (error != cudaSuccess) { fprintf(stderr, "Failed to record stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Wait for the stop event to complete error = cudaEventSynchronize(stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to synchronize on the stop event (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } float msecTotal = 0.0f; error = 
cudaEventElapsedTime(&msecTotal, start, stop); if (error != cudaSuccess) { fprintf(stderr, "Failed to get time elapsed between events (error code %s)!\n", cudaGetErrorString(error)); exit(EXIT_FAILURE); } // Compute and print the performance float msecPerMatrixMul = msecTotal / nIter; double flopsPerMatrixMul = 2.0 * (double)dimsA.x * (double)dimsA.y * (double)dimsB.x; double gigaFlops = (flopsPerMatrixMul * 1.0e-9f) / (msecPerMatrixMul / 1000.0f); printf( "Performance= %.2f GFlop/s, Time= %.3f msec, Size= %.0f Ops, WorkgroupSize= %u threads/block\n", gigaFlops, msecPerMatrixMul, flopsPerMatrixMul, threads.x * threads.y); // Copy result from device to host error = cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost); if (error != cudaSuccess) { printf("cudaMemcpy (h_C,d_C) returned error code %d, line(%d)\n", error, __LINE__); exit(EXIT_FAILURE); } printf("Checking computed result for correctness: "); bool correct = true; // test relative error by the formula // |<x, y>_cpu - <x,y>_gpu|/<|x|, |y|> < eps double eps = 1.e-6 ; // machine zero for (int i = 0; i < (int)(dimsC.x * dimsC.y); i++) { double abs_err = fabs(h_C[i] - (dimsA.x * valB)); double dot_length = dimsA.x; double abs_val = fabs(h_C[i]); double rel_err = abs_err/abs_val/dot_length ; if (rel_err > eps) { printf("Error! Matrix[%05d]=%.8f, ref=%.8f error term is > %E\n", i, h_C[i], dimsA.x*valB, eps); correct = false; } } printf("%s\n", correct ? "Result = PASS" : "Result = FAIL"); // Clean up memory free(h_A); free(h_B); free(h_C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); printf("\nNote: For peak performance, please refer to the matrixMulCUBLAS example.\n"); // cudaDeviceReset causes the driver to clean up all state. While // not mandatory in normal operation, it is good practice. It is also // needed to ensure correct operation when the application is being // profiled. 
Calling cudaDeviceReset causes all profile data to be // flushed before the application exits cudaDeviceReset(); if (correct) { return EXIT_SUCCESS; } else { return EXIT_FAILURE; } } /** * Program main */ int main(int argc, char **argv) { printf("[Matrix Multiply Using CUDA] - Starting...\n"); if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "?")) { printf("Usage -device=n (n >= 0 for deviceID)\n"); printf(" -wA=WidthA -hA=HeightA (Width x Height of Matrix A)\n"); printf(" -wB=WidthB -hB=HeightB (Width x Height of Matrix B)\n"); printf(" Note: Outer matrix dimensions of A & B matrices must be equal.\n"); exit(EXIT_SUCCESS); } // By default, we use device 0, otherwise we override the device ID based on what is provided at the command line int devID = 0; if (checkCmdLineFlag(argc, (const char **)argv, "device")) { devID = getCmdLineArgumentInt(argc, (const char **)argv, "device"); cudaSetDevice(devID); } cudaError_t error; cudaDeviceProp deviceProp; error = cudaGetDevice(&devID); if (error != cudaSuccess) { printf("cudaGetDevice returned error code %d, line(%d)\n", error, __LINE__); } error = cudaGetDeviceProperties(&deviceProp, devID); if (deviceProp.computeMode == cudaComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != cudaSuccess) { printf("cudaGetDeviceProperties returned error code %d, line(%d)\n", error, __LINE__); } else { printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", devID, deviceProp.name, deviceProp.major, deviceProp.minor); } // Use a larger block size for Fermi and above int block_size = (deviceProp.major < 2) ? 16 : 32; dim3 dimsA(5*2*block_size, 5*2*block_size, 1); dim3 dimsB(5*4*block_size, 5*2*block_size, 1); // width of Matrix A if (checkCmdLineFlag(argc, (const char **)argv, "wA")) { dimsA.x = getCmdLineArgumentInt(argc, (const char **)argv, "wA"); } // height of Matrix A if (checkCmdLineFlag(argc, (const char **)argv, "hA")) { dimsA.y = getCmdLineArgumentInt(argc, (const char **)argv, "hA"); } // width of Matrix B if (checkCmdLineFlag(argc, (const char **)argv, "wB")) { dimsB.x = getCmdLineArgumentInt(argc, (const char **)argv, "wB"); } // height of Matrix B if (checkCmdLineFlag(argc, (const char **)argv, "hB")) { dimsB.y = getCmdLineArgumentInt(argc, (const char **)argv, "hB"); } if (dimsA.x != dimsB.y) { printf("Error: outer matrix dimensions must be equal. (%d != %d)\n", dimsA.x, dimsB.y); exit(EXIT_FAILURE); } printf("MatrixA(%d,%d), MatrixB(%d,%d)\n", dimsA.x, dimsA.y, dimsB.x, dimsB.y); int matrix_result = matrixMultiply(argc, argv, block_size, dimsA, dimsB); exit(matrix_result); }
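The matrixMul sample checks every CUDA call with the same if (error != cudaSuccess) { printf(...); exit(EXIT_FAILURE); } block. A macro in the same spirit, illustrative only and not part of the sample as written, condenses that pattern:

#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>

// Wrap any runtime call that returns cudaError_t and abort with a message on failure.
#define CHECK_CUDA(call)                                                   \
    do {                                                                   \
        cudaError_t err__ = (call);                                        \
        if (err__ != cudaSuccess) {                                        \
            fprintf(stderr, "%s failed: %s (line %d)\n", #call,            \
                    cudaGetErrorString(err__), __LINE__);                  \
            exit(EXIT_FAILURE);                                            \
        }                                                                  \
    } while (0)

// Usage, condensing the pattern repeated throughout matrixMultiply():
//   CHECK_CUDA(cudaMalloc((void **)&d_A, mem_size_A));
//   CHECK_CUDA(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice));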
74374589947baca210be56005c6f3649d36a6709.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "parquet_gpu.h" #if (__CUDACC_VER_MAJOR__ >= 9) #define SHFL0(v) __shfl_sync(~0, v, 0) #define SHFL(v, t) __shfl_sync(~0, v, t) #define SYNCWARP() __syncwarp() #define BALLOT(v) __ballot_sync(~0, v) #else #define SHFL0(v) __shfl(v, 0) #define SHFL(v, t) __shfl(v, t) #define SYNCWARP() #define BALLOT(v) __ballot(v) #endif namespace parquet { namespace gpu { // Minimal thrift implementation for parsing page headers enum { ST_FLD_TRUE = 1, ST_FLD_FALSE = 2, ST_FLD_BYTE = 3, ST_FLD_I16 = 4, ST_FLD_I32 = 5, ST_FLD_I64 = 6, ST_FLD_DOUBLE = 7, ST_FLD_BINARY = 8, ST_FLD_LIST = 9, ST_FLD_SET = 10, ST_FLD_MAP = 11, ST_FLD_STRUCT = 12, }; static const __device__ __constant__ uint8_t g_list2struct[16] = { 0, 1, 2, ST_FLD_BYTE, ST_FLD_DOUBLE, 5, ST_FLD_I16, 7, ST_FLD_I32, 9, ST_FLD_I64, ST_FLD_BINARY, ST_FLD_STRUCT, ST_FLD_MAP, ST_FLD_SET, ST_FLD_LIST }; struct byte_stream_s { const uint8_t *cur; const uint8_t *end; const uint8_t *base; // Parsed symbols PageType page_type; PageInfo page; ColumnChunkDesc ck; }; inline __device__ unsigned int getb(byte_stream_s *bs) { return (bs->cur < bs->end) ? *bs->cur++ : 0; } inline __device__ void skip_bytes(byte_stream_s *bs, size_t bytecnt) { bytecnt = min(bytecnt, (size_t)(bs->end - bs->cur)); bs->cur += bytecnt; } __device__ uint32_t get_u32(byte_stream_s *bs) { uint32_t v = 0, l = 0, c; do { c = getb(bs); v |= (c & 0x7f) << l; l += 7; } while (c & 0x80); return v; } inline __device__ int32_t get_i32(byte_stream_s *bs) { uint32_t u = get_u32(bs); return (int32_t)((u >> 1u) ^ -(int32_t)(u & 1)); } __device__ void skip_struct_field(byte_stream_s *bs, int t) { int struct_depth = 0; int rep_cnt = 0; do { if (rep_cnt != 0) { rep_cnt--; } else if (struct_depth != 0) { int c; do { c = getb(bs); if (!c) --struct_depth; } while (!c && struct_depth); if (!struct_depth) break; t = c & 0xf; if (!(c & 0xf0)) get_i32(bs); } switch (t) { case ST_FLD_TRUE: case ST_FLD_FALSE: break; case ST_FLD_I16: case ST_FLD_I32: case ST_FLD_I64: get_u32(bs); break; case ST_FLD_DOUBLE: skip_bytes(bs, 8); break; case ST_FLD_BINARY: skip_bytes(bs, get_u32(bs)); break; case ST_FLD_LIST: case ST_FLD_SET: { // NOTE: skipping a list of lists is not handled int c = getb(bs); int n = c >> 4; if (n == 0xf) n = get_u32(bs); t = g_list2struct[c & 0xf]; if (t == ST_FLD_STRUCT) struct_depth += n; else rep_cnt = n; } break; case ST_FLD_STRUCT: struct_depth++; break; } } while (rep_cnt || struct_depth); } #define PARQUET_BEGIN_STRUCT(fn) \ __device__ bool fn(byte_stream_s *bs) \ { \ int fld = 0; \ for (;;) \ { \ int c, t, f; \ c = getb(bs); \ if (!c) \ break; \ f = c >> 4; \ t = c & 0xf; \ fld = (f) ? 
fld+f : get_i32(bs); \ switch(fld) { \ #define PARQUET_FLD_ENUM(id, m, mt) \ case id: bs->m = (mt)get_i32(bs); if (t != ST_FLD_I32) return false; break; \ #define PARQUET_FLD_INT32(id, m) \ case id: bs->m = get_i32(bs); if (t != ST_FLD_I32) return false; break; \ #define PARQUET_FLD_STRUCT(id, m) \ case id: if (t != ST_FLD_STRUCT || !m(bs)) return false; break; \ #define PARQUET_END_STRUCT() \ default: \ skip_struct_field(bs, t); \ break; \ } \ } \ return true; \ } \ PARQUET_BEGIN_STRUCT(gpuParseDataPageHeader) PARQUET_FLD_INT32(1, page.num_values) PARQUET_FLD_ENUM(2, page.encoding, Encoding); PARQUET_FLD_ENUM(3, page.definition_level_encoding, Encoding); PARQUET_FLD_ENUM(4, page.repetition_level_encoding, Encoding); PARQUET_END_STRUCT() PARQUET_BEGIN_STRUCT(gpuParseDictionaryPageHeader) PARQUET_FLD_INT32(1, page.num_values) PARQUET_FLD_ENUM(2, page.encoding, Encoding); PARQUET_END_STRUCT() PARQUET_BEGIN_STRUCT(gpuParseDataPageHeaderV2) PARQUET_FLD_INT32(1, page.num_values) PARQUET_FLD_INT32(3, page.num_rows) PARQUET_FLD_ENUM(4, page.encoding, Encoding); PARQUET_FLD_ENUM(5, page.definition_level_encoding, Encoding); PARQUET_FLD_ENUM(6, page.repetition_level_encoding, Encoding); PARQUET_END_STRUCT() PARQUET_BEGIN_STRUCT(gpuParsePageHeader) PARQUET_FLD_ENUM(1, page_type, PageType) PARQUET_FLD_INT32(2, page.uncompressed_page_size) PARQUET_FLD_INT32(3, page.compressed_page_size) PARQUET_FLD_STRUCT(5, gpuParseDataPageHeader) PARQUET_FLD_STRUCT(7, gpuParseDictionaryPageHeader) PARQUET_FLD_STRUCT(8, gpuParseDataPageHeaderV2) PARQUET_END_STRUCT() /** * @brief Kernel for outputting page headers from the specified column chunks * * @param[in] chunks List of column chunks * @param[in] num_chunks Number of column chunks **/ // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128) gpuDecodePageHeaders(ColumnChunkDesc *chunks, int32_t num_chunks) { __shared__ byte_stream_s bs_g[4]; int t = threadIdx.x & 0x1f; int chunk = (blockIdx.x << 2) + (threadIdx.x >> 5); byte_stream_s * const bs = &bs_g[threadIdx.x >> 5]; if (chunk < num_chunks) { // NOTE: Assumes that sizeof(ColumnChunkDesc) <= 128 if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t)) { ((uint32_t *)&bs->ck)[t] = ((const uint32_t *)&chunks[chunk])[t]; } } __syncthreads(); if (chunk < num_chunks) { size_t num_values, values_found; uint32_t data_page_count = 0; uint32_t dictionary_page_count = 0; int32_t max_num_pages; int32_t num_dict_pages = bs->ck.num_dict_pages; PageInfo *page_info; if (!t) { bs->base = bs->cur = bs->ck.compressed_data; bs->end = bs->base + bs->ck.compressed_size; bs->page.chunk_idx = chunk; bs->page.chunk_row = 0; bs->page.num_rows = 0; } num_values = bs->ck.num_values; page_info = bs->ck.page_info; num_dict_pages = bs->ck.num_dict_pages; max_num_pages = (page_info) ? 
bs->ck.max_num_pages : 0; values_found = 0; SYNCWARP(); while (values_found < num_values && bs->cur < bs->end) { int index_out = -1; if (t == 0) { bs->page.chunk_row += bs->page.num_rows; bs->page.num_rows = 0; if (gpuParsePageHeader(bs) && bs->page.compressed_page_size >= 0) { switch (bs->page_type) { case DATA_PAGE: // TODO: Unless the file only uses V2 page headers or has no complex nesting (num_rows == num_values), we can't infer num_rows at this time // -> we'll need another pass after decompression to parse the definition and repetition levels to infer the correct value of num_rows bs->page.num_rows = bs->page.num_values; // Assumes num_rows == num_values // Fall-through to V2 case DATA_PAGE_V2: index_out = num_dict_pages + data_page_count; data_page_count++; bs->page.flags = 0; values_found += bs->page.num_values; break; case DICTIONARY_PAGE: index_out = dictionary_page_count; dictionary_page_count++; bs->page.flags = PAGEINFO_FLAGS_DICTIONARY; break; default: index_out = -1; break; } bs->page.page_data = const_cast<uint8_t *>(bs->cur); bs->cur += bs->page.compressed_page_size; } else { bs->cur = bs->end; } } index_out = SHFL0(index_out); if (index_out >= 0 && index_out < max_num_pages) { // NOTE: Assumes that sizeof(PageInfo) <= 128 if (t < sizeof(PageInfo) / sizeof(uint32_t)) { ((uint32_t *)(page_info + index_out))[t] = ((const uint32_t *)&bs->page)[t]; } } num_values = SHFL0(num_values); SYNCWARP(); } if (t == 0) { chunks[chunk].num_data_pages = data_page_count; chunks[chunk].num_dict_pages = dictionary_page_count; } } } /** * @brief Kernel for building dictionary index for the specified column chunks * * This function builds an index to point to each dictionary entry * (string format is 4-byte little-endian string length followed by character * data). The index is a 32-bit integer which contains the offset of each string * relative to the beginning of the dictionary page data. 
* * @param[in] chunks List of column chunks * @param[in] num_chunks Number of column chunks **/ // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128) gpuBuildStringDictionaryIndex(ColumnChunkDesc *chunks, int32_t num_chunks) { __shared__ ColumnChunkDesc chunk_g[4]; int t = threadIdx.x & 0x1f; int chunk = (blockIdx.x << 2) + (threadIdx.x >> 5); ColumnChunkDesc * const ck = &chunk_g[threadIdx.x >> 5]; if (chunk < num_chunks) { // NOTE: Assumes that sizeof(ColumnChunkDesc) <= 128 if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t)) { ((uint32_t *)ck)[t] = ((const uint32_t *)&chunks[chunk])[t]; } } __syncthreads(); if (chunk >= num_chunks) { return; } if (!t && ck->num_dict_pages > 0 && ck->str_dict_index) { // Data type to describe a string nvstrdesc_s *dict_index = ck->str_dict_index; const uint8_t *dict = ck->page_info[0].page_data; int dict_size = ck->page_info[0].uncompressed_page_size; int num_entries = ck->page_info[0].num_values; int pos = 0, cur = 0; for (int i = 0; i < num_entries; i++) { int len = 0; if (cur + 4 <= dict_size) { len = dict[cur+0] | (dict[cur+1] << 8) | (dict[cur+2] << 16) | (dict[cur+3] << 24); if (len >= 0 && cur + 4 + len <= dict_size) { pos = cur; cur = cur + 4 + len; } else { cur = dict_size; } } // TODO: Could store 8 entries in shared mem, then do a single warp-wide store dict_index[i].ptr = (const char *)(dict + pos + 4); dict_index[i].count = len; } } } hipError_t __host__ DecodePageHeaders(ColumnChunkDesc *chunks, int32_t num_chunks, hipStream_t stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block hipLaunchKernelGGL(( gpuDecodePageHeaders), dim3(dim_grid), dim3(dim_block), 0, stream, chunks, num_chunks); return hipSuccess; } hipError_t __host__ BuildStringDictionaryIndex(ColumnChunkDesc *chunks, int32_t num_chunks, hipStream_t stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block hipLaunchKernelGGL(( gpuBuildStringDictionaryIndex), dim3(dim_grid), dim3(dim_block), 0, stream, chunks, num_chunks); return hipSuccess; } }; }; // parquet::gpu namespace
74374589947baca210be56005c6f3649d36a6709.cu
/* * Copyright (c) 2018, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "parquet_gpu.h" #if (__CUDACC_VER_MAJOR__ >= 9) #define SHFL0(v) __shfl_sync(~0, v, 0) #define SHFL(v, t) __shfl_sync(~0, v, t) #define SYNCWARP() __syncwarp() #define BALLOT(v) __ballot_sync(~0, v) #else #define SHFL0(v) __shfl(v, 0) #define SHFL(v, t) __shfl(v, t) #define SYNCWARP() #define BALLOT(v) __ballot(v) #endif namespace parquet { namespace gpu { // Minimal thrift implementation for parsing page headers enum { ST_FLD_TRUE = 1, ST_FLD_FALSE = 2, ST_FLD_BYTE = 3, ST_FLD_I16 = 4, ST_FLD_I32 = 5, ST_FLD_I64 = 6, ST_FLD_DOUBLE = 7, ST_FLD_BINARY = 8, ST_FLD_LIST = 9, ST_FLD_SET = 10, ST_FLD_MAP = 11, ST_FLD_STRUCT = 12, }; static const __device__ __constant__ uint8_t g_list2struct[16] = { 0, 1, 2, ST_FLD_BYTE, ST_FLD_DOUBLE, 5, ST_FLD_I16, 7, ST_FLD_I32, 9, ST_FLD_I64, ST_FLD_BINARY, ST_FLD_STRUCT, ST_FLD_MAP, ST_FLD_SET, ST_FLD_LIST }; struct byte_stream_s { const uint8_t *cur; const uint8_t *end; const uint8_t *base; // Parsed symbols PageType page_type; PageInfo page; ColumnChunkDesc ck; }; inline __device__ unsigned int getb(byte_stream_s *bs) { return (bs->cur < bs->end) ? *bs->cur++ : 0; } inline __device__ void skip_bytes(byte_stream_s *bs, size_t bytecnt) { bytecnt = min(bytecnt, (size_t)(bs->end - bs->cur)); bs->cur += bytecnt; } __device__ uint32_t get_u32(byte_stream_s *bs) { uint32_t v = 0, l = 0, c; do { c = getb(bs); v |= (c & 0x7f) << l; l += 7; } while (c & 0x80); return v; } inline __device__ int32_t get_i32(byte_stream_s *bs) { uint32_t u = get_u32(bs); return (int32_t)((u >> 1u) ^ -(int32_t)(u & 1)); } __device__ void skip_struct_field(byte_stream_s *bs, int t) { int struct_depth = 0; int rep_cnt = 0; do { if (rep_cnt != 0) { rep_cnt--; } else if (struct_depth != 0) { int c; do { c = getb(bs); if (!c) --struct_depth; } while (!c && struct_depth); if (!struct_depth) break; t = c & 0xf; if (!(c & 0xf0)) get_i32(bs); } switch (t) { case ST_FLD_TRUE: case ST_FLD_FALSE: break; case ST_FLD_I16: case ST_FLD_I32: case ST_FLD_I64: get_u32(bs); break; case ST_FLD_DOUBLE: skip_bytes(bs, 8); break; case ST_FLD_BINARY: skip_bytes(bs, get_u32(bs)); break; case ST_FLD_LIST: case ST_FLD_SET: { // NOTE: skipping a list of lists is not handled int c = getb(bs); int n = c >> 4; if (n == 0xf) n = get_u32(bs); t = g_list2struct[c & 0xf]; if (t == ST_FLD_STRUCT) struct_depth += n; else rep_cnt = n; } break; case ST_FLD_STRUCT: struct_depth++; break; } } while (rep_cnt || struct_depth); } #define PARQUET_BEGIN_STRUCT(fn) \ __device__ bool fn(byte_stream_s *bs) \ { \ int fld = 0; \ for (;;) \ { \ int c, t, f; \ c = getb(bs); \ if (!c) \ break; \ f = c >> 4; \ t = c & 0xf; \ fld = (f) ? 
fld+f : get_i32(bs); \ switch(fld) { \ #define PARQUET_FLD_ENUM(id, m, mt) \ case id: bs->m = (mt)get_i32(bs); if (t != ST_FLD_I32) return false; break; \ #define PARQUET_FLD_INT32(id, m) \ case id: bs->m = get_i32(bs); if (t != ST_FLD_I32) return false; break; \ #define PARQUET_FLD_STRUCT(id, m) \ case id: if (t != ST_FLD_STRUCT || !m(bs)) return false; break; \ #define PARQUET_END_STRUCT() \ default: \ skip_struct_field(bs, t); \ break; \ } \ } \ return true; \ } \ PARQUET_BEGIN_STRUCT(gpuParseDataPageHeader) PARQUET_FLD_INT32(1, page.num_values) PARQUET_FLD_ENUM(2, page.encoding, Encoding); PARQUET_FLD_ENUM(3, page.definition_level_encoding, Encoding); PARQUET_FLD_ENUM(4, page.repetition_level_encoding, Encoding); PARQUET_END_STRUCT() PARQUET_BEGIN_STRUCT(gpuParseDictionaryPageHeader) PARQUET_FLD_INT32(1, page.num_values) PARQUET_FLD_ENUM(2, page.encoding, Encoding); PARQUET_END_STRUCT() PARQUET_BEGIN_STRUCT(gpuParseDataPageHeaderV2) PARQUET_FLD_INT32(1, page.num_values) PARQUET_FLD_INT32(3, page.num_rows) PARQUET_FLD_ENUM(4, page.encoding, Encoding); PARQUET_FLD_ENUM(5, page.definition_level_encoding, Encoding); PARQUET_FLD_ENUM(6, page.repetition_level_encoding, Encoding); PARQUET_END_STRUCT() PARQUET_BEGIN_STRUCT(gpuParsePageHeader) PARQUET_FLD_ENUM(1, page_type, PageType) PARQUET_FLD_INT32(2, page.uncompressed_page_size) PARQUET_FLD_INT32(3, page.compressed_page_size) PARQUET_FLD_STRUCT(5, gpuParseDataPageHeader) PARQUET_FLD_STRUCT(7, gpuParseDictionaryPageHeader) PARQUET_FLD_STRUCT(8, gpuParseDataPageHeaderV2) PARQUET_END_STRUCT() /** * @brief Kernel for outputting page headers from the specified column chunks * * @param[in] chunks List of column chunks * @param[in] num_chunks Number of column chunks **/ // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128) gpuDecodePageHeaders(ColumnChunkDesc *chunks, int32_t num_chunks) { __shared__ byte_stream_s bs_g[4]; int t = threadIdx.x & 0x1f; int chunk = (blockIdx.x << 2) + (threadIdx.x >> 5); byte_stream_s * const bs = &bs_g[threadIdx.x >> 5]; if (chunk < num_chunks) { // NOTE: Assumes that sizeof(ColumnChunkDesc) <= 128 if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t)) { ((uint32_t *)&bs->ck)[t] = ((const uint32_t *)&chunks[chunk])[t]; } } __syncthreads(); if (chunk < num_chunks) { size_t num_values, values_found; uint32_t data_page_count = 0; uint32_t dictionary_page_count = 0; int32_t max_num_pages; int32_t num_dict_pages = bs->ck.num_dict_pages; PageInfo *page_info; if (!t) { bs->base = bs->cur = bs->ck.compressed_data; bs->end = bs->base + bs->ck.compressed_size; bs->page.chunk_idx = chunk; bs->page.chunk_row = 0; bs->page.num_rows = 0; } num_values = bs->ck.num_values; page_info = bs->ck.page_info; num_dict_pages = bs->ck.num_dict_pages; max_num_pages = (page_info) ? 
bs->ck.max_num_pages : 0; values_found = 0; SYNCWARP(); while (values_found < num_values && bs->cur < bs->end) { int index_out = -1; if (t == 0) { bs->page.chunk_row += bs->page.num_rows; bs->page.num_rows = 0; if (gpuParsePageHeader(bs) && bs->page.compressed_page_size >= 0) { switch (bs->page_type) { case DATA_PAGE: // TODO: Unless the file only uses V2 page headers or has no complex nesting (num_rows == num_values), we can't infer num_rows at this time // -> we'll need another pass after decompression to parse the definition and repetition levels to infer the correct value of num_rows bs->page.num_rows = bs->page.num_values; // Assumes num_rows == num_values // Fall-through to V2 case DATA_PAGE_V2: index_out = num_dict_pages + data_page_count; data_page_count++; bs->page.flags = 0; values_found += bs->page.num_values; break; case DICTIONARY_PAGE: index_out = dictionary_page_count; dictionary_page_count++; bs->page.flags = PAGEINFO_FLAGS_DICTIONARY; break; default: index_out = -1; break; } bs->page.page_data = const_cast<uint8_t *>(bs->cur); bs->cur += bs->page.compressed_page_size; } else { bs->cur = bs->end; } } index_out = SHFL0(index_out); if (index_out >= 0 && index_out < max_num_pages) { // NOTE: Assumes that sizeof(PageInfo) <= 128 if (t < sizeof(PageInfo) / sizeof(uint32_t)) { ((uint32_t *)(page_info + index_out))[t] = ((const uint32_t *)&bs->page)[t]; } } num_values = SHFL0(num_values); SYNCWARP(); } if (t == 0) { chunks[chunk].num_data_pages = data_page_count; chunks[chunk].num_dict_pages = dictionary_page_count; } } } /** * @brief Kernel for building dictionary index for the specified column chunks * * This function builds an index to point to each dictionary entry * (string format is 4-byte little-endian string length followed by character * data). The index is a 32-bit integer which contains the offset of each string * relative to the beginning of the dictionary page data. 
* * @param[in] chunks List of column chunks * @param[in] num_chunks Number of column chunks **/ // blockDim {128,1,1} extern "C" __global__ void __launch_bounds__(128) gpuBuildStringDictionaryIndex(ColumnChunkDesc *chunks, int32_t num_chunks) { __shared__ ColumnChunkDesc chunk_g[4]; int t = threadIdx.x & 0x1f; int chunk = (blockIdx.x << 2) + (threadIdx.x >> 5); ColumnChunkDesc * const ck = &chunk_g[threadIdx.x >> 5]; if (chunk < num_chunks) { // NOTE: Assumes that sizeof(ColumnChunkDesc) <= 128 if (t < sizeof(ColumnChunkDesc) / sizeof(uint32_t)) { ((uint32_t *)ck)[t] = ((const uint32_t *)&chunks[chunk])[t]; } } __syncthreads(); if (chunk >= num_chunks) { return; } if (!t && ck->num_dict_pages > 0 && ck->str_dict_index) { // Data type to describe a string nvstrdesc_s *dict_index = ck->str_dict_index; const uint8_t *dict = ck->page_info[0].page_data; int dict_size = ck->page_info[0].uncompressed_page_size; int num_entries = ck->page_info[0].num_values; int pos = 0, cur = 0; for (int i = 0; i < num_entries; i++) { int len = 0; if (cur + 4 <= dict_size) { len = dict[cur+0] | (dict[cur+1] << 8) | (dict[cur+2] << 16) | (dict[cur+3] << 24); if (len >= 0 && cur + 4 + len <= dict_size) { pos = cur; cur = cur + 4 + len; } else { cur = dict_size; } } // TODO: Could store 8 entries in shared mem, then do a single warp-wide store dict_index[i].ptr = (const char *)(dict + pos + 4); dict_index[i].count = len; } } } cudaError_t __host__ DecodePageHeaders(ColumnChunkDesc *chunks, int32_t num_chunks, cudaStream_t stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block gpuDecodePageHeaders<<<dim_grid, dim_block, 0, stream>>>(chunks, num_chunks); return cudaSuccess; } cudaError_t __host__ BuildStringDictionaryIndex(ColumnChunkDesc *chunks, int32_t num_chunks, cudaStream_t stream) { dim3 dim_block(128, 1); dim3 dim_grid((num_chunks + 3) >> 2, 1); // 1 chunk per warp, 4 warps per block gpuBuildStringDictionaryIndex<<<dim_grid, dim_block, 0, stream>>>(chunks, num_chunks); return cudaSuccess; } }; }; // parquet::gpu namespace
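The page-header parsers in the pair above decode Thrift compact-protocol structs directly on the GPU, and everything rests on two primitives: the ULEB128 varint reader (get_u32) and the zigzag-to-signed conversion (get_i32). The host-side sketch below mirrors that logic so the device parser can be sanity-checked against known byte sequences; it is illustrative only — the buffer contents and the decode_* names are not part of the files above.

#include <cstdint>
#include <cstdio>

// Host-side mirror of the device get_u32 above (illustrative, not from the file).
// ULEB128 varint: 7 payload bits per byte, MSB set means "more bytes follow".
static uint32_t decode_varint_u32(const uint8_t*& cur, const uint8_t* end) {
    uint32_t v = 0, shift = 0, c;
    do {
        c = (cur < end) ? *cur++ : 0;        // out-of-bounds reads yield 0, like getb()
        v |= (c & 0x7f) << shift;
        shift += 7;
    } while (c & 0x80);
    return v;
}

// Host-side mirror of get_i32: zigzag maps the unsigned varint back to a signed int
// (0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, ...).
static int32_t decode_zigzag_i32(const uint8_t*& cur, const uint8_t* end) {
    uint32_t u = decode_varint_u32(cur, end);
    return (int32_t)((u >> 1u) ^ -(int32_t)(u & 1));
}

int main() {
    const uint8_t buf[] = { 0xAC, 0x02 };    // varint encoding of 300; zigzag-decodes to 150
    const uint8_t* p = buf;
    printf("%d\n", decode_zigzag_i32(p, buf + sizeof(buf)));   // prints 150
    return 0;
}

The same zigzag convention is what lets negative field IDs and sizes round-trip through unsigned varints in the PARQUET_FLD_* macros.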
96b3e9151f604d8aab154ec608ae106a7e9ba3f5.hip
// !!! This is a file automatically generated by hipify!!! /*------------------------------------------------------------------------- * * CUDA function for backrpojection using FDK weigts for CBCT * * * CODE by Ander Biguri * Optimized and modified by RB * --------------------------------------------------------------------------- * --------------------------------------------------------------------------- * Copyright (c) 2015, University of Bath and CERN- European Organization for * Nuclear Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* --------------------------------------------------------------------------- * * Contact: [email protected] * Codes : https://github.com/CERN/TIGRE * --------------------------------------------------------------------------- */ #define PI_2 1.57079632679489661923 #include <algorithm> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include "voxel_backprojection.hpp" #include "mex.h" #include <math.h> // https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror #define cudaCheckErrors(msg) \ do { \ hipError_t __err = hipGetLastError(); \ if (__err != hipSuccess) { \ mexPrintf("%s \n",msg);\ mexErrMsgIdAndTxt("CBCT:CUDA:Atb",hipGetErrorString(__err));\ } \ } while (0) #define MAXTREADS 1024 /*GEOMETRY DEFINITION * * Detector plane, behind * |-----------------------------| * | | * | | * | | * | | * | +--------+ | * | / /| | * A Z | / / |*D | * | | +--------+ | | * | | | | | | * | | | *O | + | * *--->y | | | / | * / | | |/ | * V X | +--------+ | * |-----------------------------| * * *S * * * * * **/ texture<float, hipTextureType2DLayered , hipReadModeElementType> tex; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // The optimal values of two constants obtained by RB on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were: // PROJ_PER_KERNEL = 32 or 16 (very similar times) // VOXELS_PER_THREAD = 8 // Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code. // (e.g. 16.2 s vs. ~62 s). const int PROJ_PER_KERNEL = 32; // Number of 2D projections to be analyzed by a single thread. This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck. const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by s single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck. // We have PROJ_PER_KERNEL projections and we need 6 parameters for each projection: // deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec // So we need to keep PROJ_PER_KERNEL*6 values in our deltas array FOR EACH CALL to our main kernel // (they will be updated in the main loop before each kernel call). 
__constant__ Point3D projParamsArrayDev[6*PROJ_PER_KERNEL]; // Dev means it is on device // We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above) Point3D projParamsArrayHost[6*PROJ_PER_KERNEL]; // Host means it is host memory // Now we also need to store sinAlpha and cosAlpha for each projection (two floats per projection) __constant__ float projSinCosArrayDev[3*PROJ_PER_KERNEL]; float projSinCosArrayHost[3*PROJ_PER_KERNEL]; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //______________________________________________________________________________ // // Function: kernelPixelBackprojectionFDK // // Description: Main FDK backprojection kernel //______________________________________________________________________________ __global__ void kernelPixelBackprojectionFDK(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections) { // Old kernel call signature: //hipLaunchKernelGGL(( kernelPixelBackprojectionFDK), dim3(grid),dim3(block), 0, 0, geo,dimage,i,deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec,sinalpha,cosalpha); // We just read in most of the params from the constant memory instead of getting them from the param list. // This is because we now have MANY params, since single kernel processes more than one projection! /* __global__ void kernelPixelBackprojectionFDK(const Geometry geo, * float* image, * const int indAlpha, * const Point3D deltaX , * const Point3D deltaY, * const Point3D deltaZ, * const Point3D xyzOrigin, * const Point3D xyzOffset, * const Point3D uv0Offset, * const float sinalpha, * const float cosalpha){ */ unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y; unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x; // unsigned long startIndZ = blockIdx.z * blockDim.z + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle unsigned long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle //Make sure we dont go out of bounds if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |startIndZ>=geo.nVoxelZ) return; // We'll keep a local auxiliary array of values of a column of voxels that this thread will update float voxelColumn[VOXELS_PER_THREAD]; // First we need to copy the curent 3D volume values from the column to our auxiliary array so that we can then // work on them (update them by computing values from multiple projections) locally - avoiding main memory reads/writes int colIdx; #pragma unroll for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++) { unsigned long indZ = startIndZ + colIdx; // If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't // be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the updated values go back to the main volume) if(indZ>=geo.nVoxelZ) break; // break the loop. 
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX; voxelColumn[colIdx] = image[idx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one) // We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory. } // END copy 3D volume voxels to local array // Now iterate through projections #pragma unroll for(int projNumber=0; projNumber<PROJ_PER_KERNEL; projNumber++) { // Get the current parameters from parameter arrays in constant memory. int indAlpha = currProjSetNumber*PROJ_PER_KERNEL+projNumber; // This is the ABSOLUTE projection number in the projection array // Our currImageVal will be updated by hovewer many projections we had left in the "remainder" - that's OK. if(indAlpha>=totalNoOfProjections) break; Point3D deltaX = projParamsArrayDev[6*projNumber]; // 6*projNumber because we have 6 Point3D values per projection Point3D deltaY = projParamsArrayDev[6*projNumber+1]; Point3D deltaZ = projParamsArrayDev[6*projNumber+2]; Point3D xyzOrigin = projParamsArrayDev[6*projNumber+3]; Point3D xyzOffset = projParamsArrayDev[6*projNumber+4]; Point3D S = projParamsArrayDev[6*projNumber+5]; float sinalpha = projSinCosArrayDev[3*projNumber]; // 2*projNumber because we have 2 float (sin or cos angle) values per projection float cosalpha = projSinCosArrayDev[3*projNumber+1]; float COR = projSinCosArrayDev[3*projNumber+2]; // Now iterate through Z in our voxel column FOR A GIVEN PROJECTION #pragma unroll for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++) { unsigned long indZ = startIndZ + colIdx; // If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't // be trying to copy the out of bounds values anyway (bounds checks will be done in the final loop where the values go to the main volume) if(indZ>=geo.nVoxelZ) break; // break the loop. // "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles. Point3D P; P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x); P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU; P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z); // This is the vector defining the line from the source to the Voxel float vectX,vectY,vectZ; vectX=(P.x -S.x); vectY=(P.y -S.y); vectZ=(P.z -S.z); // Get the coordinates in the detector UV where the mid point of the voxel is projected. float t=(geo.DSO-geo.DSD /*-DOD*/ - S.x)/vectX; float y,z; y=vectY*t+S.y; z=vectZ*t+S.z; float u,v; u=y+geo.nDetecU/2; v=z+geo.nDetecV/2; float weigth; float realx,realy; realx=-geo.sVoxelX/2+geo.dVoxelX/2 +indX*geo.dVoxelX +xyzOffset.x; realy=-geo.sVoxelY/2+geo.dVoxelY/2 +indY*geo.dVoxelY +xyzOffset.y+COR; weigth=(geo.DSO+realy*sinalpha-realx*cosalpha)/geo.DSO; weigth=1/(weigth*weigth); // Get Value in the computed (U,V) and multiply by the corresponding weigth. // indAlpha is the ABSOLUTE number of projection in the projection array (NOT the current number of projection set!) voxelColumn[colIdx]+=tex2DLayered(tex, v , u , indAlpha)*weigth; } // END iterating through column of voxels } // END iterating through multiple projections // And finally copy the updated local voxelColumn array back to our 3D volume (main memory) #pragma unroll for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++) { unsigned long indZ = startIndZ + colIdx; // If we are out of bounds, break the loop. 
The voxelColumn array will be updated partially, but it is OK, because we won't // be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the values go to the main volume) if(indZ>=geo.nVoxelZ) break; // break the loop. unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX; image[idx] = voxelColumn[colIdx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one) // We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory. // According to references (Papenhausen), doing = is better than +=, since += requires main memory read followed by a write. // We did all the reads into the local array at the BEGINNING of this kernel. According to Papenhausen, this type of read-write split is // better for avoiding memory congestion. } // END copy updated voxels from local array to our 3D volume } // END kernelPixelBackprojectionFDK //______________________________________________________________________________ // // Function: voxel_backprojection // // Description: Main host function for FDK backprojection (invokes the kernel) //______________________________________________________________________________ int voxel_backprojection(float const * const projections, Geometry geo, float* result,float const * const alphas, int nalpha) { /* * Allocate texture memory on the device */ // copy data to CUDA memory hipArray *d_projectiondata = 0; const hipExtent extent = make_hipExtent(geo.nDetecV,geo.nDetecU,nalpha); hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>(); hipMalloc3DArray(&d_projectiondata, &channelDesc, extent,hipArrayLayered); cudaCheckErrors("hipMalloc3D error 3D tex"); hipMemcpy3DParms copyParams = { 0 }; copyParams.srcPtr = make_hipPitchedPtr((void*)projections, extent.width*sizeof(float), extent.width, extent.height); copyParams.dstArray = d_projectiondata; copyParams.extent = extent; copyParams.kind = hipMemcpyHostToDevice; hipMemcpy3D(&copyParams); cudaCheckErrors("hipMemcpy3D fail"); // Configure texture options tex.normalized = false; tex.filterMode = hipFilterModeLinear; tex.addressMode[0] = hipAddressModeBorder; tex.addressMode[1] = hipAddressModeBorder; tex.addressMode[2] = hipAddressModeBorder; hipBindTextureToArray(tex, d_projectiondata, channelDesc); cudaCheckErrors("3D texture memory bind fail"); // Allocate result image memory size_t num_bytes = geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ * sizeof(float); float* dimage; hipMalloc((void**)&dimage, num_bytes); hipMemset(dimage,0,num_bytes); cudaCheckErrors("hipMalloc fail"); // If we are going to time bool timekernel=false; hipEvent_t start, stop; float elapsedTime; int divx,divy,divz; // RB: Use the optimal (in their tests) block size from paper by Zinsser and Keck (16 in x and 32 in y). // I tried different sizes and shapes of blocks (tiles), but it does not appear to significantly affect trhoughput, so // let's stick with the values from Zinsser and Keck. divx=16; divy=32; divz=VOXELS_PER_THREAD; // We now only have 32 x 16 threads per block (flat tile, see below), BUT each thread works on a Z column of VOXELS_PER_THREAD voxels, so we effectively need fewer blocks! 
dim3 grid((geo.nVoxelX+divx-1)/divx, (geo.nVoxelY+divy-1)/divy, (geo.nVoxelZ+divz-1)/divz); dim3 block(divx,divy,1); // Note that we have 1 in the Z size, not divz, since each thread works on a vertical set of VOXELS_PER_THREAD voxels (so we only need a "flat" tile of threads, with depth of 1) ////////////////////////////////////////////////////////////////////////////////////// // Main reconstruction loop: go through projections (rotation angles) and backproject ////////////////////////////////////////////////////////////////////////////////////// // Since we'll have multiple projections processed by a SINGLE kernel call, compute how many // kernel calls we'll need altogether. int noOfKernelCalls = (nalpha+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_KERNEL for (unsigned int i=0; i<noOfKernelCalls; i++) { // Now we need to generate and copy all data for PROJ_PER_KERNEL projections to constant memory so that our kernel can use it int j; for(j=0; j<PROJ_PER_KERNEL; j++) { int currProjNumber=i*PROJ_PER_KERNEL+j; if(currProjNumber>=nalpha) break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway. Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, /*offDetec,*/source; float sinalpha,cosalpha; geo.alpha=-alphas[currProjNumber*3];//we got 3 angles now. sinalpha=sin(geo.alpha); cosalpha=cos(geo.alpha); projSinCosArrayHost[3*j]=sinalpha; // 2*j because we have 2 float (sin or cos angle) values per projection projSinCosArrayHost[3*j+1]=cosalpha; projSinCosArrayHost[3*j+2]=geo.COR[currProjNumber]; computeDeltasCube(geo,geo.alpha,currProjNumber,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source); offOrig.x=geo.offOrigX[currProjNumber]; offOrig.y=geo.offOrigY[currProjNumber]; offOrig.z=geo.offOrigZ[currProjNumber]; projParamsArrayHost[6*j]=deltaX; // 6*j because we have 6 Point3D values per projection projParamsArrayHost[6*j+1]=deltaY; projParamsArrayHost[6*j+2]=deltaZ; projParamsArrayHost[6*j+3]=xyzOrigin; projParamsArrayHost[6*j+4]=offOrig; projParamsArrayHost[6*j+5]=source; } // END for (preparing params for kernel call) // Copy the prepared parameter arrays to constant memory to make it available for the kernel hipMemcpyToSymbol(projSinCosArrayDev, projSinCosArrayHost, sizeof(float)*3*PROJ_PER_KERNEL); hipMemcpyToSymbol(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*6*PROJ_PER_KERNEL); if (timekernel){ hipEventCreate(&start); hipEventRecord(start,0); } hipLaunchKernelGGL(( kernelPixelBackprojectionFDK), dim3(grid),dim3(block), 0, 0, geo,dimage,i,nalpha); cudaCheckErrors("Kernel fail"); if (timekernel) { hipEventCreate(&stop); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start,stop); mexPrintf("%f\n" ,elapsedTime); cudaCheckErrors("cuda Timing fail"); } } // END for ////////////////////////////////////////////////////////////////////////////////////// // END Main reconstruction loop: go through projections (rotation angles) and backproject ////////////////////////////////////////////////////////////////////////////////////// hipMemcpy(result, dimage, num_bytes, hipMemcpyDeviceToHost); cudaCheckErrors("hipMemcpy result fail"); hipUnbindTexture(tex); cudaCheckErrors("Unbind fail"); hipFree(dimage); hipFreeArray(d_projectiondata); cudaCheckErrors("hipFree d_imagedata fail"); hipDeviceReset(); // For the Nvidia Visual Profiler return 0; } // END voxel_backprojection 
//______________________________________________________________________________ // // Function: computeDeltasCube // // Description: Computes relative increments for each projection (volume rotation). // Increments get passed to the backprojection kernel. //______________________________________________________________________________ void computeDeltasCube(Geometry geo, float alpha,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D* S) { Point3D P0, Px0,Py0,Pz0, source; // Get coords of Img(0,0,0) P0.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i]; P0.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i]; P0.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i]; // Get coors from next voxel in each direction Px0.x=P0.x+geo.dVoxelX; Py0.x=P0.x; Pz0.x=P0.x; Px0.y=P0.y; Py0.y=P0.y+geo.dVoxelY; Pz0.y=P0.y; Px0.z=P0.z; Py0.z=P0.z; Pz0.z=P0.z+geo.dVoxelZ; // Rotate image (this is equivalent of rotating the source and detector) Point3D P, Px,Py,Pz; // We need other auxiliar variables to be able to perform the rotation, or we would overwrite values! P.x =P0.x *cos(alpha)-P0.y *sin(alpha); P.y =P0.x *sin(alpha)+P0.y *cos(alpha); P.z =P0.z; Px.x=Px0.x*cos(alpha)-Px0.y*sin(alpha); Px.y=Px0.x*sin(alpha)+Px0.y*cos(alpha); Px.z=Px0.z; Py.x=Py0.x*cos(alpha)-Py0.y*sin(alpha); Py.y=Py0.x*sin(alpha)+Py0.y*cos(alpha); Py.z=Py0.z; Pz.x=Pz0.x*cos(alpha)-Pz0.y*sin(alpha); Pz.y=Pz0.x*sin(alpha)+Pz0.y*cos(alpha); Pz.z=Pz0.z; //detector offset P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i]; Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i]; Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i]; Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i]; //Detector Roll pitch Yaw // // // first, we need to offset everything so (0,0,0) is the center of the detector // Only X is required for that P.x=P.x+(geo.DSD-geo.DSO); Px.x=Px.x+(geo.DSD-geo.DSO); Py.x=Py.x+(geo.DSD-geo.DSO); Pz.x=Pz.x+(geo.DSD-geo.DSO); rollPitchYawT(geo,i,&P); rollPitchYawT(geo,i,&Px); rollPitchYawT(geo,i,&Py); rollPitchYawT(geo,i,&Pz); P.x=P.x-(geo.DSD-geo.DSO); Px.x=Px.x-(geo.DSD-geo.DSO); Py.x=Py.x-(geo.DSD-geo.DSO); Pz.x=Pz.x-(geo.DSD-geo.DSO); //Done for P, now source source.x=geo.DSD; //allready offseted for rotation source.y=-geo.offDetecU[i]; source.z=-geo.offDetecV[i]; rollPitchYawT(geo,i,&source); source.x=source.x-(geo.DSD-geo.DSO);// source.y=source.y-auxOff.y; source.z=source.z-auxOff.z; // mexPrintf("%f,%f,%f\n",source.x,source.y,source.z); // Scale coords so detector pixels are 1x1 P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU; Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU; Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU; Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU; source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU; // get deltas of the changes in voxels deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z; deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z; deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z; *xyzorigin=P; *S=source; } // END computeDeltasCube void rollPitchYawT(Geometry geo,int i, Point3D* point){ Point3D auxPoint; auxPoint.x=point->x; auxPoint.y=point->y; auxPoint.z=point->z; point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x +sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.y -sin(geo.dPitch[i])*auxPoint.z; point->y=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.x +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y 
+cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.z; point->z=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.x +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.y +cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z; }
96b3e9151f604d8aab154ec608ae106a7e9ba3f5.cu
/*------------------------------------------------------------------------- * * CUDA function for backrpojection using FDK weigts for CBCT * * * CODE by Ander Biguri * Optimized and modified by RB * --------------------------------------------------------------------------- * --------------------------------------------------------------------------- * Copyright (c) 2015, University of Bath and CERN- European Organization for * Nuclear Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
* --------------------------------------------------------------------------- * * Contact: [email protected] * Codes : https://github.com/CERN/TIGRE * --------------------------------------------------------------------------- */ #define PI_2 1.57079632679489661923 #include <algorithm> #include <cuda_runtime_api.h> #include <cuda.h> #include "voxel_backprojection.hpp" #include "mex.h" #include <math.h> // https://stackoverflow.com/questions/16282136/is-there-a-cuda-equivalent-of-perror #define cudaCheckErrors(msg) \ do { \ cudaError_t __err = cudaGetLastError(); \ if (__err != cudaSuccess) { \ mexPrintf("%s \n",msg);\ mexErrMsgIdAndTxt("CBCT:CUDA:Atb",cudaGetErrorString(__err));\ } \ } while (0) #define MAXTREADS 1024 /*GEOMETRY DEFINITION * * Detector plane, behind * |-----------------------------| * | | * | | * | | * | | * | +--------+ | * | / /| | * A Z | / / |*D | * | | +--------+ | | * | | | | | | * | | | *O | + | * *--->y | | | / | * / | | |/ | * V X | +--------+ | * |-----------------------------| * * *S * * * * * **/ texture<float, cudaTextureType2DLayered , cudaReadModeElementType> tex; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // The optimal values of two constants obtained by RB on NVIDIA Quadro K2200 (4 GB RAM, 640 CUDA cores) for 512^3 volume and 512^3 projections (512 proj, each 512 x 512) were: // PROJ_PER_KERNEL = 32 or 16 (very similar times) // VOXELS_PER_THREAD = 8 // Speedup of the entire FDK backprojection (not only kernel run, also memcpy etc.) was nearly 4x relative to the original (single projection, single voxel per thread) code. // (e.g. 16.2 s vs. ~62 s). const int PROJ_PER_KERNEL = 32; // Number of 2D projections to be analyzed by a single thread. This can be tweaked to see what works best. 32 was the optimal value in the paper by Zinsser and Keck. const int VOXELS_PER_THREAD = 8; // Number of voxels to be computed by s single thread. Can be tweaked to see what works best. 4 was the optimal value in the paper by Zinsser and Keck. // We have PROJ_PER_KERNEL projections and we need 6 parameters for each projection: // deltaX, deltaY, deltaZ, xyzOrigin, offOrig, offDetec // So we need to keep PROJ_PER_KERNEL*6 values in our deltas array FOR EACH CALL to our main kernel // (they will be updated in the main loop before each kernel call). 
__constant__ Point3D projParamsArrayDev[6*PROJ_PER_KERNEL]; // Dev means it is on device // We also need a corresponding array on the host side to be filled before each kernel call, then copied to the device (array in constant memory above) Point3D projParamsArrayHost[6*PROJ_PER_KERNEL]; // Host means it is host memory // Now we also need to store sinAlpha and cosAlpha for each projection (two floats per projection) __constant__ float projSinCosArrayDev[3*PROJ_PER_KERNEL]; float projSinCosArrayHost[3*PROJ_PER_KERNEL]; //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // END RB, 10/31/2016: Add constant memory arrays to store parameters for all projections to be analyzed during a single kernel call //////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// //______________________________________________________________________________ // // Function: kernelPixelBackprojectionFDK // // Description: Main FDK backprojection kernel //______________________________________________________________________________ __global__ void kernelPixelBackprojectionFDK(const Geometry geo, float* image,const int currProjSetNumber, const int totalNoOfProjections) { // Old kernel call signature: // kernelPixelBackprojectionFDK<<<grid,block>>>(geo,dimage,i,deltaX,deltaY,deltaZ,xyzOrigin,offOrig,offDetec,sinalpha,cosalpha); // We just read in most of the params from the constant memory instead of getting them from the param list. // This is because we now have MANY params, since single kernel processes more than one projection! /* __global__ void kernelPixelBackprojectionFDK(const Geometry geo, * float* image, * const int indAlpha, * const Point3D deltaX , * const Point3D deltaY, * const Point3D deltaZ, * const Point3D xyzOrigin, * const Point3D xyzOffset, * const Point3D uv0Offset, * const float sinalpha, * const float cosalpha){ */ unsigned long indY = blockIdx.y * blockDim.y + threadIdx.y; unsigned long indX = blockIdx.x * blockDim.x + threadIdx.x; // unsigned long startIndZ = blockIdx.z * blockDim.z + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle unsigned long startIndZ = blockIdx.z * VOXELS_PER_THREAD + threadIdx.z; // This is only STARTING z index of the column of voxels that the thread will handle //Make sure we dont go out of bounds if (indX>=geo.nVoxelX | indY>=geo.nVoxelY |startIndZ>=geo.nVoxelZ) return; // We'll keep a local auxiliary array of values of a column of voxels that this thread will update float voxelColumn[VOXELS_PER_THREAD]; // First we need to copy the curent 3D volume values from the column to our auxiliary array so that we can then // work on them (update them by computing values from multiple projections) locally - avoiding main memory reads/writes int colIdx; #pragma unroll for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++) { unsigned long indZ = startIndZ + colIdx; // If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't // be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the updated values go back to the main volume) if(indZ>=geo.nVoxelZ) break; // break the loop. 
unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX; voxelColumn[colIdx] = image[idx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one) // We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory. } // END copy 3D volume voxels to local array // Now iterate through projections #pragma unroll for(int projNumber=0; projNumber<PROJ_PER_KERNEL; projNumber++) { // Get the current parameters from parameter arrays in constant memory. int indAlpha = currProjSetNumber*PROJ_PER_KERNEL+projNumber; // This is the ABSOLUTE projection number in the projection array // Our currImageVal will be updated by hovewer many projections we had left in the "remainder" - that's OK. if(indAlpha>=totalNoOfProjections) break; Point3D deltaX = projParamsArrayDev[6*projNumber]; // 6*projNumber because we have 6 Point3D values per projection Point3D deltaY = projParamsArrayDev[6*projNumber+1]; Point3D deltaZ = projParamsArrayDev[6*projNumber+2]; Point3D xyzOrigin = projParamsArrayDev[6*projNumber+3]; Point3D xyzOffset = projParamsArrayDev[6*projNumber+4]; Point3D S = projParamsArrayDev[6*projNumber+5]; float sinalpha = projSinCosArrayDev[3*projNumber]; // 2*projNumber because we have 2 float (sin or cos angle) values per projection float cosalpha = projSinCosArrayDev[3*projNumber+1]; float COR = projSinCosArrayDev[3*projNumber+2]; // Now iterate through Z in our voxel column FOR A GIVEN PROJECTION #pragma unroll for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++) { unsigned long indZ = startIndZ + colIdx; // If we are out of bounds, break the loop. The voxelColumn array will be updated partially, but it is OK, because we won't // be trying to copy the out of bounds values anyway (bounds checks will be done in the final loop where the values go to the main volume) if(indZ>=geo.nVoxelZ) break; // break the loop. // "XYZ" in the scaled coordinate system of the current point. The image is rotated with the projection angles. Point3D P; P.x=(xyzOrigin.x+indX*deltaX.x+indY*deltaY.x+indZ*deltaZ.x); P.y=(xyzOrigin.y+indX*deltaX.y+indY*deltaY.y+indZ*deltaZ.y)-COR/geo.dDetecU; P.z=(xyzOrigin.z+indX*deltaX.z+indY*deltaY.z+indZ*deltaZ.z); // This is the vector defining the line from the source to the Voxel float vectX,vectY,vectZ; vectX=(P.x -S.x); vectY=(P.y -S.y); vectZ=(P.z -S.z); // Get the coordinates in the detector UV where the mid point of the voxel is projected. float t=(geo.DSO-geo.DSD /*-DOD*/ - S.x)/vectX; float y,z; y=vectY*t+S.y; z=vectZ*t+S.z; float u,v; u=y+geo.nDetecU/2; v=z+geo.nDetecV/2; float weigth; float realx,realy; realx=-geo.sVoxelX/2+geo.dVoxelX/2 +indX*geo.dVoxelX +xyzOffset.x; realy=-geo.sVoxelY/2+geo.dVoxelY/2 +indY*geo.dVoxelY +xyzOffset.y+COR; weigth=(geo.DSO+realy*sinalpha-realx*cosalpha)/geo.DSO; weigth=1/(weigth*weigth); // Get Value in the computed (U,V) and multiply by the corresponding weigth. // indAlpha is the ABSOLUTE number of projection in the projection array (NOT the current number of projection set!) voxelColumn[colIdx]+=tex2DLayered(tex, v , u , indAlpha)*weigth; } // END iterating through column of voxels } // END iterating through multiple projections // And finally copy the updated local voxelColumn array back to our 3D volume (main memory) #pragma unroll for(colIdx=0; colIdx<VOXELS_PER_THREAD; colIdx++) { unsigned long indZ = startIndZ + colIdx; // If we are out of bounds, break the loop. 
The voxelColumn array will be updated partially, but it is OK, because we won't // be trying to copy the out of bounds values back to the 3D volume anyway (bounds checks will be done in the final loop where the values go to the main volume) if(indZ>=geo.nVoxelZ) break; // break the loop. unsigned long long idx =indZ*geo.nVoxelX*geo.nVoxelY+indY*geo.nVoxelX + indX; image[idx] = voxelColumn[colIdx]; // Read the current volume value that we'll update by computing values from MULTIPLE projections (not just one) // We'll be updating the local (register) variable, avoiding reads/writes from the slow main memory. // According to references (Papenhausen), doing = is better than +=, since += requires main memory read followed by a write. // We did all the reads into the local array at the BEGINNING of this kernel. According to Papenhausen, this type of read-write split is // better for avoiding memory congestion. } // END copy updated voxels from local array to our 3D volume } // END kernelPixelBackprojectionFDK //______________________________________________________________________________ // // Function: voxel_backprojection // // Description: Main host function for FDK backprojection (invokes the kernel) //______________________________________________________________________________ int voxel_backprojection(float const * const projections, Geometry geo, float* result,float const * const alphas, int nalpha) { /* * Allocate texture memory on the device */ // copy data to CUDA memory cudaArray *d_projectiondata = 0; const cudaExtent extent = make_cudaExtent(geo.nDetecV,geo.nDetecU,nalpha); cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>(); cudaMalloc3DArray(&d_projectiondata, &channelDesc, extent,cudaArrayLayered); cudaCheckErrors("cudaMalloc3D error 3D tex"); cudaMemcpy3DParms copyParams = { 0 }; copyParams.srcPtr = make_cudaPitchedPtr((void*)projections, extent.width*sizeof(float), extent.width, extent.height); copyParams.dstArray = d_projectiondata; copyParams.extent = extent; copyParams.kind = cudaMemcpyHostToDevice; cudaMemcpy3D(&copyParams); cudaCheckErrors("cudaMemcpy3D fail"); // Configure texture options tex.normalized = false; tex.filterMode = cudaFilterModeLinear; tex.addressMode[0] = cudaAddressModeBorder; tex.addressMode[1] = cudaAddressModeBorder; tex.addressMode[2] = cudaAddressModeBorder; cudaBindTextureToArray(tex, d_projectiondata, channelDesc); cudaCheckErrors("3D texture memory bind fail"); // Allocate result image memory size_t num_bytes = geo.nVoxelX*geo.nVoxelY*geo.nVoxelZ * sizeof(float); float* dimage; cudaMalloc((void**)&dimage, num_bytes); cudaMemset(dimage,0,num_bytes); cudaCheckErrors("cudaMalloc fail"); // If we are going to time bool timekernel=false; cudaEvent_t start, stop; float elapsedTime; int divx,divy,divz; // RB: Use the optimal (in their tests) block size from paper by Zinsser and Keck (16 in x and 32 in y). // I tried different sizes and shapes of blocks (tiles), but it does not appear to significantly affect trhoughput, so // let's stick with the values from Zinsser and Keck. divx=16; divy=32; divz=VOXELS_PER_THREAD; // We now only have 32 x 16 threads per block (flat tile, see below), BUT each thread works on a Z column of VOXELS_PER_THREAD voxels, so we effectively need fewer blocks! 
dim3 grid((geo.nVoxelX+divx-1)/divx, (geo.nVoxelY+divy-1)/divy, (geo.nVoxelZ+divz-1)/divz); dim3 block(divx,divy,1); // Note that we have 1 in the Z size, not divz, since each thread works on a vertical set of VOXELS_PER_THREAD voxels (so we only need a "flat" tile of threads, with depth of 1) ////////////////////////////////////////////////////////////////////////////////////// // Main reconstruction loop: go through projections (rotation angles) and backproject ////////////////////////////////////////////////////////////////////////////////////// // Since we'll have multiple projections processed by a SINGLE kernel call, compute how many // kernel calls we'll need altogether. int noOfKernelCalls = (nalpha+PROJ_PER_KERNEL-1)/PROJ_PER_KERNEL; // We'll take care of bounds checking inside the loop if nalpha is not divisible by PROJ_PER_KERNEL for (unsigned int i=0; i<noOfKernelCalls; i++) { // Now we need to generate and copy all data for PROJ_PER_KERNEL projections to constant memory so that our kernel can use it int j; for(j=0; j<PROJ_PER_KERNEL; j++) { int currProjNumber=i*PROJ_PER_KERNEL+j; if(currProjNumber>=nalpha) break; // Exit the loop. Even when we leave the param arrays only partially filled, this is OK, since the kernel will check bounds anyway. Point3D deltaX,deltaY,deltaZ,xyzOrigin, offOrig, /*offDetec,*/source; float sinalpha,cosalpha; geo.alpha=-alphas[currProjNumber*3];//we got 3 angles now. sinalpha=sin(geo.alpha); cosalpha=cos(geo.alpha); projSinCosArrayHost[3*j]=sinalpha; // 2*j because we have 2 float (sin or cos angle) values per projection projSinCosArrayHost[3*j+1]=cosalpha; projSinCosArrayHost[3*j+2]=geo.COR[currProjNumber]; computeDeltasCube(geo,geo.alpha,currProjNumber,&xyzOrigin,&deltaX,&deltaY,&deltaZ,&source); offOrig.x=geo.offOrigX[currProjNumber]; offOrig.y=geo.offOrigY[currProjNumber]; offOrig.z=geo.offOrigZ[currProjNumber]; projParamsArrayHost[6*j]=deltaX; // 6*j because we have 6 Point3D values per projection projParamsArrayHost[6*j+1]=deltaY; projParamsArrayHost[6*j+2]=deltaZ; projParamsArrayHost[6*j+3]=xyzOrigin; projParamsArrayHost[6*j+4]=offOrig; projParamsArrayHost[6*j+5]=source; } // END for (preparing params for kernel call) // Copy the prepared parameter arrays to constant memory to make it available for the kernel cudaMemcpyToSymbol(projSinCosArrayDev, projSinCosArrayHost, sizeof(float)*3*PROJ_PER_KERNEL); cudaMemcpyToSymbol(projParamsArrayDev, projParamsArrayHost, sizeof(Point3D)*6*PROJ_PER_KERNEL); if (timekernel){ cudaEventCreate(&start); cudaEventRecord(start,0); } kernelPixelBackprojectionFDK<<<grid,block>>>(geo,dimage,i,nalpha); cudaCheckErrors("Kernel fail"); if (timekernel) { cudaEventCreate(&stop); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start,stop); mexPrintf("%f\n" ,elapsedTime); cudaCheckErrors("cuda Timing fail"); } } // END for ////////////////////////////////////////////////////////////////////////////////////// // END Main reconstruction loop: go through projections (rotation angles) and backproject ////////////////////////////////////////////////////////////////////////////////////// cudaMemcpy(result, dimage, num_bytes, cudaMemcpyDeviceToHost); cudaCheckErrors("cudaMemcpy result fail"); cudaUnbindTexture(tex); cudaCheckErrors("Unbind fail"); cudaFree(dimage); cudaFreeArray(d_projectiondata); cudaCheckErrors("cudaFree d_imagedata fail"); cudaDeviceReset(); // For the Nvidia Visual Profiler return 0; } // END voxel_backprojection 
//______________________________________________________________________________ // // Function: computeDeltasCube // // Description: Computes relative increments for each projection (volume rotation). // Increments get passed to the backprojection kernel. //______________________________________________________________________________ void computeDeltasCube(Geometry geo, float alpha,int i, Point3D* xyzorigin, Point3D* deltaX, Point3D* deltaY, Point3D* deltaZ,Point3D* S) { Point3D P0, Px0,Py0,Pz0, source; // Get coords of Img(0,0,0) P0.x=-(geo.sVoxelX/2-geo.dVoxelX/2)+geo.offOrigX[i]; P0.y=-(geo.sVoxelY/2-geo.dVoxelY/2)+geo.offOrigY[i]; P0.z=-(geo.sVoxelZ/2-geo.dVoxelZ/2)+geo.offOrigZ[i]; // Get coors from next voxel in each direction Px0.x=P0.x+geo.dVoxelX; Py0.x=P0.x; Pz0.x=P0.x; Px0.y=P0.y; Py0.y=P0.y+geo.dVoxelY; Pz0.y=P0.y; Px0.z=P0.z; Py0.z=P0.z; Pz0.z=P0.z+geo.dVoxelZ; // Rotate image (this is equivalent of rotating the source and detector) Point3D P, Px,Py,Pz; // We need other auxiliar variables to be able to perform the rotation, or we would overwrite values! P.x =P0.x *cos(alpha)-P0.y *sin(alpha); P.y =P0.x *sin(alpha)+P0.y *cos(alpha); P.z =P0.z; Px.x=Px0.x*cos(alpha)-Px0.y*sin(alpha); Px.y=Px0.x*sin(alpha)+Px0.y*cos(alpha); Px.z=Px0.z; Py.x=Py0.x*cos(alpha)-Py0.y*sin(alpha); Py.y=Py0.x*sin(alpha)+Py0.y*cos(alpha); Py.z=Py0.z; Pz.x=Pz0.x*cos(alpha)-Pz0.y*sin(alpha); Pz.y=Pz0.x*sin(alpha)+Pz0.y*cos(alpha); Pz.z=Pz0.z; //detector offset P.z =P.z-geo.offDetecV[i]; P.y =P.y-geo.offDetecU[i]; Px.z =Px.z-geo.offDetecV[i]; Px.y =Px.y-geo.offDetecU[i]; Py.z =Py.z-geo.offDetecV[i]; Py.y =Py.y-geo.offDetecU[i]; Pz.z =Pz.z-geo.offDetecV[i]; Pz.y =Pz.y-geo.offDetecU[i]; //Detector Roll pitch Yaw // // // first, we need to offset everything so (0,0,0) is the center of the detector // Only X is required for that P.x=P.x+(geo.DSD-geo.DSO); Px.x=Px.x+(geo.DSD-geo.DSO); Py.x=Py.x+(geo.DSD-geo.DSO); Pz.x=Pz.x+(geo.DSD-geo.DSO); rollPitchYawT(geo,i,&P); rollPitchYawT(geo,i,&Px); rollPitchYawT(geo,i,&Py); rollPitchYawT(geo,i,&Pz); P.x=P.x-(geo.DSD-geo.DSO); Px.x=Px.x-(geo.DSD-geo.DSO); Py.x=Py.x-(geo.DSD-geo.DSO); Pz.x=Pz.x-(geo.DSD-geo.DSO); //Done for P, now source source.x=geo.DSD; //allready offseted for rotation source.y=-geo.offDetecU[i]; source.z=-geo.offDetecV[i]; rollPitchYawT(geo,i,&source); source.x=source.x-(geo.DSD-geo.DSO);// source.y=source.y-auxOff.y; source.z=source.z-auxOff.z; // mexPrintf("%f,%f,%f\n",source.x,source.y,source.z); // Scale coords so detector pixels are 1x1 P.z =P.z /geo.dDetecV; P.y =P.y/geo.dDetecU; Px.z=Px.z/geo.dDetecV; Px.y=Px.y/geo.dDetecU; Py.z=Py.z/geo.dDetecV; Py.y=Py.y/geo.dDetecU; Pz.z=Pz.z/geo.dDetecV; Pz.y=Pz.y/geo.dDetecU; source.z=source.z/geo.dDetecV; source.y=source.y/geo.dDetecU; // get deltas of the changes in voxels deltaX->x=Px.x-P.x; deltaX->y=Px.y-P.y; deltaX->z=Px.z-P.z; deltaY->x=Py.x-P.x; deltaY->y=Py.y-P.y; deltaY->z=Py.z-P.z; deltaZ->x=Pz.x-P.x; deltaZ->y=Pz.y-P.y; deltaZ->z=Pz.z-P.z; *xyzorigin=P; *S=source; } // END computeDeltasCube void rollPitchYawT(Geometry geo,int i, Point3D* point){ Point3D auxPoint; auxPoint.x=point->x; auxPoint.y=point->y; auxPoint.z=point->z; point->x=cos(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.x +sin(geo.dRoll[i])*cos(geo.dPitch[i])*auxPoint.y -sin(geo.dPitch[i])*auxPoint.z; point->y=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) - sin(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.x +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*sin(geo.dYaw[i]) + cos(geo.dRoll[i])*cos(geo.dYaw[i]))*auxPoint.y 
+cos(geo.dPitch[i])*sin(geo.dYaw[i])*auxPoint.z; point->z=(cos(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) + sin(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.x +(sin(geo.dRoll[i])*sin(geo.dPitch[i])*cos(geo.dYaw[i]) - cos(geo.dRoll[i])*sin(geo.dYaw[i]))*auxPoint.y +cos(geo.dPitch[i])*cos(geo.dYaw[i])*auxPoint.z; }
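As the comments in both versions above explain, the backprojection kernel uses a flat 16x32 thread tile while each thread walks a Z-column of VOXELS_PER_THREAD voxels, so the Z grid dimension shrinks by that factor. The stand-alone sketch below works through that launch-geometry arithmetic; the 512^3 volume size is only an assumption borrowed from the timing note in the comments, not a value taken from the code.

#include <cstdio>
#include <cuda_runtime.h>

// Illustrative grid-size calculation for the flat-tile / voxel-column scheme above:
// each block is divx x divy x 1 threads, and every thread handles VOXELS_PER_THREAD voxels in Z.
int main() {
    const int nVoxelX = 512, nVoxelY = 512, nVoxelZ = 512;   // assumed volume size
    const int divx = 16, divy = 32, VOXELS_PER_THREAD = 8;   // values used by the host code above

    dim3 block(divx, divy, 1);   // flat tile: Z depth of 1, the column is walked inside the thread
    dim3 grid((nVoxelX + divx - 1) / divx,
              (nVoxelY + divy - 1) / divy,
              (nVoxelZ + VOXELS_PER_THREAD - 1) / VOXELS_PER_THREAD);

    printf("grid = (%u, %u, %u), threads/block = %u, voxels/block = %u\n",
           grid.x, grid.y, grid.z,
           block.x * block.y,
           block.x * block.y * VOXELS_PER_THREAD);
    return 0;
}

With these numbers each block covers 16 x 32 x 8 = 4096 voxels using only 512 threads, which is the flat-tile trade-off the Zinsser/Keck comment refers to.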
52944cd99f4410b8ed2dac1d2ea6e29b1acc3a66.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" extern "C" __global__ void hconv_fprop_K64_N64( float* param_Sum, unsigned short* param_O, const unsigned short* param_I, const unsigned short* param_F, float param_alpha, float param_beta, int param_flags, int param_offset_K, int param_N, int param_K, int param_D, int param_H, int param_W, int param_WN, int param_HWN, int param_DHWN, int param_C, int param_CRST, int param_RST, int param_RS, int param_magic_RS, int param_shift_RS, int param_S, int param_magic_S, int param_shift_S, int param_pad_d, int param_pad_h, int param_pad_w, int param_str_d, int param_str_h, int param_str_w, int param_Q, int param_PQ, int param_QN, int param_PQN, int param_MPQN, int param_magic_Q, int param_shift_Q, int param_magic_PQ, int param_shift_PQ ) { __shared__ float share[ 64*8*2 + 64*8*2 + 8]; *param_Sum = share[0]; }
52944cd99f4410b8ed2dac1d2ea6e29b1acc3a66.cu
extern "C" __global__ void hconv_fprop_K64_N64( float* param_Sum, unsigned short* param_O, const unsigned short* param_I, const unsigned short* param_F, float param_alpha, float param_beta, int param_flags, int param_offset_K, int param_N, int param_K, int param_D, int param_H, int param_W, int param_WN, int param_HWN, int param_DHWN, int param_C, int param_CRST, int param_RST, int param_RS, int param_magic_RS, int param_shift_RS, int param_S, int param_magic_S, int param_shift_S, int param_pad_d, int param_pad_h, int param_pad_w, int param_str_d, int param_str_h, int param_str_w, int param_Q, int param_PQ, int param_QN, int param_PQN, int param_MPQN, int param_magic_Q, int param_shift_Q, int param_magic_PQ, int param_shift_PQ ) { __shared__ float share[ 64*8*2 + 64*8*2 + 8]; *param_Sum = share[0]; }
7bcaaa995391c7e096eedd3558239665a4253709.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * spmm_csr_driver.cu * Copyright (C) 2020 * Aravind SUKUMARAN RAJAM (asr) <[email protected]> * * Distributed under terms of the GNU LGPL3 license. */ #include "mm_helper.hpp" #include "sparse_representation.hpp" #include <iostream> void check_dmat(double* a, double *b, unsigned int n, unsigned int K, bool quit_on_err = true ) { for (unsigned int i = 0; i < n; ++i) { for (unsigned int k = 0; k < K; ++k) { if(std::abs(a[i * K + k] - b[i * K + k]) > 1e-1) { std::cerr << "Possible error at " << i << std::endl; if(quit_on_err) { exit(-1); } } } } if(quit_on_err) std::cout << "Verification succeeded\n"; else std::cout << "Check error messages to see if verification succeeded. (No error msg == success)\n"; } static unsigned int g_seed = 0X4B1D; inline int fastrand() { g_seed = (214013 * g_seed + 2531011); return (g_seed >> 16) & 0x7FFF; } void init_dmat(double *a, unsigned int n, unsigned int K, double offset) { for (unsigned int i = 0; i < n; ++i) { for (unsigned int k = 0; k < K; ++k) { a[i * K + k] = i * K + k + offset; //a[i * K + j] = fastrand() + offset; } } } void print_dmat(double *a, unsigned int n, unsigned int K) { for (unsigned int i = 0; i < n; ++i) { for (unsigned int j = 0; j < K; ++j) { std::cout << a[i * K + j] << ' '; } std::cout << '\n'; } } void print_CSR(CSR &mat) { for (unsigned int r = 0; r < mat.nrows; ++r) { unsigned int row_start = mat.row_indx[r]; unsigned int row_end = mat.row_indx[r + 1]; for (unsigned int j = row_start; j < row_end; ++j) { unsigned int col_id = mat.col_id[j]; double val = mat.values[j]; std::cout << r << ' ' << col_id << ' ' << val << '\n'; } } } void host_csr_spmm(CSR &mat, double * dmat_in, double * dmat_out, unsigned int K) { for (unsigned int r = 0; r < mat.nrows; ++r) { unsigned int row_start = mat.row_indx[r]; unsigned int row_end = mat.row_indx[r + 1]; for (unsigned int k = 0; k < K; ++k) { dmat_out[r * K + k] = 0; } for (unsigned int j = row_start; j < row_end; ++j) { unsigned int col_id = mat.col_id[j]; double val = mat.values[j]; for (unsigned int k = 0; k < K; ++k) { dmat_out[r * K + k] += val * dmat_in[col_id * K + k]; } } } } __global__ void dev_csr_spmm(CSR mat, double* mat_in, double* mat_out, unsigned int K) { int row = blockIdx.x * blockDim.x + threadIdx.x; if(row < mat.nrows) { unsigned int row_start = mat.row_indx[row]; unsigned int row_end = mat.row_indx[row + 1]; //printf("%d\n", row_end - row_start); for(unsigned int k = 0; k < K; k++) { mat_out[row * K + k] = 0; } // printf("%d %d %d\n", row, row_start, row_end); for(unsigned int j = row_start; j < row_end; j++) { //printf("are we here! %d\n", j); unsigned int col_id = mat.col_id[j]; double value = mat.values[j]; for (unsigned int k = 0; k < K; ++k) { mat_out[row * K + k] += value * mat_in[col_id * K + k]; } } } } int main(int argc, char *argv[]) { if(argc < 4) { std::cerr << "usage ./exec M N S" << std::endl; exit(-1); } unsigned int S = std::atoi(argv[3]); unsigned int N = std::atoi(argv[2]); unsigned int M = std::atoi(argv[1]); std::string s; int nnz = 0; for(int i = 1; i <= M; i ++) { for(int j = 1; j <= N; j++) { if(fastrand() % S == 0) { nnz+=1; s += std::to_string(i) + " " + std::to_string(j) + " " + std::to_string(fastrand() % 3) + "\n"; } } } std::string header = "\%testfile\n" + std::to_string(M) + " " + std::to_string(N) + " " + std::to_string(nnz) + "\n"; s = header + s; std::cout << s; return 0; }
7bcaaa995391c7e096eedd3558239665a4253709.cu
/* * spmm_csr_driver.cu * Copyright (C) 2020 * Aravind SUKUMARAN RAJAM (asr) <[email protected]> * * Distributed under terms of the GNU LGPL3 license. */ #include "mm_helper.hpp" #include "sparse_representation.hpp" #include <iostream> void check_dmat(double* a, double *b, unsigned int n, unsigned int K, bool quit_on_err = true ) { for (unsigned int i = 0; i < n; ++i) { for (unsigned int k = 0; k < K; ++k) { if(std::abs(a[i * K + k] - b[i * K + k]) > 1e-1) { std::cerr << "Possible error at " << i << std::endl; if(quit_on_err) { exit(-1); } } } } if(quit_on_err) std::cout << "Verification succeeded\n"; else std::cout << "Check error messages to see if verification succeeded. (No error msg == success)\n"; } static unsigned int g_seed = 0X4B1D; inline int fastrand() { g_seed = (214013 * g_seed + 2531011); return (g_seed >> 16) & 0x7FFF; } void init_dmat(double *a, unsigned int n, unsigned int K, double offset) { for (unsigned int i = 0; i < n; ++i) { for (unsigned int k = 0; k < K; ++k) { a[i * K + k] = i * K + k + offset; //a[i * K + j] = fastrand() + offset; } } } void print_dmat(double *a, unsigned int n, unsigned int K) { for (unsigned int i = 0; i < n; ++i) { for (unsigned int j = 0; j < K; ++j) { std::cout << a[i * K + j] << ' '; } std::cout << '\n'; } } void print_CSR(CSR &mat) { for (unsigned int r = 0; r < mat.nrows; ++r) { unsigned int row_start = mat.row_indx[r]; unsigned int row_end = mat.row_indx[r + 1]; for (unsigned int j = row_start; j < row_end; ++j) { unsigned int col_id = mat.col_id[j]; double val = mat.values[j]; std::cout << r << ' ' << col_id << ' ' << val << '\n'; } } } void host_csr_spmm(CSR &mat, double * dmat_in, double * dmat_out, unsigned int K) { for (unsigned int r = 0; r < mat.nrows; ++r) { unsigned int row_start = mat.row_indx[r]; unsigned int row_end = mat.row_indx[r + 1]; for (unsigned int k = 0; k < K; ++k) { dmat_out[r * K + k] = 0; } for (unsigned int j = row_start; j < row_end; ++j) { unsigned int col_id = mat.col_id[j]; double val = mat.values[j]; for (unsigned int k = 0; k < K; ++k) { dmat_out[r * K + k] += val * dmat_in[col_id * K + k]; } } } } __global__ void dev_csr_spmm(CSR mat, double* mat_in, double* mat_out, unsigned int K) { int row = blockIdx.x * blockDim.x + threadIdx.x; if(row < mat.nrows) { unsigned int row_start = mat.row_indx[row]; unsigned int row_end = mat.row_indx[row + 1]; //printf("%d\n", row_end - row_start); for(unsigned int k = 0; k < K; k++) { mat_out[row * K + k] = 0; } // printf("%d %d %d\n", row, row_start, row_end); for(unsigned int j = row_start; j < row_end; j++) { //printf("are we here! %d\n", j); unsigned int col_id = mat.col_id[j]; double value = mat.values[j]; for (unsigned int k = 0; k < K; ++k) { mat_out[row * K + k] += value * mat_in[col_id * K + k]; } } } } int main(int argc, char *argv[]) { if(argc < 4) { std::cerr << "usage ./exec M N S" << std::endl; exit(-1); } unsigned int S = std::atoi(argv[3]); unsigned int N = std::atoi(argv[2]); unsigned int M = std::atoi(argv[1]); std::string s; int nnz = 0; for(int i = 1; i <= M; i ++) { for(int j = 1; j <= N; j++) { if(fastrand() % S == 0) { nnz+=1; s += std::to_string(i) + " " + std::to_string(j) + " " + std::to_string(fastrand() % 3) + "\n"; } } } std::string header = "\%testfile\n" + std::to_string(M) + " " + std::to_string(N) + " " + std::to_string(nnz) + "\n"; s = header + s; std::cout << s; return 0; }
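Both copies of the driver above define dev_csr_spmm but never launch it; main() only writes a random matrix-market-style listing to stdout. As a hedged sketch of how the kernel is presumably meant to be driven, one thread per CSR row matches the row = blockIdx.x * blockDim.x + threadIdx.x indexing inside the kernel. launch_csr_spmm, d_mat, d_in, d_out and the 256-thread block size below are illustrative names and choices, not taken from the source.

#include <cuda_runtime.h>
#include "sparse_representation.hpp"   // CSR type used by the driver

// Hypothetical helper, not present in the original file. d_mat must hold device
// pointers in row_indx / col_id / values; d_in is the dense input (ncols x K),
// d_out the dense output (nrows x K), both in device memory.
void launch_csr_spmm(CSR d_mat, double* d_in, double* d_out, unsigned int K)
{
    const unsigned int threads = 256;
    const unsigned int blocks  = (d_mat.nrows + threads - 1) / threads;  // one thread per row
    dev_csr_spmm<<<blocks, threads>>>(d_mat, d_in, d_out, K);            // CUDA spelling
    // hipify would rewrite the launch as:
    // hipLaunchKernelGGL(dev_csr_spmm, dim3(blocks), dim3(threads), 0, 0, d_mat, d_in, d_out, K);
    cudaDeviceSynchronize();  // surface launch/runtime errors before d_out is read back
}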
44a1c73fb08e4b70981c627f8c8c361426a907d9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <set> #include <vector> #include <assert.h> #include <rocblas.h> #include <cutil_inline.h> #include <stdlib.h> #include <stdio.h> #include <fstream> #include <iostream> #include <algorithm> #include <typeinfo> #include <nvmatrix.cuh> #include <nvmatrix_operators.cuh> #include <map> using namespace std; /* * Device random number generator pointers. */ //map<int,hiprandGenerator_t> NVMatrix::rndGen; map<int,hiprandState_t*> NVMatrix::rndDevStates; pthread_mutex_t* NVMatrix::_rndMutex = makeMutex(); pthread_mutex_t* NVMatrix::makeMutex() { pthread_mutex_t* m = (pthread_mutex_t*) malloc(sizeof(pthread_mutex_t)); pthread_mutex_init(m, NULL); return m; } NVMatrix::NVMatrix(const CudaNdarray * view, int numRows, int numCols, const char * msg) { if (!CudaNdarray_is_c_contiguous(view)) { printf("Non contiguous input: %s\n", msg); printf("Dims: "); for (int i=0; i < view->nd; i++) printf("%d ",CudaNdarray_HOST_STRIDES(view)[i]); printf("\n"); assert(false); } //printf("%d %d\n",numRows, numCols); //Check that view actually contains numRows * numCols elements const int * dims = CudaNdarray_HOST_DIMS(view); int total = 1; for (int i = 0; i < view->nd; i++) { total *= dims[i]; } if (total != numRows * numCols) { fprintf(stderr, "NVMatrix asked to make a view of a CudaNdarray with %d elements",total); fprintf(stderr, " but told to arrange these in a %d x %d rectangle (of total size %d).\n", numRows, numCols, numRows * numCols); fprintf(stderr, "CudaNdarray dims: "); for (int i = 0; i < view->nd; i++) fprintf(stderr, "%d ", dims[i]); fprintf(stderr, "\n"); assert(false); } //Make the view _numRows = numRows; _numCols = numCols; _numElements = numRows * numCols; _ownsData = false; _isTrans = false; _devData = view->devdata; _stride = getLeadingDim(); } void NVMatrix::_init(int numRows, int numCols, int stride, bool isTrans) { _numRows = numRows; _numCols = numCols; _numElements = numRows * numCols; _ownsData = true; _isTrans = isTrans; _devData = NULL; if (_numElements > 0) { hipblasAlloc(_numElements, sizeof(float), (void**) &_devData); checkCublasError("!!!! 
device memory allocation error\n"); } _stride = stride < 0 ? getLeadingDim() : stride; } NVMatrix::NVMatrix() { _init(0, 0, -1, false); } NVMatrix::NVMatrix(bool isTrans) { _init(0, 0, -1, isTrans); } NVMatrix::NVMatrix(int numRows, int numCols, bool isTrans) { _init(numRows, numCols, -1, isTrans); } /* NVMatrix::NVMatrix(const Matrix& like, bool copy) { _init(like.getNumRows(), like.getNumCols(), -1, like.isTrans()); if (copy) { copyFromHost(like); } } */ NVMatrix::NVMatrix(const NVMatrix& like, bool copy) { _init(like.getNumRows(), like.getNumCols(), -1, like.isTrans()); if (copy) { like.copy(*this); } } /* * Initializes NVMatrix with same dimensions as given matrix but * does not copy any data. */ NVMatrix::NVMatrix(const NVMatrix& like) { _init(like.getNumRows(), like.getNumCols(), -1, like.isTrans()); } /* * Initializes NVMatrix with same dimensions as given matrix but * does not copy any data. NVMatrix::NVMatrix(const Matrix& like) { _init(like.getNumRows(), like.getNumCols(), -1, false); } */ NVMatrix::NVMatrix(float* devData, int numRows, int numCols, int stride, bool isTrans) : _numRows(numRows), _numCols(numCols), _numElements(numRows*numCols), _ownsData(false), _devData(devData), _isTrans(isTrans) { _stride = stride < 0 ? getLeadingDim() : stride; } NVMatrix::~NVMatrix() { if(_ownsData && _numElements > 0) { // This line was modified by Ian Goodfellow to use device_free // so that theano may keep track of device memory usage int status = device_free(_devData); if (status != 0) { fprintf(stderr, "!!!! memory free error\n"); exit(EXIT_FAILURE); } } } /* void NVMatrix::copyFromHost(const Matrix& hostMatrix, bool resizeDeviceMatrix) { if (resizeDeviceMatrix) { resize(hostMatrix); } copyFromHost(hostMatrix); } void NVMatrix::copyFromHost(const Matrix& hostMatrix) { // assert(getStride() == getLeadingDim()); assert(isSameDims(hostMatrix)); setTrans(hostMatrix.isTrans()); if (getNumElements() > 0) { cublasStatus status = hipblasSetMatrix(hostMatrix.getLeadingDim(), hostMatrix.getFollowingDim(), sizeof(float), hostMatrix.getData(), hostMatrix.getLeadingDim(), _devData, _stride); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! device access error (write)\n"); exit( EXIT_FAILURE); } } } void NVMatrix::copyToHost(Matrix& hostMatrix) const { // assert(getStride() == getLeadingDim()); assert(isSameDims(hostMatrix)); hostMatrix.setTrans(_isTrans); if (getNumElements() > 0) { // printf("rows: %d, cols: %d, stride: %d\n", getNumRows(), getNumCols(), getStride()); cublasStatus status = hipblasGetMatrix(getLeadingDim(),getFollowingDim(), sizeof(float), _devData, getStride(), hostMatrix.getData(), hostMatrix.getLeadingDim()); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! 
device access error (read)\n"); exit( EXIT_FAILURE); } } } void NVMatrix::copyToHost(Matrix& hostMatrix, bool resizeTarget) const { if (resizeTarget) { hostMatrix.resize(_numRows, _numCols); } copyToHost(hostMatrix); } */ void NVMatrix::copy(NVMatrix& dest) const { dest.resize(*this); copy(dest, 0, -1, 0, -1, 0, 0); } NVMatrix& NVMatrix::copy() const { NVMatrix* c = new NVMatrix(); copy(*c); return *c; } void NVMatrix::rightMult(const NVMatrix &b, float scaleAB, NVMatrix &target) const { assert(isContiguous() && b.isContiguous() && target.isContiguous()); // assert(&target != &b); assert(_numCols == b.getNumRows()); if(&target != this) { target.resize(_numRows, b.getNumCols()); target.setTrans(true); } assert(target.getNumRows() == _numRows); assert(target.getNumCols() == b.getNumCols()); if(_numRows % 64 != 0 || _numCols % 64 != 0 || b.getNumCols() % 64 != 0) { WARN("Matrix dimensions not divisible by 64 -- hipblasSgemm performance may suffer."); } hipblasSgemm(getTransChar(), b.getTransChar(), _numRows, b.getNumCols(), _numCols, scaleAB, _devData, getLeadingDim(), b.getDevData(), b.getLeadingDim(), 0, target.getDevData(), getNumRows()); checkCublasError("hipblasSgemm failed"); // hipDeviceSynchronize(); } void NVMatrix::rightMult(const NVMatrix &b, float scaleAB) { rightMult(b, scaleAB, *this); } void NVMatrix::rightMult(const NVMatrix &b, NVMatrix& target) const { rightMult(b, 1, target); } /* * This will only work if this matrix is in column-major order! In other words, * if isTrans() returns true. */ void NVMatrix::addProduct(const NVMatrix& a, const NVMatrix &b, float scaleThis, float scaleAB) { if (scaleThis == 0) { a.rightMult(b, scaleAB, *this); return; } assert(isContiguous()); assert(a.getNumCols() == b.getNumRows()); assert(this->getNumRows() == a.getNumRows()); assert(this->getNumCols() == b.getNumCols()); assert(_isTrans); if(a.getNumRows() % 64 != 0 || a.getNumCols() % 64 != 0 || b.getNumCols() % 64 != 0) { WARN("Matrix dimensions not divisible by 64 -- hipblasSgemm performance may suffer."); } hipblasSgemm(a.getTransChar(), b.getTransChar(), a.getNumRows(), b.getNumCols(), a.getNumCols(), scaleAB, a.getDevData(), a.getLeadingDim(), b.getDevData(), b.getLeadingDim(), scaleThis, _devData, getLeadingDim()); checkCublasError("hipblasSgemm failed"); // hipDeviceSynchronize(); } void NVMatrix::addProduct(const NVMatrix& a, const NVMatrix &b) { addProduct(a, b, 1, 1); } template <class Randomizer> void NVMatrix::_unaryRandomize(NVMatrix& target, Randomizer rnd) { assert(isRndInitialized()); assert(isContiguous() && target.isContiguous()); if (!isSameDims(target)) { target.resize(*this); } assert(isTrans() == target.isTrans()); hipLaunchKernelGGL(( kUnaryRandomize), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, getDevData(), target.getDevData(), getCurandState(), getNumElements(), rnd); cutilCheckMsg("kUnaryRandomize: Kernel execution failed"); } template <class Randomizer> void NVMatrix::_binaryRandomize(NVMatrix& data2, NVMatrix& target, Randomizer rnd) { assert(isRndInitialized()); assert(isContiguous() && data2.isContiguous() && target.isContiguous()); assert(isSameDims(data2)); assert(isTrans() == data2.isTrans()); if (!isSameDims(target)) { target.resize(*this); } assert(isTrans() == target.isTrans()); hipLaunchKernelGGL(( kBinaryRandomize), dim3(NUM_RND_BLOCKS),dim3(NUM_RND_THREADS_PER_BLOCK), 0, 0, getDevData(), data2.getDevData(), target.getDevData(), getCurandState(), getNumElements(), rnd); cutilCheckMsg("kBinaryRandomize: Kernel execution failed"); } /* 
Function removed by Ian Goodfellow. We do not need this function in theano and it uses hipMalloc directly. If you need to enable it, modify it to use device_malloc instead. Otherwise, theano will not be able to keep track of how much memory is used on the device. void NVMatrix::initRandom(unsigned long long seed) { assert(!isRndInitialized()); pthread_mutex_lock(_rndMutex); int d = getDeviceID(); rndDevStates[d] = NULL; CUDA_CALL(hipMalloc((void **)&rndDevStates[d], NUM_RND_STREAMS * sizeof(hiprandState_t))); pthread_mutex_unlock(_rndMutex); kSetupCurand<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK>>>(getCurandState(), 1 + seed*2); // so there's no chance it'll be correlated with the other one cutilCheckMsg("initRandom: Kernel execution failed"); } void NVMatrix::initRandom() { NVMatrix::initRandom(time(0)); } */ hiprandState_t* NVMatrix::getCurandState() { pthread_mutex_lock(_rndMutex); int d = getDeviceID(); assert(rndDevStates.count(d) != 0); hiprandState_t* r = rndDevStates[d]; pthread_mutex_unlock(_rndMutex); return r; } int NVMatrix::getDeviceID() { int d; hipGetDevice(&d); return d; } bool NVMatrix::isRndInitialized() { pthread_mutex_lock(_rndMutex); bool b = rndDevStates.count(getDeviceID()) != 0; pthread_mutex_unlock(_rndMutex); return b; } /* Function removed by Ian Goodfellow due to not needing it and it using hipFree instead of device_free void NVMatrix::destroyRandom() { assert(isRndInitialized()); int d = getDeviceID(); pthread_mutex_lock(_rndMutex); CUDA_CALL(hipFree(rndDevStates[d])); rndDevStates.erase(d); pthread_mutex_unlock(_rndMutex); } */ void NVMatrix::binarizeProbs() { binarizeProbs(*this); } void NVMatrix::binarizeProbs(NVMatrix& target) { _unaryRandomize(target, BinarizeUnaryRandomizer()); } void NVMatrix::randomizeUniform() { assert(isContiguous()); assert(isRndInitialized()); // CURAND_CALL(hiprandGenerateUniform(rndGen, _devData, getNumElements())); _unaryRandomize(*this, UniformUnaryRandomizer()); } void NVMatrix::randomizeGaussian() { randomizeGaussian(1); } void NVMatrix::randomizeGaussian(float stdev) { randomizeGaussian(0, stdev); } void NVMatrix::randomizeGaussian(float mean, float stdev) { assert(isContiguous()); assert(isRndInitialized()); // CURAND_CALL(hiprandGenerateNormal(rndGen, _devData, getNumElements(), mean, stdev)); _unaryRandomize(*this, GaussianUnaryRandomizer(mean, stdev)); } /* * Kind of a hack since we don't actually need the contents of this matrix for it, * so we don't really need a binary randomizer. 
*/ void NVMatrix::randomizeGaussian(NVMatrix& stdevs) { _binaryRandomize(stdevs, *this, GaussianBinaryRandomizer()); } void NVMatrix::addGaussianNoise() { addGaussianNoise(1); } void NVMatrix::addGaussianNoise(float stdev) { addGaussianNoise(stdev, *this); } void NVMatrix::addGaussianNoise(float stdev, NVMatrix& target) { _unaryRandomize(target, AddGaussianUnaryRandomizer(stdev)); } void NVMatrix::addGaussianNoise(NVMatrix& stdevs, bool var) { addGaussianNoise(stdevs, var, *this); } void NVMatrix::addGaussianNoise(NVMatrix& stdevs) { addGaussianNoise(stdevs, false, *this); } void NVMatrix::addGaussianNoise(NVMatrix& stdevs, bool var, NVMatrix& target) { if (var) { _binaryRandomize(stdevs, target, AddGaussianBinaryRandomizer<true>()); } else { _binaryRandomize(stdevs, target, AddGaussianBinaryRandomizer<false>()); } } void NVMatrix::biggerThan(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::BiggerThan(), b, target); } void NVMatrix::biggerThan(NVMatrix& b) { biggerThan(b, *this); } void NVMatrix::equals(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::Equals(), b, target); } void NVMatrix::equals(NVMatrix& m) { equals(m, *this); } void NVMatrix::biggerThanVector(NVMatrix& vec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::BiggerThan(), vec, target); } void NVMatrix::biggerThanVector(NVMatrix& vec) { biggerThanVector(vec, *this); } void NVMatrix::_checkBounds(int startRow, int endRow, int startCol, int endCol) const { assert(startRow >= 0 && startRow < _numRows); assert(endRow > startRow && endRow <= _numRows); assert(startCol >= 0 && startCol < _numCols); assert(endCol > startCol && endCol <= _numCols); } /* * The only place where stride is supported for now! * Will ALWAYS return a view of the original data, sometimes non-contiguous. */ NVMatrix& NVMatrix::slice(int startRow, int endRow, int startCol, int endCol) const { endRow = endRow < 0 ? this->_numRows : endRow; endCol = endCol < 0 ? this->_numCols : endCol; _checkBounds(startRow, endRow, startCol, endCol); if (!isTrans()) { return *new NVMatrix(this->_devData + startRow * _stride + startCol, endRow - startRow, endCol - startCol, _stride, false); } return *new NVMatrix(this->_devData + startCol * _stride + startRow, endRow - startRow, endCol - startCol, _stride, true); } /* this will NEVER return a view */ void NVMatrix::slice(int startRow, int endRow, int startCol, int endCol, NVMatrix& target) const { endRow = endRow < 0 ? this->_numRows : endRow; endCol = endCol < 0 ? this->_numCols : endCol; _checkBounds(startRow, endRow, startCol, endCol); int sliceRows = endRow - startRow, sliceCols = endCol - startCol; if (target.getNumRows() != sliceRows || target.getNumCols() != sliceCols) { target.resize(sliceRows, sliceCols); } this->copy(target, startRow, endRow, startCol, endCol, 0, 0); } NVMatrix& NVMatrix::sliceRows(int startRow, int endRow) const { return slice(startRow, endRow, 0, -1); } void NVMatrix::sliceRows(int startRow, int endRow, NVMatrix& target) const { slice(startRow, endRow, 0, -1, target); } NVMatrix& NVMatrix::sliceCols(int startCol, int endCol) const { return slice(0, -1, startCol, endCol); } void NVMatrix::sliceCols(int startCol, int endCol, NVMatrix& target) const { slice(0, -1, startCol, endCol, target); } /* * Guaranteed to not change the data if the number of elements doesn't change. * So you can use this to "reshape" a matrix. 
*/ bool NVMatrix::resize(int numRows, int numCols) { bool reallocated = false; if (numRows != _numRows || numCols != _numCols) { // this assertion was removed by Ian Goodfellow because it seems to come too early // assert(_ownsData); if (_numElements != numRows * numCols) { assert(_ownsData); // assert moved here by Ian Goodfellow if (_numElements > 0) { // free old memory // This line was modified by Ian Goodfellow to use device_free so theano may track device memory usage accurately int status = device_free(_devData); if (status != 0) { fprintf(stderr, "!!!! memory free error: %X\n", status); exit(EXIT_FAILURE); } } if (numRows * numCols > 0) { // allocate new memory cublasStatus status = hipblasAlloc(numCols * numRows, sizeof(float), (void**) &_devData); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! device memory allocation error\n"); exit(EXIT_FAILURE); } } else { _devData = NULL; } reallocated = true; } _numRows = numRows; _numCols = numCols; _numElements = numRows * numCols; _stride = getLeadingDim(); } return reallocated; } bool NVMatrix::resize(const NVMatrix& like) { setTrans(like.isTrans()); return resize(like.getNumRows(), like.getNumCols()); } /* bool NVMatrix::resize(const Matrix& like) { setTrans(like.isTrans()); return resize(like.getNumRows(), like.getNumCols()); } */ void NVMatrix::reshape(int numRows, int numCols) { assert(isContiguous()); assert(_numElements == numRows*numCols); _numRows = numRows; _numCols = numCols; _stride = getLeadingDim(); } NVMatrix& NVMatrix::reshaped(int numRows, int numCols) { assert(isContiguous()); assert(_numElements == numRows*numCols); return *new NVMatrix(_devData, numRows, numCols, -1, _isTrans); } void NVMatrix::copy(NVMatrix &dest, int srcStartRow, int srcEndRow, int srcStartCol, int srcEndCol, int destStartRow, int destStartCol) const { srcEndRow = srcEndRow < 0 ? _numRows : srcEndRow; srcEndCol = srcEndCol < 0 ? _numCols : srcEndCol; NVMatrix* srcSlice = &slice(srcStartRow, srcEndRow, srcStartCol, srcEndCol); NVMatrix* destSlice = &dest.slice(destStartRow, destStartRow + srcEndRow - srcStartRow, destStartCol, destStartCol + srcEndCol - srcStartCol); srcSlice->apply(NVMatrixOps::Identity(), *destSlice); delete srcSlice; delete destSlice; } NVMatrix& NVMatrix::getTranspose() { return *new NVMatrix(_devData, _numCols, _numRows, _stride, !_isTrans);; } void NVMatrix::transpose(NVMatrix& target) { flipTrans(target); target.setTrans(!target.isTrans()); target.reshape(target.getNumCols(), target.getNumRows()); } void NVMatrix::transpose() { int tmp = _numCols; _numCols = _numRows; _numRows = tmp; _isTrans = !_isTrans; } bool NVMatrix::transpose(bool trans) { bool oldTrans = _isTrans; if (oldTrans != trans) { transpose(); } return oldTrans; } /* * Flips the ordering of the matrix from row-major to column-major and vice versa. * This creates temporary storage -- not a cheap operation. * * This is not equivalent to a "hard transpose". The resultant matrix still has * the same dimensions, its layout in memory just changes. 
*/ NVMatrix& NVMatrix::flipTrans() { NVMatrix* meTrans = new NVMatrix(*this); flipTrans(*meTrans); return *meTrans; } void NVMatrix::flipTrans(NVMatrix& target) { assert(&target != this); target.resize(_numRows, _numCols); target.setTrans(!isTrans()); apply(NVMatrixOps::Identity(), target); } void NVMatrix::squaredDiff(NVMatrix& b) { squaredDiff(b, *this); } void NVMatrix::squaredDiff(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::SquaredDiff(), b, target); } void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB, NVMatrix& target) { if (scaleA == 0) { b.scale(scaleB, target); return; } if (scaleA == 1 && scaleB == 1) { // slight optimization applyBinary(NVMatrixBinaryOps::Add(), b, target); } else { applyBinary(NVMatrixBinaryOps::WeightedAdd(scaleA, scaleB), b, target); } } void NVMatrix::add(NVMatrix& b, float scaleB, NVMatrix& target) { add(b, 1, scaleB, target); } void NVMatrix::add(NVMatrix& b, NVMatrix& target) { add(b, 1, target); } void NVMatrix::add(NVMatrix& b, float scaleB) { add(b, scaleB, *this); } void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB) { add(b, scaleA, scaleB, *this); } void NVMatrix::add(NVMatrix& b) { add(b, 1, *this); } void NVMatrix::subtract(NVMatrix& b, NVMatrix& target) { add(b, -1, target); } void NVMatrix::subtract(NVMatrix& b) { add(b, -1); } void NVMatrix::eltwiseMult(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::Multiply(), b, target); } void NVMatrix::eltwiseMult(NVMatrix& b) { eltwiseMult(b, *this); } void NVMatrix::eltwiseDivide(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::Divide(), b, target); } void NVMatrix::eltwiseDivide(NVMatrix& b) { eltwiseDivide(b, *this); } void NVMatrix::tile(int timesY, int timesX, NVMatrix& target) { assert(isContiguous() && target.isContiguous()); assert(timesX > 0 && timesY > 0); target.resize(_numRows*timesY, _numCols*timesX); target.setTrans(_isTrans); if(!isTrans()) { hipLaunchKernelGGL(( kTile), dim3(NUM_TILE_BLOCKS),dim3(NUM_TILE_THREADS_PER_BLOCK), 0, 0, _devData, target._devData, _numCols, _numRows, target._numCols, target._numRows); } else { hipLaunchKernelGGL(( kTile), dim3(NUM_TILE_BLOCKS),dim3(NUM_TILE_THREADS_PER_BLOCK), 0, 0, _devData, target._devData, _numRows, _numCols, target._numRows, target._numCols); } cutilCheckMsg("Kernel execution failed"); } void NVMatrix::addVector(NVMatrix& vec, float scaleVec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::WeightedAdd(1, scaleVec), vec, target); } void NVMatrix::addVector(NVMatrix& vec) { addVector(vec, 1, *this); } void NVMatrix::addVector(NVMatrix& vec, float scaleVec) { addVector(vec, scaleVec, *this); } void NVMatrix::addVector(NVMatrix& vec, NVMatrix& target) { addVector(vec, 1, target); } void NVMatrix::equalsVector(NVMatrix& vec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::Equals(), vec, target); } void NVMatrix::equalsVector(NVMatrix& vec) { equalsVector(vec, *this); } void NVMatrix::eltwiseMultByVector(NVMatrix& vec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::Multiply(), vec, target); } void NVMatrix::eltwiseMultByVector(NVMatrix& vec) { eltwiseMultByVector(vec, *this); } void NVMatrix::eltwiseDivideByVector(NVMatrix& vec) { eltwiseDivideByVector(vec, *this); } void NVMatrix::eltwiseDivideByVector(NVMatrix& vec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::Divide(), vec, target); } /* * num threads per block is ignored when summing rows (axis=1) because * it has to be a power of 2. * * TODO: this is a mess, fix it. it works pretty fast but it's too ugly. 
* TODO: this function is _really_ bad for very long aggregations of few columns. */ template<class Agg, class BinaryOp> void NVMatrix::_aggregate(int axis, NVMatrix& target, Agg agg, BinaryOp op) { assert(axis == 0 || axis == 1); assert(isContiguous() && target.isContiguous()); assert(&target != this); int width = _isTrans ? _numRows : _numCols; int height = _isTrans ? _numCols : _numRows; target.setTrans(_isTrans); assert(width > 0); assert(height > 0); if(axis == 0 && !_isTrans || axis == 1 && _isTrans) { //col sum target.resize(!_isTrans ? 1 : _numRows, !_isTrans ? _numCols : 1); int numBlocks = DIVUP(width, NUM_SUM_COLS_THREADS_PER_BLOCK); assert(numBlocks * NUM_SUM_COLS_THREADS_PER_BLOCK >= width); assert(numBlocks < NUM_BLOCKS_MAX); hipLaunchKernelGGL(( kDumbAggCols<Agg, BinaryOp>), dim3(numBlocks),dim3(NUM_SUM_COLS_THREADS_PER_BLOCK), 0, 0, _devData, target._devData, width, height, agg, op); cutilCheckMsg("kDumbAggCols: Kernel execution failed"); } else { // row sum target.resize(_isTrans ? 1 : _numRows, _isTrans ? _numCols : 1); if (width > 1) { if (height >= 16384) { // linear aggregation int numBlocksX = 1; int numBlocksY = DIVUP(height, AGG_SHORT_ROWS_THREADS_Y*AGG_SHORT_ROWS_LOOPS_Y); int numThreadsX = width <= 4 ? 4 : width <= 8 ? 8 : width <= 12 ? 12 : width <= 16 ? 16 : AGG_SHORT_ROWS_THREADS_X; int numThreadsY = AGG_SHORT_ROWS_THREADS_Y; while (numBlocksY > NUM_BLOCKS_MAX) { numBlocksY = DIVUP(numBlocksY,2); numBlocksX *= 2; } dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY); if(width <= 16) { if(width <= 4) { hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 1, 4>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } else if(width <= 8) { hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 1, 8>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } else if(width <= 12) { hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 1, 12>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } else { hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 1, 16>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } } else if(width <= 32) { hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 2, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } else if(width <= 48){ hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 3, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } else if(width <= 64){ hipLaunchKernelGGL(( kAggShortRows<Agg, BinaryOp, 4, AGG_SHORT_ROWS_THREADS_X>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } else { hipLaunchKernelGGL(( kAggShortRows2<Agg, BinaryOp>), dim3(grid), dim3(threads), 0, 0, _devData, target._devData,width, height, agg, op); } } else { if (width >= 512) { dim3 threads(AWR_NUM_THREADS); dim3 blocks(1, ::min(1024, height)); hipLaunchKernelGGL(( kAggRows_wholerow_nosync), dim3(blocks), dim3(threads), 0, 0, _devData, target._devData, width, height, agg, op); // dim3 threads(AWR_NUM_THREADS); // dim3 blocks(1, ::min(1024, height)); // kAggRows_wholerow<<<blocks, threads>>>(_devData, target._devData, width, height, agg, op); } else { // dim3 threads(AWR_NUM_THREADS); // dim3 blocks(1, ::min(1024, height)); // kAggRows_wholerow<<<blocks, threads>>>(_devData, target._devData, width, height, agg, op); NVMatrix *prevSum = this; while (prevSum->getLeadingDim() > 
1) { int numThreadsX = width <= 64 ? 32 : (width <= 128 ? 64 : (width <= 256 ? 128 : (width <= 512 ? 256 : 512))); int numThreadsY = 1; int numBlocksX = DIVUP(width, 2*numThreadsX); int numBlocksY = ::min(height, NUM_BLOCKS_MAX); NVMatrix *nvSumAccum = target.getFollowingDim() == height && target.getLeadingDim() == numBlocksX ? &target : new NVMatrix(height, numBlocksX, false); dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY); assert(numBlocksX <= NUM_BLOCKS_MAX); assert(numBlocksY <= NUM_BLOCKS_MAX); if(width <= 64) { hipLaunchKernelGGL(( kAggRows<Agg, BinaryOp, 32>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } else if(width <= 128) { hipLaunchKernelGGL(( kAggRows<Agg, BinaryOp, 64>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } else if(width <= 256) { hipLaunchKernelGGL(( kAggRows<Agg, BinaryOp, 128>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } else if(width <= 512) { hipLaunchKernelGGL(( kAggRows<Agg, BinaryOp, 256>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } else { hipLaunchKernelGGL(( kAggRows<Agg, BinaryOp, 512>), dim3(grid), dim3(threads), 0, 0, prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } cutilCheckMsg("agg rows: Kernel execution failed"); hipDeviceSynchronize(); width = numBlocksX; // only true in reduction agg, but for linear agg this doesn't matter anyway if (prevSum != this) { delete prevSum; } prevSum = nvSumAccum; } } } } else { copy(target); } } } void NVMatrix::inRangeInc(float lower, float upper) { inRangeInc(lower, upper, *this); } void NVMatrix::inRangeInc(float lower, float upper, NVMatrix& target) { apply(NVMatrixOps::InRange<false>(lower, upper), target); } void NVMatrix::inRangeExc(float lower, float upper) { inRangeExc(lower, upper, *this); } void NVMatrix::inRangeExc(float lower, float upper, NVMatrix& target) { apply(NVMatrixOps::InRange<true>(lower, upper), target); } void NVMatrix::biggerThanScalar(float scalar) { biggerThanScalar(scalar, *this); } void NVMatrix::biggerThanScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::BiggerThanScalar(scalar), target); } void NVMatrix::smallerThanScalar(float scalar) { smallerThanScalar(scalar, *this); } void NVMatrix::smallerThanScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::SmallerThanScalar(scalar), target); } void NVMatrix::addScalar(float scaleThis, float scalar, NVMatrix& target) { apply(NVMatrixOps::WeightedAddScalar(scaleThis, scalar), target); } void NVMatrix::addScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::AddScalar(scalar), target); } void NVMatrix::addScalar(float scalar) { addScalar(scalar, *this); } void NVMatrix::minWithScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::MinWithScalar(scalar), target); } void NVMatrix::minWithScalar(float scalar) { minWithScalar(scalar, *this); } void NVMatrix::maxWithScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::MaxWithScalar(scalar), target); } void NVMatrix::maxWithScalar(float scalar) { maxWithScalar(scalar, *this); } void NVMatrix::pow(float p, NVMatrix& target) { apply(NVMatrixOps::Pow(p), target); } void NVMatrix::pow(float p) { pow(p, *this); } void NVMatrix::scale(float _scale) { scale(_scale, 
*this); } void NVMatrix::scale(float _scale, NVMatrix& target) { if (_scale != 1 || &target != this) { // optimize away scale by 1 apply(NVMatrixOps::MultByScalar(_scale), target); } } template<class Agg, class BinaryOp> NVMatrix& NVMatrix::_aggregate(int axis, Agg agg, BinaryOp op) { NVMatrix *sumVec = new NVMatrix(); _aggregate<Agg, BinaryOp>(axis, *sumVec, agg, op); return *sumVec; } void NVMatrix::max(int axis, NVMatrix& target) { _aggregate(axis, target, NVMatrixAggs::Max(), NVMatrixBinaryOps::Second()); } void NVMatrix::addSum(NVMatrix& a, int axis, float scaleThis, float scaleSum) { if (scaleThis != 0) { a._aggregate(axis, *this, NVMatrixAggs::Sum(), NVMatrixBinaryOps::WeightedAdd(scaleThis, scaleSum)); } else { a._aggregate(axis, *this, NVMatrixAggs::Sum(), NVMatrixBinaryOps::SecondScaled(scaleSum)); } } void NVMatrix::sum(int axis, NVMatrix& target) { _aggregate(axis, target, NVMatrixAggs::Sum(), NVMatrixBinaryOps::Second()); } /* void NVMatrix::min(int axis, NVMatrix& target) { _aggregate(axis, target, NVMatrixAggs::Min(), NVMatrixBinaryOps::Second()); } NVMatrix& NVMatrix::max(int axis) { return _aggregate(axis, NVMatrixAggs::Max(), NVMatrixBinaryOps::Second()); } NVMatrix& NVMatrix::sum(int axis) { return _aggregate(axis, NVMatrixAggs::Sum(), NVMatrixBinaryOps::Second()); } NVMatrix& NVMatrix::min(int axis) { return _aggregate(axis, NVMatrixAggs::Min(), NVMatrixBinaryOps::Second()); } */ void NVMatrix::_sum_setParams(int n, dim3* blocks, dim3* threads, int* numCols) { int logn = int(ceil(log(double(n)) / log(2))); *numCols = DIVUP(n, logn); int numThreads = *numCols; *blocks = dim3(DIVUP(numThreads, DP_BLOCKSIZE)); *threads = dim3(DP_BLOCKSIZE); } /* float NVMatrix::mean() { return sum() / getNumElements(); } float NVMatrix::sum() { return _totalAgg(NVMatrixAggs::Sum()); } float NVMatrix::max() { return _totalAgg(NVMatrixAggs::Max()); } float NVMatrix::min() { return _totalAgg(NVMatrixAggs::Min()); } template<class Agg> float NVMatrix::_totalAgg(Agg agg) { assert(isContiguous()); dim3 blocks, threads; int numCols; // Sum most of it on GPU NVMatrix* src = this; for (NVMatrix* target = NULL; src->getNumElements() > CPUSUM_MAX; src = target) { _sum_setParams(src->getNumElements(), &blocks, &threads, &numCols); target = new NVMatrix(1, blocks.x); kTotalAgg<<<blocks, threads>>>(src->getDevData(), target->getDevData(), numCols, src->getNumElements(), agg); cutilCheckMsg("kTotalAgg: Kernel execution failed"); hipDeviceSynchronize(); // not really necessary? delete (src == this ? NULL : src); } Matrix srcCPU(src->getNumRows(), src->getNumCols()); src->copyToHost(srcCPU); if (src->getNumElements() > 1) { // Sum remainder on CPU delete (src == this ? NULL : src); if (typeid(Agg) == typeid(NVMatrixAggs::Sum)) { return srcCPU.sum(); } else if (typeid(Agg) == typeid(NVMatrixAggs::Max)) { return srcCPU.max(); } else if (typeid(Agg) == typeid(NVMatrixAggs::Min)) { return srcCPU.min(); } else { assert(false); } } return srcCPU(0,0); } */ /* * Fast dot product only for matrices with same transposedness. float NVMatrix::dotProduct(NVMatrix& b) { assert(isContiguous() && b.isContiguous()); assert(isSameDims(b)); assert(isTrans() == b.isTrans()); // see? 
dim3 blocks, threads; int numCols; _sum_setParams(getNumElements(), &blocks, &threads, &numCols); NVMatrix target(1, blocks.x); kDotProduct_r<<<blocks, threads>>>(getDevData(), b.getDevData(), target.getDevData(), numCols, getNumElements()); cutilCheckMsg("kDotProduct: Kernel execution failed"); hipDeviceSynchronize(); return target.sum(); } float NVMatrix::norm2() { return dotProduct(*this); } float NVMatrix::norm() { return sqrt(norm2()); } */ /* void NVMatrix::print(int startRow, int rows, int startCol, int cols) const { hipDeviceSynchronize(); Matrix hm = Matrix(_numRows, _numCols); copyToHost(hm); hm.print(startRow, rows, startCol, cols); } void NVMatrix::print(int rows, int cols) const { print(0, rows, 0, cols); } */ void NVMatrix::printShape(const char* name) const { printf("%s: %dx%d\n", name, _numRows, _numCols); }
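The .hip file above and its .cu counterpart below differ mainly in API prefixes (hipblas*/hip* versus cublas*/cuda*) and in kernel-launch syntax. A minimal sketch of that launch rewrite, using a hypothetical saxpy kernel and placeholder arguments rather than one of the NVMatrix kernels:

// Hypothetical kernel, only to illustrate the launch-syntax mapping.
__global__ void saxpy(int n, float a, const float* x, float* y)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) y[i] = a * x[i] + y[i];
}

// CUDA (.cu) form, as in the kTile / kAggRows calls below:
//     saxpy<<<grid, block, 0, stream>>>(n, a, d_x, d_y);
// HIP (.hip) form emitted by hipify, as in the kTile / kAggRows calls above:
//     hipLaunchKernelGGL(saxpy, grid, block, 0, stream, n, a, d_x, d_y);
// The triple-chevron slots map to (gridDim, blockDim, sharedMemBytes, stream),
// followed by the kernel's own argument list.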
44a1c73fb08e4b70981c627f8c8c361426a907d9.cu
/* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <set> #include <vector> #include <assert.h> #include <cublas.h> #include <cutil_inline.h> #include <stdlib.h> #include <stdio.h> #include <fstream> #include <iostream> #include <algorithm> #include <typeinfo> #include <nvmatrix.cuh> #include <nvmatrix_operators.cuh> #include <map> using namespace std; /* * Device random number generator pointers. */ //map<int,curandGenerator_t> NVMatrix::rndGen; map<int,curandState*> NVMatrix::rndDevStates; pthread_mutex_t* NVMatrix::_rndMutex = makeMutex(); pthread_mutex_t* NVMatrix::makeMutex() { pthread_mutex_t* m = (pthread_mutex_t*) malloc(sizeof(pthread_mutex_t)); pthread_mutex_init(m, NULL); return m; } NVMatrix::NVMatrix(const CudaNdarray * view, int numRows, int numCols, const char * msg) { if (!CudaNdarray_is_c_contiguous(view)) { printf("Non contiguous input: %s\n", msg); printf("Dims: "); for (int i=0; i < view->nd; i++) printf("%d ",CudaNdarray_HOST_STRIDES(view)[i]); printf("\n"); assert(false); } //printf("%d %d\n",numRows, numCols); //Check that view actually contains numRows * numCols elements const int * dims = CudaNdarray_HOST_DIMS(view); int total = 1; for (int i = 0; i < view->nd; i++) { total *= dims[i]; } if (total != numRows * numCols) { fprintf(stderr, "NVMatrix asked to make a view of a CudaNdarray with %d elements",total); fprintf(stderr, " but told to arrange these in a %d x %d rectangle (of total size %d).\n", numRows, numCols, numRows * numCols); fprintf(stderr, "CudaNdarray dims: "); for (int i = 0; i < view->nd; i++) fprintf(stderr, "%d ", dims[i]); fprintf(stderr, "\n"); assert(false); } //Make the view _numRows = numRows; _numCols = numCols; _numElements = numRows * numCols; _ownsData = false; _isTrans = false; _devData = view->devdata; _stride = getLeadingDim(); } void NVMatrix::_init(int numRows, int numCols, int stride, bool isTrans) { _numRows = numRows; _numCols = numCols; _numElements = numRows * numCols; _ownsData = true; _isTrans = isTrans; _devData = NULL; if (_numElements > 0) { cublasAlloc(_numElements, sizeof(float), (void**) &_devData); checkCublasError("!!!! device memory allocation error\n"); } _stride = stride < 0 ? 
getLeadingDim() : stride; } NVMatrix::NVMatrix() { _init(0, 0, -1, false); } NVMatrix::NVMatrix(bool isTrans) { _init(0, 0, -1, isTrans); } NVMatrix::NVMatrix(int numRows, int numCols, bool isTrans) { _init(numRows, numCols, -1, isTrans); } /* NVMatrix::NVMatrix(const Matrix& like, bool copy) { _init(like.getNumRows(), like.getNumCols(), -1, like.isTrans()); if (copy) { copyFromHost(like); } } */ NVMatrix::NVMatrix(const NVMatrix& like, bool copy) { _init(like.getNumRows(), like.getNumCols(), -1, like.isTrans()); if (copy) { like.copy(*this); } } /* * Initializes NVMatrix with same dimensions as given matrix but * does not copy any data. */ NVMatrix::NVMatrix(const NVMatrix& like) { _init(like.getNumRows(), like.getNumCols(), -1, like.isTrans()); } /* * Initializes NVMatrix with same dimensions as given matrix but * does not copy any data. NVMatrix::NVMatrix(const Matrix& like) { _init(like.getNumRows(), like.getNumCols(), -1, false); } */ NVMatrix::NVMatrix(float* devData, int numRows, int numCols, int stride, bool isTrans) : _numRows(numRows), _numCols(numCols), _numElements(numRows*numCols), _ownsData(false), _devData(devData), _isTrans(isTrans) { _stride = stride < 0 ? getLeadingDim() : stride; } NVMatrix::~NVMatrix() { if(_ownsData && _numElements > 0) { // This line was modified by Ian Goodfellow to use device_free // so that theano may keep track of device memory usage int status = device_free(_devData); if (status != 0) { fprintf(stderr, "!!!! memory free error\n"); exit(EXIT_FAILURE); } } } /* void NVMatrix::copyFromHost(const Matrix& hostMatrix, bool resizeDeviceMatrix) { if (resizeDeviceMatrix) { resize(hostMatrix); } copyFromHost(hostMatrix); } void NVMatrix::copyFromHost(const Matrix& hostMatrix) { // assert(getStride() == getLeadingDim()); assert(isSameDims(hostMatrix)); setTrans(hostMatrix.isTrans()); if (getNumElements() > 0) { cublasStatus status = cublasSetMatrix(hostMatrix.getLeadingDim(), hostMatrix.getFollowingDim(), sizeof(float), hostMatrix.getData(), hostMatrix.getLeadingDim(), _devData, _stride); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! device access error (write)\n"); exit( EXIT_FAILURE); } } } void NVMatrix::copyToHost(Matrix& hostMatrix) const { // assert(getStride() == getLeadingDim()); assert(isSameDims(hostMatrix)); hostMatrix.setTrans(_isTrans); if (getNumElements() > 0) { // printf("rows: %d, cols: %d, stride: %d\n", getNumRows(), getNumCols(), getStride()); cublasStatus status = cublasGetMatrix(getLeadingDim(),getFollowingDim(), sizeof(float), _devData, getStride(), hostMatrix.getData(), hostMatrix.getLeadingDim()); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! 
device access error (read)\n"); exit( EXIT_FAILURE); } } } void NVMatrix::copyToHost(Matrix& hostMatrix, bool resizeTarget) const { if (resizeTarget) { hostMatrix.resize(_numRows, _numCols); } copyToHost(hostMatrix); } */ void NVMatrix::copy(NVMatrix& dest) const { dest.resize(*this); copy(dest, 0, -1, 0, -1, 0, 0); } NVMatrix& NVMatrix::copy() const { NVMatrix* c = new NVMatrix(); copy(*c); return *c; } void NVMatrix::rightMult(const NVMatrix &b, float scaleAB, NVMatrix &target) const { assert(isContiguous() && b.isContiguous() && target.isContiguous()); // assert(&target != &b); assert(_numCols == b.getNumRows()); if(&target != this) { target.resize(_numRows, b.getNumCols()); target.setTrans(true); } assert(target.getNumRows() == _numRows); assert(target.getNumCols() == b.getNumCols()); if(_numRows % 64 != 0 || _numCols % 64 != 0 || b.getNumCols() % 64 != 0) { WARN("Matrix dimensions not divisible by 64 -- cublasSgemm performance may suffer."); } cublasSgemm(getTransChar(), b.getTransChar(), _numRows, b.getNumCols(), _numCols, scaleAB, _devData, getLeadingDim(), b.getDevData(), b.getLeadingDim(), 0, target.getDevData(), getNumRows()); checkCublasError("cublasSgemm failed"); // cudaThreadSynchronize(); } void NVMatrix::rightMult(const NVMatrix &b, float scaleAB) { rightMult(b, scaleAB, *this); } void NVMatrix::rightMult(const NVMatrix &b, NVMatrix& target) const { rightMult(b, 1, target); } /* * This will only work if this matrix is in column-major order! In other words, * if isTrans() returns true. */ void NVMatrix::addProduct(const NVMatrix& a, const NVMatrix &b, float scaleThis, float scaleAB) { if (scaleThis == 0) { a.rightMult(b, scaleAB, *this); return; } assert(isContiguous()); assert(a.getNumCols() == b.getNumRows()); assert(this->getNumRows() == a.getNumRows()); assert(this->getNumCols() == b.getNumCols()); assert(_isTrans); if(a.getNumRows() % 64 != 0 || a.getNumCols() % 64 != 0 || b.getNumCols() % 64 != 0) { WARN("Matrix dimensions not divisible by 64 -- cublasSgemm performance may suffer."); } cublasSgemm(a.getTransChar(), b.getTransChar(), a.getNumRows(), b.getNumCols(), a.getNumCols(), scaleAB, a.getDevData(), a.getLeadingDim(), b.getDevData(), b.getLeadingDim(), scaleThis, _devData, getLeadingDim()); checkCublasError("cublasSgemm failed"); // cudaThreadSynchronize(); } void NVMatrix::addProduct(const NVMatrix& a, const NVMatrix &b) { addProduct(a, b, 1, 1); } template <class Randomizer> void NVMatrix::_unaryRandomize(NVMatrix& target, Randomizer rnd) { assert(isRndInitialized()); assert(isContiguous() && target.isContiguous()); if (!isSameDims(target)) { target.resize(*this); } assert(isTrans() == target.isTrans()); kUnaryRandomize<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(getDevData(), target.getDevData(), getCurandState(), getNumElements(), rnd); cutilCheckMsg("kUnaryRandomize: Kernel execution failed"); } template <class Randomizer> void NVMatrix::_binaryRandomize(NVMatrix& data2, NVMatrix& target, Randomizer rnd) { assert(isRndInitialized()); assert(isContiguous() && data2.isContiguous() && target.isContiguous()); assert(isSameDims(data2)); assert(isTrans() == data2.isTrans()); if (!isSameDims(target)) { target.resize(*this); } assert(isTrans() == target.isTrans()); kBinaryRandomize<<<NUM_RND_BLOCKS,NUM_RND_THREADS_PER_BLOCK>>>(getDevData(), data2.getDevData(), target.getDevData(), getCurandState(), getNumElements(), rnd); cutilCheckMsg("kBinaryRandomize: Kernel execution failed"); } /* Function removed by Ian Goodfellow. 
We do not need this function in theano and it uses cudaMalloc directly. If you need to enable it, modify it to use device_malloc instead. Otherwise, theano will not be able to keep track of how much memory is used on the device. void NVMatrix::initRandom(unsigned long long seed) { assert(!isRndInitialized()); pthread_mutex_lock(_rndMutex); int d = getDeviceID(); rndDevStates[d] = NULL; CUDA_CALL(cudaMalloc((void **)&rndDevStates[d], NUM_RND_STREAMS * sizeof(curandState))); pthread_mutex_unlock(_rndMutex); kSetupCurand<<<NUM_RND_BLOCKS, NUM_RND_THREADS_PER_BLOCK>>>(getCurandState(), 1 + seed*2); // so there's no chance it'll be correlated with the other one cutilCheckMsg("initRandom: Kernel execution failed"); } void NVMatrix::initRandom() { NVMatrix::initRandom(time(0)); } */ curandState* NVMatrix::getCurandState() { pthread_mutex_lock(_rndMutex); int d = getDeviceID(); assert(rndDevStates.count(d) != 0); curandState* r = rndDevStates[d]; pthread_mutex_unlock(_rndMutex); return r; } int NVMatrix::getDeviceID() { int d; cudaGetDevice(&d); return d; } bool NVMatrix::isRndInitialized() { pthread_mutex_lock(_rndMutex); bool b = rndDevStates.count(getDeviceID()) != 0; pthread_mutex_unlock(_rndMutex); return b; } /* Function removed by Ian Goodfellow due to not needing it and it using cudaFree instead of device_free void NVMatrix::destroyRandom() { assert(isRndInitialized()); int d = getDeviceID(); pthread_mutex_lock(_rndMutex); CUDA_CALL(cudaFree(rndDevStates[d])); rndDevStates.erase(d); pthread_mutex_unlock(_rndMutex); } */ void NVMatrix::binarizeProbs() { binarizeProbs(*this); } void NVMatrix::binarizeProbs(NVMatrix& target) { _unaryRandomize(target, BinarizeUnaryRandomizer()); } void NVMatrix::randomizeUniform() { assert(isContiguous()); assert(isRndInitialized()); // CURAND_CALL(curandGenerateUniform(rndGen, _devData, getNumElements())); _unaryRandomize(*this, UniformUnaryRandomizer()); } void NVMatrix::randomizeGaussian() { randomizeGaussian(1); } void NVMatrix::randomizeGaussian(float stdev) { randomizeGaussian(0, stdev); } void NVMatrix::randomizeGaussian(float mean, float stdev) { assert(isContiguous()); assert(isRndInitialized()); // CURAND_CALL(curandGenerateNormal(rndGen, _devData, getNumElements(), mean, stdev)); _unaryRandomize(*this, GaussianUnaryRandomizer(mean, stdev)); } /* * Kind of a hack since we don't actually need the contents of this matrix for it, * so we don't really need a binary randomizer. 
*/ void NVMatrix::randomizeGaussian(NVMatrix& stdevs) { _binaryRandomize(stdevs, *this, GaussianBinaryRandomizer()); } void NVMatrix::addGaussianNoise() { addGaussianNoise(1); } void NVMatrix::addGaussianNoise(float stdev) { addGaussianNoise(stdev, *this); } void NVMatrix::addGaussianNoise(float stdev, NVMatrix& target) { _unaryRandomize(target, AddGaussianUnaryRandomizer(stdev)); } void NVMatrix::addGaussianNoise(NVMatrix& stdevs, bool var) { addGaussianNoise(stdevs, var, *this); } void NVMatrix::addGaussianNoise(NVMatrix& stdevs) { addGaussianNoise(stdevs, false, *this); } void NVMatrix::addGaussianNoise(NVMatrix& stdevs, bool var, NVMatrix& target) { if (var) { _binaryRandomize(stdevs, target, AddGaussianBinaryRandomizer<true>()); } else { _binaryRandomize(stdevs, target, AddGaussianBinaryRandomizer<false>()); } } void NVMatrix::biggerThan(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::BiggerThan(), b, target); } void NVMatrix::biggerThan(NVMatrix& b) { biggerThan(b, *this); } void NVMatrix::equals(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::Equals(), b, target); } void NVMatrix::equals(NVMatrix& m) { equals(m, *this); } void NVMatrix::biggerThanVector(NVMatrix& vec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::BiggerThan(), vec, target); } void NVMatrix::biggerThanVector(NVMatrix& vec) { biggerThanVector(vec, *this); } void NVMatrix::_checkBounds(int startRow, int endRow, int startCol, int endCol) const { assert(startRow >= 0 && startRow < _numRows); assert(endRow > startRow && endRow <= _numRows); assert(startCol >= 0 && startCol < _numCols); assert(endCol > startCol && endCol <= _numCols); } /* * The only place where stride is supported for now! * Will ALWAYS return a view of the original data, sometimes non-contiguous. */ NVMatrix& NVMatrix::slice(int startRow, int endRow, int startCol, int endCol) const { endRow = endRow < 0 ? this->_numRows : endRow; endCol = endCol < 0 ? this->_numCols : endCol; _checkBounds(startRow, endRow, startCol, endCol); if (!isTrans()) { return *new NVMatrix(this->_devData + startRow * _stride + startCol, endRow - startRow, endCol - startCol, _stride, false); } return *new NVMatrix(this->_devData + startCol * _stride + startRow, endRow - startRow, endCol - startCol, _stride, true); } /* this will NEVER return a view */ void NVMatrix::slice(int startRow, int endRow, int startCol, int endCol, NVMatrix& target) const { endRow = endRow < 0 ? this->_numRows : endRow; endCol = endCol < 0 ? this->_numCols : endCol; _checkBounds(startRow, endRow, startCol, endCol); int sliceRows = endRow - startRow, sliceCols = endCol - startCol; if (target.getNumRows() != sliceRows || target.getNumCols() != sliceCols) { target.resize(sliceRows, sliceCols); } this->copy(target, startRow, endRow, startCol, endCol, 0, 0); } NVMatrix& NVMatrix::sliceRows(int startRow, int endRow) const { return slice(startRow, endRow, 0, -1); } void NVMatrix::sliceRows(int startRow, int endRow, NVMatrix& target) const { slice(startRow, endRow, 0, -1, target); } NVMatrix& NVMatrix::sliceCols(int startCol, int endCol) const { return slice(0, -1, startCol, endCol); } void NVMatrix::sliceCols(int startCol, int endCol, NVMatrix& target) const { slice(0, -1, startCol, endCol, target); } /* * Guaranteed to not change the data if the number of elements doesn't change. * So you can use this to "reshape" a matrix. 
*/ bool NVMatrix::resize(int numRows, int numCols) { bool reallocated = false; if (numRows != _numRows || numCols != _numCols) { // this assertion was removed by Ian Goodfellow because it seems to come too early // assert(_ownsData); if (_numElements != numRows * numCols) { assert(_ownsData); // assert moved here by Ian Goodfellow if (_numElements > 0) { // free old memory // This line was modified by Ian Goodfellow to use device_free so theano may track device memory usage accurately int status = device_free(_devData); if (status != 0) { fprintf(stderr, "!!!! memory free error: %X\n", status); exit(EXIT_FAILURE); } } if (numRows * numCols > 0) { // allocate new memory cublasStatus status = cublasAlloc(numCols * numRows, sizeof(float), (void**) &_devData); if (status != CUBLAS_STATUS_SUCCESS) { fprintf(stderr, "!!!! device memory allocation error\n"); exit(EXIT_FAILURE); } } else { _devData = NULL; } reallocated = true; } _numRows = numRows; _numCols = numCols; _numElements = numRows * numCols; _stride = getLeadingDim(); } return reallocated; } bool NVMatrix::resize(const NVMatrix& like) { setTrans(like.isTrans()); return resize(like.getNumRows(), like.getNumCols()); } /* bool NVMatrix::resize(const Matrix& like) { setTrans(like.isTrans()); return resize(like.getNumRows(), like.getNumCols()); } */ void NVMatrix::reshape(int numRows, int numCols) { assert(isContiguous()); assert(_numElements == numRows*numCols); _numRows = numRows; _numCols = numCols; _stride = getLeadingDim(); } NVMatrix& NVMatrix::reshaped(int numRows, int numCols) { assert(isContiguous()); assert(_numElements == numRows*numCols); return *new NVMatrix(_devData, numRows, numCols, -1, _isTrans); } void NVMatrix::copy(NVMatrix &dest, int srcStartRow, int srcEndRow, int srcStartCol, int srcEndCol, int destStartRow, int destStartCol) const { srcEndRow = srcEndRow < 0 ? _numRows : srcEndRow; srcEndCol = srcEndCol < 0 ? _numCols : srcEndCol; NVMatrix* srcSlice = &slice(srcStartRow, srcEndRow, srcStartCol, srcEndCol); NVMatrix* destSlice = &dest.slice(destStartRow, destStartRow + srcEndRow - srcStartRow, destStartCol, destStartCol + srcEndCol - srcStartCol); srcSlice->apply(NVMatrixOps::Identity(), *destSlice); delete srcSlice; delete destSlice; } NVMatrix& NVMatrix::getTranspose() { return *new NVMatrix(_devData, _numCols, _numRows, _stride, !_isTrans);; } void NVMatrix::transpose(NVMatrix& target) { flipTrans(target); target.setTrans(!target.isTrans()); target.reshape(target.getNumCols(), target.getNumRows()); } void NVMatrix::transpose() { int tmp = _numCols; _numCols = _numRows; _numRows = tmp; _isTrans = !_isTrans; } bool NVMatrix::transpose(bool trans) { bool oldTrans = _isTrans; if (oldTrans != trans) { transpose(); } return oldTrans; } /* * Flips the ordering of the matrix from row-major to column-major and vice versa. * This creates temporary storage -- not a cheap operation. * * This is not equivalent to a "hard transpose". The resultant matrix still has * the same dimensions, its layout in memory just changes. 
*/ NVMatrix& NVMatrix::flipTrans() { NVMatrix* meTrans = new NVMatrix(*this); flipTrans(*meTrans); return *meTrans; } void NVMatrix::flipTrans(NVMatrix& target) { assert(&target != this); target.resize(_numRows, _numCols); target.setTrans(!isTrans()); apply(NVMatrixOps::Identity(), target); } void NVMatrix::squaredDiff(NVMatrix& b) { squaredDiff(b, *this); } void NVMatrix::squaredDiff(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::SquaredDiff(), b, target); } void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB, NVMatrix& target) { if (scaleA == 0) { b.scale(scaleB, target); return; } if (scaleA == 1 && scaleB == 1) { // slight optimization applyBinary(NVMatrixBinaryOps::Add(), b, target); } else { applyBinary(NVMatrixBinaryOps::WeightedAdd(scaleA, scaleB), b, target); } } void NVMatrix::add(NVMatrix& b, float scaleB, NVMatrix& target) { add(b, 1, scaleB, target); } void NVMatrix::add(NVMatrix& b, NVMatrix& target) { add(b, 1, target); } void NVMatrix::add(NVMatrix& b, float scaleB) { add(b, scaleB, *this); } void NVMatrix::add(NVMatrix& b, float scaleA, float scaleB) { add(b, scaleA, scaleB, *this); } void NVMatrix::add(NVMatrix& b) { add(b, 1, *this); } void NVMatrix::subtract(NVMatrix& b, NVMatrix& target) { add(b, -1, target); } void NVMatrix::subtract(NVMatrix& b) { add(b, -1); } void NVMatrix::eltwiseMult(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::Multiply(), b, target); } void NVMatrix::eltwiseMult(NVMatrix& b) { eltwiseMult(b, *this); } void NVMatrix::eltwiseDivide(NVMatrix& b, NVMatrix& target) { applyBinary(NVMatrixBinaryOps::Divide(), b, target); } void NVMatrix::eltwiseDivide(NVMatrix& b) { eltwiseDivide(b, *this); } void NVMatrix::tile(int timesY, int timesX, NVMatrix& target) { assert(isContiguous() && target.isContiguous()); assert(timesX > 0 && timesY > 0); target.resize(_numRows*timesY, _numCols*timesX); target.setTrans(_isTrans); if(!isTrans()) { kTile<<<NUM_TILE_BLOCKS,NUM_TILE_THREADS_PER_BLOCK>>>(_devData, target._devData, _numCols, _numRows, target._numCols, target._numRows); } else { kTile<<<NUM_TILE_BLOCKS,NUM_TILE_THREADS_PER_BLOCK>>>(_devData, target._devData, _numRows, _numCols, target._numRows, target._numCols); } cutilCheckMsg("Kernel execution failed"); } void NVMatrix::addVector(NVMatrix& vec, float scaleVec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::WeightedAdd(1, scaleVec), vec, target); } void NVMatrix::addVector(NVMatrix& vec) { addVector(vec, 1, *this); } void NVMatrix::addVector(NVMatrix& vec, float scaleVec) { addVector(vec, scaleVec, *this); } void NVMatrix::addVector(NVMatrix& vec, NVMatrix& target) { addVector(vec, 1, target); } void NVMatrix::equalsVector(NVMatrix& vec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::Equals(), vec, target); } void NVMatrix::equalsVector(NVMatrix& vec) { equalsVector(vec, *this); } void NVMatrix::eltwiseMultByVector(NVMatrix& vec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::Multiply(), vec, target); } void NVMatrix::eltwiseMultByVector(NVMatrix& vec) { eltwiseMultByVector(vec, *this); } void NVMatrix::eltwiseDivideByVector(NVMatrix& vec) { eltwiseDivideByVector(vec, *this); } void NVMatrix::eltwiseDivideByVector(NVMatrix& vec, NVMatrix& target) { applyBinaryV(NVMatrixBinaryOps::Divide(), vec, target); } /* * num threads per block is ignored when summing rows (axis=1) because * it has to be a power of 2. * * TODO: this is a mess, fix it. it works pretty fast but it's too ugly. 
* TODO: this function is _really_ bad for very long aggregations of few columns. */ template<class Agg, class BinaryOp> void NVMatrix::_aggregate(int axis, NVMatrix& target, Agg agg, BinaryOp op) { assert(axis == 0 || axis == 1); assert(isContiguous() && target.isContiguous()); assert(&target != this); int width = _isTrans ? _numRows : _numCols; int height = _isTrans ? _numCols : _numRows; target.setTrans(_isTrans); assert(width > 0); assert(height > 0); if(axis == 0 && !_isTrans || axis == 1 && _isTrans) { //col sum target.resize(!_isTrans ? 1 : _numRows, !_isTrans ? _numCols : 1); int numBlocks = DIVUP(width, NUM_SUM_COLS_THREADS_PER_BLOCK); assert(numBlocks * NUM_SUM_COLS_THREADS_PER_BLOCK >= width); assert(numBlocks < NUM_BLOCKS_MAX); kDumbAggCols<Agg, BinaryOp><<<numBlocks,NUM_SUM_COLS_THREADS_PER_BLOCK>>>(_devData, target._devData, width, height, agg, op); cutilCheckMsg("kDumbAggCols: Kernel execution failed"); } else { // row sum target.resize(_isTrans ? 1 : _numRows, _isTrans ? _numCols : 1); if (width > 1) { if (height >= 16384) { // linear aggregation int numBlocksX = 1; int numBlocksY = DIVUP(height, AGG_SHORT_ROWS_THREADS_Y*AGG_SHORT_ROWS_LOOPS_Y); int numThreadsX = width <= 4 ? 4 : width <= 8 ? 8 : width <= 12 ? 12 : width <= 16 ? 16 : AGG_SHORT_ROWS_THREADS_X; int numThreadsY = AGG_SHORT_ROWS_THREADS_Y; while (numBlocksY > NUM_BLOCKS_MAX) { numBlocksY = DIVUP(numBlocksY,2); numBlocksX *= 2; } dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY); if(width <= 16) { if(width <= 4) { kAggShortRows<Agg, BinaryOp, 1, 4><<<grid, threads>>>(_devData, target._devData,width, height, agg, op); } else if(width <= 8) { kAggShortRows<Agg, BinaryOp, 1, 8><<<grid, threads>>>(_devData, target._devData,width, height, agg, op); } else if(width <= 12) { kAggShortRows<Agg, BinaryOp, 1, 12><<<grid, threads>>>(_devData, target._devData,width, height, agg, op); } else { kAggShortRows<Agg, BinaryOp, 1, 16><<<grid, threads>>>(_devData, target._devData,width, height, agg, op); } } else if(width <= 32) { kAggShortRows<Agg, BinaryOp, 2, AGG_SHORT_ROWS_THREADS_X><<<grid, threads>>>(_devData, target._devData,width, height, agg, op); } else if(width <= 48){ kAggShortRows<Agg, BinaryOp, 3, AGG_SHORT_ROWS_THREADS_X><<<grid, threads>>>(_devData, target._devData,width, height, agg, op); } else if(width <= 64){ kAggShortRows<Agg, BinaryOp, 4, AGG_SHORT_ROWS_THREADS_X><<<grid, threads>>>(_devData, target._devData,width, height, agg, op); } else { kAggShortRows2<Agg, BinaryOp><<<grid, threads>>>(_devData, target._devData,width, height, agg, op); } } else { if (width >= 512) { dim3 threads(AWR_NUM_THREADS); dim3 blocks(1, std::min(1024, height)); kAggRows_wholerow_nosync<<<blocks, threads>>>(_devData, target._devData, width, height, agg, op); // dim3 threads(AWR_NUM_THREADS); // dim3 blocks(1, std::min(1024, height)); // kAggRows_wholerow<<<blocks, threads>>>(_devData, target._devData, width, height, agg, op); } else { // dim3 threads(AWR_NUM_THREADS); // dim3 blocks(1, std::min(1024, height)); // kAggRows_wholerow<<<blocks, threads>>>(_devData, target._devData, width, height, agg, op); NVMatrix *prevSum = this; while (prevSum->getLeadingDim() > 1) { int numThreadsX = width <= 64 ? 32 : (width <= 128 ? 64 : (width <= 256 ? 128 : (width <= 512 ? 256 : 512))); int numThreadsY = 1; int numBlocksX = DIVUP(width, 2*numThreadsX); int numBlocksY = std::min(height, NUM_BLOCKS_MAX); NVMatrix *nvSumAccum = target.getFollowingDim() == height && target.getLeadingDim() == numBlocksX ? 
&target : new NVMatrix(height, numBlocksX, false); dim3 grid(numBlocksX, numBlocksY), threads(numThreadsX, numThreadsY); assert(numBlocksX <= NUM_BLOCKS_MAX); assert(numBlocksY <= NUM_BLOCKS_MAX); if(width <= 64) { kAggRows<Agg, BinaryOp, 32><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } else if(width <= 128) { kAggRows<Agg, BinaryOp, 64><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } else if(width <= 256) { kAggRows<Agg, BinaryOp, 128><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } else if(width <= 512) { kAggRows<Agg, BinaryOp, 256><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } else { kAggRows<Agg, BinaryOp, 512><<<grid, threads>>>(prevSum->_devData, nvSumAccum->_devData, width, height, nvSumAccum->getLeadingDim(), agg, op); } cutilCheckMsg("agg rows: Kernel execution failed"); cudaThreadSynchronize(); width = numBlocksX; // only true in reduction agg, but for linear agg this doesn't matter anyway if (prevSum != this) { delete prevSum; } prevSum = nvSumAccum; } } } } else { copy(target); } } } void NVMatrix::inRangeInc(float lower, float upper) { inRangeInc(lower, upper, *this); } void NVMatrix::inRangeInc(float lower, float upper, NVMatrix& target) { apply(NVMatrixOps::InRange<false>(lower, upper), target); } void NVMatrix::inRangeExc(float lower, float upper) { inRangeExc(lower, upper, *this); } void NVMatrix::inRangeExc(float lower, float upper, NVMatrix& target) { apply(NVMatrixOps::InRange<true>(lower, upper), target); } void NVMatrix::biggerThanScalar(float scalar) { biggerThanScalar(scalar, *this); } void NVMatrix::biggerThanScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::BiggerThanScalar(scalar), target); } void NVMatrix::smallerThanScalar(float scalar) { smallerThanScalar(scalar, *this); } void NVMatrix::smallerThanScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::SmallerThanScalar(scalar), target); } void NVMatrix::addScalar(float scaleThis, float scalar, NVMatrix& target) { apply(NVMatrixOps::WeightedAddScalar(scaleThis, scalar), target); } void NVMatrix::addScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::AddScalar(scalar), target); } void NVMatrix::addScalar(float scalar) { addScalar(scalar, *this); } void NVMatrix::minWithScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::MinWithScalar(scalar), target); } void NVMatrix::minWithScalar(float scalar) { minWithScalar(scalar, *this); } void NVMatrix::maxWithScalar(float scalar, NVMatrix& target) { apply(NVMatrixOps::MaxWithScalar(scalar), target); } void NVMatrix::maxWithScalar(float scalar) { maxWithScalar(scalar, *this); } void NVMatrix::pow(float p, NVMatrix& target) { apply(NVMatrixOps::Pow(p), target); } void NVMatrix::pow(float p) { pow(p, *this); } void NVMatrix::scale(float _scale) { scale(_scale, *this); } void NVMatrix::scale(float _scale, NVMatrix& target) { if (_scale != 1 || &target != this) { // optimize away scale by 1 apply(NVMatrixOps::MultByScalar(_scale), target); } } template<class Agg, class BinaryOp> NVMatrix& NVMatrix::_aggregate(int axis, Agg agg, BinaryOp op) { NVMatrix *sumVec = new NVMatrix(); _aggregate<Agg, BinaryOp>(axis, *sumVec, agg, op); return *sumVec; } void NVMatrix::max(int axis, NVMatrix& target) { _aggregate(axis, target, NVMatrixAggs::Max(), NVMatrixBinaryOps::Second()); } 
void NVMatrix::addSum(NVMatrix& a, int axis, float scaleThis, float scaleSum) { if (scaleThis != 0) { a._aggregate(axis, *this, NVMatrixAggs::Sum(), NVMatrixBinaryOps::WeightedAdd(scaleThis, scaleSum)); } else { a._aggregate(axis, *this, NVMatrixAggs::Sum(), NVMatrixBinaryOps::SecondScaled(scaleSum)); } } void NVMatrix::sum(int axis, NVMatrix& target) { _aggregate(axis, target, NVMatrixAggs::Sum(), NVMatrixBinaryOps::Second()); } /* void NVMatrix::min(int axis, NVMatrix& target) { _aggregate(axis, target, NVMatrixAggs::Min(), NVMatrixBinaryOps::Second()); } NVMatrix& NVMatrix::max(int axis) { return _aggregate(axis, NVMatrixAggs::Max(), NVMatrixBinaryOps::Second()); } NVMatrix& NVMatrix::sum(int axis) { return _aggregate(axis, NVMatrixAggs::Sum(), NVMatrixBinaryOps::Second()); } NVMatrix& NVMatrix::min(int axis) { return _aggregate(axis, NVMatrixAggs::Min(), NVMatrixBinaryOps::Second()); } */ void NVMatrix::_sum_setParams(int n, dim3* blocks, dim3* threads, int* numCols) { int logn = int(ceil(log(double(n)) / log(2))); *numCols = DIVUP(n, logn); int numThreads = *numCols; *blocks = dim3(DIVUP(numThreads, DP_BLOCKSIZE)); *threads = dim3(DP_BLOCKSIZE); } /* float NVMatrix::mean() { return sum() / getNumElements(); } float NVMatrix::sum() { return _totalAgg(NVMatrixAggs::Sum()); } float NVMatrix::max() { return _totalAgg(NVMatrixAggs::Max()); } float NVMatrix::min() { return _totalAgg(NVMatrixAggs::Min()); } template<class Agg> float NVMatrix::_totalAgg(Agg agg) { assert(isContiguous()); dim3 blocks, threads; int numCols; // Sum most of it on GPU NVMatrix* src = this; for (NVMatrix* target = NULL; src->getNumElements() > CPUSUM_MAX; src = target) { _sum_setParams(src->getNumElements(), &blocks, &threads, &numCols); target = new NVMatrix(1, blocks.x); kTotalAgg<<<blocks, threads>>>(src->getDevData(), target->getDevData(), numCols, src->getNumElements(), agg); cutilCheckMsg("kTotalAgg: Kernel execution failed"); cudaThreadSynchronize(); // not really necessary? delete (src == this ? NULL : src); } Matrix srcCPU(src->getNumRows(), src->getNumCols()); src->copyToHost(srcCPU); if (src->getNumElements() > 1) { // Sum remainder on CPU delete (src == this ? NULL : src); if (typeid(Agg) == typeid(NVMatrixAggs::Sum)) { return srcCPU.sum(); } else if (typeid(Agg) == typeid(NVMatrixAggs::Max)) { return srcCPU.max(); } else if (typeid(Agg) == typeid(NVMatrixAggs::Min)) { return srcCPU.min(); } else { assert(false); } } return srcCPU(0,0); } */ /* * Fast dot product only for matrices with same transposedness. float NVMatrix::dotProduct(NVMatrix& b) { assert(isContiguous() && b.isContiguous()); assert(isSameDims(b)); assert(isTrans() == b.isTrans()); // see? dim3 blocks, threads; int numCols; _sum_setParams(getNumElements(), &blocks, &threads, &numCols); NVMatrix target(1, blocks.x); kDotProduct_r<<<blocks, threads>>>(getDevData(), b.getDevData(), target.getDevData(), numCols, getNumElements()); cutilCheckMsg("kDotProduct: Kernel execution failed"); cudaThreadSynchronize(); return target.sum(); } float NVMatrix::norm2() { return dotProduct(*this); } float NVMatrix::norm() { return sqrt(norm2()); } */ /* void NVMatrix::print(int startRow, int rows, int startCol, int cols) const { cudaThreadSynchronize(); Matrix hm = Matrix(_numRows, _numCols); copyToHost(hm); hm.print(startRow, rows, startCol, cols); } void NVMatrix::print(int rows, int cols) const { print(0, rows, 0, cols); } */ void NVMatrix::printShape(const char* name) const { printf("%s: %dx%d\n", name, _numRows, _numCols); }
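// ---------------------------------------------------------------------------
// Illustrative sketch (not taken from the NVMatrix sources above, CUDA): the
// row aggregation in NVMatrix::_aggregate repeatedly reduces the row width
// until the leading dimension is 1, and its comment notes that the thread
// count has to be a power of two. A minimal standalone version of that idea
// -- one block per row-major row, shared-memory tree reduction -- looks
// roughly like this; the layout, block size and names are assumptions.
#include <cuda_runtime.h>

template <int BLOCK>                                   // BLOCK must be a power of two
__global__ void rowSumSketch(const float* mat, float* out, int width)
{
    __shared__ float buf[BLOCK];
    const float* row = mat + blockIdx.x * width;

    // Stride over the row so arbitrary widths are handled by a fixed block.
    float acc = 0.0f;
    for (int c = threadIdx.x; c < width; c += BLOCK)
        acc += row[c];
    buf[threadIdx.x] = acc;
    __syncthreads();

    // Shared-memory tree reduction -- the reason the thread count has to be
    // a power of two, as the _aggregate comment points out.
    for (int s = BLOCK / 2; s > 0; s >>= 1) {
        if (threadIdx.x < s)
            buf[threadIdx.x] += buf[threadIdx.x + s];
        __syncthreads();
    }
    if (threadIdx.x == 0)
        out[blockIdx.x] = buf[0];
}
// Usage sketch: rowSumSketch<256><<<numRows, 256>>>(d_mat, d_rowSums, numCols);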
1086fa410620f0796df7bde53df140e9077d8eed.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void sumArraysOnGPU(float *A, float *B, float *C) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    C[i] = A[i] + B[i];
}
1086fa410620f0796df7bde53df140e9077d8eed.cu
#include "includes.h" __global__ void sumArraysOnGPU(float *A, float *B, float *C) { int i = blockIdx.x * blockDim.x + threadIdx.x; C[i] = A[i] + B[i]; }
9f5e0c1eebef0b9f316041365224d15cf3d3a953.hip
// !!! This is a file automatically generated by hipify!!! // This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define EIGEN_TEST_NO_LONGDOUBLE #define EIGEN_TEST_FUNC cxx11_tensor_complex #define EIGEN_USE_GPU #if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500 #include <hip/hip_fp16.h> #endif #include "main.h" #include <unsupported/Eigen/CXX11/Tensor> using Eigen::Tensor; void test_cuda_nullary() { Tensor<std::complex<float>, 1, 0, int> in1(2); Tensor<std::complex<float>, 1, 0, int> in2(2); in1.setRandom(); in2.setRandom(); std::size_t float_bytes = in1.size() * sizeof(float); std::size_t complex_bytes = in1.size() * sizeof(std::complex<float>); std::complex<float>* d_in1; std::complex<float>* d_in2; float* d_out2; hipMalloc((void**)(&d_in1), complex_bytes); hipMalloc((void**)(&d_in2), complex_bytes); hipMalloc((void**)(&d_out2), float_bytes); hipMemcpy(d_in1, in1.data(), complex_bytes, hipMemcpyHostToDevice); hipMemcpy(d_in2, in2.data(), complex_bytes, hipMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<std::complex<float>, 1, 0, int>, Eigen::Aligned> gpu_in1( d_in1, 2); Eigen::TensorMap<Eigen::Tensor<std::complex<float>, 1, 0, int>, Eigen::Aligned> gpu_in2( d_in2, 2); Eigen::TensorMap<Eigen::Tensor<float, 1, 0, int>, Eigen::Aligned> gpu_out2( d_out2, 2); gpu_in1.device(gpu_device) = gpu_in1.constant(std::complex<float>(3.14f, 2.7f)); gpu_out2.device(gpu_device) = gpu_in2.abs(); Tensor<std::complex<float>, 1, 0, int> new1(2); Tensor<float, 1, 0, int> new2(2); assert(hipMemcpyAsync(new1.data(), d_in1, complex_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipMemcpyAsync(new2.data(), d_out2, float_bytes, hipMemcpyDeviceToHost, gpu_device.stream()) == hipSuccess); assert(hipStreamSynchronize(gpu_device.stream()) == hipSuccess); for (int i = 0; i < 2; ++i) { VERIFY_IS_APPROX(new1(i), std::complex<float>(3.14f, 2.7f)); VERIFY_IS_APPROX(new2(i), std::abs(in2(i))); } hipFree(d_in1); hipFree(d_in2); hipFree(d_out2); } void test_cxx11_tensor_complex() { CALL_SUBTEST(test_cuda_nullary()); }
9f5e0c1eebef0b9f316041365224d15cf3d3a953.cu
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2016 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #define EIGEN_TEST_NO_LONGDOUBLE #define EIGEN_TEST_FUNC cxx11_tensor_complex #define EIGEN_USE_GPU #if defined __CUDACC_VER__ && __CUDACC_VER__ >= 70500 #include <cuda_fp16.h> #endif #include "main.h" #include <unsupported/Eigen/CXX11/Tensor> using Eigen::Tensor; void test_cuda_nullary() { Tensor<std::complex<float>, 1, 0, int> in1(2); Tensor<std::complex<float>, 1, 0, int> in2(2); in1.setRandom(); in2.setRandom(); std::size_t float_bytes = in1.size() * sizeof(float); std::size_t complex_bytes = in1.size() * sizeof(std::complex<float>); std::complex<float>* d_in1; std::complex<float>* d_in2; float* d_out2; cudaMalloc((void**)(&d_in1), complex_bytes); cudaMalloc((void**)(&d_in2), complex_bytes); cudaMalloc((void**)(&d_out2), float_bytes); cudaMemcpy(d_in1, in1.data(), complex_bytes, cudaMemcpyHostToDevice); cudaMemcpy(d_in2, in2.data(), complex_bytes, cudaMemcpyHostToDevice); Eigen::CudaStreamDevice stream; Eigen::GpuDevice gpu_device(&stream); Eigen::TensorMap<Eigen::Tensor<std::complex<float>, 1, 0, int>, Eigen::Aligned> gpu_in1( d_in1, 2); Eigen::TensorMap<Eigen::Tensor<std::complex<float>, 1, 0, int>, Eigen::Aligned> gpu_in2( d_in2, 2); Eigen::TensorMap<Eigen::Tensor<float, 1, 0, int>, Eigen::Aligned> gpu_out2( d_out2, 2); gpu_in1.device(gpu_device) = gpu_in1.constant(std::complex<float>(3.14f, 2.7f)); gpu_out2.device(gpu_device) = gpu_in2.abs(); Tensor<std::complex<float>, 1, 0, int> new1(2); Tensor<float, 1, 0, int> new2(2); assert(cudaMemcpyAsync(new1.data(), d_in1, complex_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaMemcpyAsync(new2.data(), d_out2, float_bytes, cudaMemcpyDeviceToHost, gpu_device.stream()) == cudaSuccess); assert(cudaStreamSynchronize(gpu_device.stream()) == cudaSuccess); for (int i = 0; i < 2; ++i) { VERIFY_IS_APPROX(new1(i), std::complex<float>(3.14f, 2.7f)); VERIFY_IS_APPROX(new2(i), std::abs(in2(i))); } cudaFree(d_in1); cudaFree(d_in2); cudaFree(d_out2); } void test_cxx11_tensor_complex() { CALL_SUBTEST(test_cuda_nullary()); }
e8c5d8dc7438ba28554a77d20a0e26ce00bd15d3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without *modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, *this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright *notice, this list of conditions and the following disclaimer in the *documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its *contributors may be used to endorse or promote products derived from this *software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT, *INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY *OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TOR (INCLUDING *NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, *EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Demonstrate CUTLASS debugging tool for dumping fragments and shared memory */ /////////////////////////////////////////////////////////////////////////////////////////////////// // Standard Library includes #include <iostream> // // CUTLASS includes // #include "cutlass/aligned_buffer.h" #include "cutlass/gemm/gemm.h" #include "cutlass/layout/matrix.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/core_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/predicated_tile_iterator.h" #include "cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h" #include "cutlass/util/debug.h" #include "cutlass/util/device_dump.h" #define EXAMPLE_MATRIX_ROW 64 #define EXAMPLE_MATRIX_COL 32 /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Element, typename GmemIterator, typename SmemIterator> __global__ void kernel_dump(typename GmemIterator::Params params, typename GmemIterator::TensorRef ref) { extern __shared__ Element shared_storage[]; // Construct the global iterator and load the data to the fragments. int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x; GmemIterator gmem_iterator(params, ref.data(), {EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL}, tb_thread_id); typename GmemIterator::Fragment frag; frag.clear(); gmem_iterator.load(frag); // Call dump_fragment() with different parameters. 
if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nAll threads dump all the elements:\n"); cutlass::debug::dump_fragment(frag); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nFirst thread dumps all the elements:\n"); cutlass::debug::dump_fragment(frag, /*N = */ 1); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nFirst thread dumps first 16 elements:\n"); cutlass::debug::dump_fragment(frag, /*N = */ 1, /*M = */ 16); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nFirst thread dumps first 16 elements with a stride of 8:\n"); cutlass::debug::dump_fragment(frag, /*N = */ 1, /*M = */ 16, /*S = */ 8); // Construct the shared iterator and store the data to the shared memory. SmemIterator smem_iterator( typename SmemIterator::TensorRef( {shared_storage, SmemIterator::Layout::packed( {EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL})}), tb_thread_id); smem_iterator.store(frag); // Call dump_shmem() with different parameters. if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nDump all the elements:\n"); cutlass::debug::dump_shmem(shared_storage, EXAMPLE_MATRIX_ROW * EXAMPLE_MATRIX_COL); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nDump all the elements with a stride of 8:\n"); cutlass::debug::dump_shmem( shared_storage, EXAMPLE_MATRIX_ROW * EXAMPLE_MATRIX_COL, /*S = */ 8); } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Entry point for dump_reg_shmem example. // // usage: // // 02_dump_reg_shmem // int main() { // Initialize a 64x32 column major matrix with sequential data (1,2,3...). using Element = cutlass::half_t; using Layout = cutlass::layout::ColumnMajor; cutlass::HostTensor<Element, Layout> matrix( {EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL}); cutlass::reference::host::BlockFillSequential(matrix.host_data(), matrix.capacity()); // Dump the matrix. std::cout << "Matrix:\n" << matrix.host_view() << "\n"; // Copy the matrix to the device. matrix.sync_device(); // Define a global iterator, a shared iterator and their thread map. using ThreadMap = cutlass::transform::PitchLinearWarpRakedThreadMap< cutlass::layout::PitchLinearShape<EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL>, 32, cutlass::layout::PitchLinearShape<8, 4>, 8>; using GmemIterator = cutlass::transform::threadblock::PredicatedTileIterator< cutlass::MatrixShape<EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL>, Element, Layout, 1, ThreadMap>; typename GmemIterator::Params params(matrix.layout()); using SmemIterator = cutlass::transform::threadblock::RegularTileIterator< cutlass::MatrixShape<EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL>, Element, cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<16, 64>, 1, ThreadMap>; dim3 grid(1, 1); dim3 block(32, 1, 1); int smem_size = int(sizeof(Element) * EXAMPLE_MATRIX_ROW * EXAMPLE_MATRIX_COL); hipLaunchKernelGGL(( kernel_dump<Element, GmemIterator, SmemIterator>) , dim3(grid), dim3(block), smem_size, 0, params, matrix.device_ref()); hipError_t result = hipDeviceSynchronize(); if (result != hipSuccess) { std::cout << "Failed" << std::endl; } return (result == hipSuccess ? 0 : -1); } ///////////////////////////////////////////////////////////////////////////////////////////////////
e8c5d8dc7438ba28554a77d20a0e26ce00bd15d3.cu
/*************************************************************************************************** * Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without *modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright notice, *this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright *notice, this list of conditions and the following disclaimer in the *documentation and/or other materials provided with the distribution. * * Neither the name of the NVIDIA CORPORATION nor the names of its *contributors may be used to endorse or promote products derived from this *software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE *DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE FOR ANY DIRECT, *INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, *DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY *OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TOR (INCLUDING *NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, *EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * **************************************************************************************************/ /*! \file \brief Demonstrate CUTLASS debugging tool for dumping fragments and shared memory */ /////////////////////////////////////////////////////////////////////////////////////////////////// // Standard Library includes #include <iostream> // // CUTLASS includes // #include "cutlass/aligned_buffer.h" #include "cutlass/gemm/gemm.h" #include "cutlass/layout/matrix.h" #include "cutlass/matrix_shape.h" #include "cutlass/numeric_types.h" #include "cutlass/core_io.h" #include "cutlass/util/host_tensor.h" #include "cutlass/util/tensor_view_io.h" #include "cutlass/util/reference/host/gemm.h" #include "cutlass/util/reference/host/tensor_compare.h" #include "cutlass/util/reference/host/tensor_fill.h" #include "cutlass/transform/pitch_linear_thread_map.h" #include "cutlass/transform/threadblock/predicated_tile_iterator.h" #include "cutlass/transform/threadblock/regular_tile_iterator_tensor_op.h" #include "cutlass/util/debug.h" #include "cutlass/util/device_dump.h" #define EXAMPLE_MATRIX_ROW 64 #define EXAMPLE_MATRIX_COL 32 /////////////////////////////////////////////////////////////////////////////////////////////////// template <typename Element, typename GmemIterator, typename SmemIterator> __global__ void kernel_dump(typename GmemIterator::Params params, typename GmemIterator::TensorRef ref) { extern __shared__ Element shared_storage[]; // Construct the global iterator and load the data to the fragments. int tb_thread_id = threadIdx.y * blockDim.x + threadIdx.x; GmemIterator gmem_iterator(params, ref.data(), {EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL}, tb_thread_id); typename GmemIterator::Fragment frag; frag.clear(); gmem_iterator.load(frag); // Call dump_fragment() with different parameters. 
if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nAll threads dump all the elements:\n"); cutlass::debug::dump_fragment(frag); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nFirst thread dumps all the elements:\n"); cutlass::debug::dump_fragment(frag, /*N = */ 1); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nFirst thread dumps first 16 elements:\n"); cutlass::debug::dump_fragment(frag, /*N = */ 1, /*M = */ 16); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nFirst thread dumps first 16 elements with a stride of 8:\n"); cutlass::debug::dump_fragment(frag, /*N = */ 1, /*M = */ 16, /*S = */ 8); // Construct the shared iterator and store the data to the shared memory. SmemIterator smem_iterator( typename SmemIterator::TensorRef( {shared_storage, SmemIterator::Layout::packed( {EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL})}), tb_thread_id); smem_iterator.store(frag); // Call dump_shmem() with different parameters. if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nDump all the elements:\n"); cutlass::debug::dump_shmem(shared_storage, EXAMPLE_MATRIX_ROW * EXAMPLE_MATRIX_COL); if (threadIdx.x == 0 && blockIdx.x == 0) printf("\nDump all the elements with a stride of 8:\n"); cutlass::debug::dump_shmem( shared_storage, EXAMPLE_MATRIX_ROW * EXAMPLE_MATRIX_COL, /*S = */ 8); } /////////////////////////////////////////////////////////////////////////////////////////////////// /// Entry point for dump_reg_shmem example. // // usage: // // 02_dump_reg_shmem // int main() { // Initialize a 64x32 column major matrix with sequential data (1,2,3...). using Element = cutlass::half_t; using Layout = cutlass::layout::ColumnMajor; cutlass::HostTensor<Element, Layout> matrix( {EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL}); cutlass::reference::host::BlockFillSequential(matrix.host_data(), matrix.capacity()); // Dump the matrix. std::cout << "Matrix:\n" << matrix.host_view() << "\n"; // Copy the matrix to the device. matrix.sync_device(); // Define a global iterator, a shared iterator and their thread map. using ThreadMap = cutlass::transform::PitchLinearWarpRakedThreadMap< cutlass::layout::PitchLinearShape<EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL>, 32, cutlass::layout::PitchLinearShape<8, 4>, 8>; using GmemIterator = cutlass::transform::threadblock::PredicatedTileIterator< cutlass::MatrixShape<EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL>, Element, Layout, 1, ThreadMap>; typename GmemIterator::Params params(matrix.layout()); using SmemIterator = cutlass::transform::threadblock::RegularTileIterator< cutlass::MatrixShape<EXAMPLE_MATRIX_ROW, EXAMPLE_MATRIX_COL>, Element, cutlass::layout::ColumnMajorTensorOpMultiplicandCongruous<16, 64>, 1, ThreadMap>; dim3 grid(1, 1); dim3 block(32, 1, 1); int smem_size = int(sizeof(Element) * EXAMPLE_MATRIX_ROW * EXAMPLE_MATRIX_COL); kernel_dump<Element, GmemIterator, SmemIterator> <<<grid, block, smem_size, 0>>>(params, matrix.device_ref()); cudaError_t result = cudaDeviceSynchronize(); if (result != cudaSuccess) { std::cout << "Failed" << std::endl; } return (result == cudaSuccess ? 0 : -1); } ///////////////////////////////////////////////////////////////////////////////////////////////////
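// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the CUTLASS example above, CUDA): the same
// debugging idea -- stage a tile in shared memory, then let a single thread
// print it -- without the CUTLASS iterators or cutlass::debug helpers. The
// tile size and launch shape are placeholders; device-side printf needs
// compute capability 2.0 or newer.
#include <cuda_runtime.h>
#include <cstdio>

__global__ void dumpSmemSketch(const float* in, int n)
{
    extern __shared__ float tile[];

    // Cooperative copy of n elements into dynamic shared memory.
    for (int i = threadIdx.x; i < n; i += blockDim.x)
        tile[i] = in[i];
    __syncthreads();

    // A single writer keeps the output readable, mirroring the "first thread
    // dumps ..." variants of cutlass::debug::dump_fragment above.
    if (threadIdx.x == 0) {
        printf("shared-memory tile (%d elements):\n", n);
        for (int i = 0; i < n; ++i)
            printf("%8.2f%s", tile[i], (i % 8 == 7) ? "\n" : " ");
        printf("\n");
    }
}
// Usage sketch: dumpSmemSketch<<<1, 128, n * sizeof(float)>>>(d_in, n);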
b71d7d4af144051b665c273a71f1e9969b1e8098.hip
// !!! This is a file automatically generated by hipify!!! #include "Prerequisites.cuh" #include "Generics.cuh" #include "Helper.cuh" #include "Masking.cuh" namespace gtom { ////////////////////////////////////// //Equivalent of TOM's tom_dev method// ////////////////////////////////////// template <class Tmask> void d_Dev(tfloat* d_input, imgstats5* d_output, size_t elements, Tmask* d_mask, int batch) { size_t denseelements = elements; tfloat* d_denseinput = d_input; if (d_mask != NULL) { size_t* d_mapforward = NULL; d_MaskSparseToDense(d_mask, &d_mapforward, NULL, denseelements, elements); if (denseelements == 0) throw; tfloat* d_remapped; hipMalloc((void**)&d_remapped, denseelements * batch * sizeof(tfloat)); d_Remap(d_input, d_mapforward, d_remapped, denseelements, elements, (tfloat)0, batch); d_denseinput = d_remapped; } tfloat* d_mins; hipMalloc((void**)&d_mins, batch * sizeof(tfloat)); tfloat* d_maxs; hipMalloc((void**)&d_maxs, batch * sizeof(tfloat)); tfloat* d_means; hipMalloc((void**)&d_means, batch * sizeof(tfloat)); tfloat* d_meancentered; hipMalloc((void**)&d_meancentered, denseelements * batch * sizeof(tfloat)); tfloat* d_vars; hipMalloc((void**)&d_vars, batch * sizeof(tfloat)); tfloat* d_devs; hipMalloc((void**)&d_devs, batch * sizeof(tfloat)); d_SumMinMax(d_denseinput, d_means, d_mins, d_maxs, denseelements, batch); d_MultiplyByScalar(d_means, d_means, batch, (tfloat)1 / (tfloat)denseelements); d_SquaredDistanceFromScalar(d_denseinput, d_means, d_meancentered, denseelements, batch); d_Sum(d_meancentered, d_vars, denseelements, batch); d_MultiplyByScalar(d_vars, d_vars, batch, (tfloat)1 / (tfloat)denseelements); d_Sqrt(d_vars, d_devs, batch); tfloat** h_fields = (tfloat**)malloc(5 * sizeof(tfloat*)); h_fields[0] = d_means; h_fields[1] = d_mins; h_fields[2] = d_maxs; h_fields[3] = d_devs; h_fields[4] = d_vars; tfloat** d_fields = (tfloat**)CudaMallocFromHostArray(h_fields, 5 * sizeof(tfloat*)); d_JoinInterleaved<tfloat, 5>(d_fields, (tfloat*)d_output, batch); if (d_denseinput != d_input) hipFree(d_denseinput); hipFree(d_fields); hipFree(d_means); hipFree(d_mins); hipFree(d_maxs); hipFree(d_meancentered); hipFree(d_vars); hipFree(d_devs); } template void d_Dev<tfloat>(tfloat* d_input, imgstats5* d_output, size_t elements, tfloat* d_mask, int batch); template void d_Dev<int>(tfloat* d_input, imgstats5* d_output, size_t elements, int* d_mask, int batch); template void d_Dev<char>(tfloat* d_input, imgstats5* d_output, size_t elements, char* d_mask, int batch); template void d_Dev<bool>(tfloat* d_input, imgstats5* d_output, size_t elements, bool* d_mask, int batch); }
b71d7d4af144051b665c273a71f1e9969b1e8098.cu
#include "Prerequisites.cuh" #include "Generics.cuh" #include "Helper.cuh" #include "Masking.cuh" namespace gtom { ////////////////////////////////////// //Equivalent of TOM's tom_dev method// ////////////////////////////////////// template <class Tmask> void d_Dev(tfloat* d_input, imgstats5* d_output, size_t elements, Tmask* d_mask, int batch) { size_t denseelements = elements; tfloat* d_denseinput = d_input; if (d_mask != NULL) { size_t* d_mapforward = NULL; d_MaskSparseToDense(d_mask, &d_mapforward, NULL, denseelements, elements); if (denseelements == 0) throw; tfloat* d_remapped; cudaMalloc((void**)&d_remapped, denseelements * batch * sizeof(tfloat)); d_Remap(d_input, d_mapforward, d_remapped, denseelements, elements, (tfloat)0, batch); d_denseinput = d_remapped; } tfloat* d_mins; cudaMalloc((void**)&d_mins, batch * sizeof(tfloat)); tfloat* d_maxs; cudaMalloc((void**)&d_maxs, batch * sizeof(tfloat)); tfloat* d_means; cudaMalloc((void**)&d_means, batch * sizeof(tfloat)); tfloat* d_meancentered; cudaMalloc((void**)&d_meancentered, denseelements * batch * sizeof(tfloat)); tfloat* d_vars; cudaMalloc((void**)&d_vars, batch * sizeof(tfloat)); tfloat* d_devs; cudaMalloc((void**)&d_devs, batch * sizeof(tfloat)); d_SumMinMax(d_denseinput, d_means, d_mins, d_maxs, denseelements, batch); d_MultiplyByScalar(d_means, d_means, batch, (tfloat)1 / (tfloat)denseelements); d_SquaredDistanceFromScalar(d_denseinput, d_means, d_meancentered, denseelements, batch); d_Sum(d_meancentered, d_vars, denseelements, batch); d_MultiplyByScalar(d_vars, d_vars, batch, (tfloat)1 / (tfloat)denseelements); d_Sqrt(d_vars, d_devs, batch); tfloat** h_fields = (tfloat**)malloc(5 * sizeof(tfloat*)); h_fields[0] = d_means; h_fields[1] = d_mins; h_fields[2] = d_maxs; h_fields[3] = d_devs; h_fields[4] = d_vars; tfloat** d_fields = (tfloat**)CudaMallocFromHostArray(h_fields, 5 * sizeof(tfloat*)); d_JoinInterleaved<tfloat, 5>(d_fields, (tfloat*)d_output, batch); if (d_denseinput != d_input) cudaFree(d_denseinput); cudaFree(d_fields); cudaFree(d_means); cudaFree(d_mins); cudaFree(d_maxs); cudaFree(d_meancentered); cudaFree(d_vars); cudaFree(d_devs); } template void d_Dev<tfloat>(tfloat* d_input, imgstats5* d_output, size_t elements, tfloat* d_mask, int batch); template void d_Dev<int>(tfloat* d_input, imgstats5* d_output, size_t elements, int* d_mask, int batch); template void d_Dev<char>(tfloat* d_input, imgstats5* d_output, size_t elements, char* d_mask, int batch); template void d_Dev<bool>(tfloat* d_input, imgstats5* d_output, size_t elements, bool* d_mask, int batch); }
df083e2c977a422dfadc81d80909999a07e88d48.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "Utilities.cuh" using namespace std; #define NUM_THREADS 32 #define NUM_BLOCKS 16 #define NUM_STREAMS 3 __global__ void kernel(const int *in, int *out, int N) { int start = blockIdx.x * blockDim.x + threadIdx.x; int end = N; for (int i = start; i < end; i += blockDim.x * gridDim.x) { out[i] = in[i] * in[i]; } } int main() { const int N = 6000000; // --- Host side input data allocation and initialization. Registering host memory as page-locked (required for asynch hipMemcpyAsync). int *h_in = new int[N]; for(int i = 0; i < N; i++) h_in[i] = 5; gpuErrchk(hipHostRegister(h_in, N * sizeof(int), hipHostRegisterPortable)); // --- Host side input data allocation and initialization. Registering host memory as page-locked (required for asynch hipMemcpyAsync). int *h_out = new int[N]; for(int i = 0; i < N; i++) h_out[i] = 0; gpuErrchk(hipHostRegister(h_out, N * sizeof(int), hipHostRegisterPortable)); // --- Host side check results vector allocation and initialization int *h_checkResults = new int[N]; for(int i = 0; i < N; i++) h_checkResults[i] = h_in[i] * h_in[i]; // --- Device side input data allocation. int *d_in = 0; gpuErrchk(hipMalloc((void **)&d_in, N * sizeof(int))); // --- Device side output data allocation. int *d_out = 0; gpuErrchk( hipMalloc((void **)&d_out, N * sizeof(int))); int streamSize = N / NUM_STREAMS; size_t streamMemSize = N * sizeof(int) / NUM_STREAMS; // --- Set kernel launch configuration dim3 nThreads = dim3(NUM_THREADS,1,1); dim3 nBlocks = dim3(NUM_BLOCKS, 1,1); dim3 subKernelBlock = dim3((int)ceil((float)nBlocks.x / 2)); // --- Create CUDA streams hipStream_t streams[NUM_STREAMS]; for(int i = 0; i < NUM_STREAMS; i++) gpuErrchk(hipStreamCreate(&streams[i])); /**************************/ /* BREADTH-FIRST APPROACH */ /**************************/ int offset = 0; hipMemcpyAsync(&d_in[offset], &h_in[offset], streamMemSize, hipMemcpyHostToDevice, 0); for(int i = 1; i < NUM_STREAMS; i++) { int offset = i * streamSize; hipMemcpyAsync(&d_in[offset], &h_in[offset], streamMemSize, hipMemcpyHostToDevice, streams[i]); } hipLaunchKernelGGL(( kernel), dim3(subKernelBlock), dim3(nThreads), 0, 0, &d_in[offset], &d_out[offset], streamSize/2); hipLaunchKernelGGL(( kernel), dim3(subKernelBlock), dim3(nThreads), 0, 0, &d_in[offset + streamSize/2], &d_out[offset + streamSize/2], streamSize/2); for(int i = 1; i < NUM_STREAMS; i++) { int offset = i * streamSize; hipLaunchKernelGGL(( kernel), dim3(subKernelBlock), dim3(nThreads), 0, streams[i], &d_in[offset], &d_out[offset], streamSize/2); hipLaunchKernelGGL(( kernel), dim3(subKernelBlock), dim3(nThreads), 0, streams[i], &d_in[offset + streamSize/2], &d_out[offset + streamSize/2], streamSize/2); } for(int i = 1; i < NUM_STREAMS; i++) { int offset = i * streamSize; hipMemcpyAsync(&h_out[offset], &d_out[offset], streamMemSize, hipMemcpyDeviceToHost, streams[i]); } hipMemcpyAsync(&h_out[offset], &d_out[offset], streamMemSize, hipMemcpyDeviceToHost, 0); for(int i = 1; i < NUM_STREAMS; i++) { int offset = i * streamSize; hipMemcpyAsync(&h_out[offset], &d_out[offset], streamMemSize, hipMemcpyDeviceToHost, 0); } for(int i = 0; i < NUM_STREAMS; i++) gpuErrchk(hipStreamSynchronize(streams[i])); gpuErrchk(hipDeviceSynchronize()); // --- Release resources gpuErrchk(hipHostUnregister(h_in)); gpuErrchk(hipHostUnregister(h_out)); gpuErrchk(hipFree(d_in)); gpuErrchk(hipFree(d_out)); for(int 
i = 0; i < NUM_STREAMS; i++) gpuErrchk(hipStreamDestroy(streams[i])); hipDeviceReset(); // --- GPU output check int sum = 0; for(int i = 0; i < N; i++) sum += h_checkResults[i] - h_out[i]; cout << "Error between CPU and GPU: " << sum << endl; delete[] h_in; delete[] h_out; delete[] h_checkResults; return 0; }
df083e2c977a422dfadc81d80909999a07e88d48.cu
#include <iostream> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include "Utilities.cuh" using namespace std; #define NUM_THREADS 32 #define NUM_BLOCKS 16 #define NUM_STREAMS 3 __global__ void kernel(const int *in, int *out, int N) { int start = blockIdx.x * blockDim.x + threadIdx.x; int end = N; for (int i = start; i < end; i += blockDim.x * gridDim.x) { out[i] = in[i] * in[i]; } } int main() { const int N = 6000000; // --- Host side input data allocation and initialization. Registering host memory as page-locked (required for asynch cudaMemcpyAsync). int *h_in = new int[N]; for(int i = 0; i < N; i++) h_in[i] = 5; gpuErrchk(cudaHostRegister(h_in, N * sizeof(int), cudaHostRegisterPortable)); // --- Host side input data allocation and initialization. Registering host memory as page-locked (required for asynch cudaMemcpyAsync). int *h_out = new int[N]; for(int i = 0; i < N; i++) h_out[i] = 0; gpuErrchk(cudaHostRegister(h_out, N * sizeof(int), cudaHostRegisterPortable)); // --- Host side check results vector allocation and initialization int *h_checkResults = new int[N]; for(int i = 0; i < N; i++) h_checkResults[i] = h_in[i] * h_in[i]; // --- Device side input data allocation. int *d_in = 0; gpuErrchk(cudaMalloc((void **)&d_in, N * sizeof(int))); // --- Device side output data allocation. int *d_out = 0; gpuErrchk( cudaMalloc((void **)&d_out, N * sizeof(int))); int streamSize = N / NUM_STREAMS; size_t streamMemSize = N * sizeof(int) / NUM_STREAMS; // --- Set kernel launch configuration dim3 nThreads = dim3(NUM_THREADS,1,1); dim3 nBlocks = dim3(NUM_BLOCKS, 1,1); dim3 subKernelBlock = dim3((int)ceil((float)nBlocks.x / 2)); // --- Create CUDA streams cudaStream_t streams[NUM_STREAMS]; for(int i = 0; i < NUM_STREAMS; i++) gpuErrchk(cudaStreamCreate(&streams[i])); /**************************/ /* BREADTH-FIRST APPROACH */ /**************************/ int offset = 0; cudaMemcpyAsync(&d_in[offset], &h_in[offset], streamMemSize, cudaMemcpyHostToDevice, 0); for(int i = 1; i < NUM_STREAMS; i++) { int offset = i * streamSize; cudaMemcpyAsync(&d_in[offset], &h_in[offset], streamMemSize, cudaMemcpyHostToDevice, streams[i]); } kernel<<<subKernelBlock, nThreads>>>(&d_in[offset], &d_out[offset], streamSize/2); kernel<<<subKernelBlock, nThreads>>>(&d_in[offset + streamSize/2], &d_out[offset + streamSize/2], streamSize/2); for(int i = 1; i < NUM_STREAMS; i++) { int offset = i * streamSize; kernel<<<subKernelBlock, nThreads, 0, streams[i]>>>(&d_in[offset], &d_out[offset], streamSize/2); kernel<<<subKernelBlock, nThreads, 0, streams[i]>>>(&d_in[offset + streamSize/2], &d_out[offset + streamSize/2], streamSize/2); } for(int i = 1; i < NUM_STREAMS; i++) { int offset = i * streamSize; cudaMemcpyAsync(&h_out[offset], &d_out[offset], streamMemSize, cudaMemcpyDeviceToHost, streams[i]); } cudaMemcpyAsync(&h_out[offset], &d_out[offset], streamMemSize, cudaMemcpyDeviceToHost, 0); for(int i = 1; i < NUM_STREAMS; i++) { int offset = i * streamSize; cudaMemcpyAsync(&h_out[offset], &d_out[offset], streamMemSize, cudaMemcpyDeviceToHost, 0); } for(int i = 0; i < NUM_STREAMS; i++) gpuErrchk(cudaStreamSynchronize(streams[i])); gpuErrchk(cudaDeviceSynchronize()); // --- Release resources gpuErrchk(cudaHostUnregister(h_in)); gpuErrchk(cudaHostUnregister(h_out)); gpuErrchk(cudaFree(d_in)); gpuErrchk(cudaFree(d_out)); for(int i = 0; i < NUM_STREAMS; i++) gpuErrchk(cudaStreamDestroy(streams[i])); cudaDeviceReset(); // --- GPU output check int sum = 0; for(int i = 0; i < N; i++) sum += 
h_checkResults[i] - h_out[i]; cout << "Error between CPU and GPU: " << sum << endl; delete[] h_in; delete[] h_out; delete[] h_checkResults; return 0; }
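// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the streams example above, CUDA): the
// example issues work "breadth-first" (all H2D copies, then all kernels, then
// all D2H copies across streams). The alternative "depth-first" ordering
// keeps each stream's copy/compute/copy chain together; which order overlaps
// better depends on the device's copy-engine and kernel-queue layout. Names,
// sizes and the kernel body are placeholders, and h_in/h_out must be pinned
// (cudaMallocHost or cudaHostRegister) for the async copies to overlap.
#include <cuda_runtime.h>

__global__ void squareSketch(const int* in, int* out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = in[i] * in[i];
}

void depthFirstSketch(const int* h_in, int* h_out, int* d_in, int* d_out,
                      cudaStream_t* streams, int nStreams, int chunk)
{
    const size_t bytes = chunk * sizeof(int);
    for (int i = 0; i < nStreams; ++i) {
        const int off = i * chunk;
        cudaMemcpyAsync(d_in + off, h_in + off, bytes, cudaMemcpyHostToDevice, streams[i]);
        squareSketch<<<(chunk + 255) / 256, 256, 0, streams[i]>>>(d_in + off, d_out + off, chunk);
        cudaMemcpyAsync(h_out + off, d_out + off, bytes, cudaMemcpyDeviceToHost, streams[i]);
    }
    for (int i = 0; i < nStreams; ++i)
        cudaStreamSynchronize(streams[i]);
}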
d593266c8d39d2fbe6d2de7daf4d1fb095501c0b.hip
// !!! This is a file automatically generated by hipify!!! /*************************************************************************** * * (C) Copyright 2010 The Board of Trustees of the * University of Illinois * All Rights Reserved * ***************************************************************************/ #include <stdio.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime_api.h> #include <math.h> #include <stdlib.h> #include <string.h> #include "parboil.h" #include "UDTypes.h" #include "scanLargeArray.h" #include "GPU_kernels.cu" #include "CPU_kernels.h" #define USE_CUDPP 0 #if USE_CUDPP #include "cudpp.h" #else #include "sort.h" #include "scanLargeArray.h" #endif #define BLOCKSIZE 512 #define PI 3.14159265359 #define CUERR \ do { \ hipError_t err; \ if ((err = hipGetLastError()) != hipSuccess) { \ printf("CUDA error: %s, line %d\n", hipGetErrorString(err), __LINE__); \ return; \ } \ } while (0) /*********************************************************************** * CUDA_interface is the main function for GPU execution. This * implementation uses compact binning to distribute input elements * into unit-cubed sized bins. The bins are then visited by GPU * threads, where every thread computes the value of one (or small set) * of output elements by computing the contributions of elements in * neighboring bins to these output elements. * * The bins have a limited bin size and everything beyond that bin size * is offloaded to the CPU to be computed in parallel with the GPU * gridding. ***********************************************************************/ void CUDA_interface ( struct pb_TimerSet* timers, unsigned int n, // Number of input elements parameters params, // Parameter struct which defines output gridSize, cutoff distance, etc. ReconstructionSample* sample, // Array of input elements float* LUT, // Precomputed LUT table of Kaiser-Bessel function. // Used for computation on CPU instead of using the function every time int sizeLUT, // Size of LUT cmplx* gridData, // Array of output grid points. Each element has a real and imaginary component float* sampleDensity // Array of same size as gridData couting the number of contributions // to each grid point in the gridData array ){ /* Initializing all variables */ dim3 dims (8,4,2); //size of a gridding block on the GPU /* x, y, z dimensions of the output grid (gridData) */ int size_x = params.gridSize[0]; int size_y = params.gridSize[1]; int size_z = params.gridSize[2]; int size_xy = size_y*size_x; int gridNumElems = size_x * size_y * size_z; // Total number of grid points float beta = PI * sqrt(4*params.kernelWidth*params.kernelWidth/(params.oversample*params.oversample) * (params.oversample-.5)*(params.oversample-.5)-.8); float cutoff = float(params.kernelWidth)/2.0; // cutoff radius float cutoff2 = cutoff*cutoff; // square of cutoff radius float _1overCutoff2 = 1/cutoff2; // 1 over square of cutoff radius /* Declarations of device data structures */ ReconstructionSample* sample_d = NULL; // Device array for original input array ReconstructionSample* sortedSample_d = NULL; // Device array of the sorted (into bins) input elements. // This array is accessed by sortedSampleSoA_d in a structure // of arrays manner. 
float2* gridData_d = NULL; // Device array for output grid float* sampleDensity_d = NULL; // Device array for output sample density unsigned int* idxKey_d = NULL; // Array of bin indeces generated in the binning kernel // and used to sort the input elements into their // corresponding bins unsigned int* idxValue_d = NULL; // This array holds the indices of input elements in the // the original array. This array is sorted using the // the idxKey_d array, and once sorted, it is used in // the reorder kernel to move the actual elements into // their corresponding bins. unsigned int* binCount_d = NULL; // Zero-initialized array which counts the number of elements // put in each bin. Based on this array, we determine which // elements get offloaded to the CPU unsigned int* binStartAddr_d = NULL; // Array of start offset of each of the compact bins /* Allocating device memory */ pb_SwitchToTimer(timers, pb_TimerID_COPY); hipMalloc((void**)&sortedSample_d, n*sizeof(ReconstructionSample)); hipMalloc((void**)&binStartAddr_d, (gridNumElems+1)*sizeof(unsigned int)); hipMalloc((void**)&sample_d, n*sizeof(ReconstructionSample)); hipMalloc((void**)&idxKey_d, (((n+3)/4)*4)*sizeof(unsigned int)); //Pad to nearest multiple of 4 to hipMalloc((void**)&idxValue_d, (((n+3)/4)*4)*sizeof(unsigned int)); //satisfy a property of the sorting kernel. /*The CUDPP library features highly optimizes implementations for radix sort and prefix sum. However for portability reasons, we implemented our own, slightly less optimized versions of these operations. When performing prefix sum using CUDPP, the output array has to be different from the input array, which is why we would allocate an array for binCount_d. For our implementation, we allow the input and output arrays to be the same, therefore we reuse the binCount_d array to get the starting offset of each bin. */ #if USE_CUDPP hipMalloc((void**)&binCount_d, (gridNumElems+1)*sizeof(unsigned int)); #else binCount_d = binStartAddr_d; #endif CUERR; /* Transfering data from Host to Device */ hipMemcpyToSymbol(cutoff2_c, &cutoff2, sizeof(float), 0); hipMemcpyToSymbol(cutoff_c, &cutoff, sizeof(float), 0); hipMemcpyToSymbol(gridSize_c, params.gridSize, 3*sizeof(int), 0); hipMemcpyToSymbol(size_xy_c, &size_xy, sizeof(int), 0); hipMemcpyToSymbol(_1overCutoff2_c, &_1overCutoff2, sizeof(float), 0); hipMemcpy(sample_d, sample, n*sizeof(ReconstructionSample), hipMemcpyHostToDevice); hipMemset(binCount_d, 0, (gridNumElems+1)*sizeof(unsigned int)); // Initialize padding to max integer value, so that when sorted, // these elements get pushed to the end of the array. hipMemset(idxKey_d+n, 0xFF, (((n+3)&~(3))-n)*sizeof(unsigned int)); pb_SwitchToTimer(timers, pb_TimerID_KERNEL); /* STEP 1: Perform binning. This kernel determines which output bin each input element * goes into. 
Any excess (beyond binsize) is put in the CPU bin */ dim3 block1 (BLOCKSIZE); dim3 grid1 ((n+BLOCKSIZE-1)/BLOCKSIZE); hipLaunchKernelGGL(( binning_kernel), dim3(grid1), dim3(block1), 0, 0, n, sample_d, idxKey_d, idxValue_d, binCount_d, params.binsize, gridNumElems); /* STEP 2: Sort the index-value pair generate in the binning kernel */ #if USE_CUDPP CUDPPConfiguration config; config.datatype = CUDPP_UINT; config.algorithm = CUDPP_SORT_RADIX; config.options = CUDPP_OPTION_KEY_VALUE_PAIRS; CUDPPHandle sortplan = 0; CUDPPResult result = cudppPlan(&sortplan, config, n, 1, 0); int precision = 0; int numElems = gridNumElems; while (numElems > 0){ numElems >>= 1; precision++; } cudppSort(sortplan, idxKey_d, idxValue_d, int(precision), n); result = cudppDestroyPlan(sortplan); #else sort(n, gridNumElems+1, idxKey_d, idxValue_d); #endif /* STEP 3: Reorder the input data, based on the sorted values from Step 2. * this step also involves changing the data from array of structs to a struct * of arrays. Also in this kernel, we populate an array with the starting index * of every output bin features in the input array, based on the sorted indices * from Step 2. * At the end of this step, we copy the start address and list of input elements * that will be computed on the CPU. */ hipLaunchKernelGGL(( reorder_kernel), dim3(grid1),dim3(block1), 0, 0, n, idxValue_d, sample_d, sortedSample_d); pb_SwitchToTimer(timers, pb_TimerID_COPY); hipFree(idxValue_d); hipFree(idxKey_d); hipFree(sample_d); pb_SwitchToTimer(timers, pb_TimerID_KERNEL); /* STEP 4: In this step we generate the ADD scan of the array of starting indices * of the output bins. The result is an array that contains the starting address of * every output bin. */ #if USE_CUDPP config.datatype = CUDPP_UINT; config.algorithm = CUDPP_SCAN; config.options = CUDPP_OPTION_EXCLUSIVE; config.op=CUDPP_ADD; CUDPPHandle scanplan = 0; result = cudppPlan(&scanplan, config, gridNumElems+1, 1, 0); cudppScan(scanplan, binCount_d, binStartAddr_d, gridNumElems+1); result = cudppDestroyPlan(scanplan); #else scanLargeArray(gridNumElems+1, binCount_d); #endif pb_SwitchToTimer(timers, pb_TimerID_COPY); // Copy back to the CPU the indices of the input elements that will be processed on the CPU int cpuStart; hipMemcpy(&cpuStart, binCount_d+gridNumElems, sizeof(unsigned int), hipMemcpyDeviceToHost); int CPUbin_size = int(n)-int(cpuStart); ReconstructionSample* CPUbin; hipHostMalloc((void**)&CPUbin,CPUbin_size*sizeof(ReconstructionSample)); hipMemcpy(CPUbin, sortedSample_d+cpuStart, CPUbin_size*sizeof(ReconstructionSample), hipMemcpyDeviceToHost); #if USE_CUDPP hipFree(binCount_d); #endif /* STEP 5: Perform the binning on the GPU. The results are computed in a gather fashion * where each thread computes the value of one output element by reading the relevant * bins. 
  /* STEP 5: Perform the gridding on the GPU. The results are computed in a
   * gather fashion, where each thread computes the value of one output element
   * by reading the relevant bins.
   */
  hipMalloc((void**)&gridData_d, gridNumElems*sizeof(float2));
  hipMalloc((void**)&sampleDensity_d, gridNumElems*sizeof(float));
  CUERR;

  hipMemset(gridData_d, 0, gridNumElems*sizeof(float2));
  hipMemset(sampleDensity_d, 0, gridNumElems*sizeof(float));

  pb_SwitchToTimer(timers, pb_TimerID_KERNEL);

  dim3 block2 (dims.x,dims.y,dims.z);
  dim3 grid2 (size_x/dims.x, (size_y*size_z)/(dims.y*dims.z));

  hipLaunchKernelGGL(( gridding_GPU), dim3(grid2), dim3(block2), 0, 0, sortedSample_d, binStartAddr_d, gridData_d, sampleDensity_d, beta);

  pb_SwitchToTimer(timers, pb_TimerID_COPY);

  /* Copying the results from the Device to the Host */
  hipMemcpy(sampleDensity, sampleDensity_d, gridNumElems*sizeof(float),hipMemcpyDeviceToHost);
  hipMemcpy(gridData, gridData_d, gridNumElems*sizeof(float2),hipMemcpyDeviceToHost);

  pb_SwitchToTimer(timers, pb_TimerID_COMPUTE);

  /* STEP 6: Computing the contributions of the sample points handled by the
   * Host and adding those to the GPU results.
   */
  gridding_Gold(CPUbin_size, params, CPUbin, LUT, sizeLUT, gridData, sampleDensity);

  pb_SwitchToTimer(timers, pb_TimerID_COPY);

  hipHostFree(CPUbin);
  hipFree(gridData_d);
  hipFree(sampleDensity_d);
  hipFree(binStartAddr_d);  // freeing via binStartAddr_d avoids a double free of binCount_d
                            // when USE_CUDPP is enabled (the two pointers alias when it is not)
  hipFree(sortedSample_d);

  pb_SwitchToTimer(timers, pb_TimerID_NONE);

  return;
}
d593266c8d39d2fbe6d2de7daf4d1fb095501c0b.cu
/***************************************************************************
 *
 *            (C) Copyright 2010 The Board of Trustees of the
 *                        University of Illinois
 *                         All Rights Reserved
 *
 ***************************************************************************/

#include <stdio.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>

#include "parboil.h"

#include "UDTypes.h"
#include "scanLargeArray.h"
#include "GPU_kernels.cu"
#include "CPU_kernels.h"

#define USE_CUDPP 0

#if USE_CUDPP
#include "cudpp.h"
#else
#include "sort.h"
#include "scanLargeArray.h"
#endif

#define BLOCKSIZE 512
#define PI 3.14159265359

#define CUERR \
  do { \
    cudaError_t err; \
    if ((err = cudaGetLastError()) != cudaSuccess) { \
      printf("CUDA error: %s, line %d\n", cudaGetErrorString(err), __LINE__); \
      return; \
    } \
  } while (0)

/***********************************************************************
 * CUDA_interface is the main function for GPU execution. This
 * implementation uses compact binning to distribute input elements
 * into unit-cube-sized bins. The bins are then visited by GPU
 * threads, where every thread computes the value of one (or a small set)
 * of output elements by computing the contributions of elements in
 * neighboring bins to these output elements.
 *
 * The bins have a limited bin size, and everything beyond that bin size
 * is offloaded to the CPU to be computed in parallel with the GPU
 * gridding.
 ***********************************************************************/
void CUDA_interface (
  struct pb_TimerSet* timers,
  unsigned int n,               // Number of input elements
  parameters params,            // Parameter struct which defines output gridSize, cutoff distance, etc.
  ReconstructionSample* sample, // Array of input elements
  float* LUT,                   // Precomputed LUT table of the Kaiser-Bessel function,
                                // used for the CPU computation instead of evaluating the function every time
  int sizeLUT,                  // Size of the LUT
  cmplx* gridData,              // Array of output grid points. Each element has a real and an imaginary component
  float* sampleDensity          // Array of the same size as gridData counting the number of contributions
                                // to each grid point in the gridData array
){
  /* Initializing all variables */
  dim3 dims (8,4,2);            // size of a gridding block on the GPU

  /* x, y, z dimensions of the output grid (gridData) */
  int size_x = params.gridSize[0];
  int size_y = params.gridSize[1];
  int size_z = params.gridSize[2];
  int size_xy = size_y*size_x;

  int gridNumElems = size_x * size_y * size_z;  // Total number of grid points

  float beta = PI * sqrt(4*params.kernelWidth*params.kernelWidth/(params.oversample*params.oversample) * (params.oversample-.5)*(params.oversample-.5)-.8);

  float cutoff = float(params.kernelWidth)/2.0;  // cutoff radius
  float cutoff2 = cutoff*cutoff;                 // square of the cutoff radius
  float _1overCutoff2 = 1/cutoff2;               // 1 over the square of the cutoff radius
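  /* Note added for clarity (not part of the original Parboil source): the
   * kernel-width parameters are precomputed once on the host. cutoff2 and
   * _1overCutoff2 are copied to device symbols below (cutoff2_c,
   * _1overCutoff2_c), which presumably lets the gridding kernel compare
   * squared sample-to-grid-point distances directly against cutoff2 and scale
   * by 1/cutoff2 without a square root or a division per contribution.
   * For example, with params.kernelWidth = 4: cutoff = 2.0, cutoff2 = 4.0,
   * _1overCutoff2 = 0.25.
   */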
  /* Declarations of device data structures */
  ReconstructionSample* sample_d = NULL;        // Device array for the original input array
  ReconstructionSample* sortedSample_d = NULL;  // Device array of the input elements sorted into bins.
                                                // This array is accessed by sortedSampleSoA_d in a
                                                // structure-of-arrays manner.
  float2* gridData_d = NULL;                    // Device array for the output grid
  float* sampleDensity_d = NULL;                // Device array for the output sample density
  unsigned int* idxKey_d = NULL;                // Array of bin indices generated in the binning kernel
                                                // and used to sort the input elements into their
                                                // corresponding bins
  unsigned int* idxValue_d = NULL;              // This array holds the indices of the input elements in
                                                // the original array. It is sorted using the idxKey_d
                                                // array and, once sorted, it is used in the reorder
                                                // kernel to move the actual elements into their
                                                // corresponding bins.
  unsigned int* binCount_d = NULL;              // Zero-initialized array which counts the number of elements
                                                // put in each bin. Based on this array, we determine which
                                                // elements get offloaded to the CPU.
  unsigned int* binStartAddr_d = NULL;          // Array of starting offsets of the compact bins

  /* Allocating device memory */
  pb_SwitchToTimer(timers, pb_TimerID_COPY);

  cudaMalloc((void**)&sortedSample_d, n*sizeof(ReconstructionSample));
  cudaMalloc((void**)&binStartAddr_d, (gridNumElems+1)*sizeof(unsigned int));
  cudaMalloc((void**)&sample_d, n*sizeof(ReconstructionSample));
  cudaMalloc((void**)&idxKey_d, (((n+3)/4)*4)*sizeof(unsigned int));   // Pad to the nearest multiple of 4 to
  cudaMalloc((void**)&idxValue_d, (((n+3)/4)*4)*sizeof(unsigned int)); // satisfy a property of the sorting kernel.

  /* The CUDPP library provides highly optimized implementations of radix sort
   * and prefix sum. However, for portability reasons, we implemented our own,
   * slightly less optimized versions of these operations. When performing a
   * prefix sum with CUDPP, the output array has to be different from the input
   * array, which is why we would allocate a separate array for binCount_d. Our
   * implementation allows the input and output arrays to be the same, so we
   * reuse the binCount_d array to hold the starting offset of each bin.
   */
#if USE_CUDPP
  cudaMalloc((void**)&binCount_d, (gridNumElems+1)*sizeof(unsigned int));
#else
  binCount_d = binStartAddr_d;
#endif

  CUERR;

  /* Transferring data from Host to Device */
  cudaMemcpyToSymbol(cutoff2_c, &cutoff2, sizeof(float), 0);
  cudaMemcpyToSymbol(cutoff_c, &cutoff, sizeof(float), 0);
  cudaMemcpyToSymbol(gridSize_c, params.gridSize, 3*sizeof(int), 0);
  cudaMemcpyToSymbol(size_xy_c, &size_xy, sizeof(int), 0);
  cudaMemcpyToSymbol(_1overCutoff2_c, &_1overCutoff2, sizeof(float), 0);

  cudaMemcpy(sample_d, sample, n*sizeof(ReconstructionSample), cudaMemcpyHostToDevice);
  cudaMemset(binCount_d, 0, (gridNumElems+1)*sizeof(unsigned int));

  // Initialize the padding to the maximum integer value so that, once sorted,
  // these elements are pushed to the end of the array.
  cudaMemset(idxKey_d+n, 0xFF, (((n+3)&~(3))-n)*sizeof(unsigned int));

  pb_SwitchToTimer(timers, pb_TimerID_KERNEL);
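  /* Note added for clarity (not part of the original Parboil source): the
   * binning and reorder kernels below are launched with one thread per input
   * sample, using BLOCKSIZE (512) threads per block and a grid of
   * ceil(n / BLOCKSIZE) blocks, computed as (n + BLOCKSIZE - 1) / BLOCKSIZE.
   * For example, n = 100000 samples yields (100000 + 511) / 512 = 196 blocks,
   * of which the last block has 352 tail threads that the kernels are expected
   * to guard against with an index check.
   */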
  /* STEP 1: Perform binning. This kernel determines which output bin each
   * input element goes into. Any excess (beyond binsize) is put in the CPU bin.
   */
  dim3 block1 (BLOCKSIZE);
  dim3 grid1 ((n+BLOCKSIZE-1)/BLOCKSIZE);

  binning_kernel<<<grid1, block1>>>(n, sample_d, idxKey_d, idxValue_d, binCount_d, params.binsize, gridNumElems);

  /* STEP 2: Sort the index-value pairs generated in the binning kernel */
#if USE_CUDPP
  CUDPPConfiguration config;
  config.datatype = CUDPP_UINT;
  config.algorithm = CUDPP_SORT_RADIX;
  config.options = CUDPP_OPTION_KEY_VALUE_PAIRS;

  CUDPPHandle sortplan = 0;
  CUDPPResult result = cudppPlan(&sortplan, config, n, 1, 0);

  int precision = 0;
  int numElems = gridNumElems;
  while (numElems > 0){
    numElems >>= 1;
    precision++;
  }

  cudppSort(sortplan, idxKey_d, idxValue_d, int(precision), n);
  result = cudppDestroyPlan(sortplan);
#else
  sort(n, gridNumElems+1, idxKey_d, idxValue_d);
#endif

  /* STEP 3: Reorder the input data based on the sorted indices from Step 2.
   * This step also converts the data from an array of structs to a struct of
   * arrays. In this kernel we also populate an array with the starting index
   * of every output bin in the input array, based on the sorted indices from
   * Step 2.
   * At the end of this step, we copy back the start address and the list of
   * input elements that will be computed on the CPU.
   */
  reorder_kernel<<<grid1,block1>>>(n, idxValue_d, sample_d, sortedSample_d);

  pb_SwitchToTimer(timers, pb_TimerID_COPY);

  cudaFree(idxValue_d);
  cudaFree(idxKey_d);
  cudaFree(sample_d);

  pb_SwitchToTimer(timers, pb_TimerID_KERNEL);

  /* STEP 4: In this step we generate the ADD scan of the array of starting
   * indices of the output bins. The result is an array that contains the
   * starting address of every output bin.
   */
#if USE_CUDPP
  config.datatype = CUDPP_UINT;
  config.algorithm = CUDPP_SCAN;
  config.options = CUDPP_OPTION_EXCLUSIVE;
  config.op=CUDPP_ADD;

  CUDPPHandle scanplan = 0;
  result = cudppPlan(&scanplan, config, gridNumElems+1, 1, 0);

  cudppScan(scanplan, binCount_d, binStartAddr_d, gridNumElems+1);

  result = cudppDestroyPlan(scanplan);
#else
  scanLargeArray(gridNumElems+1, binCount_d);
#endif

  pb_SwitchToTimer(timers, pb_TimerID_COPY);

  // Copy back to the CPU the start index of the input elements that will be processed on the CPU
  int cpuStart;
  cudaMemcpy(&cpuStart, binCount_d+gridNumElems, sizeof(unsigned int), cudaMemcpyDeviceToHost);
  int CPUbin_size = int(n)-int(cpuStart);

  ReconstructionSample* CPUbin;
  cudaMallocHost((void**)&CPUbin,CPUbin_size*sizeof(ReconstructionSample));
  cudaMemcpy(CPUbin, sortedSample_d+cpuStart, CPUbin_size*sizeof(ReconstructionSample), cudaMemcpyDeviceToHost);

#if USE_CUDPP
  cudaFree(binCount_d);
#endif
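  /* Note added for clarity (not part of the original Parboil source): the
   * gridding launch below maps each thread to one (or a small set of) output
   * grid points, as described in the header comment, using 8x4x2 = 64-thread
   * blocks and a 2-D grid of (size_x/dims.x, (size_y*size_z)/(dims.y*dims.z))
   * blocks. Because integer division is used, the output grid dimensions are
   * presumably expected to be multiples of the block dimensions; e.g. a
   * 128x128x128 output grid gives grid2 = (128/8, (128*128)/(4*2)) = (16, 2048).
   */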
  /* STEP 5: Perform the gridding on the GPU. The results are computed in a
   * gather fashion, where each thread computes the value of one output element
   * by reading the relevant bins.
   */
  cudaMalloc((void**)&gridData_d, gridNumElems*sizeof(float2));
  cudaMalloc((void**)&sampleDensity_d, gridNumElems*sizeof(float));
  CUERR;

  cudaMemset(gridData_d, 0, gridNumElems*sizeof(float2));
  cudaMemset(sampleDensity_d, 0, gridNumElems*sizeof(float));

  pb_SwitchToTimer(timers, pb_TimerID_KERNEL);

  dim3 block2 (dims.x,dims.y,dims.z);
  dim3 grid2 (size_x/dims.x, (size_y*size_z)/(dims.y*dims.z));

  gridding_GPU<<<grid2, block2>>>(sortedSample_d, binStartAddr_d, gridData_d, sampleDensity_d, beta);

  pb_SwitchToTimer(timers, pb_TimerID_COPY);

  /* Copying the results from the Device to the Host */
  cudaMemcpy(sampleDensity, sampleDensity_d, gridNumElems*sizeof(float),cudaMemcpyDeviceToHost);
  cudaMemcpy(gridData, gridData_d, gridNumElems*sizeof(float2),cudaMemcpyDeviceToHost);

  pb_SwitchToTimer(timers, pb_TimerID_COMPUTE);

  /* STEP 6: Computing the contributions of the sample points handled by the
   * Host and adding those to the GPU results.
   */
  gridding_Gold(CPUbin_size, params, CPUbin, LUT, sizeLUT, gridData, sampleDensity);

  pb_SwitchToTimer(timers, pb_TimerID_COPY);

  cudaFreeHost(CPUbin);
  cudaFree(gridData_d);
  cudaFree(sampleDensity_d);
  cudaFree(binStartAddr_d);  // freeing via binStartAddr_d avoids a double free of binCount_d
                             // when USE_CUDPP is enabled (the two pointers alias when it is not)
  cudaFree(sortedSample_d);

  pb_SwitchToTimer(timers, pb_TimerID_NONE);

  return;
}
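/* Illustrative addition (not part of the original Parboil source): a minimal
 * host-side reference for the exclusive ADD scan performed in Step 4, included
 * only to document the semantics expected from scanLargeArray / cudppScan on
 * the bin-count array. It is not called anywhere in this file.
 */
static void exclusive_scan_reference(unsigned int* counts, int numElems)
{
  // In-place exclusive prefix sum: entry i becomes the sum of entries 0..i-1,
  // i.e. the starting offset of bin i. The last entry ends up holding the
  // total count of all preceding bins, which the host reads back as cpuStart.
  unsigned int runningSum = 0;
  for (int i = 0; i < numElems; i++) {
    unsigned int count = counts[i];
    counts[i] = runningSum;
    runningSum += count;
  }
}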