Dataset columns (string length ranges):

  hip_filename:   string, length 5 to 84
  hip_content:    string, length 79 to 9.69M
  cuda_filename:  string, length 4 to 83
  cuda_content:   string, length 19 to 9.69M
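Each row pairs a hipified source file with its original CUDA counterpart. A minimal sketch for browsing the rows, assuming the table is published as (or convertible to) a Hugging Face dataset; the path "user/hipify-cuda-pairs" is a placeholder, not the real dataset name:

```python
# Minimal sketch: browse the hip/cuda pairs, assuming a Hugging Face dataset
# with the four columns listed above. The dataset path is a placeholder.
from datasets import load_dataset

ds = load_dataset("user/hipify-cuda-pairs", split="train")  # hypothetical path

row = ds[0]
print(row["cuda_filename"], "->", row["hip_filename"])
print(row["hip_content"][:200])  # first 200 characters of the hipified source
```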
compare_static_gfft.hip
// !!! This is a file automatically generated by hipify!!! /* * A program that compare performance of the optimized gfft and cuFFT library * Test the speed and accuracy of FP16 and FP32 calculation * Try to avoid the impact of device warming up */ // C library, CUDA runtime, helpers, and utilities #include "../util/my_include_combined.h" #include <vector> // gfft #include "../alternative/static_gfft.h" #include "../util/32_gfft.h" // CUFFT #include <hipfft.h> #include <hipfftXt.h> typedef half2 Chalf; typedef float2 Csingle; const float NORM = 1.0f; const int BATCH = 16; const int SIZE = 256; const int ITERATION = 10; const int DISPLAY_DATA = 0; const int DEVICE = 0; #define __START__ hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); #define __STOP__(_V) hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&duration, start, stop); _V.push_back(duration); hipEventDestroy(start); hipEventDestroy(stop); float show_mean(std::vector<float> v) { float sum = 0; for (int i = 0; i < v.size(); i++) sum += v[i]; return sum / v.size(); } int cuFFT32(int N, Csingle* X, Csingle* FX, int B){ // Allocate unified momory for input and output int mem_size = N * B *sizeof(Csingle); Csingle *d_idata, *d_odata; checkCudaErrors(hipMalloc((void **) &d_idata, mem_size)); checkCudaErrors(hipMalloc((void **) &d_odata, mem_size)); // Copy input data to memory checkCudaErrors(hipMemcpy(d_idata, X, mem_size, hipMemcpyHostToDevice)); // cuFFT plan hipfftResult result; hipfftHandle plan; size_t workSize; long long int input_size_long = N; result = hipfftCreate(&plan); if (result != HIPFFT_SUCCESS) { fprintf(stderr, "In cuFFT32: hipfftCreate plan returned error code %d, line(%d)\n", result, __LINE__); exit(EXIT_FAILURE); } result = cufftXtMakePlanMany(plan, 1, &input_size_long, NULL, 1, 1, \ HIP_C_32F, NULL, 1, 1, HIP_C_32F, B, \ &workSize, HIP_C_32F); if (result != HIPFFT_SUCCESS) { printf("In cuFFT32: cufftXtMakePlanMany returned error code %d, line(%d)\n", result, __LINE__); exit(EXIT_FAILURE); } // cuFFT execution result = cufftXtExec(plan, reinterpret_cast<hipfftComplex *>(d_idata), \ reinterpret_cast<hipfftComplex *>(d_odata), \ HIPFFT_FORWARD); if (result != HIPFFT_SUCCESS) { printf("In cuFFT32: hipfftExecC2C (execution) returned error code %d, line(%d)\n", result, __LINE__); exit(EXIT_FAILURE); } // Copy Device memory to output checkCudaErrors(hipMemcpy(FX, d_odata, mem_size, hipMemcpyDeviceToHost)); // Clean up content and memory hipfftDestroy(plan); checkCudaErrors(hipFree(d_idata)); checkCudaErrors(hipFree(d_odata)); return 0; } int cuFFT16(int N, Chalf* X, Chalf* FX, int B){ // Allocate unified momory for input and output int mem_size = N * B *sizeof(Chalf); Chalf *d_idata, *d_odata; checkCudaErrors(hipMalloc((void **) &d_idata, mem_size)); checkCudaErrors(hipMalloc((void **) &d_odata, mem_size)); // Copy input data to memory checkCudaErrors(hipMemcpy(d_idata, X, mem_size, hipMemcpyHostToDevice)); // cuFFT plan hipfftResult result; hipfftHandle plan; size_t workSize; long long int input_size_long = N; result = hipfftCreate(&plan); if (result != HIPFFT_SUCCESS) { printf("hipfftCreate (plan) returned error code %d, line(%d)\n", result, __LINE__); exit(EXIT_FAILURE); } result = cufftXtMakePlanMany(plan, 1, &input_size_long, NULL, 1, 1, \ HIP_C_16F, NULL, 1, 1, HIP_C_16F, B, \ &workSize, HIP_C_16F); if (result != HIPFFT_SUCCESS) { printf("cufftXtMakePlanMany (plan) returned error code %d, line(%d)\n", result, __LINE__); exit(EXIT_FAILURE); } // cuFFT execution result = 
cufftXtExec(plan, reinterpret_cast<hipfftComplex *>(d_idata), \ reinterpret_cast<hipfftComplex *>(d_odata), \ HIPFFT_FORWARD); if (result != HIPFFT_SUCCESS) { printf("hipfftExecC2C (execution) returned error code %d, line(%d)\n", result, __LINE__); exit(EXIT_FAILURE); } // Copy Device memory to output checkCudaErrors(hipMemcpy(FX, d_odata, mem_size, hipMemcpyDeviceToHost)); // Clean up content and memory hipfftDestroy(plan); checkCudaErrors(hipFree(d_idata)); checkCudaErrors(hipFree(d_odata)); return 0; } int get_parameters(int argc, char **argv, int& help_info, float& norm, int& n, int& batch, int& iter, int& display, int& device){ if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "?") || checkCmdLineFlag(argc, (const char **)argv, "h")) { printf("Usage: -norm=upper_bound (Max norm of input elements)\n" " -n=size (Input vector size)\n" " -batch=batch_size (Number of input vectors)\n" " -iter=iteration (Times of experiments)\n" " -display=show_result (0 or 1) \n" " -device=ID (ID >= 0 for deviceID)\n"); help_info = 1; return 0; } // Get and set parameter if (checkCmdLineFlag(argc, (const char **)argv, "norm")) { norm = getCmdLineArgumentFloat(argc, (const char **)argv, "norm"); } if (checkCmdLineFlag(argc, (const char **)argv, "n")) { n = getCmdLineArgumentInt(argc, (const char **)argv, "n"); } if (checkCmdLineFlag(argc, (const char **)argv, "batch")) { batch = getCmdLineArgumentInt(argc, (const char **)argv, "batch"); } if (checkCmdLineFlag(argc, (const char **)argv, "iter")) { iter = getCmdLineArgumentInt(argc, (const char **)argv, "iter"); } if (checkCmdLineFlag(argc, (const char **)argv, "display")) { display = getCmdLineArgumentInt(argc, (const char **)argv, "display"); } if (checkCmdLineFlag(argc, (const char **)argv, "device")) { device = getCmdLineArgumentInt(argc, (const char **)argv, "device"); hipSetDevice(device); } hipError_t error; hipDeviceProp_t deviceProp; error = hipGetDevice(&device); if (error != hipSuccess) { printf("hipGetDevice returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } error = hipGetDeviceProperties(&deviceProp, device); if (deviceProp.computeMode == hipComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::hipSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != hipSuccess) { printf("hipGetDeviceProperties returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } else { printf("GPU Device %d: \"%s\" with compute capability %d.%d\n", device, deviceProp.name, deviceProp.major, deviceProp.minor); } return 0; } int main(int argc, char **argv) { int help_info = 0; float norm = NORM; int n = SIZE; int batch = BATCH; int iter = ITERATION; int display = DISPLAY_DATA; int device = DEVICE; get_parameters(argc, argv, help_info, norm, n, batch, iter, display, device); if (help_info == 1){ exit(EXIT_SUCCESS); } // Start program printf("Problem size = %d, batch size = %d, norm = %f, iteration = %d\n", n, batch, norm, iter); printf("[Testing of gfft and cuFFT] - Starting...\n"); // Define error, event, result data structure hipEvent_t start, stop; std::vector<float> cuFFT32Run, cuFFT16Run, gfft32Run, gfftRun; std::vector<float> cuFFT16Error, gfft32Error, gfftError; float duration, error1, error2, error3; // Define and zero initialize input and output float* X_re = new float[n * batch](); float* X_im = new float[n * batch](); float* FX_re = new float[n * batch](); float* FX_im = new float [n * 
batch](); float* FX_re_32 = new float[n * batch](); float* FX_im_32 = new float [n * batch](); Csingle* X_32 = new Csingle[n * batch](); Csingle* FX_32 = new Csingle[n * batch](); Chalf* X_16 = new Chalf[n * batch](); Chalf* FX_16 = new Chalf[n * batch](); // Warm up cuFFT32(n, X_32, FX_32, batch); cuFFT16(n, X_16, FX_16, batch); gfft_32(n, X_re, X_im, FX_re_32, FX_im_32, batch); gfft(n, X_re, X_im, FX_re, FX_im, batch); // Run experiment for (int i = 0; i < iter; i++){ // Initialize input srand(time(NULL)); for (int j = 0; j < n * batch; j++){ X_re[j] = (float)rand() / (float)(RAND_MAX) * 2 * norm - norm; X_im[j] = (float)rand() / (float)(RAND_MAX) * 2 * norm - norm; X_32[j].x = X_re[j]; X_32[j].y = X_im[j]; X_16[j].x = (half)X_re[j]; X_16[j].y = (half)X_im[j]; if (display == 1){ printf("X[%d] = (%.10f, %.10f) \n", j, X_re[j], X_im[j]); } } // Call cuFFT32 __START__ cuFFT32(n, X_32, FX_32, batch); __STOP__(cuFFT32Run) // Call cuFFT16 __START__ cuFFT16(n, X_16, FX_16, batch); __STOP__(cuFFT16Run) // Call gfft32 __START__ gfft_32(n, X_re, X_im, FX_re_32, FX_im_32, batch); __STOP__(gfft32Run) // Call gfft __START__ gfft(n, X_re, X_im, FX_re, FX_im, batch); __STOP__(gfftRun) // Calculate error for (int j = 0; j < n * batch; j++){ error1 += (float)fabs((float)(FX_16[j].x) - FX_32[j].x); error1 += (float)fabs((float)(FX_16[j].y) - FX_32[j].y); error2 += (float)fabs(FX_re[j] - FX_32[j].x); error2 += (float)fabs(FX_im[j] - FX_32[j].y); error3 += (float)fabs(FX_re_32[j] - FX_32[j].x); error3 += (float)fabs(FX_im_32[j] - FX_32[j].y); } cuFFT16Error.push_back(error1 / (n * batch)); gfftError.push_back(error2 / (n * batch)); gfft32Error.push_back(error3 / (n * batch)); } // Print experiment result printf("Time of cuFFT32: %f milliseconds\n", show_mean(cuFFT32Run)); printf("Time of cuFFT16: %f milliseconds, error = %.10f\n", show_mean(cuFFT16Run), show_mean(cuFFT16Error)/norm); printf("Time of gfft32: %f milliseconds, error = %.10f\n", show_mean(gfft32Run), show_mean(gfft32Error)/norm); printf("Time of gfft: %f milliseconds, error = %.10f\n", show_mean(gfftRun), show_mean(gfftError)/norm); // Free input and output memory delete [] X_re; delete [] X_im; delete [] FX_re; delete [] FX_im; delete [] FX_re_32; delete [] FX_im_32; delete [] X_32; delete [] FX_32; delete [] X_16; delete [] FX_16; exit(0); }
compare_static_gfft.cu
/* * A program that compare performance of the optimized gfft and cuFFT library * Test the speed and accuracy of FP16 and FP32 calculation * Try to avoid the impact of device warming up */ // C library, CUDA runtime, helpers, and utilities #include "../util/my_include_combined.h" #include <vector> // gfft #include "../alternative/static_gfft.h" #include "../util/32_gfft.h" // CUFFT #include <cufft.h> #include <cufftXt.h> typedef half2 Chalf; typedef float2 Csingle; const float NORM = 1.0f; const int BATCH = 16; const int SIZE = 256; const int ITERATION = 10; const int DISPLAY_DATA = 0; const int DEVICE = 0; #define __START__ cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); #define __STOP__(_V) cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&duration, start, stop); _V.push_back(duration); cudaEventDestroy(start); cudaEventDestroy(stop); float show_mean(std::vector<float> v) { float sum = 0; for (int i = 0; i < v.size(); i++) sum += v[i]; return sum / v.size(); } int cuFFT32(int N, Csingle* X, Csingle* FX, int B){ // Allocate unified momory for input and output int mem_size = N * B *sizeof(Csingle); Csingle *d_idata, *d_odata; checkCudaErrors(cudaMalloc((void **) &d_idata, mem_size)); checkCudaErrors(cudaMalloc((void **) &d_odata, mem_size)); // Copy input data to memory checkCudaErrors(cudaMemcpy(d_idata, X, mem_size, cudaMemcpyHostToDevice)); // cuFFT plan cufftResult result; cufftHandle plan; size_t workSize; long long int input_size_long = N; result = cufftCreate(&plan); if (result != CUFFT_SUCCESS) { fprintf(stderr, "In cuFFT32: cufftCreate plan returned error code %d, line(%d)\n", result, __LINE__); exit(EXIT_FAILURE); } result = cufftXtMakePlanMany(plan, 1, &input_size_long, NULL, 1, 1, \ CUDA_C_32F, NULL, 1, 1, CUDA_C_32F, B, \ &workSize, CUDA_C_32F); if (result != CUFFT_SUCCESS) { printf("In cuFFT32: cufftXtMakePlanMany returned error code %d, line(%d)\n", result, __LINE__); exit(EXIT_FAILURE); } // cuFFT execution result = cufftXtExec(plan, reinterpret_cast<cufftComplex *>(d_idata), \ reinterpret_cast<cufftComplex *>(d_odata), \ CUFFT_FORWARD); if (result != CUFFT_SUCCESS) { printf("In cuFFT32: cufftExecC2C (execution) returned error code %d, line(%d)\n", result, __LINE__); exit(EXIT_FAILURE); } // Copy Device memory to output checkCudaErrors(cudaMemcpy(FX, d_odata, mem_size, cudaMemcpyDeviceToHost)); // Clean up content and memory cufftDestroy(plan); checkCudaErrors(cudaFree(d_idata)); checkCudaErrors(cudaFree(d_odata)); return 0; } int cuFFT16(int N, Chalf* X, Chalf* FX, int B){ // Allocate unified momory for input and output int mem_size = N * B *sizeof(Chalf); Chalf *d_idata, *d_odata; checkCudaErrors(cudaMalloc((void **) &d_idata, mem_size)); checkCudaErrors(cudaMalloc((void **) &d_odata, mem_size)); // Copy input data to memory checkCudaErrors(cudaMemcpy(d_idata, X, mem_size, cudaMemcpyHostToDevice)); // cuFFT plan cufftResult result; cufftHandle plan; size_t workSize; long long int input_size_long = N; result = cufftCreate(&plan); if (result != CUFFT_SUCCESS) { printf("cufftCreate (plan) returned error code %d, line(%d)\n", result, __LINE__); exit(EXIT_FAILURE); } result = cufftXtMakePlanMany(plan, 1, &input_size_long, NULL, 1, 1, \ CUDA_C_16F, NULL, 1, 1, CUDA_C_16F, B, \ &workSize, CUDA_C_16F); if (result != CUFFT_SUCCESS) { printf("cufftXtMakePlanMany (plan) returned error code %d, line(%d)\n", result, __LINE__); exit(EXIT_FAILURE); } // cuFFT execution result = cufftXtExec(plan, reinterpret_cast<cufftComplex 
*>(d_idata), \ reinterpret_cast<cufftComplex *>(d_odata), \ CUFFT_FORWARD); if (result != CUFFT_SUCCESS) { printf("cufftExecC2C (execution) returned error code %d, line(%d)\n", result, __LINE__); exit(EXIT_FAILURE); } // Copy Device memory to output checkCudaErrors(cudaMemcpy(FX, d_odata, mem_size, cudaMemcpyDeviceToHost)); // Clean up content and memory cufftDestroy(plan); checkCudaErrors(cudaFree(d_idata)); checkCudaErrors(cudaFree(d_odata)); return 0; } int get_parameters(int argc, char **argv, int& help_info, float& norm, int& n, int& batch, int& iter, int& display, int& device){ if (checkCmdLineFlag(argc, (const char **)argv, "help") || checkCmdLineFlag(argc, (const char **)argv, "?") || checkCmdLineFlag(argc, (const char **)argv, "h")) { printf("Usage: -norm=upper_bound (Max norm of input elements)\n" " -n=size (Input vector size)\n" " -batch=batch_size (Number of input vectors)\n" " -iter=iteration (Times of experiments)\n" " -display=show_result (0 or 1) \n" " -device=ID (ID >= 0 for deviceID)\n"); help_info = 1; return 0; } // Get and set parameter if (checkCmdLineFlag(argc, (const char **)argv, "norm")) { norm = getCmdLineArgumentFloat(argc, (const char **)argv, "norm"); } if (checkCmdLineFlag(argc, (const char **)argv, "n")) { n = getCmdLineArgumentInt(argc, (const char **)argv, "n"); } if (checkCmdLineFlag(argc, (const char **)argv, "batch")) { batch = getCmdLineArgumentInt(argc, (const char **)argv, "batch"); } if (checkCmdLineFlag(argc, (const char **)argv, "iter")) { iter = getCmdLineArgumentInt(argc, (const char **)argv, "iter"); } if (checkCmdLineFlag(argc, (const char **)argv, "display")) { display = getCmdLineArgumentInt(argc, (const char **)argv, "display"); } if (checkCmdLineFlag(argc, (const char **)argv, "device")) { device = getCmdLineArgumentInt(argc, (const char **)argv, "device"); cudaSetDevice(device); } cudaError_t error; cudaDeviceProp deviceProp; error = cudaGetDevice(&device); if (error != cudaSuccess) { printf("cudaGetDevice returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } error = cudaGetDeviceProperties(&deviceProp, device); if (deviceProp.computeMode == cudaComputeModeProhibited) { fprintf(stderr, "Error: device is running in <Compute Mode Prohibited>, no threads can use ::cudaSetDevice().\n"); exit(EXIT_SUCCESS); } if (error != cudaSuccess) { printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } else { printf("GPU Device %d: \"%s\" with compute capability %d.%d\n", device, deviceProp.name, deviceProp.major, deviceProp.minor); } return 0; } int main(int argc, char **argv) { int help_info = 0; float norm = NORM; int n = SIZE; int batch = BATCH; int iter = ITERATION; int display = DISPLAY_DATA; int device = DEVICE; get_parameters(argc, argv, help_info, norm, n, batch, iter, display, device); if (help_info == 1){ exit(EXIT_SUCCESS); } // Start program printf("Problem size = %d, batch size = %d, norm = %f, iteration = %d\n", n, batch, norm, iter); printf("[Testing of gfft and cuFFT] - Starting...\n"); // Define error, event, result data structure cudaEvent_t start, stop; std::vector<float> cuFFT32Run, cuFFT16Run, gfft32Run, gfftRun; std::vector<float> cuFFT16Error, gfft32Error, gfftError; float duration, error1, error2, error3; // Define and zero initialize input and output float* X_re = new float[n * batch](); float* X_im = new float[n * batch](); float* FX_re = new float[n * batch](); float* FX_im = new float [n * batch](); float* FX_re_32 = new 
float[n * batch](); float* FX_im_32 = new float [n * batch](); Csingle* X_32 = new Csingle[n * batch](); Csingle* FX_32 = new Csingle[n * batch](); Chalf* X_16 = new Chalf[n * batch](); Chalf* FX_16 = new Chalf[n * batch](); // Warm up cuFFT32(n, X_32, FX_32, batch); cuFFT16(n, X_16, FX_16, batch); gfft_32(n, X_re, X_im, FX_re_32, FX_im_32, batch); gfft(n, X_re, X_im, FX_re, FX_im, batch); // Run experiment for (int i = 0; i < iter; i++){ // Initialize input srand(time(NULL)); for (int j = 0; j < n * batch; j++){ X_re[j] = (float)rand() / (float)(RAND_MAX) * 2 * norm - norm; X_im[j] = (float)rand() / (float)(RAND_MAX) * 2 * norm - norm; X_32[j].x = X_re[j]; X_32[j].y = X_im[j]; X_16[j].x = (half)X_re[j]; X_16[j].y = (half)X_im[j]; if (display == 1){ printf("X[%d] = (%.10f, %.10f) \n", j, X_re[j], X_im[j]); } } // Call cuFFT32 __START__ cuFFT32(n, X_32, FX_32, batch); __STOP__(cuFFT32Run) // Call cuFFT16 __START__ cuFFT16(n, X_16, FX_16, batch); __STOP__(cuFFT16Run) // Call gfft32 __START__ gfft_32(n, X_re, X_im, FX_re_32, FX_im_32, batch); __STOP__(gfft32Run) // Call gfft __START__ gfft(n, X_re, X_im, FX_re, FX_im, batch); __STOP__(gfftRun) // Calculate error for (int j = 0; j < n * batch; j++){ error1 += (float)fabs((float)(FX_16[j].x) - FX_32[j].x); error1 += (float)fabs((float)(FX_16[j].y) - FX_32[j].y); error2 += (float)fabs(FX_re[j] - FX_32[j].x); error2 += (float)fabs(FX_im[j] - FX_32[j].y); error3 += (float)fabs(FX_re_32[j] - FX_32[j].x); error3 += (float)fabs(FX_im_32[j] - FX_32[j].y); } cuFFT16Error.push_back(error1 / (n * batch)); gfftError.push_back(error2 / (n * batch)); gfft32Error.push_back(error3 / (n * batch)); } // Print experiment result printf("Time of cuFFT32: %f milliseconds\n", show_mean(cuFFT32Run)); printf("Time of cuFFT16: %f milliseconds, error = %.10f\n", show_mean(cuFFT16Run), show_mean(cuFFT16Error)/norm); printf("Time of gfft32: %f milliseconds, error = %.10f\n", show_mean(gfft32Run), show_mean(gfft32Error)/norm); printf("Time of gfft: %f milliseconds, error = %.10f\n", show_mean(gfftRun), show_mean(gfftError)/norm); // Free input and output memory delete [] X_re; delete [] X_im; delete [] FX_re; delete [] FX_im; delete [] FX_re_32; delete [] FX_im_32; delete [] X_32; delete [] FX_32; delete [] X_16; delete [] FX_16; exit(0); }
675a5b6e992550b19a527dd6597ec0a6d226d517.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2020 HOOMD-TF Developers

#include "TFArrayComm.cuh"

/*! \file TFArrayComm.cu
    \brief CUDA kernels and functions for TFArrayComm
*/

extern "C" __global__ void htf_gpu_unstuff4_kerenl(Scalar4 *array, unsigned int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
        array[i].w = static_cast<Scalar>(__scalar_as_int(array[i].w));
}

hipError_t htf_gpu_unstuff4(Scalar4 *array, unsigned int m_N, hipStream_t s)
{
    // set up the grid to run the kernel
    int block_size = 256;
    dim3 grid((int)ceil((double)m_N / (double)block_size), 1, 1);
    dim3 threads(block_size, 1, 1);

    // run the kernel
    hipLaunchKernelGGL((htf_gpu_unstuff4_kerenl), dim3(grid), dim3(threads), 0, s, array, m_N);

    // this method always succeeds.
    return hipSuccess;
}
675a5b6e992550b19a527dd6597ec0a6d226d517.cu
// Copyright (c) 2020 HOOMD-TF Developers

#include "TFArrayComm.cuh"

/*! \file TFArrayComm.cu
    \brief CUDA kernels and functions for TFArrayComm
*/

extern "C" __global__ void htf_gpu_unstuff4_kerenl(Scalar4 *array, unsigned int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
        array[i].w = static_cast<Scalar>(__scalar_as_int(array[i].w));
}

cudaError_t htf_gpu_unstuff4(Scalar4 *array, unsigned int m_N, cudaStream_t s)
{
    // set up the grid to run the kernel
    int block_size = 256;
    dim3 grid((int)ceil((double)m_N / (double)block_size), 1, 1);
    dim3 threads(block_size, 1, 1);

    // run the kernel
    htf_gpu_unstuff4_kerenl<<< grid, threads, 0, s >>>(array, m_N);

    // this method always succeeds.
    return cudaSuccess;
}
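Because each row stores both versions of the same file, the hipify rewrite itself (the added hip_runtime include, cudaError_t becoming hipError_t, the <<< >>> launch turned into hipLaunchKernelGGL) can be surfaced with a plain text diff. A minimal sketch, assuming `cuda_src` and `hip_src` hold one row's `cuda_content` and `hip_content` strings, for example the TFArrayComm pair above:

```python
import difflib

def show_hipify_diff(cuda_src: str, hip_src: str) -> None:
    """Print a unified diff of a CUDA file against its hipified version."""
    diff = difflib.unified_diff(
        cuda_src.splitlines(),
        hip_src.splitlines(),
        fromfile="cuda",
        tofile="hip",
        lineterm="",
    )
    for line in diff:
        print(line)

# Hypothetical usage with one dataset row:
# show_hipify_diff(row["cuda_content"], row["hip_content"])
```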
fa1f91effcc6134415414cc93301a505193056d5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef USE_OPENCV #include <opencv2/core/core.hpp> #endif // USE_OPENCV #include <device_launch_parameters.h> #include <hip/hip_fp16.h> #include "caffe/util/gpu_math_functions.cuh" #include "caffe/data_transformer.hpp" #include "caffe/util/io.hpp" namespace caffe { template <typename Dtype> __global__ void transform_kernel(int N, int C, int H, int W, // original size int Hc, int Wc, // cropped size bool param_mirror, int datum_height, int datum_width, // offsets int crop_size, Phase phase, size_t sizeof_element, const Dtype *in, Dtype *out, // buffers float scale, int has_mean_file, int has_mean_values, float *mean, const unsigned int *random_numbers) { const int c = blockIdx.y; // loop over images for (int n = blockIdx.x; n < N; n += gridDim.x) { // get mirror and offsets unsigned int rand1 = random_numbers[n*3 ]; unsigned int rand2 = random_numbers[n*3 + 1]; unsigned int rand3 = random_numbers[n*3 + 2]; bool mirror = param_mirror && (rand1 % 2); int h_off = 0, w_off = 0; if (crop_size) { if (phase == TRAIN) { h_off = rand2 % (datum_height - crop_size + 1); w_off = rand3 % (datum_width - crop_size + 1); } else { h_off = (datum_height - crop_size) / 2; w_off = (datum_width - crop_size) / 2; } } const uint8_t *in_ptri; const float *in_ptrf; // offsets into start of (image, channel) = (n, c) // channel is handled by blockIdx.y // Initial offset per Dtype: const Dtype *in_ptr = &in[n*C*H*W]; // Element-specific offset to a channel c if (sizeof_element == sizeof(uint8_t)) { in_ptri = reinterpret_cast<const uint8_t*>(in_ptr); in_ptri += c*H*W; } else if (sizeof_element == sizeof(float)) { in_ptrf = reinterpret_cast<const float*>(in_ptr); in_ptrf += c*H*W; } else { in_ptr += c*H*W; } Dtype *out_ptr = &out[n*C*Hc*Wc + c*Hc*Wc]; Dtype element; // loop over pixels using threads for (int h = threadIdx.y; h < Hc; h += blockDim.y) { for (int w = threadIdx.x; w < Wc; w += blockDim.x) { // get the indices for in, out buffers int in_idx = (h_off + h) * W + w_off + w; int out_idx = mirror ? 
h * Wc + (Wc - 1 - w) : h * Wc + w; if (sizeof_element == sizeof(uint8_t)) { element = in_ptri[in_idx]; } else if (sizeof_element == sizeof(float)) { element = in_ptrf[in_idx]; } else { element = in_ptr[in_idx]; } // perform the transform if (has_mean_file) { out_ptr[out_idx] = (element - mean[c*H*W + in_idx]) * scale; } else { if (has_mean_values) { out_ptr[out_idx] = (element - mean[c]) * scale; } else { out_ptr[out_idx] = element * scale; } } } } } } template <> __global__ void transform_kernel<__half>(int N, int C, int H, int W, // original size int Hc, int Wc, // cropped size bool param_mirror, int datum_height, int datum_width, // offsets int crop_size, Phase phase, size_t sizeof_element, const __half* in, __half* out, // buffers float scale, int has_mean_file, int has_mean_values, float* mean, const unsigned int *random_numbers) { const int c = blockIdx.y; // loop over images for (int n = blockIdx.x; n < N; n += gridDim.x) { // get mirror and offsets unsigned int rand1 = random_numbers[n*3 ]; unsigned int rand2 = random_numbers[n*3 + 1]; unsigned int rand3 = random_numbers[n*3 + 2]; bool mirror = param_mirror && (rand1 % 2); int h_off = 0, w_off = 0; if (crop_size) { if (phase == TRAIN) { h_off = rand2 % (datum_height - crop_size + 1); w_off = rand3 % (datum_width - crop_size + 1); } else { h_off = (datum_height - crop_size) / 2; w_off = (datum_width - crop_size) / 2; } } const uint8_t *in_ptri; const float *in_ptrf; // offsets into start of (image, channel) = (n, c) // channel is handled by blockIdx.y // Initial offset per Dtype: const __half *in_ptr = &in[n*C*H*W]; // Element-specific offset to a channel c if (sizeof_element == sizeof(uint8_t)) { in_ptri = reinterpret_cast<const uint8_t*>(in_ptr); in_ptri += c*H*W; } else if (sizeof_element == sizeof(float)) { in_ptrf = reinterpret_cast<const float*>(in_ptr); in_ptrf += c*H*W; } else { in_ptr += c*H*W; } __half* out_ptr = &out[n*C*Hc*Wc + c*Hc*Wc]; float element; // loop over pixels using threads for (int h = threadIdx.y; h < Hc; h += blockDim.y) { for (int w = threadIdx.x; w < Wc; w += blockDim.x) { // get the indices for in, out buffers int in_idx = (h_off + h) * W + w_off + w; int out_idx = mirror ? 
h * Wc + (Wc - 1 - w) : h * Wc + w; if (sizeof_element == sizeof(uint8_t)) { element = in_ptri[in_idx]; } else if (sizeof_element == sizeof(float)) { element = in_ptrf[in_idx]; } else { element = __half2float(in_ptr[in_idx]); } // perform the transform if (has_mean_file) { out_ptr[out_idx] = float2half_clip((element - mean[c*H*W + in_idx]) * scale); } else { if (has_mean_values) { out_ptr[out_idx] = float2half_clip((element - mean[c]) * scale); } else { out_ptr[out_idx] = float2half_clip(element * scale); } } } } } } template <typename Dtype> void DataTransformer<Dtype>::TransformGPU(int N, int C, int H, int W, size_t sizeof_element, const Dtype *in, Dtype *out, const unsigned int *random_numbers) { const int datum_channels = C; const int datum_height = H; const int datum_width = W; const int crop_size = param_.crop_size(); float scale = param_.scale(); const bool mirror = param_.mirror(); const bool has_mean_file = param_.has_mean_file(); const bool has_mean_values = mean_values_.size() > 0; CHECK_GT(datum_channels, 0); CHECK_GE(datum_height, crop_size); CHECK_GE(datum_width, crop_size); float* mean = nullptr; if (has_mean_file) { CHECK_EQ(datum_channels, data_mean_.channels()); // no need to check equality anymore // datum_{height, width} are _output_ not input mean = data_mean_.mutable_gpu_data(); } if (has_mean_values) { if (mean_values_gpu_.empty()) { CHECK(mean_values_.size() == 1 || mean_values_.size() == datum_channels) << "Specify either 1 mean_value or as many as channels: " << datum_channels; if (datum_channels > 1 && mean_values_.size() == 1) { // Replicate the mean_value for simplicity for (int c = 1; c < datum_channels; ++c) { mean_values_.push_back(mean_values_[0]); } } mean_values_gpu_.reserve(sizeof(float) * mean_values_.size()); caffe_copy(static_cast<int>(mean_values_.size()), &mean_values_.front(), reinterpret_cast<float*>(mean_values_gpu_.data())); } mean = reinterpret_cast<float*>(mean_values_gpu_.data()); } int height = datum_height; int width = datum_width; if (crop_size) { height = crop_size; width = crop_size; } dim3 grid(N, C); dim3 block(16, 16); hipStream_t stream = Caffe::th_stream_aux(Caffe::STREAM_ID_TRANSFORMER); hipLaunchKernelGGL(( transform_kernel<Dtype>) , dim3(grid), dim3(block), 0, stream , N, C, H, W, height, width, param_.mirror(), datum_height, datum_width, crop_size, phase_, sizeof_element, in, out, scale, static_cast<int>(has_mean_file), static_cast<int>(has_mean_values), mean, random_numbers); CUDA_POST_KERNEL_CHECK; CUDA_CHECK(hipStreamSynchronize(stream)); } template void DataTransformer<float>::TransformGPU(int, int, int, int, size_t, const float*, float*, const unsigned int*); template void DataTransformer<double>::TransformGPU(int, int, int, int, size_t, const double*, double*, const unsigned int*); template void DataTransformer<float16>::TransformGPU(int, int, int, int, size_t, const float16*, float16*, const unsigned int*); } // namespace caffe
fa1f91effcc6134415414cc93301a505193056d5.cu
#ifdef USE_OPENCV #include <opencv2/core/core.hpp> #endif // USE_OPENCV #include <device_launch_parameters.h> #include <cuda_fp16.h> #include "caffe/util/gpu_math_functions.cuh" #include "caffe/data_transformer.hpp" #include "caffe/util/io.hpp" namespace caffe { template <typename Dtype> __global__ void transform_kernel(int N, int C, int H, int W, // original size int Hc, int Wc, // cropped size bool param_mirror, int datum_height, int datum_width, // offsets int crop_size, Phase phase, size_t sizeof_element, const Dtype *in, Dtype *out, // buffers float scale, int has_mean_file, int has_mean_values, float *mean, const unsigned int *random_numbers) { const int c = blockIdx.y; // loop over images for (int n = blockIdx.x; n < N; n += gridDim.x) { // get mirror and offsets unsigned int rand1 = random_numbers[n*3 ]; unsigned int rand2 = random_numbers[n*3 + 1]; unsigned int rand3 = random_numbers[n*3 + 2]; bool mirror = param_mirror && (rand1 % 2); int h_off = 0, w_off = 0; if (crop_size) { if (phase == TRAIN) { h_off = rand2 % (datum_height - crop_size + 1); w_off = rand3 % (datum_width - crop_size + 1); } else { h_off = (datum_height - crop_size) / 2; w_off = (datum_width - crop_size) / 2; } } const uint8_t *in_ptri; const float *in_ptrf; // offsets into start of (image, channel) = (n, c) // channel is handled by blockIdx.y // Initial offset per Dtype: const Dtype *in_ptr = &in[n*C*H*W]; // Element-specific offset to a channel c if (sizeof_element == sizeof(uint8_t)) { in_ptri = reinterpret_cast<const uint8_t*>(in_ptr); in_ptri += c*H*W; } else if (sizeof_element == sizeof(float)) { in_ptrf = reinterpret_cast<const float*>(in_ptr); in_ptrf += c*H*W; } else { in_ptr += c*H*W; } Dtype *out_ptr = &out[n*C*Hc*Wc + c*Hc*Wc]; Dtype element; // loop over pixels using threads for (int h = threadIdx.y; h < Hc; h += blockDim.y) { for (int w = threadIdx.x; w < Wc; w += blockDim.x) { // get the indices for in, out buffers int in_idx = (h_off + h) * W + w_off + w; int out_idx = mirror ? 
h * Wc + (Wc - 1 - w) : h * Wc + w; if (sizeof_element == sizeof(uint8_t)) { element = in_ptri[in_idx]; } else if (sizeof_element == sizeof(float)) { element = in_ptrf[in_idx]; } else { element = in_ptr[in_idx]; } // perform the transform if (has_mean_file) { out_ptr[out_idx] = (element - mean[c*H*W + in_idx]) * scale; } else { if (has_mean_values) { out_ptr[out_idx] = (element - mean[c]) * scale; } else { out_ptr[out_idx] = element * scale; } } } } } } template <> __global__ void transform_kernel<__half>(int N, int C, int H, int W, // original size int Hc, int Wc, // cropped size bool param_mirror, int datum_height, int datum_width, // offsets int crop_size, Phase phase, size_t sizeof_element, const __half* in, __half* out, // buffers float scale, int has_mean_file, int has_mean_values, float* mean, const unsigned int *random_numbers) { const int c = blockIdx.y; // loop over images for (int n = blockIdx.x; n < N; n += gridDim.x) { // get mirror and offsets unsigned int rand1 = random_numbers[n*3 ]; unsigned int rand2 = random_numbers[n*3 + 1]; unsigned int rand3 = random_numbers[n*3 + 2]; bool mirror = param_mirror && (rand1 % 2); int h_off = 0, w_off = 0; if (crop_size) { if (phase == TRAIN) { h_off = rand2 % (datum_height - crop_size + 1); w_off = rand3 % (datum_width - crop_size + 1); } else { h_off = (datum_height - crop_size) / 2; w_off = (datum_width - crop_size) / 2; } } const uint8_t *in_ptri; const float *in_ptrf; // offsets into start of (image, channel) = (n, c) // channel is handled by blockIdx.y // Initial offset per Dtype: const __half *in_ptr = &in[n*C*H*W]; // Element-specific offset to a channel c if (sizeof_element == sizeof(uint8_t)) { in_ptri = reinterpret_cast<const uint8_t*>(in_ptr); in_ptri += c*H*W; } else if (sizeof_element == sizeof(float)) { in_ptrf = reinterpret_cast<const float*>(in_ptr); in_ptrf += c*H*W; } else { in_ptr += c*H*W; } __half* out_ptr = &out[n*C*Hc*Wc + c*Hc*Wc]; float element; // loop over pixels using threads for (int h = threadIdx.y; h < Hc; h += blockDim.y) { for (int w = threadIdx.x; w < Wc; w += blockDim.x) { // get the indices for in, out buffers int in_idx = (h_off + h) * W + w_off + w; int out_idx = mirror ? 
h * Wc + (Wc - 1 - w) : h * Wc + w; if (sizeof_element == sizeof(uint8_t)) { element = in_ptri[in_idx]; } else if (sizeof_element == sizeof(float)) { element = in_ptrf[in_idx]; } else { element = __half2float(in_ptr[in_idx]); } // perform the transform if (has_mean_file) { out_ptr[out_idx] = float2half_clip((element - mean[c*H*W + in_idx]) * scale); } else { if (has_mean_values) { out_ptr[out_idx] = float2half_clip((element - mean[c]) * scale); } else { out_ptr[out_idx] = float2half_clip(element * scale); } } } } } } template <typename Dtype> void DataTransformer<Dtype>::TransformGPU(int N, int C, int H, int W, size_t sizeof_element, const Dtype *in, Dtype *out, const unsigned int *random_numbers) { const int datum_channels = C; const int datum_height = H; const int datum_width = W; const int crop_size = param_.crop_size(); float scale = param_.scale(); const bool mirror = param_.mirror(); const bool has_mean_file = param_.has_mean_file(); const bool has_mean_values = mean_values_.size() > 0; CHECK_GT(datum_channels, 0); CHECK_GE(datum_height, crop_size); CHECK_GE(datum_width, crop_size); float* mean = nullptr; if (has_mean_file) { CHECK_EQ(datum_channels, data_mean_.channels()); // no need to check equality anymore // datum_{height, width} are _output_ not input mean = data_mean_.mutable_gpu_data(); } if (has_mean_values) { if (mean_values_gpu_.empty()) { CHECK(mean_values_.size() == 1 || mean_values_.size() == datum_channels) << "Specify either 1 mean_value or as many as channels: " << datum_channels; if (datum_channels > 1 && mean_values_.size() == 1) { // Replicate the mean_value for simplicity for (int c = 1; c < datum_channels; ++c) { mean_values_.push_back(mean_values_[0]); } } mean_values_gpu_.reserve(sizeof(float) * mean_values_.size()); caffe_copy(static_cast<int>(mean_values_.size()), &mean_values_.front(), reinterpret_cast<float*>(mean_values_gpu_.data())); } mean = reinterpret_cast<float*>(mean_values_gpu_.data()); } int height = datum_height; int width = datum_width; if (crop_size) { height = crop_size; width = crop_size; } dim3 grid(N, C); dim3 block(16, 16); cudaStream_t stream = Caffe::th_stream_aux(Caffe::STREAM_ID_TRANSFORMER); transform_kernel<Dtype> <<< grid, block, 0, stream >>>(N, C, H, W, height, width, param_.mirror(), datum_height, datum_width, crop_size, phase_, sizeof_element, in, out, scale, static_cast<int>(has_mean_file), static_cast<int>(has_mean_values), mean, random_numbers); CUDA_POST_KERNEL_CHECK; CUDA_CHECK(cudaStreamSynchronize(stream)); } template void DataTransformer<float>::TransformGPU(int, int, int, int, size_t, const float*, float*, const unsigned int*); template void DataTransformer<double>::TransformGPU(int, int, int, int, size_t, const double*, double*, const unsigned int*); template void DataTransformer<float16>::TransformGPU(int, int, int, int, size_t, const float16*, float16*, const unsigned int*); } // namespace caffe
7cb562755a5e6a4d1a051c86cccfcdf8d7629692.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cmath> #include <cstdio> #include "../include/slic.h" __device__ __constant__ float slic_factor; void initializeSlicFactor() { const float * slic_factor_hp = &slic_factor_h; hipError_t cudaStatus = hipMemcpyToSymbol(slic_factor, slic_factor_hp, sizeof(float)); } __global__ void k_measure(int* d_device_location, int target) { int accum = threadIdx.x; for (int i=1; i<100; i++) for (int j=1; j<1000; j++) { accum *= j; accum = accum ^ (threadIdx.y << j / 100); accum += target; } if (accum == target) *d_device_location = 0; } __global__ void k_cumulativeCountOrig(const pix_data* d_pix_data, const own_data* d_own_data, spx_data* d_spx_data) { //if (threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0) //{ //printf("k\n"); //} int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < pix_height && x < pix_width) { int pix_index = y * pix_width + x; int i = d_own_data[pix_index].i; int j = d_own_data[pix_index].j; int spx_index = j * spx_width + i; atomicAdd(&(d_spx_data[spx_index].accum/*[0][0]*/[0]), d_pix_data[pix_index].l); atomicAdd(&(d_spx_data[spx_index].accum/*[0][0]*/[1]), d_pix_data[pix_index].a); atomicAdd(&(d_spx_data[spx_index].accum/*[0][0]*/[2]), d_pix_data[pix_index].b); atomicAdd(&(d_spx_data[spx_index].accum/*[0][0]*/[3]), 1); atomicAdd(&(d_spx_data[spx_index].accum/*[0][0]*/[4]), x); atomicAdd(&(d_spx_data[spx_index].accum/*[0][0]*/[5]), y); } } #define dimensions_x 128 #define dimensions_y 1 #define dimensions (dimensions_x * dimensions_y) #define log2_dimensions_x 7 #define log2_dimensions_y 0 #define log2_dimensions (log2_dimensions_x + log2_dimensions_y) #define log2_pix_at_a_time 7 #define sums 54 #define log2_pix_width 12 #define const_pix_width 4096 #define log2_spx_size 7 #define log2_spx_width 5 __global__ void k_cumulativeCountOpt1(const pix_data* d_pix_data, const own_data* d_own_data, spx_data* d_spx_data) { //bool debug = (blockIdx.x == 20 && blockIdx.y == 30 && threadIdx.x == 5); //if (debug) printf("D\n"); typedef int itemsToSum[dimensions]; __shared__ itemsToSum acc[6][3][3]; //LAB+count, 3x3 neighbors, 128 values int x = (blockIdx.x << log2_dimensions_x) + threadIdx.x; int y = ((blockIdx.y << log2_dimensions_y) + threadIdx.y) << log2_pix_at_a_time; int sx = (threadIdx.y << log2_dimensions_x) + threadIdx.x; //thread id // Initialize SMEM to 0 int* accptr = (int*)acc; itemsToSum* sumptr = (itemsToSum*)acc; #pragma unroll for (int i=0; i<sums; ++i) sumptr[i][sx] = 0; accptr = (int*)acc; int i_center = blockIdx.x; // OPT14: * blockDim.x / spx_size; //int j_center = blockIdx.y; // OPT14: y / spx_size; //int j_center = y >> log2_spx_size; int j_center = y / spx_size; int pix_index = (y << log2_pix_width) + x; for (int yidx=0; yidx<pix_at_a_time; ++yidx) { int odata = *((int*)(d_own_data + pix_index)); own_data od = *((own_data*)(&odata)); int i = od.i; int j = od.j; int nx = (i<i_center) ? 0 : ((i>i_center) ? 2 : 1); int ny = (j<j_center) ? 0 : ((j>j_center) ? 
2 : 1); int pdata = *((int*)(d_pix_data + pix_index)); pix_data pd = *((pix_data*)(&pdata)); int ayidx=1; acc[0][ny][nx][sx] = (int)pd.l + (ayidx?(acc[0][ny][nx][sx]):0); acc[1][ny][nx][sx] = (int)pd.a + (ayidx?(acc[1][ny][nx][sx]):0); acc[2][ny][nx][sx] = (int)pd.b + (ayidx?(acc[2][ny][nx][sx]):0); acc[3][ny][nx][sx] = (int)1 + (ayidx?(acc[3][ny][nx][sx]):0); acc[4][ny][nx][sx] = (int)x + (ayidx?(acc[4][ny][nx][sx]):0); acc[5][ny][nx][sx] = (int)(y+yidx) + (ayidx?(acc[5][ny][nx][sx]):0); //if (debug) //printf("yidx:%d ny:%d nx:%d accX:%d, accY:%d\n", yidx, ny, nx, acc[4][ny][nx][sx], acc[5][ny][nx][sx]); pix_index += const_pix_width; } __syncthreads(); // Collapse over X and Y for (int log2_step=log2_dimensions-1; log2_step>=0; --log2_step) { int step = 1 << log2_step; int locationIndex = sx % step; int threadGroup = sx >> log2_step; int maxThreadGroup = 1 << (log2_dimensions - log2_step); int maxLoopIndex = (sums + maxThreadGroup - 1) / maxThreadGroup; // Divide arraySize (3*3*6=54) by max threadGroup + 1 and that's the loop // Actual a = loop index * (max threadGroup + 1) + innerIndex // It looks like a lot of unnecessary math (multiplications, etc) is going // on below, but all attempts to optimize this lead to slowdowns. Looks like the // compiler is doing something smart here. for (int loopIndex=0; loopIndex<maxLoopIndex; loopIndex++) { int innerIndex = loopIndex * maxThreadGroup + threadGroup; if (innerIndex >= sums) continue; *(accptr + ((innerIndex<<log2_dimensions) + locationIndex)) += *(accptr + ((innerIndex<<log2_dimensions) + locationIndex + step)); } __syncthreads(); } if (sx >= sums) return; int c = sx % 6; sx /= 6; int nx = sx % 3; int ny = sx / 3; int j = j_center + ny - 1; if (j<0 || j>=spx_height) return; int i = i_center + nx - 1; if (i<0 || i>=spx_width) return; int spx_index = (j << log2_spx_width) + i; int* accum = (int*)(d_spx_data[spx_index].accum); //accum[sx*6 + c] = (int)acc[c][ny][nx][0]; atomicAdd(accum+c,(int)acc[c][ny][nx][0]); } __global__ void k_averaging(spx_data* d_spx_data) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; //bool debug = (i==20 && (j==15 || j==16)); //bool debug = true; if (i < spx_width && j < spx_height) { int spx_index = j * spx_width + i; int num = 0, l = 0, a = 0, b = 0, x = 0, y = 0; //for (int ny=0; ny<3; ++ny) for (int nx=0; nx<3; ++nx) //{ l += d_spx_data[spx_index].accum/*[ny][nx]*/[0]; a += d_spx_data[spx_index].accum/*[ny][nx]*/[1]; b += d_spx_data[spx_index].accum/*[ny][nx]*/[2]; num += d_spx_data[spx_index].accum/*[ny][nx]*/[3]; x += d_spx_data[spx_index].accum/*[ny][nx]*/[4]; y += d_spx_data[spx_index].accum/*[ny][nx]*/[5]; //} //if (debug) printf("i:%d j:%d l:%d a:%d b:%d num:%d x:%d y:%d\n", //i,j,l/num,a/num,b/num,num,x/num,y/num); d_spx_data[spx_index].l = l / num; d_spx_data[spx_index].a = a / num; d_spx_data[spx_index].b = b / num; d_spx_data[spx_index].x = x / num; d_spx_data[spx_index].y = y / num; } } __global__ void k_ownershipOpt(const pix_data* d_pix_data, own_data* d_own_data, const spx_data* d_spx_data) { return; // Does not work after opt14, used to be 9*32 __shared__ spx_data spx[1 * 1]; float min_dist = 10E99;// max_float; int min_i = 0; int min_j = 0; int i_sign[9] = {-1, -1, -1, 0, 0, 0, 1, 1, 1}; int j_sign[9] = {-1, 0, 1, -1, 0, 1, -1, 0, 1}; int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < pix_height && x < pix_width) { int pix_index = y * pix_width + x; int i_center = x/spx_size; int j_center = 
y/spx_size; int l = d_pix_data[pix_index].l; int a = d_pix_data[pix_index].a; int b = d_pix_data[pix_index].b; if(threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x % 3 == 0)// && threadIdx.y == 0) { int sh_idx = 0; for (int i = i_center - window_size; i <= i_center + window_size; i++) // i = i_center - 1, i_center, i_center + 1 { for(int j = j_center - window_size; j <= j_center + window_size; j++) // j = j_center - 1, j_center, j_center + 1 { if (j < 0 || j >= spx_height || i < 0 || i > spx_width) { sh_idx++; continue; } int spx_index = j * spx_width + i; // if(threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0) // printf("%i ::::: %i\n", spx_index, sh_idx); spx[sh_idx + 8*blockIdx.x] = d_spx_data[spx_index]; if(blockIdx.x > 0 && (sh_idx == 0 || sh_idx == 1 || sh_idx == 2 || sh_idx == 3 || sh_idx == 4 || sh_idx == 5)) //Why blockIdx.x-1 > 0 crashes? spx[sh_idx+3 + 8*(blockIdx.x-1)] = spx[sh_idx + 8*blockIdx.x]; if(blockIdx.x > 0 && (sh_idx == 0 || sh_idx == 1 || sh_idx == 2)) //Why blockIdx.x-1 > 0 crashes? spx[sh_idx+6 + 8*(blockIdx.x-2)] = spx[sh_idx + 8*blockIdx.x]; if(blockIdx.x < blockDim.x && (sh_idx == 3 || sh_idx == 4 || sh_idx == 5 || sh_idx == 6 || sh_idx == 7 || sh_idx == 8)) spx[sh_idx-3 + 8*(blockIdx.x+1)] = spx[sh_idx + 8*blockIdx.x]; if(blockIdx.x < blockDim.x && (sh_idx == 6 || sh_idx == 7 || sh_idx == 8)) spx[sh_idx-6 + 8*(blockIdx.x+2)] = spx[sh_idx + 8*blockIdx.x]; sh_idx++; } } } __syncthreads(); for(int i=0; i<9; i++) { int l_dist = l-(int)(spx[i + 8*blockIdx.x].l); l_dist *= l_dist; int a_dist = a-(int)(spx[i + 8*blockIdx.x].a); a_dist *= a_dist; int b_dist = b-(int)(spx[i + 8*blockIdx.x].b); b_dist *= b_dist; int dlab = l_dist + a_dist + b_dist; int x_dist = x-(int)spx[i + 8*blockIdx.x].x; x_dist *= x_dist; int y_dist = y-(int)spx[i + 8*blockIdx.x].y; y_dist *= y_dist; int dxy = x_dist + y_dist; float D = dlab + slic_factor * dxy; if (D < min_dist) { min_dist = D; min_i = i_center + i_sign[i]*window_size; min_j = j_center + j_sign[i]*window_size; } } d_own_data[pix_index].i = min_i; d_own_data[pix_index].j = min_j; } } __global__ void k_ownershipOrig(const pix_data* d_pix_data, own_data* d_own_data, const spx_data* d_spx_data) { float min_dist = 10E99;// max_float; int min_i = 0; int min_j = 0; int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < pix_height && x < pix_width) { int pix_index = y * pix_width + x; int i_center = x/spx_size; int j_center = y/spx_size; int l = d_pix_data[pix_index].l; int a = d_pix_data[pix_index].a; int b = d_pix_data[pix_index].b; for (int i = i_center - window_size; i <= i_center + window_size; i++) { if (i < 0 || i >= spx_width) continue; for(int j = j_center - window_size; j <= j_center + window_size; j++) { if (j < 0 || j >= spx_height) continue; int spx_index = j * spx_width + i; int l_dist = l-(int)(d_spx_data[spx_index].l); l_dist *= l_dist; int a_dist = a-(int)(d_spx_data[spx_index].a); a_dist *= a_dist; int b_dist = b-(int)(d_spx_data[spx_index].b); b_dist *= b_dist; int dlab = l_dist + a_dist + b_dist; int x_dist = x-(int)d_spx_data[spx_index].x; x_dist *= x_dist; int y_dist = y-(int)d_spx_data[spx_index].y; y_dist *= y_dist; int dxy = x_dist + y_dist; float D = dlab + slic_factor * dxy; if (D < min_dist) { min_dist = D; min_i = i; min_j = j; } } } d_own_data[pix_index].i = min_i; d_own_data[pix_index].j = min_j; //d_own_data[pix_index].i = (i_center / 4) * 4; //d_own_data[pix_index].j = (j_center / 4) * 4; } } __global__ void k_ownershipOpt2(const 
pix_data* d_pix_data, own_data* d_own_data, const spx_data* d_spx_data) { float min_dist = 10E99;// max_float; int min_i = 0; int min_j = 0; __shared__ int spx[3][3][5]; // Y, X, LABXY int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < pix_height && x < pix_width) { int pix_index = y * pix_width + x; int i_center = x/spx_size; int j_center = y/spx_size; // Initialize SMEM int tid = threadIdx.x + blockDim.x * threadIdx.y; int nx = tid % 3; tid /= 3; int ny = tid % 3; tid /= 3; if (tid < 5) { int value; int i = i_center + nx - 1; int j = j_center + ny - 1; if (i<0 || i>=spx_width || j<0 || j>=spx_height) { value = -1; } else { int spx_index = j * spx_width + i; const spx_data& spix = d_spx_data[spx_index]; switch(tid) //TODO:Get rid of it by using better data struct.? { case 0: value=spix.l; break; case 1: value=spix.a; break; case 2: value=spix.b; break; case 3: value=spix.x; break; case 4: value=spix.y; break; } } spx[ny][nx][tid] = value; } __syncthreads(); int l = d_pix_data[pix_index].l; int a = d_pix_data[pix_index].a; int b = d_pix_data[pix_index].b; for (int ny=0; ny<3; ++ny) for (int nx=0; nx<3; ++nx) { int* spix = spx[ny][nx]; if (spix[0]==-1) continue; int l_dist = l-spix[0]; l_dist *= l_dist; int a_dist = a-spix[1]; a_dist *= a_dist; int b_dist = b-spix[2]; b_dist *= b_dist; int dlab = l_dist + a_dist + b_dist; int x_dist = x-spix[3]; x_dist *= x_dist; int y_dist = y-spix[4]; y_dist *= y_dist; int dxy = x_dist + y_dist; float D = dlab + slic_factor * dxy; if (D < min_dist) { min_dist = D; min_i = i_center + nx - 1; min_j = j_center + ny - 1; } } d_own_data[pix_index].i = min_i; d_own_data[pix_index].j = min_j; //d_own_data[pix_index].i = (i_center / 4) * 4; //d_own_data[pix_index].j = (j_center / 4) * 4; } } __global__ void k_reset(spx_data* d_spx_data) { // Shared memory conflict test // Removing the "*64" below results in no bank conflicts, so adjacent threads // reading adjacent shorts do not cause conflicts. //__shared__ unsigned short arr[32 * 2 * 100]; //int a=arr[threadIdx.x * 64]; //d_spx_data[0].accum[0]=a; int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < spx_width && j < spx_height) { int spx_index = j * spx_width + i; //for (int ny=0; ny<3; ++ny) for (int nx=0; nx<3; ++nx) { d_spx_data[spx_index].accum/*[ny][nx]*/[0] = 0; d_spx_data[spx_index].accum/*[ny][nx]*/[1] = 0; d_spx_data[spx_index].accum/*[ny][nx]*/[2] = 0; d_spx_data[spx_index].accum/*[ny][nx]*/[3] = 0; d_spx_data[spx_index].accum/*[ny][nx]*/[4] = 0; d_spx_data[spx_index].accum/*[ny][nx]*/[5] = 0; //} } }
7cb562755a5e6a4d1a051c86cccfcdf8d7629692.cu
#include <cmath> #include <cstdio> #include "../include/slic.h" __device__ __constant__ float slic_factor; void initializeSlicFactor() { const float * slic_factor_hp = &slic_factor_h; cudaError_t cudaStatus = cudaMemcpyToSymbol(slic_factor, slic_factor_hp, sizeof(float)); } __global__ void k_measure(int* d_device_location, int target) { int accum = threadIdx.x; for (int i=1; i<100; i++) for (int j=1; j<1000; j++) { accum *= j; accum = accum ^ (threadIdx.y << j / 100); accum += target; } if (accum == target) *d_device_location = 0; } __global__ void k_cumulativeCountOrig(const pix_data* d_pix_data, const own_data* d_own_data, spx_data* d_spx_data) { //if (threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0) //{ //printf("k\n"); //} int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < pix_height && x < pix_width) { int pix_index = y * pix_width + x; int i = d_own_data[pix_index].i; int j = d_own_data[pix_index].j; int spx_index = j * spx_width + i; atomicAdd(&(d_spx_data[spx_index].accum/*[0][0]*/[0]), d_pix_data[pix_index].l); atomicAdd(&(d_spx_data[spx_index].accum/*[0][0]*/[1]), d_pix_data[pix_index].a); atomicAdd(&(d_spx_data[spx_index].accum/*[0][0]*/[2]), d_pix_data[pix_index].b); atomicAdd(&(d_spx_data[spx_index].accum/*[0][0]*/[3]), 1); atomicAdd(&(d_spx_data[spx_index].accum/*[0][0]*/[4]), x); atomicAdd(&(d_spx_data[spx_index].accum/*[0][0]*/[5]), y); } } #define dimensions_x 128 #define dimensions_y 1 #define dimensions (dimensions_x * dimensions_y) #define log2_dimensions_x 7 #define log2_dimensions_y 0 #define log2_dimensions (log2_dimensions_x + log2_dimensions_y) #define log2_pix_at_a_time 7 #define sums 54 #define log2_pix_width 12 #define const_pix_width 4096 #define log2_spx_size 7 #define log2_spx_width 5 __global__ void k_cumulativeCountOpt1(const pix_data* d_pix_data, const own_data* d_own_data, spx_data* d_spx_data) { //bool debug = (blockIdx.x == 20 && blockIdx.y == 30 && threadIdx.x == 5); //if (debug) printf("D\n"); typedef int itemsToSum[dimensions]; __shared__ itemsToSum acc[6][3][3]; //LAB+count, 3x3 neighbors, 128 values int x = (blockIdx.x << log2_dimensions_x) + threadIdx.x; int y = ((blockIdx.y << log2_dimensions_y) + threadIdx.y) << log2_pix_at_a_time; int sx = (threadIdx.y << log2_dimensions_x) + threadIdx.x; //thread id // Initialize SMEM to 0 int* accptr = (int*)acc; itemsToSum* sumptr = (itemsToSum*)acc; #pragma unroll for (int i=0; i<sums; ++i) sumptr[i][sx] = 0; accptr = (int*)acc; int i_center = blockIdx.x; // OPT14: * blockDim.x / spx_size; //int j_center = blockIdx.y; // OPT14: y / spx_size; //int j_center = y >> log2_spx_size; int j_center = y / spx_size; int pix_index = (y << log2_pix_width) + x; for (int yidx=0; yidx<pix_at_a_time; ++yidx) { int odata = *((int*)(d_own_data + pix_index)); own_data od = *((own_data*)(&odata)); int i = od.i; int j = od.j; int nx = (i<i_center) ? 0 : ((i>i_center) ? 2 : 1); int ny = (j<j_center) ? 0 : ((j>j_center) ? 
2 : 1); int pdata = *((int*)(d_pix_data + pix_index)); pix_data pd = *((pix_data*)(&pdata)); int ayidx=1; acc[0][ny][nx][sx] = (int)pd.l + (ayidx?(acc[0][ny][nx][sx]):0); acc[1][ny][nx][sx] = (int)pd.a + (ayidx?(acc[1][ny][nx][sx]):0); acc[2][ny][nx][sx] = (int)pd.b + (ayidx?(acc[2][ny][nx][sx]):0); acc[3][ny][nx][sx] = (int)1 + (ayidx?(acc[3][ny][nx][sx]):0); acc[4][ny][nx][sx] = (int)x + (ayidx?(acc[4][ny][nx][sx]):0); acc[5][ny][nx][sx] = (int)(y+yidx) + (ayidx?(acc[5][ny][nx][sx]):0); //if (debug) //printf("yidx:%d ny:%d nx:%d accX:%d, accY:%d\n", yidx, ny, nx, acc[4][ny][nx][sx], acc[5][ny][nx][sx]); pix_index += const_pix_width; } __syncthreads(); // Collapse over X and Y for (int log2_step=log2_dimensions-1; log2_step>=0; --log2_step) { int step = 1 << log2_step; int locationIndex = sx % step; int threadGroup = sx >> log2_step; int maxThreadGroup = 1 << (log2_dimensions - log2_step); int maxLoopIndex = (sums + maxThreadGroup - 1) / maxThreadGroup; // Divide arraySize (3*3*6=54) by max threadGroup + 1 and that's the loop // Actual a = loop index * (max threadGroup + 1) + innerIndex // It looks like a lot of unnecessary math (multiplications, etc) is going // on below, but all attempts to optimize this lead to slowdowns. Looks like the // compiler is doing something smart here. for (int loopIndex=0; loopIndex<maxLoopIndex; loopIndex++) { int innerIndex = loopIndex * maxThreadGroup + threadGroup; if (innerIndex >= sums) continue; *(accptr + ((innerIndex<<log2_dimensions) + locationIndex)) += *(accptr + ((innerIndex<<log2_dimensions) + locationIndex + step)); } __syncthreads(); } if (sx >= sums) return; int c = sx % 6; sx /= 6; int nx = sx % 3; int ny = sx / 3; int j = j_center + ny - 1; if (j<0 || j>=spx_height) return; int i = i_center + nx - 1; if (i<0 || i>=spx_width) return; int spx_index = (j << log2_spx_width) + i; int* accum = (int*)(d_spx_data[spx_index].accum); //accum[sx*6 + c] = (int)acc[c][ny][nx][0]; atomicAdd(accum+c,(int)acc[c][ny][nx][0]); } __global__ void k_averaging(spx_data* d_spx_data) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; //bool debug = (i==20 && (j==15 || j==16)); //bool debug = true; if (i < spx_width && j < spx_height) { int spx_index = j * spx_width + i; int num = 0, l = 0, a = 0, b = 0, x = 0, y = 0; //for (int ny=0; ny<3; ++ny) for (int nx=0; nx<3; ++nx) //{ l += d_spx_data[spx_index].accum/*[ny][nx]*/[0]; a += d_spx_data[spx_index].accum/*[ny][nx]*/[1]; b += d_spx_data[spx_index].accum/*[ny][nx]*/[2]; num += d_spx_data[spx_index].accum/*[ny][nx]*/[3]; x += d_spx_data[spx_index].accum/*[ny][nx]*/[4]; y += d_spx_data[spx_index].accum/*[ny][nx]*/[5]; //} //if (debug) printf("i:%d j:%d l:%d a:%d b:%d num:%d x:%d y:%d\n", //i,j,l/num,a/num,b/num,num,x/num,y/num); d_spx_data[spx_index].l = l / num; d_spx_data[spx_index].a = a / num; d_spx_data[spx_index].b = b / num; d_spx_data[spx_index].x = x / num; d_spx_data[spx_index].y = y / num; } } __global__ void k_ownershipOpt(const pix_data* d_pix_data, own_data* d_own_data, const spx_data* d_spx_data) { return; // Does not work after opt14, used to be 9*32 __shared__ spx_data spx[1 * 1]; float min_dist = 10E99;// max_float; int min_i = 0; int min_j = 0; int i_sign[9] = {-1, -1, -1, 0, 0, 0, 1, 1, 1}; int j_sign[9] = {-1, 0, 1, -1, 0, 1, -1, 0, 1}; int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < pix_height && x < pix_width) { int pix_index = y * pix_width + x; int i_center = x/spx_size; int j_center = 
y/spx_size; int l = d_pix_data[pix_index].l; int a = d_pix_data[pix_index].a; int b = d_pix_data[pix_index].b; if(threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x % 3 == 0)// && threadIdx.y == 0) { int sh_idx = 0; for (int i = i_center - window_size; i <= i_center + window_size; i++) // i = i_center - 1, i_center, i_center + 1 { for(int j = j_center - window_size; j <= j_center + window_size; j++) // j = j_center - 1, j_center, j_center + 1 { if (j < 0 || j >= spx_height || i < 0 || i > spx_width) { sh_idx++; continue; } int spx_index = j * spx_width + i; // if(threadIdx.x == 0 && threadIdx.y == 0 && blockIdx.x == 0 && blockIdx.y == 0) // printf("%i ::::: %i\n", spx_index, sh_idx); spx[sh_idx + 8*blockIdx.x] = d_spx_data[spx_index]; if(blockIdx.x > 0 && (sh_idx == 0 || sh_idx == 1 || sh_idx == 2 || sh_idx == 3 || sh_idx == 4 || sh_idx == 5)) //Why blockIdx.x-1 > 0 crashes? spx[sh_idx+3 + 8*(blockIdx.x-1)] = spx[sh_idx + 8*blockIdx.x]; if(blockIdx.x > 0 && (sh_idx == 0 || sh_idx == 1 || sh_idx == 2)) //Why blockIdx.x-1 > 0 crashes? spx[sh_idx+6 + 8*(blockIdx.x-2)] = spx[sh_idx + 8*blockIdx.x]; if(blockIdx.x < blockDim.x && (sh_idx == 3 || sh_idx == 4 || sh_idx == 5 || sh_idx == 6 || sh_idx == 7 || sh_idx == 8)) spx[sh_idx-3 + 8*(blockIdx.x+1)] = spx[sh_idx + 8*blockIdx.x]; if(blockIdx.x < blockDim.x && (sh_idx == 6 || sh_idx == 7 || sh_idx == 8)) spx[sh_idx-6 + 8*(blockIdx.x+2)] = spx[sh_idx + 8*blockIdx.x]; sh_idx++; } } } __syncthreads(); for(int i=0; i<9; i++) { int l_dist = l-(int)(spx[i + 8*blockIdx.x].l); l_dist *= l_dist; int a_dist = a-(int)(spx[i + 8*blockIdx.x].a); a_dist *= a_dist; int b_dist = b-(int)(spx[i + 8*blockIdx.x].b); b_dist *= b_dist; int dlab = l_dist + a_dist + b_dist; int x_dist = x-(int)spx[i + 8*blockIdx.x].x; x_dist *= x_dist; int y_dist = y-(int)spx[i + 8*blockIdx.x].y; y_dist *= y_dist; int dxy = x_dist + y_dist; float D = dlab + slic_factor * dxy; if (D < min_dist) { min_dist = D; min_i = i_center + i_sign[i]*window_size; min_j = j_center + j_sign[i]*window_size; } } d_own_data[pix_index].i = min_i; d_own_data[pix_index].j = min_j; } } __global__ void k_ownershipOrig(const pix_data* d_pix_data, own_data* d_own_data, const spx_data* d_spx_data) { float min_dist = 10E99;// max_float; int min_i = 0; int min_j = 0; int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < pix_height && x < pix_width) { int pix_index = y * pix_width + x; int i_center = x/spx_size; int j_center = y/spx_size; int l = d_pix_data[pix_index].l; int a = d_pix_data[pix_index].a; int b = d_pix_data[pix_index].b; for (int i = i_center - window_size; i <= i_center + window_size; i++) { if (i < 0 || i >= spx_width) continue; for(int j = j_center - window_size; j <= j_center + window_size; j++) { if (j < 0 || j >= spx_height) continue; int spx_index = j * spx_width + i; int l_dist = l-(int)(d_spx_data[spx_index].l); l_dist *= l_dist; int a_dist = a-(int)(d_spx_data[spx_index].a); a_dist *= a_dist; int b_dist = b-(int)(d_spx_data[spx_index].b); b_dist *= b_dist; int dlab = l_dist + a_dist + b_dist; int x_dist = x-(int)d_spx_data[spx_index].x; x_dist *= x_dist; int y_dist = y-(int)d_spx_data[spx_index].y; y_dist *= y_dist; int dxy = x_dist + y_dist; float D = dlab + slic_factor * dxy; if (D < min_dist) { min_dist = D; min_i = i; min_j = j; } } } d_own_data[pix_index].i = min_i; d_own_data[pix_index].j = min_j; //d_own_data[pix_index].i = (i_center / 4) * 4; //d_own_data[pix_index].j = (j_center / 4) * 4; } } __global__ void k_ownershipOpt2(const 
pix_data* d_pix_data, own_data* d_own_data, const spx_data* d_spx_data) { float min_dist = 10E99;// max_float; int min_i = 0; int min_j = 0; __shared__ int spx[3][3][5]; // Y, X, LABXY int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; if (y < pix_height && x < pix_width) { int pix_index = y * pix_width + x; int i_center = x/spx_size; int j_center = y/spx_size; // Initialize SMEM int tid = threadIdx.x + blockDim.x * threadIdx.y; int nx = tid % 3; tid /= 3; int ny = tid % 3; tid /= 3; if (tid < 5) { int value; int i = i_center + nx - 1; int j = j_center + ny - 1; if (i<0 || i>=spx_width || j<0 || j>=spx_height) { value = -1; } else { int spx_index = j * spx_width + i; const spx_data& spix = d_spx_data[spx_index]; switch(tid) //TODO:Get rid of it by using better data struct.? { case 0: value=spix.l; break; case 1: value=spix.a; break; case 2: value=spix.b; break; case 3: value=spix.x; break; case 4: value=spix.y; break; } } spx[ny][nx][tid] = value; } __syncthreads(); int l = d_pix_data[pix_index].l; int a = d_pix_data[pix_index].a; int b = d_pix_data[pix_index].b; for (int ny=0; ny<3; ++ny) for (int nx=0; nx<3; ++nx) { int* spix = spx[ny][nx]; if (spix[0]==-1) continue; int l_dist = l-spix[0]; l_dist *= l_dist; int a_dist = a-spix[1]; a_dist *= a_dist; int b_dist = b-spix[2]; b_dist *= b_dist; int dlab = l_dist + a_dist + b_dist; int x_dist = x-spix[3]; x_dist *= x_dist; int y_dist = y-spix[4]; y_dist *= y_dist; int dxy = x_dist + y_dist; float D = dlab + slic_factor * dxy; if (D < min_dist) { min_dist = D; min_i = i_center + nx - 1; min_j = j_center + ny - 1; } } d_own_data[pix_index].i = min_i; d_own_data[pix_index].j = min_j; //d_own_data[pix_index].i = (i_center / 4) * 4; //d_own_data[pix_index].j = (j_center / 4) * 4; } } __global__ void k_reset(spx_data* d_spx_data) { // Shared memory conflict test // Removing the "*64" below results in no bank conflicts, so adjacent threads // reading adjacent shorts do not cause conflicts. //__shared__ unsigned short arr[32 * 2 * 100]; //int a=arr[threadIdx.x * 64]; //d_spx_data[0].accum[0]=a; int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < spx_width && j < spx_height) { int spx_index = j * spx_width + i; //for (int ny=0; ny<3; ++ny) for (int nx=0; nx<3; ++nx) { d_spx_data[spx_index].accum/*[ny][nx]*/[0] = 0; d_spx_data[spx_index].accum/*[ny][nx]*/[1] = 0; d_spx_data[spx_index].accum/*[ny][nx]*/[2] = 0; d_spx_data[spx_index].accum/*[ny][nx]*/[3] = 0; d_spx_data[spx_index].accum/*[ny][nx]*/[4] = 0; d_spx_data[spx_index].accum/*[ny][nx]*/[5] = 0; //} } }
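// Editor's note: the three ownership kernels above (k_ownershipOpt, k_ownershipOrig,
// k_ownershipOpt2) all minimize the same SLIC distance, D = squared L*a*b* color
// distance + slic_factor * squared spatial distance, over a small window of candidate
// superpixel centers. The helper below is a minimal host-side sketch of that inner
// computation only; the `Pixel`/`Center` structs and the function name are hypothetical
// and are not part of the original kernels.
struct Pixel  { int l, a, b, x, y; };
struct Center { int l, a, b, x, y; };

// Returns the SLIC distance between one pixel and one candidate center,
// mirroring the D computed inside k_ownershipOrig.
inline float slic_distance(const Pixel& p, const Center& c, float slic_factor)
{
    int dl = p.l - c.l, da = p.a - c.a, db = p.b - c.b;
    int dx = p.x - c.x, dy = p.y - c.y;
    int dlab = dl * dl + da * da + db * db;  // squared color distance
    int dxy  = dx * dx + dy * dy;            // squared spatial distance
    return dlab + slic_factor * dxy;         // smaller D -> pixel owned by this center
}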
738ecfb881ecd8e99fbf4390647487f1afdacfbf.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file reader_impl.cu * @brief cuDF-IO ORC reader class implementation */ #include "io/orc/orc_gpu.h" #include "reader_impl.hpp" #include "timezone.cuh" #include <io/comp/gpuinflate.h> #include "orc.h" #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/table/table.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/traits.hpp> #include <iterator> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <nvcomp/snappy.h> #include <algorithm> #include <array> namespace cudf { namespace io { namespace detail { namespace orc { // Import functionality that's independent of legacy code using namespace cudf::io::orc; using namespace cudf::io; namespace { /** * @brief Function that translates ORC data kind to cuDF type enum */ constexpr type_id to_type_id(const orc::SchemaType& schema, bool use_np_dtypes, type_id timestamp_type_id, bool decimals_as_float64) { switch (schema.kind) { case orc::BOOLEAN: return type_id::BOOL8; case orc::BYTE: return type_id::INT8; case orc::SHORT: return type_id::INT16; case orc::INT: return type_id::INT32; case orc::LONG: return type_id::INT64; case orc::FLOAT: return type_id::FLOAT32; case orc::DOUBLE: return type_id::FLOAT64; case orc::STRING: case orc::BINARY: case orc::VARCHAR: case orc::CHAR: // Variable-length types can all be mapped to STRING return type_id::STRING; case orc::TIMESTAMP: return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id : type_id::TIMESTAMP_NANOSECONDS; case orc::DATE: // There isn't a (DAYS -> np.dtype) mapping return (use_np_dtypes) ? type_id::TIMESTAMP_MILLISECONDS : type_id::TIMESTAMP_DAYS; case orc::DECIMAL: return (decimals_as_float64) ? 
type_id::FLOAT64 : type_id::DECIMAL64; // Need to update once cuDF plans to support map type case orc::MAP: case orc::LIST: return type_id::LIST; case orc::STRUCT: return type_id::STRUCT; default: break; } return type_id::EMPTY; } /** * @brief Function that translates cuDF time unit to ORC clock frequency */ constexpr int32_t to_clockrate(type_id timestamp_type_id) { switch (timestamp_type_id) { case type_id::TIMESTAMP_SECONDS: return 1; case type_id::TIMESTAMP_MILLISECONDS: return 1000; case type_id::TIMESTAMP_MICROSECONDS: return 1000000; case type_id::TIMESTAMP_NANOSECONDS: return 1000000000; default: return 0; } } constexpr std::pair<gpu::StreamIndexType, uint32_t> get_index_type_and_pos( const orc::StreamKind kind, uint32_t skip_count, bool non_child) { switch (kind) { case orc::DATA: skip_count += 1; skip_count |= (skip_count & 0xff) << 8; return std::make_pair(gpu::CI_DATA, skip_count); case orc::LENGTH: case orc::SECONDARY: skip_count += 1; skip_count |= (skip_count & 0xff) << 16; return std::make_pair(gpu::CI_DATA2, skip_count); case orc::DICTIONARY_DATA: return std::make_pair(gpu::CI_DICTIONARY, skip_count); case orc::PRESENT: skip_count += (non_child ? 1 : 0); return std::make_pair(gpu::CI_PRESENT, skip_count); case orc::ROW_INDEX: return std::make_pair(gpu::CI_INDEX, skip_count); default: // Skip this stream as it's not strictly required return std::make_pair(gpu::CI_NUM_STREAMS, 0); } } } // namespace namespace { /** * @brief struct to store buffer data and size of list buffer */ struct list_buffer_data { size_type* data; size_type size; }; // Generates offsets for list buffer from number of elements in a row. void generate_offsets_for_list(rmm::device_uvector<list_buffer_data> const& buff_data, rmm::cuda_stream_view stream) { auto transformer = [] __device__(list_buffer_data list_data) { thrust::exclusive_scan( thrust::seq, list_data.data, list_data.data + list_data.size, list_data.data); }; thrust::for_each(rmm::exec_policy(stream), buff_data.begin(), buff_data.end(), transformer); stream.synchronize(); } /** * @brief Struct that maps ORC streams to columns */ struct orc_stream_info { orc_stream_info() = default; explicit orc_stream_info( uint64_t offset_, size_t dst_pos_, uint32_t length_, uint32_t gdf_idx_, uint32_t stripe_idx_) : offset(offset_), dst_pos(dst_pos_), length(length_), gdf_idx(gdf_idx_), stripe_idx(stripe_idx_) { } uint64_t offset; // offset in file size_t dst_pos; // offset in memory relative to start of compressed stripe data size_t length; // length in file uint32_t gdf_idx; // column index uint32_t stripe_idx; // stripe index }; /** * @brief Function that populates column descriptors stream/chunk */ size_t gather_stream_info(const size_t stripe_index, const orc::StripeInformation* stripeinfo, const orc::StripeFooter* stripefooter, const std::vector<int>& orc2gdf, const std::vector<orc::SchemaType> types, bool use_index, size_t* num_dictionary_entries, cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks, std::vector<orc_stream_info>& stream_info, bool apply_struct_map) { uint64_t src_offset = 0; uint64_t dst_offset = 0; for (const auto& stream : stripefooter->streams) { if (!stream.column_id || *stream.column_id >= orc2gdf.size()) { dst_offset += stream.length; continue; } auto const column_id = *stream.column_id; auto col = orc2gdf[column_id]; if (col == -1 and apply_struct_map) { // A struct-type column has no data itself, but rather child columns // for each of its fields. 
There is only a PRESENT stream, which // needs to be included for the reader. const auto schema_type = types[column_id]; if (schema_type.subtypes.size() != 0) { if (schema_type.kind == orc::STRUCT && stream.kind == orc::PRESENT) { for (const auto& idx : schema_type.subtypes) { auto child_idx = (idx < orc2gdf.size()) ? orc2gdf[idx] : -1; if (child_idx >= 0) { col = child_idx; auto& chunk = chunks[stripe_index][col]; chunk.strm_id[gpu::CI_PRESENT] = stream_info.size(); chunk.strm_len[gpu::CI_PRESENT] = stream.length; } } } } } if (col != -1) { if (src_offset >= stripeinfo->indexLength || use_index) { // NOTE: skip_count field is temporarily used to track index ordering auto& chunk = chunks[stripe_index][col]; const auto idx = get_index_type_and_pos(stream.kind, chunk.skip_count, col == orc2gdf[column_id]); if (idx.first < gpu::CI_NUM_STREAMS) { chunk.strm_id[idx.first] = stream_info.size(); chunk.strm_len[idx.first] = stream.length; chunk.skip_count = idx.second; if (idx.first == gpu::CI_DICTIONARY) { chunk.dictionary_start = *num_dictionary_entries; chunk.dict_len = stripefooter->columns[column_id].dictionarySize; *num_dictionary_entries += stripefooter->columns[column_id].dictionarySize; } } } stream_info.emplace_back( stripeinfo->offset + src_offset, dst_offset, stream.length, col, stripe_index); dst_offset += stream.length; } src_offset += stream.length; } return dst_offset; } /** * @brief Determines if a column should be converted from decimal to float */ bool should_convert_decimal_column_to_float(const std::vector<std::string>& columns_to_convert, cudf::io::orc::metadata& metadata, int column_index) { return (std::find(columns_to_convert.begin(), columns_to_convert.end(), metadata.get_column_name(column_index)) != columns_to_convert.end()); } } // namespace /** * @brief In order to support multiple input files/buffers we need to gather * the metadata across all of those input(s). This class provides a place * to aggregate that metadata from all the files. */ class aggregate_orc_metadata { using OrcStripeInfo = std::pair<const StripeInformation*, const StripeFooter*>; public: mutable std::vector<cudf::io::orc::metadata> per_file_metadata; size_type const num_rows; size_type const num_columns; size_type const num_stripes; bool row_grp_idx_present = true; /** * @brief Create a metadata object from each element in the source vector */ auto metadatas_from_sources(std::vector<std::unique_ptr<datasource>> const& sources) { std::vector<cudf::io::orc::metadata> metadatas; std::transform( sources.cbegin(), sources.cend(), std::back_inserter(metadatas), [](auto const& source) { return cudf::io::orc::metadata(source.get()); }); return metadatas; } /** * @brief Sums up the number of rows of each source */ size_type calc_num_rows() const { return std::accumulate( per_file_metadata.begin(), per_file_metadata.end(), 0, [](auto& sum, auto& pfm) { return sum + pfm.get_total_rows(); }); } /** * @brief Number of columns in a ORC file. 
*/ size_type calc_num_cols() const { if (not per_file_metadata.empty()) { return per_file_metadata[0].get_num_columns(); } return 0; } /** * @brief Sums up the number of stripes of each source */ size_type calc_num_stripes() const { return std::accumulate( per_file_metadata.begin(), per_file_metadata.end(), 0, [](auto& sum, auto& pfm) { return sum + pfm.get_num_stripes(); }); } aggregate_orc_metadata(std::vector<std::unique_ptr<datasource>> const& sources) : per_file_metadata(metadatas_from_sources(sources)), num_rows(calc_num_rows()), num_columns(calc_num_cols()), num_stripes(calc_num_stripes()) { // Verify that the input files have the same number of columns, // as well as matching types, compression, and names for (auto const& pfm : per_file_metadata) { CUDF_EXPECTS(per_file_metadata[0].get_num_columns() == pfm.get_num_columns(), "All sources must have the same number of columns"); CUDF_EXPECTS(per_file_metadata[0].ps.compression == pfm.ps.compression, "All sources must have the same compression type"); // Check the types, column names, and decimal scale for (size_t i = 0; i < pfm.ff.types.size(); i++) { CUDF_EXPECTS(pfm.ff.types[i].kind == per_file_metadata[0].ff.types[i].kind, "Column types across all input sources must be the same"); CUDF_EXPECTS(std::equal(pfm.ff.types[i].fieldNames.begin(), pfm.ff.types[i].fieldNames.end(), per_file_metadata[0].ff.types[i].fieldNames.begin()), "All source column names must be the same"); CUDF_EXPECTS( pfm.ff.types[i].scale.value_or(0) == per_file_metadata[0].ff.types[i].scale.value_or(0), "All scale values must be the same"); } } } auto const& get_schema(int schema_idx) const { return per_file_metadata[0].ff.types[schema_idx]; } auto get_col_type(int col_idx) const { return per_file_metadata[0].ff.types[col_idx]; } auto get_num_rows() const { return num_rows; } auto get_num_cols() const { return per_file_metadata[0].get_num_columns(); } auto get_num_stripes() const { return num_stripes; } auto get_num_source_files() const { return per_file_metadata.size(); } auto const& get_types() const { return per_file_metadata[0].ff.types; } int get_row_index_stride() const { return per_file_metadata[0].ff.rowIndexStride; } auto get_column_name(const int source_idx, const int column_idx) const { CUDF_EXPECTS(source_idx <= static_cast<int>(per_file_metadata.size()), "Out of range source_idx provided"); CUDF_EXPECTS(column_idx <= per_file_metadata[source_idx].get_num_columns(), "Out of range column_idx provided"); return per_file_metadata[source_idx].get_column_name(column_idx); } auto is_row_grp_idx_present() const { return row_grp_idx_present; } std::vector<cudf::io::orc::metadata::stripe_source_mapping> select_stripes( std::vector<std::vector<size_type>> const& user_specified_stripes, size_type& row_start, size_type& row_count) { std::vector<cudf::io::orc::metadata::stripe_source_mapping> selected_stripes_mapping; if (!user_specified_stripes.empty()) { CUDF_EXPECTS(user_specified_stripes.size() == get_num_source_files(), "Must specify stripes for each source"); // row_start is 0 if stripes are set. 
If this is not true anymore, then // row_start needs to be subtracted to get the correct row_count CUDF_EXPECTS(row_start == 0, "Start row index should be 0"); row_count = 0; // Each vector entry represents a source file; each nested vector represents the // user_defined_stripes to get from that source file for (size_t src_file_idx = 0; src_file_idx < user_specified_stripes.size(); ++src_file_idx) { std::vector<OrcStripeInfo> stripe_infos; // Coalesce stripe info at the source file later since that makes downstream processing much // easier in impl::read for (const size_t& stripe_idx : user_specified_stripes[src_file_idx]) { CUDF_EXPECTS(stripe_idx < per_file_metadata[src_file_idx].ff.stripes.size(), "Invalid stripe index"); stripe_infos.push_back( std::make_pair(&per_file_metadata[src_file_idx].ff.stripes[stripe_idx], nullptr)); row_count += per_file_metadata[src_file_idx].ff.stripes[stripe_idx].numberOfRows; } selected_stripes_mapping.push_back({static_cast<int>(src_file_idx), stripe_infos}); } } else { row_start = ::max(row_start, 0); if (row_count < 0) { row_count = static_cast<size_type>( std::min<int64_t>(get_num_rows(), std::numeric_limits<size_type>::max())); } row_count = ::min(row_count, get_num_rows() - row_start); CUDF_EXPECTS(row_count >= 0, "Invalid row count"); CUDF_EXPECTS(row_start <= get_num_rows(), "Invalid row start"); size_type count = 0; size_type stripe_skip_rows = 0; // Iterate all source files, each source file has corelating metadata for (size_t src_file_idx = 0; src_file_idx < per_file_metadata.size() && count < row_start + row_count; ++src_file_idx) { std::vector<OrcStripeInfo> stripe_infos; for (size_t stripe_idx = 0; stripe_idx < per_file_metadata[src_file_idx].ff.stripes.size() && count < row_start + row_count; ++stripe_idx) { count += per_file_metadata[src_file_idx].ff.stripes[stripe_idx].numberOfRows; if (count > row_start || count == 0) { stripe_infos.push_back( std::make_pair(&per_file_metadata[src_file_idx].ff.stripes[stripe_idx], nullptr)); } else { stripe_skip_rows = count; } } selected_stripes_mapping.push_back({static_cast<int>(src_file_idx), stripe_infos}); } // Need to remove skipped rows from the stripes which are not selected. row_start -= stripe_skip_rows; } // Read each stripe's stripefooter metadata if (not selected_stripes_mapping.empty()) { for (auto& mapping : selected_stripes_mapping) { // Resize to all stripe_info for the source level per_file_metadata[mapping.source_idx].stripefooters.resize(mapping.stripe_info.size()); for (size_t i = 0; i < mapping.stripe_info.size(); i++) { const auto stripe = mapping.stripe_info[i].first; const auto sf_comp_offset = stripe->offset + stripe->indexLength + stripe->dataLength; const auto sf_comp_length = stripe->footerLength; CUDF_EXPECTS( sf_comp_offset + sf_comp_length < per_file_metadata[mapping.source_idx].source->size(), "Invalid stripe information"); const auto buffer = per_file_metadata[mapping.source_idx].source->host_read(sf_comp_offset, sf_comp_length); size_t sf_length = 0; auto sf_data = per_file_metadata[mapping.source_idx].decompressor->Decompress( buffer->data(), sf_comp_length, &sf_length); ProtobufReader(sf_data, sf_length) .read(per_file_metadata[mapping.source_idx].stripefooters[i]); mapping.stripe_info[i].second = &per_file_metadata[mapping.source_idx].stripefooters[i]; if (stripe->indexLength == 0) { row_grp_idx_present = false; } } } } return selected_stripes_mapping; } /** * @brief Adds column as per the request and saves metadata about children. 
* Children of a column will be added to the next level. * * @param selection A vector that saves list of columns as per levels of nesting. * @param types A vector of schema types of columns. * @param level current level of nesting. * @param id current column id that needs to be added. * @param has_timestamp_column True if timestamp column present and false otherwise. * @param has_nested_column True if any of the selected column is a nested type. */ void add_column(std::vector<std::vector<orc_column_meta>>& selection, std::vector<SchemaType> const& types, const size_t level, const uint32_t id, bool& has_timestamp_column, bool& has_nested_column) { if (level == selection.size()) { selection.emplace_back(); } selection[level].push_back({id, 0}); const int col_id = selection[level].size() - 1; if (types[id].kind == orc::TIMESTAMP) { has_timestamp_column = true; } if (types[id].kind == orc::MAP or types[id].kind == orc::LIST or types[id].kind == orc::STRUCT) { has_nested_column = true; for (const auto child_id : types[id].subtypes) { // Since nested column needs to be processed before its child can be processed, // child column is being added to next level add_column(selection, types, level + 1, child_id, has_timestamp_column, has_nested_column); } selection[level][col_id].num_children = types[id].subtypes.size(); } } /** * @brief Filters and reduces down to a selection of columns * * @param use_names List of column names to select * @param has_timestamp_column True if timestamp column present and false otherwise * @param has_nested_column True if any of the selected column is a nested type. * * @return Vector of list of ORC column meta-data */ std::vector<std::vector<orc_column_meta>> select_columns( std::vector<std::string> const& use_names, bool& has_timestamp_column, bool& has_nested_column) { auto const& pfm = per_file_metadata[0]; std::vector<std::vector<orc_column_meta>> selection; if (not use_names.empty()) { uint32_t index = 0; // Have to check only parent columns auto const num_columns = pfm.ff.types[0].subtypes.size(); for (const auto& use_name : use_names) { bool name_found = false; for (uint32_t i = 0; i < num_columns; ++i, ++index) { if (index >= num_columns) { index = 0; } auto col_id = pfm.ff.types[0].subtypes[index]; if (pfm.get_column_name(col_id) == use_name) { name_found = true; add_column(selection, pfm.ff.types, 0, col_id, has_timestamp_column, has_nested_column); // Should start with next index index = i + 1; break; } } CUDF_EXPECTS(name_found, "Unknown column name : " + std::string(use_name)); } } else { for (auto const& col_id : pfm.ff.types[0].subtypes) { add_column(selection, pfm.ff.types, 0, col_id, has_timestamp_column, has_nested_column); } } return selection; } }; void snappy_decompress(device_span<gpu_inflate_input_s> comp_in, device_span<gpu_inflate_status_s> comp_stat, size_t max_uncomp_page_size, rmm::cuda_stream_view stream) { size_t num_blocks = comp_in.size(); size_t temp_size; auto status = nvcompBatchedSnappyDecompressGetTempSize(num_blocks, max_uncomp_page_size, &temp_size); CUDF_EXPECTS(nvcompStatus_t::nvcompSuccess == status, "Unable to get scratch size for snappy decompression"); rmm::device_buffer scratch(temp_size, stream); rmm::device_uvector<void const*> compressed_data_ptrs(num_blocks, stream); rmm::device_uvector<size_t> compressed_data_sizes(num_blocks, stream); rmm::device_uvector<void*> uncompressed_data_ptrs(num_blocks, stream); rmm::device_uvector<size_t> uncompressed_data_sizes(num_blocks, stream); rmm::device_uvector<size_t> 
actual_uncompressed_data_sizes(num_blocks, stream); rmm::device_uvector<nvcompStatus_t> statuses(num_blocks, stream); // Prepare the vectors auto comp_it = thrust::make_zip_iterator(compressed_data_ptrs.begin(), compressed_data_sizes.begin(), uncompressed_data_ptrs.begin(), uncompressed_data_sizes.data()); thrust::transform(rmm::exec_policy(stream), comp_in.begin(), comp_in.end(), comp_it, [] __device__(gpu_inflate_input_s in) { return thrust::make_tuple(in.srcDevice, in.srcSize, in.dstDevice, in.dstSize); }); status = nvcompBatchedSnappyDecompressAsync(compressed_data_ptrs.data(), compressed_data_sizes.data(), uncompressed_data_sizes.data(), actual_uncompressed_data_sizes.data(), num_blocks, scratch.data(), scratch.size(), uncompressed_data_ptrs.data(), statuses.data(), stream.value()); CUDF_EXPECTS(nvcompStatus_t::nvcompSuccess == status, "unable to perform snappy decompression"); CUDF_EXPECTS(thrust::equal(rmm::exec_policy(stream), statuses.begin(), statuses.end(), thrust::make_constant_iterator(nvcompStatus_t::nvcompSuccess)), "Error during snappy decompression"); thrust::for_each_n( rmm::exec_policy(stream), thrust::make_counting_iterator(0), num_blocks, [=, actual_uncomp_sizes = actual_uncompressed_data_sizes.data()] __device__(auto i) { comp_stat[i].bytes_written = actual_uncomp_sizes[i]; comp_stat[i].status = 0; }); } rmm::device_buffer reader::impl::decompress_stripe_data( cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks, const std::vector<rmm::device_buffer>& stripe_data, const OrcDecompressor* decompressor, std::vector<orc_stream_info>& stream_info, size_t num_stripes, cudf::detail::hostdevice_2dvector<gpu::RowGroup>& row_groups, size_t row_index_stride, bool use_base_stride, rmm::cuda_stream_view stream) { // Parse the columns' compressed info hostdevice_vector<gpu::CompressedStreamInfo> compinfo(0, stream_info.size(), stream); for (const auto& info : stream_info) { compinfo.insert(gpu::CompressedStreamInfo( static_cast<const uint8_t*>(stripe_data[info.stripe_idx].data()) + info.dst_pos, info.length)); } compinfo.host_to_device(stream); gpu::ParseCompressedStripeData(compinfo.device_ptr(), compinfo.size(), decompressor->GetBlockSize(), decompressor->GetLog2MaxCompressionRatio(), stream); compinfo.device_to_host(stream, true); // Count the exact number of compressed blocks size_t num_compressed_blocks = 0; size_t num_uncompressed_blocks = 0; size_t total_decomp_size = 0; for (size_t i = 0; i < compinfo.size(); ++i) { num_compressed_blocks += compinfo[i].num_compressed_blocks; num_uncompressed_blocks += compinfo[i].num_uncompressed_blocks; total_decomp_size += compinfo[i].max_uncompressed_size; } CUDF_EXPECTS(total_decomp_size > 0, "No decompressible data found"); rmm::device_buffer decomp_data(total_decomp_size, stream); rmm::device_uvector<gpu_inflate_input_s> inflate_in( num_compressed_blocks + num_uncompressed_blocks, stream); rmm::device_uvector<gpu_inflate_status_s> inflate_out(num_compressed_blocks, stream); // Parse again to populate the decompression input/output buffers size_t decomp_offset = 0; uint32_t max_uncomp_block_size = 0; uint32_t start_pos = 0; uint32_t start_pos_uncomp = (uint32_t)num_compressed_blocks; for (size_t i = 0; i < compinfo.size(); ++i) { auto dst_base = static_cast<uint8_t*>(decomp_data.data()); compinfo[i].uncompressed_data = dst_base + decomp_offset; compinfo[i].decctl = inflate_in.data() + start_pos; compinfo[i].decstatus = inflate_out.data() + start_pos; compinfo[i].copyctl = inflate_in.data() + start_pos_uncomp; 
stream_info[i].dst_pos = decomp_offset; decomp_offset += compinfo[i].max_uncompressed_size; start_pos += compinfo[i].num_compressed_blocks; start_pos_uncomp += compinfo[i].num_uncompressed_blocks; max_uncomp_block_size = ::max(max_uncomp_block_size, compinfo[i].max_uncompressed_block_size); } compinfo.host_to_device(stream); gpu::ParseCompressedStripeData(compinfo.device_ptr(), compinfo.size(), decompressor->GetBlockSize(), decompressor->GetLog2MaxCompressionRatio(), stream); // Dispatch batches of blocks to decompress if (num_compressed_blocks > 0) { auto env_use_nvcomp = std::getenv("LIBCUDF_USE_NVCOMP"); bool use_nvcomp = env_use_nvcomp != nullptr ? std::atoi(env_use_nvcomp) : 0; switch (decompressor->GetKind()) { case orc::ZLIB: CUDA_TRY( gpuinflate(inflate_in.data(), inflate_out.data(), num_compressed_blocks, 0, stream)); break; case orc::SNAPPY: if (use_nvcomp) { device_span<gpu_inflate_input_s> inflate_in_view{inflate_in.data(), num_compressed_blocks}; device_span<gpu_inflate_status_s> inflate_out_view{inflate_out.data(), num_compressed_blocks}; snappy_decompress(inflate_in_view, inflate_out_view, max_uncomp_block_size, stream); } else { CUDA_TRY( gpu_unsnap(inflate_in.data(), inflate_out.data(), num_compressed_blocks, stream)); } break; default: CUDF_EXPECTS(false, "Unexpected decompression dispatch"); break; } } if (num_uncompressed_blocks > 0) { CUDA_TRY(gpu_copy_uncompressed_blocks( inflate_in.data() + num_compressed_blocks, num_uncompressed_blocks, stream)); } gpu::PostDecompressionReassemble(compinfo.device_ptr(), compinfo.size(), stream); // Update the stream information with the updated uncompressed info // TBD: We could update the value from the information we already // have in stream_info[], but using the gpu results also updates // max_uncompressed_size to the actual uncompressed size, or zero if // decompression failed. compinfo.device_to_host(stream, true); const size_t num_columns = chunks.size().second; for (size_t i = 0; i < num_stripes; ++i) { for (size_t j = 0; j < num_columns; ++j) { auto& chunk = chunks[i][j]; for (int k = 0; k < gpu::CI_NUM_STREAMS; ++k) { if (chunk.strm_len[k] > 0 && chunk.strm_id[k] < compinfo.size()) { chunk.streams[k] = compinfo[chunk.strm_id[k]].uncompressed_data; chunk.strm_len[k] = compinfo[chunk.strm_id[k]].max_uncompressed_size; } } } } if (row_groups.size().first) { chunks.host_to_device(stream); row_groups.host_to_device(stream); gpu::ParseRowGroupIndex(row_groups.base_device_ptr(), compinfo.device_ptr(), chunks.base_device_ptr(), num_columns, num_stripes, row_groups.size().first, row_index_stride, use_base_stride, stream); } return decomp_data; } /** * @brief Updates null mask of columns whose parent is a struct column. * If struct column has null element, that row would be * skipped while writing child column in ORC, so we need to insert the missing null * elements in child column. * There is another behavior from pyspark, where if the child column doesn't have any null * elements, it will not have present stream, so in that case parent null mask need to be * copied to child column. * * @param chunks Vector of list of column chunk descriptors * @param out_buffers Output columns' device buffers * @param stream CUDA stream used for device memory operations and kernel launches. 
* @param mr Device memory resource to use for device memory allocation */ void update_null_mask(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks, std::vector<column_buffer>& out_buffers, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { const auto num_stripes = chunks.size().first; const auto num_columns = chunks.size().second; bool is_mask_updated = false; for (size_t col_idx = 0; col_idx < num_columns; ++col_idx) { if (chunks[0][col_idx].parent_validity_info.valid_map_base != nullptr) { if (not is_mask_updated) { chunks.device_to_host(stream, true); is_mask_updated = true; } auto parent_valid_map_base = chunks[0][col_idx].parent_validity_info.valid_map_base; auto child_valid_map_base = out_buffers[col_idx].null_mask(); auto child_mask_len = chunks[0][col_idx].column_num_rows - chunks[0][col_idx].parent_validity_info.null_count; auto parent_mask_len = chunks[0][col_idx].column_num_rows; if (child_valid_map_base != nullptr) { rmm::device_uvector<uint32_t> dst_idx(child_mask_len, stream); // Copy indexes at which the parent has valid value. thrust::copy_if(rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(0) + parent_mask_len, dst_idx.begin(), [parent_valid_map_base] __device__(auto idx) { return bit_is_set(parent_valid_map_base, idx); }); auto merged_null_mask = cudf::detail::create_null_mask( parent_mask_len, mask_state::ALL_NULL, rmm::cuda_stream_view(stream), mr); auto merged_mask = static_cast<bitmask_type*>(merged_null_mask.data()); uint32_t* dst_idx_ptr = dst_idx.data(); // Copy child valid bits from child column to valid indexes, this will merge both child and // parent null masks thrust::for_each(rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(0) + dst_idx.size(), [child_valid_map_base, dst_idx_ptr, merged_mask] __device__(auto idx) { if (bit_is_set(child_valid_map_base, idx)) { cudf::set_bit(merged_mask, dst_idx_ptr[idx]); }; }); out_buffers[col_idx]._null_mask = std::move(merged_null_mask); } else { // Since child column doesn't have a mask, copy parent null mask auto mask_size = bitmask_allocation_size_bytes(parent_mask_len); out_buffers[col_idx]._null_mask = rmm::device_buffer(static_cast<void*>(parent_valid_map_base), mask_size, stream, mr); } } } thrust::counting_iterator<int, thrust::host_space_tag> col_idx_it(0); thrust::counting_iterator<int, thrust::host_space_tag> stripe_idx_it(0); if (is_mask_updated) { // Update chunks with pointers to column data which might have been changed. std::for_each(stripe_idx_it, stripe_idx_it + num_stripes, [&](auto stripe_idx) { std::for_each(col_idx_it, col_idx_it + num_columns, [&](auto col_idx) { auto& chunk = chunks[stripe_idx][col_idx]; chunk.valid_map_base = out_buffers[col_idx].null_mask(); }); }); chunks.host_to_device(stream, true); } } /** * @brief Compute the per-stripe prefix sum of null count, for each struct column in the current * layer. 
*/ void scan_null_counts(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc> const& chunks, cudf::host_span<rmm::device_uvector<uint32_t>> prefix_sums, rmm::cuda_stream_view stream) { auto const num_stripes = chunks.size().first; if (num_stripes == 0) return; auto const num_columns = chunks.size().second; std::vector<thrust::pair<size_type, cudf::device_span<uint32_t>>> prefix_sums_to_update; for (auto col_idx = 0ul; col_idx < num_columns; ++col_idx) { // Null counts sums are only needed for children of struct columns if (chunks[0][col_idx].type_kind == STRUCT) { prefix_sums_to_update.emplace_back(col_idx, prefix_sums[col_idx]); } } auto const d_prefix_sums_to_update = cudf::detail::make_device_uvector_async(prefix_sums_to_update, stream); thrust::for_each(rmm::exec_policy(stream), d_prefix_sums_to_update.begin(), d_prefix_sums_to_update.end(), [chunks = cudf::detail::device_2dspan<gpu::ColumnDesc const>{chunks}] __device__( auto const& idx_psums) { auto const col_idx = idx_psums.first; auto const psums = idx_psums.second; thrust::transform( thrust::seq, thrust::make_counting_iterator(0), thrust::make_counting_iterator(0) + psums.size(), psums.begin(), [&](auto stripe_idx) { return chunks[stripe_idx][col_idx].null_count; }); thrust::inclusive_scan(thrust::seq, psums.begin(), psums.end(), psums.begin()); }); // `prefix_sums_to_update` goes out of scope, copy has to be done before we return stream.synchronize(); } void reader::impl::decode_stream_data(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks, size_t num_dicts, size_t skip_rows, timezone_table_view tz_table, cudf::detail::hostdevice_2dvector<gpu::RowGroup>& row_groups, size_t row_index_stride, std::vector<column_buffer>& out_buffers, size_t level, rmm::cuda_stream_view stream) { const auto num_stripes = chunks.size().first; const auto num_columns = chunks.size().second; thrust::counting_iterator<int, thrust::host_space_tag> col_idx_it(0); thrust::counting_iterator<int, thrust::host_space_tag> stripe_idx_it(0); // Update chunks with pointers to column data std::for_each(stripe_idx_it, stripe_idx_it + num_stripes, [&](auto stripe_idx) { std::for_each(col_idx_it, col_idx_it + num_columns, [&](auto col_idx) { auto& chunk = chunks[stripe_idx][col_idx]; chunk.column_data_base = out_buffers[col_idx].data(); chunk.valid_map_base = out_buffers[col_idx].null_mask(); }); }); // Allocate global dictionary for deserializing rmm::device_uvector<gpu::DictionaryEntry> global_dict(num_dicts, stream); chunks.host_to_device(stream, true); gpu::DecodeNullsAndStringDictionaries( chunks.base_device_ptr(), global_dict.data(), num_columns, num_stripes, skip_rows, stream); if (level > 0) { // Update nullmasks for children if parent was a struct and had null mask update_null_mask(chunks, out_buffers, stream, _mr); } // Update the null map for child columns gpu::DecodeOrcColumnData(chunks.base_device_ptr(), global_dict.data(), row_groups, num_columns, num_stripes, skip_rows, tz_table, row_groups.size().first, row_index_stride, level, stream); chunks.device_to_host(stream, true); std::for_each(col_idx_it + 0, col_idx_it + num_columns, [&](auto col_idx) { out_buffers[col_idx].null_count() = std::accumulate(stripe_idx_it + 0, stripe_idx_it + num_stripes, 0, [&](auto null_count, auto const stripe_idx) { return null_count + chunks[stripe_idx][col_idx].null_count; }); }); } // Aggregate child column metadata per stripe and per column void reader::impl::aggregate_child_meta(cudf::detail::host_2dspan<gpu::ColumnDesc> chunks, 
cudf::detail::host_2dspan<gpu::RowGroup> row_groups, std::vector<column_buffer>& out_buffers, std::vector<orc_column_meta> const& list_col, const int32_t level) { const auto num_of_stripes = chunks.size().first; const auto num_of_rowgroups = row_groups.size().first; const auto num_parent_cols = _selected_columns[level].size(); const auto num_child_cols = _selected_columns[level + 1].size(); const auto number_of_child_chunks = num_child_cols * num_of_stripes; auto& num_child_rows = _col_meta.num_child_rows; auto& parent_column_data = _col_meta.parent_column_data; // Reset the meta to store child column details. num_child_rows.resize(_selected_columns[level + 1].size()); std::fill(num_child_rows.begin(), num_child_rows.end(), 0); parent_column_data.resize(number_of_child_chunks); _col_meta.parent_column_index.resize(number_of_child_chunks); _col_meta.child_start_row.resize(number_of_child_chunks); _col_meta.num_child_rows_per_stripe.resize(number_of_child_chunks); _col_meta.rwgrp_meta.resize(num_of_rowgroups * num_child_cols); auto child_start_row = cudf::detail::host_2dspan<uint32_t>( _col_meta.child_start_row.data(), num_of_stripes, num_child_cols); auto num_child_rows_per_stripe = cudf::detail::host_2dspan<uint32_t>( _col_meta.num_child_rows_per_stripe.data(), num_of_stripes, num_child_cols); auto rwgrp_meta = cudf::detail::host_2dspan<reader_column_meta::row_group_meta>( _col_meta.rwgrp_meta.data(), num_of_rowgroups, num_child_cols); int index = 0; // number of child column processed // For each parent column, update its child column meta for each stripe. std::for_each(list_col.cbegin(), list_col.cend(), [&](const auto p_col) { const auto parent_col_idx = _col_meta.orc_col_map[level][p_col.id]; auto start_row = 0; auto processed_row_groups = 0; for (size_t stripe_id = 0; stripe_id < num_of_stripes; stripe_id++) { // Aggregate num_rows and start_row from processed parent columns per row groups if (num_of_rowgroups) { auto stripe_num_row_groups = chunks[stripe_id][parent_col_idx].num_rowgroups; auto processed_child_rows = 0; for (size_t rowgroup_id = 0; rowgroup_id < stripe_num_row_groups; rowgroup_id++, processed_row_groups++) { const auto child_rows = row_groups[processed_row_groups][parent_col_idx].num_child_rows; for (uint32_t id = 0; id < p_col.num_children; id++) { const auto child_col_idx = index + id; rwgrp_meta[processed_row_groups][child_col_idx].start_row = processed_child_rows; rwgrp_meta[processed_row_groups][child_col_idx].num_rows = child_rows; } processed_child_rows += child_rows; } } // Aggregate start row, number of rows per chunk and total number of rows in a column const auto child_rows = chunks[stripe_id][parent_col_idx].num_child_rows; for (uint32_t id = 0; id < p_col.num_children; id++) { const auto child_col_idx = index + id; num_child_rows[child_col_idx] += child_rows; num_child_rows_per_stripe[stripe_id][child_col_idx] = child_rows; // start row could be different for each column when there is nesting at each stripe level child_start_row[stripe_id][child_col_idx] = (stripe_id == 0) ? 0 : start_row; } start_row += child_rows; } // Parent column null mask and null count would be required for child column // to adjust its nullmask. 
auto type = out_buffers[parent_col_idx].type.id(); auto parent_null_count = static_cast<uint32_t>(out_buffers[parent_col_idx].null_count()); auto parent_valid_map = out_buffers[parent_col_idx].null_mask(); auto num_rows = out_buffers[parent_col_idx].size; for (uint32_t id = 0; id < p_col.num_children; id++) { const auto child_col_idx = index + id; _col_meta.parent_column_index[child_col_idx] = parent_col_idx; if (type == type_id::STRUCT) { parent_column_data[child_col_idx] = {parent_valid_map, parent_null_count}; // Number of rows in child will remain same as parent in case of struct column num_child_rows[child_col_idx] = num_rows; } else { parent_column_data[child_col_idx] = {nullptr, 0}; } } index += p_col.num_children; }); } std::string get_map_child_col_name(size_t const idx) { return (idx == 0) ? "key" : "value"; } std::unique_ptr<column> reader::impl::create_empty_column(const int32_t orc_col_id, column_name_info& schema_info, rmm::cuda_stream_view stream) { schema_info.name = _metadata->get_column_name(0, orc_col_id); // If the column type is orc::DECIMAL see if the user // desires it to be converted to float64 or not auto const decimal_as_float64 = should_convert_decimal_column_to_float( _decimal_cols_as_float, _metadata->per_file_metadata[0], orc_col_id); auto const type = to_type_id( _metadata->get_schema(orc_col_id), _use_np_dtypes, _timestamp_type.id(), decimal_as_float64); int32_t scale = 0; std::vector<std::unique_ptr<column>> child_columns; std::unique_ptr<column> out_col = nullptr; auto kind = _metadata->get_col_type(orc_col_id).kind; switch (kind) { case orc::LIST: schema_info.children.emplace_back("offsets"); schema_info.children.emplace_back(""); out_col = make_lists_column( 0, make_empty_column(data_type(type_id::INT32)), create_empty_column( _metadata->get_col_type(orc_col_id).subtypes[0], schema_info.children.back(), stream), 0, rmm::device_buffer{0, stream}, stream); break; case orc::MAP: { schema_info.children.emplace_back("offsets"); schema_info.children.emplace_back("struct"); const auto child_column_ids = _metadata->get_col_type(orc_col_id).subtypes; for (size_t idx = 0; idx < _metadata->get_col_type(orc_col_id).subtypes.size(); idx++) { auto& children_schema = schema_info.children.back().children; children_schema.emplace_back(""); child_columns.push_back(create_empty_column( child_column_ids[idx], schema_info.children.back().children.back(), stream)); auto name = get_map_child_col_name(idx); children_schema[idx].name = name; } auto struct_col = make_structs_column(0, std::move(child_columns), 0, rmm::device_buffer{0, stream}, stream); out_col = make_lists_column(0, make_empty_column(data_type(type_id::INT32)), std::move(struct_col), 0, rmm::device_buffer{0, stream}, stream); } break; case orc::STRUCT: for (const auto col : _metadata->get_col_type(orc_col_id).subtypes) { schema_info.children.emplace_back(""); child_columns.push_back(create_empty_column(col, schema_info.children.back(), stream)); } out_col = make_structs_column(0, std::move(child_columns), 0, rmm::device_buffer{0, stream}, stream); break; case orc::DECIMAL: if (type == type_id::DECIMAL64) { scale = -static_cast<int32_t>(_metadata->get_types()[orc_col_id].scale.value_or(0)); } out_col = make_empty_column(data_type(type, scale)); break; default: out_col = make_empty_column(data_type(type)); } return out_col; } // Adds child column buffers to parent column column_buffer&& reader::impl::assemble_buffer(const int32_t orc_col_id, std::vector<std::vector<column_buffer>>& col_buffers, const size_t level, 
rmm::cuda_stream_view stream) { auto const col_id = _col_meta.orc_col_map[level][orc_col_id]; auto& col_buffer = col_buffers[level][col_id]; col_buffer.name = _metadata->get_column_name(0, orc_col_id); auto kind = _metadata->get_col_type(orc_col_id).kind; switch (kind) { case orc::LIST: case orc::STRUCT: for (auto const& col : _metadata->get_col_type(orc_col_id).subtypes) { col_buffer.children.emplace_back(assemble_buffer(col, col_buffers, level + 1, stream)); } break; case orc::MAP: { std::vector<column_buffer> child_col_buffers; // Get child buffers for (size_t idx = 0; idx < _metadata->get_col_type(orc_col_id).subtypes.size(); idx++) { auto name = get_map_child_col_name(idx); auto col = _metadata->get_col_type(orc_col_id).subtypes[idx]; child_col_buffers.emplace_back(assemble_buffer(col, col_buffers, level + 1, stream)); child_col_buffers.back().name = name; } // Create a struct buffer auto num_rows = child_col_buffers[0].size; auto struct_buffer = column_buffer(cudf::data_type(type_id::STRUCT), num_rows, false, stream, _mr); struct_buffer.children = std::move(child_col_buffers); struct_buffer.name = "struct"; col_buffer.children.emplace_back(std::move(struct_buffer)); } break; default: break; } return std::move(col_buffer); } // creates columns along with schema information for each column void reader::impl::create_columns(std::vector<std::vector<column_buffer>>&& col_buffers, std::vector<std::unique_ptr<column>>& out_columns, std::vector<column_name_info>& schema_info, rmm::cuda_stream_view stream) { std::transform(_selected_columns[0].begin(), _selected_columns[0].end(), std::back_inserter(out_columns), [&](auto const col_meta) { schema_info.emplace_back(""); auto col_buffer = assemble_buffer(col_meta.id, col_buffers, 0, stream); return make_column(col_buffer, &schema_info.back(), stream, _mr); }); } reader::impl::impl(std::vector<std::unique_ptr<datasource>>&& sources, orc_reader_options const& options, rmm::mr::device_memory_resource* mr) : _mr(mr), _sources(std::move(sources)) { // Open and parse the source(s) dataset metadata _metadata = std::make_unique<aggregate_orc_metadata>(_sources); // Select only columns required by the options _selected_columns = _metadata->select_columns(options.get_columns(), _has_timestamp_column, _has_nested_column); // Override output timestamp resolution if requested if (options.get_timestamp_type().id() != type_id::EMPTY) { _timestamp_type = options.get_timestamp_type(); } // Enable or disable attempt to use row index for parsing _use_index = options.is_enabled_use_index(); // Enable or disable the conversion to numpy-compatible dtypes _use_np_dtypes = options.is_enabled_use_np_dtypes(); // Control decimals conversion (float64 or int64 with optional scale) _decimal_cols_as_float = options.get_decimal_cols_as_float(); } table_with_metadata reader::impl::read(size_type skip_rows, size_type num_rows, const std::vector<std::vector<size_type>>& stripes, rmm::cuda_stream_view stream) { CUDF_EXPECTS(skip_rows == 0 or (not _has_nested_column), "skip_rows is not supported by nested columns"); std::vector<std::unique_ptr<column>> out_columns; // buffer and stripe data are stored as per nesting level std::vector<std::vector<column_buffer>> out_buffers(_selected_columns.size()); std::vector<column_name_info> schema_info; std::vector<std::vector<rmm::device_buffer>> lvl_stripe_data(_selected_columns.size()); std::vector<std::vector<rmm::device_uvector<uint32_t>>> null_count_prefix_sums; table_metadata out_metadata; // There are no columns in the table if 
(_selected_columns.size() == 0) return {std::make_unique<table>(), std::move(out_metadata)}; // Select only stripes required (aka row groups) const auto selected_stripes = _metadata->select_stripes(stripes, skip_rows, num_rows); // Iterates through levels of nested columns, child column will be one level down // compared to parent column. for (size_t level = 0; level < _selected_columns.size(); level++) { auto& selected_columns = _selected_columns[level]; // Association between each ORC column and its cudf::column _col_meta.orc_col_map.emplace_back(_metadata->get_num_cols(), -1); std::vector<orc_column_meta> nested_col; bool is_data_empty = false; // Get a list of column data types std::vector<data_type> column_types; for (auto& col : selected_columns) { // If the column type is orc::DECIMAL see if the user // desires it to be converted to float64 or not auto const decimal_as_float64 = should_convert_decimal_column_to_float( _decimal_cols_as_float, _metadata->per_file_metadata[0], col.id); auto col_type = to_type_id( _metadata->get_col_type(col.id), _use_np_dtypes, _timestamp_type.id(), decimal_as_float64); CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type"); // Remove this once we support Decimal128 data type CUDF_EXPECTS( (col_type != type_id::DECIMAL64) or (_metadata->get_col_type(col.id).precision <= 18), "Decimal data has precision > 18, Decimal64 data type doesn't support it."); if (col_type == type_id::DECIMAL64) { // sign of the scale is changed since cuDF follows c++ libraries like CNL // which uses negative scaling, but liborc and other libraries // follow positive scaling. auto const scale = -static_cast<int32_t>(_metadata->get_col_type(col.id).scale.value_or(0)); column_types.emplace_back(col_type, scale); } else { column_types.emplace_back(col_type); } // Map each ORC column to its column _col_meta.orc_col_map[level][col.id] = column_types.size() - 1; // TODO: Once MAP type is supported in cuDF, update this for MAP as well if (col_type == type_id::LIST or col_type == type_id::STRUCT) nested_col.emplace_back(col); } // If no rows or stripes to read, return empty columns if (num_rows <= 0 || selected_stripes.empty()) { std::transform(_selected_columns[0].begin(), _selected_columns[0].end(), std::back_inserter(out_columns), [&](auto const col_meta) { schema_info.emplace_back(""); return create_empty_column(col_meta.id, schema_info.back(), stream); }); break; } else { // Get the total number of stripes across all input files. 
size_t total_num_stripes = std::accumulate(selected_stripes.begin(), selected_stripes.end(), 0, [](size_t sum, auto& stripe_source_mapping) { return sum + stripe_source_mapping.stripe_info.size(); }); const auto num_columns = selected_columns.size(); cudf::detail::hostdevice_2dvector<gpu::ColumnDesc> chunks( total_num_stripes, num_columns, stream); memset(chunks.base_host_ptr(), 0, chunks.memory_size()); const bool use_index = (_use_index == true) && // Do stripes have row group index _metadata->is_row_grp_idx_present() && // Only use if we don't have much work with complete columns & stripes // TODO: Consider nrows, gpu, and tune the threshold (num_rows > _metadata->get_row_index_stride() && !(_metadata->get_row_index_stride() & 7) && _metadata->get_row_index_stride() > 0 && num_columns * total_num_stripes < 8 * 128) && // Only use if first row is aligned to a stripe boundary // TODO: Fix logic to handle unaligned rows (skip_rows == 0); // Logically view streams as columns std::vector<orc_stream_info> stream_info; null_count_prefix_sums.emplace_back(); null_count_prefix_sums.back().reserve(_selected_columns[level].size()); std::generate_n( std::back_inserter(null_count_prefix_sums.back()), _selected_columns[level].size(), [&]() { return cudf::detail::make_zeroed_device_uvector_async<uint32_t>(total_num_stripes, stream); }); // Tracker for eventually deallocating compressed and uncompressed data auto& stripe_data = lvl_stripe_data[level]; size_t stripe_start_row = 0; size_t num_dict_entries = 0; size_t num_rowgroups = 0; int stripe_idx = 0; std::vector<std::pair<std::future<size_t>, size_t>> read_tasks; for (auto const& stripe_source_mapping : selected_stripes) { // Iterate through the source files selected stripes for (auto const& stripe : stripe_source_mapping.stripe_info) { const auto stripe_info = stripe.first; const auto stripe_footer = stripe.second; auto stream_count = stream_info.size(); const auto total_data_size = gather_stream_info(stripe_idx, stripe_info, stripe_footer, _col_meta.orc_col_map[level], _metadata->get_types(), use_index, &num_dict_entries, chunks, stream_info, level == 0); if (total_data_size == 0) { CUDF_EXPECTS(stripe_info->indexLength == 0, "Invalid index rowgroup stream data"); // In case ROW GROUP INDEX is not present and all columns are structs with no null // stream, there is nothing to read at this level. 
auto fn_check_dtype = [](auto dtype) { return dtype.id() == type_id::STRUCT; }; CUDF_EXPECTS(std::all_of(column_types.begin(), column_types.end(), fn_check_dtype), "Expected streams data within stripe"); is_data_empty = true; } stripe_data.emplace_back(total_data_size, stream); auto dst_base = static_cast<uint8_t*>(stripe_data.back().data()); // Coalesce consecutive streams into one read while (not is_data_empty and stream_count < stream_info.size()) { const auto d_dst = dst_base + stream_info[stream_count].dst_pos; const auto offset = stream_info[stream_count].offset; auto len = stream_info[stream_count].length; stream_count++; while (stream_count < stream_info.size() && stream_info[stream_count].offset == offset + len) { len += stream_info[stream_count].length; stream_count++; } if (_metadata->per_file_metadata[stripe_source_mapping.source_idx] .source->is_device_read_preferred(len)) { read_tasks.push_back( std::make_pair(_metadata->per_file_metadata[stripe_source_mapping.source_idx] .source->device_read_async(offset, len, d_dst, stream), len)); } else { const auto buffer = _metadata->per_file_metadata[stripe_source_mapping.source_idx].source->host_read( offset, len); CUDF_EXPECTS(buffer->size() == len, "Unexpected discrepancy in bytes read."); CUDA_TRY(hipMemcpyAsync( d_dst, buffer->data(), len, hipMemcpyHostToDevice, stream.value())); stream.synchronize(); } } const auto num_rows_per_stripe = stripe_info->numberOfRows; const auto rowgroup_id = num_rowgroups; auto stripe_num_rowgroups = 0; if (use_index) { stripe_num_rowgroups = (num_rows_per_stripe + _metadata->get_row_index_stride() - 1) / _metadata->get_row_index_stride(); } // Update chunks to reference streams pointers for (size_t col_idx = 0; col_idx < num_columns; col_idx++) { auto& chunk = chunks[stripe_idx][col_idx]; // start row, number of rows in a each stripe and total number of rows // may change in lower levels of nesting chunk.start_row = (level == 0) ? stripe_start_row : _col_meta.child_start_row[stripe_idx * num_columns + col_idx]; chunk.num_rows = (level == 0) ? stripe_info->numberOfRows : _col_meta.num_child_rows_per_stripe[stripe_idx * num_columns + col_idx]; chunk.column_num_rows = (level == 0) ? num_rows : _col_meta.num_child_rows[col_idx]; chunk.parent_validity_info = (level == 0) ? column_validity_info{} : _col_meta.parent_column_data[col_idx]; chunk.parent_null_count_prefix_sums = (level == 0) ? nullptr : null_count_prefix_sums[level - 1][_col_meta.parent_column_index[col_idx]].data(); chunk.encoding_kind = stripe_footer->columns[selected_columns[col_idx].id].kind; chunk.type_kind = _metadata->per_file_metadata[stripe_source_mapping.source_idx] .ff.types[selected_columns[col_idx].id] .kind; // num_child_rows for a struct column will be same, for other nested types it will be // calculated. chunk.num_child_rows = (chunk.type_kind != orc::STRUCT) ? 0 : chunk.num_rows; auto const decimal_as_float64 = should_convert_decimal_column_to_float(_decimal_cols_as_float, _metadata->per_file_metadata[0], selected_columns[col_idx].id); chunk.decimal_scale = _metadata->per_file_metadata[stripe_source_mapping.source_idx] .ff.types[selected_columns[col_idx].id] .scale.value_or(0) | (decimal_as_float64 ? orc::gpu::orc_decimal2float64_scale : 0); chunk.rowgroup_id = rowgroup_id; chunk.dtype_len = (column_types[col_idx].id() == type_id::STRING) ? sizeof(string_index_pair) : ((column_types[col_idx].id() == type_id::LIST) or (column_types[col_idx].id() == type_id::STRUCT)) ? 
sizeof(int32_t) : cudf::size_of(column_types[col_idx]); chunk.num_rowgroups = stripe_num_rowgroups; if (chunk.type_kind == orc::TIMESTAMP) { chunk.ts_clock_rate = to_clockrate(_timestamp_type.id()); } if (not is_data_empty) { for (int k = 0; k < gpu::CI_NUM_STREAMS; k++) { chunk.streams[k] = dst_base + stream_info[chunk.strm_id[k]].dst_pos; } } } stripe_start_row += num_rows_per_stripe; num_rowgroups += stripe_num_rowgroups; stripe_idx++; } } for (auto& task : read_tasks) { CUDF_EXPECTS(task.first.get() == task.second, "Unexpected discrepancy in bytes read."); } // Process dataset chunk pages into output columns if (stripe_data.size() != 0) { auto row_groups = cudf::detail::hostdevice_2dvector<gpu::RowGroup>(num_rowgroups, num_columns, stream); if (level > 0 and row_groups.size().first) { cudf::host_span<gpu::RowGroup> row_groups_span(row_groups.base_host_ptr(), num_rowgroups * num_columns); auto& rw_grp_meta = _col_meta.rwgrp_meta; // Update start row and num rows per row group std::transform(rw_grp_meta.begin(), rw_grp_meta.end(), row_groups_span.begin(), rw_grp_meta.begin(), [&](auto meta, auto& row_grp) { row_grp.num_rows = meta.num_rows; row_grp.start_row = meta.start_row; return meta; }); } // Setup row group descriptors if using indexes if (_metadata->per_file_metadata[0].ps.compression != orc::NONE and not is_data_empty) { auto decomp_data = decompress_stripe_data(chunks, stripe_data, _metadata->per_file_metadata[0].decompressor.get(), stream_info, total_num_stripes, row_groups, _metadata->get_row_index_stride(), level == 0, stream); stripe_data.clear(); stripe_data.push_back(std::move(decomp_data)); } else { if (row_groups.size().first) { chunks.host_to_device(stream); row_groups.host_to_device(stream); gpu::ParseRowGroupIndex(row_groups.base_device_ptr(), nullptr, chunks.base_device_ptr(), num_columns, total_num_stripes, num_rowgroups, _metadata->get_row_index_stride(), level == 0, stream); } } // Setup table for converting timestamp columns from local to UTC time auto const tz_table = _has_timestamp_column ? build_timezone_transition_table( selected_stripes[0].stripe_info[0].second->writerTimezone, stream) : timezone_table{}; for (size_t i = 0; i < column_types.size(); ++i) { bool is_nullable = false; for (size_t j = 0; j < total_num_stripes; ++j) { if (chunks[j][i].strm_len[gpu::CI_PRESENT] != 0) { is_nullable = true; break; } } auto is_list_type = (column_types[i].id() == type_id::LIST); auto n_rows = (level == 0) ? 
num_rows : _col_meta.num_child_rows[i]; // For list column, offset column will be always size + 1 if (is_list_type) n_rows++; out_buffers[level].emplace_back(column_types[i], n_rows, is_nullable, stream, _mr); } if (not is_data_empty) { decode_stream_data(chunks, num_dict_entries, skip_rows, tz_table.view(), row_groups, _metadata->get_row_index_stride(), out_buffers[level], level, stream); } // Extract information to process nested child columns if (nested_col.size()) { if (not is_data_empty) { scan_null_counts(chunks, null_count_prefix_sums[level], stream); } row_groups.device_to_host(stream, true); aggregate_child_meta(chunks, row_groups, out_buffers[level], nested_col, level); } // ORC stores number of elements at each row, so we need to generate offsets from that if (nested_col.size()) { std::vector<list_buffer_data> buff_data; std::for_each( out_buffers[level].begin(), out_buffers[level].end(), [&buff_data](auto& out_buffer) { if (out_buffer.type.id() == type_id::LIST) { auto data = static_cast<size_type*>(out_buffer.data()); buff_data.emplace_back(list_buffer_data{data, out_buffer.size}); } }); if (buff_data.size()) { auto const dev_buff_data = cudf::detail::make_device_uvector_async(buff_data, stream); generate_offsets_for_list(dev_buff_data, stream); } } } } } // If out_columns is empty, then create columns from buffer. if (out_columns.empty()) { create_columns(std::move(out_buffers), out_columns, schema_info, stream); } // Return column names (must match order of returned columns) out_metadata.column_names.reserve(schema_info.size()); std::transform(schema_info.cbegin(), schema_info.cend(), std::back_inserter(out_metadata.column_names), [](auto info) { return info.name; }); out_metadata.schema_info = std::move(schema_info); for (const auto& meta : _metadata->per_file_metadata) { for (const auto& kv : meta.ff.metadata) { out_metadata.user_data.insert({kv.name, kv.value}); } } return {std::make_unique<table>(std::move(out_columns)), std::move(out_metadata)}; } // Forward to implementation reader::reader(std::vector<std::unique_ptr<cudf::io::datasource>>&& sources, orc_reader_options const& options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { _impl = std::make_unique<impl>(std::move(sources), options, mr); } // Destructor within this translation unit reader::~reader() = default; // Forward to implementation table_with_metadata reader::read(orc_reader_options const& options, rmm::cuda_stream_view stream) { return _impl->read( options.get_skip_rows(), options.get_num_rows(), options.get_stripes(), stream); } } // namespace orc } // namespace detail } // namespace io } // namespace cudf
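// Editor's note: generate_offsets_for_list() in the reader above rewrites each list
// buffer in place, turning per-row element counts into list offsets with an exclusive
// scan (thrust::exclusive_scan in a device lambda). The snippet below is a minimal CPU
// sketch of that transformation under assumed inputs; the function name and the use of
// std::exclusive_scan are illustrative only and do not reflect the reader's device path.
#include <numeric>
#include <vector>

// Converts per-row element counts into list offsets:
// offsets[i] = sum of counts[0..i-1], with offsets[0] = 0.
std::vector<int> counts_to_offsets(const std::vector<int>& counts)
{
    std::vector<int> offsets(counts.size());
    std::exclusive_scan(counts.begin(), counts.end(), offsets.begin(), 0);
    return offsets;  // e.g. counts {2, 3, 1} -> offsets {0, 2, 5}
}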
738ecfb881ecd8e99fbf4390647487f1afdacfbf.cu
/* * Copyright (c) 2019-2021, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /** * @file reader_impl.cu * @brief cuDF-IO ORC reader class implementation */ #include "io/orc/orc_gpu.h" #include "reader_impl.hpp" #include "timezone.cuh" #include <io/comp/gpuinflate.h> #include "orc.h" #include <cudf/detail/utilities/vector_factories.hpp> #include <cudf/table/table.hpp> #include <cudf/utilities/bit.hpp> #include <cudf/utilities/error.hpp> #include <cudf/utilities/traits.hpp> #include <iterator> #include <rmm/cuda_stream_view.hpp> #include <rmm/device_buffer.hpp> #include <rmm/device_uvector.hpp> #include <rmm/exec_policy.hpp> #include <nvcomp/snappy.h> #include <algorithm> #include <array> namespace cudf { namespace io { namespace detail { namespace orc { // Import functionality that's independent of legacy code using namespace cudf::io::orc; using namespace cudf::io; namespace { /** * @brief Function that translates ORC data kind to cuDF type enum */ constexpr type_id to_type_id(const orc::SchemaType& schema, bool use_np_dtypes, type_id timestamp_type_id, bool decimals_as_float64) { switch (schema.kind) { case orc::BOOLEAN: return type_id::BOOL8; case orc::BYTE: return type_id::INT8; case orc::SHORT: return type_id::INT16; case orc::INT: return type_id::INT32; case orc::LONG: return type_id::INT64; case orc::FLOAT: return type_id::FLOAT32; case orc::DOUBLE: return type_id::FLOAT64; case orc::STRING: case orc::BINARY: case orc::VARCHAR: case orc::CHAR: // Variable-length types can all be mapped to STRING return type_id::STRING; case orc::TIMESTAMP: return (timestamp_type_id != type_id::EMPTY) ? timestamp_type_id : type_id::TIMESTAMP_NANOSECONDS; case orc::DATE: // There isn't a (DAYS -> np.dtype) mapping return (use_np_dtypes) ? type_id::TIMESTAMP_MILLISECONDS : type_id::TIMESTAMP_DAYS; case orc::DECIMAL: return (decimals_as_float64) ? 
type_id::FLOAT64 : type_id::DECIMAL64; // Need to update once cuDF plans to support map type case orc::MAP: case orc::LIST: return type_id::LIST; case orc::STRUCT: return type_id::STRUCT; default: break; } return type_id::EMPTY; } /** * @brief Function that translates cuDF time unit to ORC clock frequency */ constexpr int32_t to_clockrate(type_id timestamp_type_id) { switch (timestamp_type_id) { case type_id::TIMESTAMP_SECONDS: return 1; case type_id::TIMESTAMP_MILLISECONDS: return 1000; case type_id::TIMESTAMP_MICROSECONDS: return 1000000; case type_id::TIMESTAMP_NANOSECONDS: return 1000000000; default: return 0; } } constexpr std::pair<gpu::StreamIndexType, uint32_t> get_index_type_and_pos( const orc::StreamKind kind, uint32_t skip_count, bool non_child) { switch (kind) { case orc::DATA: skip_count += 1; skip_count |= (skip_count & 0xff) << 8; return std::make_pair(gpu::CI_DATA, skip_count); case orc::LENGTH: case orc::SECONDARY: skip_count += 1; skip_count |= (skip_count & 0xff) << 16; return std::make_pair(gpu::CI_DATA2, skip_count); case orc::DICTIONARY_DATA: return std::make_pair(gpu::CI_DICTIONARY, skip_count); case orc::PRESENT: skip_count += (non_child ? 1 : 0); return std::make_pair(gpu::CI_PRESENT, skip_count); case orc::ROW_INDEX: return std::make_pair(gpu::CI_INDEX, skip_count); default: // Skip this stream as it's not strictly required return std::make_pair(gpu::CI_NUM_STREAMS, 0); } } } // namespace namespace { /** * @brief struct to store buffer data and size of list buffer */ struct list_buffer_data { size_type* data; size_type size; }; // Generates offsets for list buffer from number of elements in a row. void generate_offsets_for_list(rmm::device_uvector<list_buffer_data> const& buff_data, rmm::cuda_stream_view stream) { auto transformer = [] __device__(list_buffer_data list_data) { thrust::exclusive_scan( thrust::seq, list_data.data, list_data.data + list_data.size, list_data.data); }; thrust::for_each(rmm::exec_policy(stream), buff_data.begin(), buff_data.end(), transformer); stream.synchronize(); } /** * @brief Struct that maps ORC streams to columns */ struct orc_stream_info { orc_stream_info() = default; explicit orc_stream_info( uint64_t offset_, size_t dst_pos_, uint32_t length_, uint32_t gdf_idx_, uint32_t stripe_idx_) : offset(offset_), dst_pos(dst_pos_), length(length_), gdf_idx(gdf_idx_), stripe_idx(stripe_idx_) { } uint64_t offset; // offset in file size_t dst_pos; // offset in memory relative to start of compressed stripe data size_t length; // length in file uint32_t gdf_idx; // column index uint32_t stripe_idx; // stripe index }; /** * @brief Function that populates column descriptors stream/chunk */ size_t gather_stream_info(const size_t stripe_index, const orc::StripeInformation* stripeinfo, const orc::StripeFooter* stripefooter, const std::vector<int>& orc2gdf, const std::vector<orc::SchemaType> types, bool use_index, size_t* num_dictionary_entries, cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks, std::vector<orc_stream_info>& stream_info, bool apply_struct_map) { uint64_t src_offset = 0; uint64_t dst_offset = 0; for (const auto& stream : stripefooter->streams) { if (!stream.column_id || *stream.column_id >= orc2gdf.size()) { dst_offset += stream.length; continue; } auto const column_id = *stream.column_id; auto col = orc2gdf[column_id]; if (col == -1 and apply_struct_map) { // A struct-type column has no data itself, but rather child columns // for each of its fields. 
There is only a PRESENT stream, which // needs to be included for the reader. const auto schema_type = types[column_id]; if (schema_type.subtypes.size() != 0) { if (schema_type.kind == orc::STRUCT && stream.kind == orc::PRESENT) { for (const auto& idx : schema_type.subtypes) { auto child_idx = (idx < orc2gdf.size()) ? orc2gdf[idx] : -1; if (child_idx >= 0) { col = child_idx; auto& chunk = chunks[stripe_index][col]; chunk.strm_id[gpu::CI_PRESENT] = stream_info.size(); chunk.strm_len[gpu::CI_PRESENT] = stream.length; } } } } } if (col != -1) { if (src_offset >= stripeinfo->indexLength || use_index) { // NOTE: skip_count field is temporarily used to track index ordering auto& chunk = chunks[stripe_index][col]; const auto idx = get_index_type_and_pos(stream.kind, chunk.skip_count, col == orc2gdf[column_id]); if (idx.first < gpu::CI_NUM_STREAMS) { chunk.strm_id[idx.first] = stream_info.size(); chunk.strm_len[idx.first] = stream.length; chunk.skip_count = idx.second; if (idx.first == gpu::CI_DICTIONARY) { chunk.dictionary_start = *num_dictionary_entries; chunk.dict_len = stripefooter->columns[column_id].dictionarySize; *num_dictionary_entries += stripefooter->columns[column_id].dictionarySize; } } } stream_info.emplace_back( stripeinfo->offset + src_offset, dst_offset, stream.length, col, stripe_index); dst_offset += stream.length; } src_offset += stream.length; } return dst_offset; } /** * @brief Determines if a column should be converted from decimal to float */ bool should_convert_decimal_column_to_float(const std::vector<std::string>& columns_to_convert, cudf::io::orc::metadata& metadata, int column_index) { return (std::find(columns_to_convert.begin(), columns_to_convert.end(), metadata.get_column_name(column_index)) != columns_to_convert.end()); } } // namespace /** * @brief In order to support multiple input files/buffers we need to gather * the metadata across all of those input(s). This class provides a place * to aggregate that metadata from all the files. */ class aggregate_orc_metadata { using OrcStripeInfo = std::pair<const StripeInformation*, const StripeFooter*>; public: mutable std::vector<cudf::io::orc::metadata> per_file_metadata; size_type const num_rows; size_type const num_columns; size_type const num_stripes; bool row_grp_idx_present = true; /** * @brief Create a metadata object from each element in the source vector */ auto metadatas_from_sources(std::vector<std::unique_ptr<datasource>> const& sources) { std::vector<cudf::io::orc::metadata> metadatas; std::transform( sources.cbegin(), sources.cend(), std::back_inserter(metadatas), [](auto const& source) { return cudf::io::orc::metadata(source.get()); }); return metadatas; } /** * @brief Sums up the number of rows of each source */ size_type calc_num_rows() const { return std::accumulate( per_file_metadata.begin(), per_file_metadata.end(), 0, [](auto& sum, auto& pfm) { return sum + pfm.get_total_rows(); }); } /** * @brief Number of columns in a ORC file. 
*/ size_type calc_num_cols() const { if (not per_file_metadata.empty()) { return per_file_metadata[0].get_num_columns(); } return 0; } /** * @brief Sums up the number of stripes of each source */ size_type calc_num_stripes() const { return std::accumulate( per_file_metadata.begin(), per_file_metadata.end(), 0, [](auto& sum, auto& pfm) { return sum + pfm.get_num_stripes(); }); } aggregate_orc_metadata(std::vector<std::unique_ptr<datasource>> const& sources) : per_file_metadata(metadatas_from_sources(sources)), num_rows(calc_num_rows()), num_columns(calc_num_cols()), num_stripes(calc_num_stripes()) { // Verify that the input files have the same number of columns, // as well as matching types, compression, and names for (auto const& pfm : per_file_metadata) { CUDF_EXPECTS(per_file_metadata[0].get_num_columns() == pfm.get_num_columns(), "All sources must have the same number of columns"); CUDF_EXPECTS(per_file_metadata[0].ps.compression == pfm.ps.compression, "All sources must have the same compression type"); // Check the types, column names, and decimal scale for (size_t i = 0; i < pfm.ff.types.size(); i++) { CUDF_EXPECTS(pfm.ff.types[i].kind == per_file_metadata[0].ff.types[i].kind, "Column types across all input sources must be the same"); CUDF_EXPECTS(std::equal(pfm.ff.types[i].fieldNames.begin(), pfm.ff.types[i].fieldNames.end(), per_file_metadata[0].ff.types[i].fieldNames.begin()), "All source column names must be the same"); CUDF_EXPECTS( pfm.ff.types[i].scale.value_or(0) == per_file_metadata[0].ff.types[i].scale.value_or(0), "All scale values must be the same"); } } } auto const& get_schema(int schema_idx) const { return per_file_metadata[0].ff.types[schema_idx]; } auto get_col_type(int col_idx) const { return per_file_metadata[0].ff.types[col_idx]; } auto get_num_rows() const { return num_rows; } auto get_num_cols() const { return per_file_metadata[0].get_num_columns(); } auto get_num_stripes() const { return num_stripes; } auto get_num_source_files() const { return per_file_metadata.size(); } auto const& get_types() const { return per_file_metadata[0].ff.types; } int get_row_index_stride() const { return per_file_metadata[0].ff.rowIndexStride; } auto get_column_name(const int source_idx, const int column_idx) const { CUDF_EXPECTS(source_idx <= static_cast<int>(per_file_metadata.size()), "Out of range source_idx provided"); CUDF_EXPECTS(column_idx <= per_file_metadata[source_idx].get_num_columns(), "Out of range column_idx provided"); return per_file_metadata[source_idx].get_column_name(column_idx); } auto is_row_grp_idx_present() const { return row_grp_idx_present; } std::vector<cudf::io::orc::metadata::stripe_source_mapping> select_stripes( std::vector<std::vector<size_type>> const& user_specified_stripes, size_type& row_start, size_type& row_count) { std::vector<cudf::io::orc::metadata::stripe_source_mapping> selected_stripes_mapping; if (!user_specified_stripes.empty()) { CUDF_EXPECTS(user_specified_stripes.size() == get_num_source_files(), "Must specify stripes for each source"); // row_start is 0 if stripes are set. 
If this is not true anymore, then // row_start needs to be subtracted to get the correct row_count CUDF_EXPECTS(row_start == 0, "Start row index should be 0"); row_count = 0; // Each vector entry represents a source file; each nested vector represents the // user_defined_stripes to get from that source file for (size_t src_file_idx = 0; src_file_idx < user_specified_stripes.size(); ++src_file_idx) { std::vector<OrcStripeInfo> stripe_infos; // Coalesce stripe info at the source file later since that makes downstream processing much // easier in impl::read for (const size_t& stripe_idx : user_specified_stripes[src_file_idx]) { CUDF_EXPECTS(stripe_idx < per_file_metadata[src_file_idx].ff.stripes.size(), "Invalid stripe index"); stripe_infos.push_back( std::make_pair(&per_file_metadata[src_file_idx].ff.stripes[stripe_idx], nullptr)); row_count += per_file_metadata[src_file_idx].ff.stripes[stripe_idx].numberOfRows; } selected_stripes_mapping.push_back({static_cast<int>(src_file_idx), stripe_infos}); } } else { row_start = std::max(row_start, 0); if (row_count < 0) { row_count = static_cast<size_type>( std::min<int64_t>(get_num_rows(), std::numeric_limits<size_type>::max())); } row_count = std::min(row_count, get_num_rows() - row_start); CUDF_EXPECTS(row_count >= 0, "Invalid row count"); CUDF_EXPECTS(row_start <= get_num_rows(), "Invalid row start"); size_type count = 0; size_type stripe_skip_rows = 0; // Iterate all source files, each source file has corelating metadata for (size_t src_file_idx = 0; src_file_idx < per_file_metadata.size() && count < row_start + row_count; ++src_file_idx) { std::vector<OrcStripeInfo> stripe_infos; for (size_t stripe_idx = 0; stripe_idx < per_file_metadata[src_file_idx].ff.stripes.size() && count < row_start + row_count; ++stripe_idx) { count += per_file_metadata[src_file_idx].ff.stripes[stripe_idx].numberOfRows; if (count > row_start || count == 0) { stripe_infos.push_back( std::make_pair(&per_file_metadata[src_file_idx].ff.stripes[stripe_idx], nullptr)); } else { stripe_skip_rows = count; } } selected_stripes_mapping.push_back({static_cast<int>(src_file_idx), stripe_infos}); } // Need to remove skipped rows from the stripes which are not selected. row_start -= stripe_skip_rows; } // Read each stripe's stripefooter metadata if (not selected_stripes_mapping.empty()) { for (auto& mapping : selected_stripes_mapping) { // Resize to all stripe_info for the source level per_file_metadata[mapping.source_idx].stripefooters.resize(mapping.stripe_info.size()); for (size_t i = 0; i < mapping.stripe_info.size(); i++) { const auto stripe = mapping.stripe_info[i].first; const auto sf_comp_offset = stripe->offset + stripe->indexLength + stripe->dataLength; const auto sf_comp_length = stripe->footerLength; CUDF_EXPECTS( sf_comp_offset + sf_comp_length < per_file_metadata[mapping.source_idx].source->size(), "Invalid stripe information"); const auto buffer = per_file_metadata[mapping.source_idx].source->host_read(sf_comp_offset, sf_comp_length); size_t sf_length = 0; auto sf_data = per_file_metadata[mapping.source_idx].decompressor->Decompress( buffer->data(), sf_comp_length, &sf_length); ProtobufReader(sf_data, sf_length) .read(per_file_metadata[mapping.source_idx].stripefooters[i]); mapping.stripe_info[i].second = &per_file_metadata[mapping.source_idx].stripefooters[i]; if (stripe->indexLength == 0) { row_grp_idx_present = false; } } } } return selected_stripes_mapping; } /** * @brief Adds column as per the request and saves metadata about children. 
* Children of a column will be added to the next level. * * @param selection A vector that saves list of columns as per levels of nesting. * @param types A vector of schema types of columns. * @param level current level of nesting. * @param id current column id that needs to be added. * @param has_timestamp_column True if timestamp column present and false otherwise. * @param has_nested_column True if any of the selected column is a nested type. */ void add_column(std::vector<std::vector<orc_column_meta>>& selection, std::vector<SchemaType> const& types, const size_t level, const uint32_t id, bool& has_timestamp_column, bool& has_nested_column) { if (level == selection.size()) { selection.emplace_back(); } selection[level].push_back({id, 0}); const int col_id = selection[level].size() - 1; if (types[id].kind == orc::TIMESTAMP) { has_timestamp_column = true; } if (types[id].kind == orc::MAP or types[id].kind == orc::LIST or types[id].kind == orc::STRUCT) { has_nested_column = true; for (const auto child_id : types[id].subtypes) { // Since nested column needs to be processed before its child can be processed, // child column is being added to next level add_column(selection, types, level + 1, child_id, has_timestamp_column, has_nested_column); } selection[level][col_id].num_children = types[id].subtypes.size(); } } /** * @brief Filters and reduces down to a selection of columns * * @param use_names List of column names to select * @param has_timestamp_column True if timestamp column present and false otherwise * @param has_nested_column True if any of the selected column is a nested type. * * @return Vector of list of ORC column meta-data */ std::vector<std::vector<orc_column_meta>> select_columns( std::vector<std::string> const& use_names, bool& has_timestamp_column, bool& has_nested_column) { auto const& pfm = per_file_metadata[0]; std::vector<std::vector<orc_column_meta>> selection; if (not use_names.empty()) { uint32_t index = 0; // Have to check only parent columns auto const num_columns = pfm.ff.types[0].subtypes.size(); for (const auto& use_name : use_names) { bool name_found = false; for (uint32_t i = 0; i < num_columns; ++i, ++index) { if (index >= num_columns) { index = 0; } auto col_id = pfm.ff.types[0].subtypes[index]; if (pfm.get_column_name(col_id) == use_name) { name_found = true; add_column(selection, pfm.ff.types, 0, col_id, has_timestamp_column, has_nested_column); // Should start with next index index = i + 1; break; } } CUDF_EXPECTS(name_found, "Unknown column name : " + std::string(use_name)); } } else { for (auto const& col_id : pfm.ff.types[0].subtypes) { add_column(selection, pfm.ff.types, 0, col_id, has_timestamp_column, has_nested_column); } } return selection; } }; void snappy_decompress(device_span<gpu_inflate_input_s> comp_in, device_span<gpu_inflate_status_s> comp_stat, size_t max_uncomp_page_size, rmm::cuda_stream_view stream) { size_t num_blocks = comp_in.size(); size_t temp_size; auto status = nvcompBatchedSnappyDecompressGetTempSize(num_blocks, max_uncomp_page_size, &temp_size); CUDF_EXPECTS(nvcompStatus_t::nvcompSuccess == status, "Unable to get scratch size for snappy decompression"); rmm::device_buffer scratch(temp_size, stream); rmm::device_uvector<void const*> compressed_data_ptrs(num_blocks, stream); rmm::device_uvector<size_t> compressed_data_sizes(num_blocks, stream); rmm::device_uvector<void*> uncompressed_data_ptrs(num_blocks, stream); rmm::device_uvector<size_t> uncompressed_data_sizes(num_blocks, stream); rmm::device_uvector<size_t> 
actual_uncompressed_data_sizes(num_blocks, stream); rmm::device_uvector<nvcompStatus_t> statuses(num_blocks, stream); // Prepare the vectors auto comp_it = thrust::make_zip_iterator(compressed_data_ptrs.begin(), compressed_data_sizes.begin(), uncompressed_data_ptrs.begin(), uncompressed_data_sizes.data()); thrust::transform(rmm::exec_policy(stream), comp_in.begin(), comp_in.end(), comp_it, [] __device__(gpu_inflate_input_s in) { return thrust::make_tuple(in.srcDevice, in.srcSize, in.dstDevice, in.dstSize); }); status = nvcompBatchedSnappyDecompressAsync(compressed_data_ptrs.data(), compressed_data_sizes.data(), uncompressed_data_sizes.data(), actual_uncompressed_data_sizes.data(), num_blocks, scratch.data(), scratch.size(), uncompressed_data_ptrs.data(), statuses.data(), stream.value()); CUDF_EXPECTS(nvcompStatus_t::nvcompSuccess == status, "unable to perform snappy decompression"); CUDF_EXPECTS(thrust::equal(rmm::exec_policy(stream), statuses.begin(), statuses.end(), thrust::make_constant_iterator(nvcompStatus_t::nvcompSuccess)), "Error during snappy decompression"); thrust::for_each_n( rmm::exec_policy(stream), thrust::make_counting_iterator(0), num_blocks, [=, actual_uncomp_sizes = actual_uncompressed_data_sizes.data()] __device__(auto i) { comp_stat[i].bytes_written = actual_uncomp_sizes[i]; comp_stat[i].status = 0; }); } rmm::device_buffer reader::impl::decompress_stripe_data( cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks, const std::vector<rmm::device_buffer>& stripe_data, const OrcDecompressor* decompressor, std::vector<orc_stream_info>& stream_info, size_t num_stripes, cudf::detail::hostdevice_2dvector<gpu::RowGroup>& row_groups, size_t row_index_stride, bool use_base_stride, rmm::cuda_stream_view stream) { // Parse the columns' compressed info hostdevice_vector<gpu::CompressedStreamInfo> compinfo(0, stream_info.size(), stream); for (const auto& info : stream_info) { compinfo.insert(gpu::CompressedStreamInfo( static_cast<const uint8_t*>(stripe_data[info.stripe_idx].data()) + info.dst_pos, info.length)); } compinfo.host_to_device(stream); gpu::ParseCompressedStripeData(compinfo.device_ptr(), compinfo.size(), decompressor->GetBlockSize(), decompressor->GetLog2MaxCompressionRatio(), stream); compinfo.device_to_host(stream, true); // Count the exact number of compressed blocks size_t num_compressed_blocks = 0; size_t num_uncompressed_blocks = 0; size_t total_decomp_size = 0; for (size_t i = 0; i < compinfo.size(); ++i) { num_compressed_blocks += compinfo[i].num_compressed_blocks; num_uncompressed_blocks += compinfo[i].num_uncompressed_blocks; total_decomp_size += compinfo[i].max_uncompressed_size; } CUDF_EXPECTS(total_decomp_size > 0, "No decompressible data found"); rmm::device_buffer decomp_data(total_decomp_size, stream); rmm::device_uvector<gpu_inflate_input_s> inflate_in( num_compressed_blocks + num_uncompressed_blocks, stream); rmm::device_uvector<gpu_inflate_status_s> inflate_out(num_compressed_blocks, stream); // Parse again to populate the decompression input/output buffers size_t decomp_offset = 0; uint32_t max_uncomp_block_size = 0; uint32_t start_pos = 0; uint32_t start_pos_uncomp = (uint32_t)num_compressed_blocks; for (size_t i = 0; i < compinfo.size(); ++i) { auto dst_base = static_cast<uint8_t*>(decomp_data.data()); compinfo[i].uncompressed_data = dst_base + decomp_offset; compinfo[i].decctl = inflate_in.data() + start_pos; compinfo[i].decstatus = inflate_out.data() + start_pos; compinfo[i].copyctl = inflate_in.data() + start_pos_uncomp; 
stream_info[i].dst_pos = decomp_offset; decomp_offset += compinfo[i].max_uncompressed_size; start_pos += compinfo[i].num_compressed_blocks; start_pos_uncomp += compinfo[i].num_uncompressed_blocks; max_uncomp_block_size = std::max(max_uncomp_block_size, compinfo[i].max_uncompressed_block_size); } compinfo.host_to_device(stream); gpu::ParseCompressedStripeData(compinfo.device_ptr(), compinfo.size(), decompressor->GetBlockSize(), decompressor->GetLog2MaxCompressionRatio(), stream); // Dispatch batches of blocks to decompress if (num_compressed_blocks > 0) { auto env_use_nvcomp = std::getenv("LIBCUDF_USE_NVCOMP"); bool use_nvcomp = env_use_nvcomp != nullptr ? std::atoi(env_use_nvcomp) : 0; switch (decompressor->GetKind()) { case orc::ZLIB: CUDA_TRY( gpuinflate(inflate_in.data(), inflate_out.data(), num_compressed_blocks, 0, stream)); break; case orc::SNAPPY: if (use_nvcomp) { device_span<gpu_inflate_input_s> inflate_in_view{inflate_in.data(), num_compressed_blocks}; device_span<gpu_inflate_status_s> inflate_out_view{inflate_out.data(), num_compressed_blocks}; snappy_decompress(inflate_in_view, inflate_out_view, max_uncomp_block_size, stream); } else { CUDA_TRY( gpu_unsnap(inflate_in.data(), inflate_out.data(), num_compressed_blocks, stream)); } break; default: CUDF_EXPECTS(false, "Unexpected decompression dispatch"); break; } } if (num_uncompressed_blocks > 0) { CUDA_TRY(gpu_copy_uncompressed_blocks( inflate_in.data() + num_compressed_blocks, num_uncompressed_blocks, stream)); } gpu::PostDecompressionReassemble(compinfo.device_ptr(), compinfo.size(), stream); // Update the stream information with the updated uncompressed info // TBD: We could update the value from the information we already // have in stream_info[], but using the gpu results also updates // max_uncompressed_size to the actual uncompressed size, or zero if // decompression failed. compinfo.device_to_host(stream, true); const size_t num_columns = chunks.size().second; for (size_t i = 0; i < num_stripes; ++i) { for (size_t j = 0; j < num_columns; ++j) { auto& chunk = chunks[i][j]; for (int k = 0; k < gpu::CI_NUM_STREAMS; ++k) { if (chunk.strm_len[k] > 0 && chunk.strm_id[k] < compinfo.size()) { chunk.streams[k] = compinfo[chunk.strm_id[k]].uncompressed_data; chunk.strm_len[k] = compinfo[chunk.strm_id[k]].max_uncompressed_size; } } } } if (row_groups.size().first) { chunks.host_to_device(stream); row_groups.host_to_device(stream); gpu::ParseRowGroupIndex(row_groups.base_device_ptr(), compinfo.device_ptr(), chunks.base_device_ptr(), num_columns, num_stripes, row_groups.size().first, row_index_stride, use_base_stride, stream); } return decomp_data; } /** * @brief Updates null mask of columns whose parent is a struct column. * If struct column has null element, that row would be * skipped while writing child column in ORC, so we need to insert the missing null * elements in child column. * There is another behavior from pyspark, where if the child column doesn't have any null * elements, it will not have present stream, so in that case parent null mask need to be * copied to child column. * * @param chunks Vector of list of column chunk descriptors * @param out_buffers Output columns' device buffers * @param stream CUDA stream used for device memory operations and kernel launches. 
* @param mr Device memory resource to use for device memory allocation */ void update_null_mask(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks, std::vector<column_buffer>& out_buffers, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { const auto num_stripes = chunks.size().first; const auto num_columns = chunks.size().second; bool is_mask_updated = false; for (size_t col_idx = 0; col_idx < num_columns; ++col_idx) { if (chunks[0][col_idx].parent_validity_info.valid_map_base != nullptr) { if (not is_mask_updated) { chunks.device_to_host(stream, true); is_mask_updated = true; } auto parent_valid_map_base = chunks[0][col_idx].parent_validity_info.valid_map_base; auto child_valid_map_base = out_buffers[col_idx].null_mask(); auto child_mask_len = chunks[0][col_idx].column_num_rows - chunks[0][col_idx].parent_validity_info.null_count; auto parent_mask_len = chunks[0][col_idx].column_num_rows; if (child_valid_map_base != nullptr) { rmm::device_uvector<uint32_t> dst_idx(child_mask_len, stream); // Copy indexes at which the parent has valid value. thrust::copy_if(rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(0) + parent_mask_len, dst_idx.begin(), [parent_valid_map_base] __device__(auto idx) { return bit_is_set(parent_valid_map_base, idx); }); auto merged_null_mask = cudf::detail::create_null_mask( parent_mask_len, mask_state::ALL_NULL, rmm::cuda_stream_view(stream), mr); auto merged_mask = static_cast<bitmask_type*>(merged_null_mask.data()); uint32_t* dst_idx_ptr = dst_idx.data(); // Copy child valid bits from child column to valid indexes, this will merge both child and // parent null masks thrust::for_each(rmm::exec_policy(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(0) + dst_idx.size(), [child_valid_map_base, dst_idx_ptr, merged_mask] __device__(auto idx) { if (bit_is_set(child_valid_map_base, idx)) { cudf::set_bit(merged_mask, dst_idx_ptr[idx]); }; }); out_buffers[col_idx]._null_mask = std::move(merged_null_mask); } else { // Since child column doesn't have a mask, copy parent null mask auto mask_size = bitmask_allocation_size_bytes(parent_mask_len); out_buffers[col_idx]._null_mask = rmm::device_buffer(static_cast<void*>(parent_valid_map_base), mask_size, stream, mr); } } } thrust::counting_iterator<int, thrust::host_space_tag> col_idx_it(0); thrust::counting_iterator<int, thrust::host_space_tag> stripe_idx_it(0); if (is_mask_updated) { // Update chunks with pointers to column data which might have been changed. std::for_each(stripe_idx_it, stripe_idx_it + num_stripes, [&](auto stripe_idx) { std::for_each(col_idx_it, col_idx_it + num_columns, [&](auto col_idx) { auto& chunk = chunks[stripe_idx][col_idx]; chunk.valid_map_base = out_buffers[col_idx].null_mask(); }); }); chunks.host_to_device(stream, true); } } /** * @brief Compute the per-stripe prefix sum of null count, for each struct column in the current * layer. 
*/ void scan_null_counts(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc> const& chunks, cudf::host_span<rmm::device_uvector<uint32_t>> prefix_sums, rmm::cuda_stream_view stream) { auto const num_stripes = chunks.size().first; if (num_stripes == 0) return; auto const num_columns = chunks.size().second; std::vector<thrust::pair<size_type, cudf::device_span<uint32_t>>> prefix_sums_to_update; for (auto col_idx = 0ul; col_idx < num_columns; ++col_idx) { // Null counts sums are only needed for children of struct columns if (chunks[0][col_idx].type_kind == STRUCT) { prefix_sums_to_update.emplace_back(col_idx, prefix_sums[col_idx]); } } auto const d_prefix_sums_to_update = cudf::detail::make_device_uvector_async(prefix_sums_to_update, stream); thrust::for_each(rmm::exec_policy(stream), d_prefix_sums_to_update.begin(), d_prefix_sums_to_update.end(), [chunks = cudf::detail::device_2dspan<gpu::ColumnDesc const>{chunks}] __device__( auto const& idx_psums) { auto const col_idx = idx_psums.first; auto const psums = idx_psums.second; thrust::transform( thrust::seq, thrust::make_counting_iterator(0), thrust::make_counting_iterator(0) + psums.size(), psums.begin(), [&](auto stripe_idx) { return chunks[stripe_idx][col_idx].null_count; }); thrust::inclusive_scan(thrust::seq, psums.begin(), psums.end(), psums.begin()); }); // `prefix_sums_to_update` goes out of scope, copy has to be done before we return stream.synchronize(); } void reader::impl::decode_stream_data(cudf::detail::hostdevice_2dvector<gpu::ColumnDesc>& chunks, size_t num_dicts, size_t skip_rows, timezone_table_view tz_table, cudf::detail::hostdevice_2dvector<gpu::RowGroup>& row_groups, size_t row_index_stride, std::vector<column_buffer>& out_buffers, size_t level, rmm::cuda_stream_view stream) { const auto num_stripes = chunks.size().first; const auto num_columns = chunks.size().second; thrust::counting_iterator<int, thrust::host_space_tag> col_idx_it(0); thrust::counting_iterator<int, thrust::host_space_tag> stripe_idx_it(0); // Update chunks with pointers to column data std::for_each(stripe_idx_it, stripe_idx_it + num_stripes, [&](auto stripe_idx) { std::for_each(col_idx_it, col_idx_it + num_columns, [&](auto col_idx) { auto& chunk = chunks[stripe_idx][col_idx]; chunk.column_data_base = out_buffers[col_idx].data(); chunk.valid_map_base = out_buffers[col_idx].null_mask(); }); }); // Allocate global dictionary for deserializing rmm::device_uvector<gpu::DictionaryEntry> global_dict(num_dicts, stream); chunks.host_to_device(stream, true); gpu::DecodeNullsAndStringDictionaries( chunks.base_device_ptr(), global_dict.data(), num_columns, num_stripes, skip_rows, stream); if (level > 0) { // Update nullmasks for children if parent was a struct and had null mask update_null_mask(chunks, out_buffers, stream, _mr); } // Update the null map for child columns gpu::DecodeOrcColumnData(chunks.base_device_ptr(), global_dict.data(), row_groups, num_columns, num_stripes, skip_rows, tz_table, row_groups.size().first, row_index_stride, level, stream); chunks.device_to_host(stream, true); std::for_each(col_idx_it + 0, col_idx_it + num_columns, [&](auto col_idx) { out_buffers[col_idx].null_count() = std::accumulate(stripe_idx_it + 0, stripe_idx_it + num_stripes, 0, [&](auto null_count, auto const stripe_idx) { return null_count + chunks[stripe_idx][col_idx].null_count; }); }); } // Aggregate child column metadata per stripe and per column void reader::impl::aggregate_child_meta(cudf::detail::host_2dspan<gpu::ColumnDesc> chunks, 
cudf::detail::host_2dspan<gpu::RowGroup> row_groups, std::vector<column_buffer>& out_buffers, std::vector<orc_column_meta> const& list_col, const int32_t level) { const auto num_of_stripes = chunks.size().first; const auto num_of_rowgroups = row_groups.size().first; const auto num_parent_cols = _selected_columns[level].size(); const auto num_child_cols = _selected_columns[level + 1].size(); const auto number_of_child_chunks = num_child_cols * num_of_stripes; auto& num_child_rows = _col_meta.num_child_rows; auto& parent_column_data = _col_meta.parent_column_data; // Reset the meta to store child column details. num_child_rows.resize(_selected_columns[level + 1].size()); std::fill(num_child_rows.begin(), num_child_rows.end(), 0); parent_column_data.resize(number_of_child_chunks); _col_meta.parent_column_index.resize(number_of_child_chunks); _col_meta.child_start_row.resize(number_of_child_chunks); _col_meta.num_child_rows_per_stripe.resize(number_of_child_chunks); _col_meta.rwgrp_meta.resize(num_of_rowgroups * num_child_cols); auto child_start_row = cudf::detail::host_2dspan<uint32_t>( _col_meta.child_start_row.data(), num_of_stripes, num_child_cols); auto num_child_rows_per_stripe = cudf::detail::host_2dspan<uint32_t>( _col_meta.num_child_rows_per_stripe.data(), num_of_stripes, num_child_cols); auto rwgrp_meta = cudf::detail::host_2dspan<reader_column_meta::row_group_meta>( _col_meta.rwgrp_meta.data(), num_of_rowgroups, num_child_cols); int index = 0; // number of child column processed // For each parent column, update its child column meta for each stripe. std::for_each(list_col.cbegin(), list_col.cend(), [&](const auto p_col) { const auto parent_col_idx = _col_meta.orc_col_map[level][p_col.id]; auto start_row = 0; auto processed_row_groups = 0; for (size_t stripe_id = 0; stripe_id < num_of_stripes; stripe_id++) { // Aggregate num_rows and start_row from processed parent columns per row groups if (num_of_rowgroups) { auto stripe_num_row_groups = chunks[stripe_id][parent_col_idx].num_rowgroups; auto processed_child_rows = 0; for (size_t rowgroup_id = 0; rowgroup_id < stripe_num_row_groups; rowgroup_id++, processed_row_groups++) { const auto child_rows = row_groups[processed_row_groups][parent_col_idx].num_child_rows; for (uint32_t id = 0; id < p_col.num_children; id++) { const auto child_col_idx = index + id; rwgrp_meta[processed_row_groups][child_col_idx].start_row = processed_child_rows; rwgrp_meta[processed_row_groups][child_col_idx].num_rows = child_rows; } processed_child_rows += child_rows; } } // Aggregate start row, number of rows per chunk and total number of rows in a column const auto child_rows = chunks[stripe_id][parent_col_idx].num_child_rows; for (uint32_t id = 0; id < p_col.num_children; id++) { const auto child_col_idx = index + id; num_child_rows[child_col_idx] += child_rows; num_child_rows_per_stripe[stripe_id][child_col_idx] = child_rows; // start row could be different for each column when there is nesting at each stripe level child_start_row[stripe_id][child_col_idx] = (stripe_id == 0) ? 0 : start_row; } start_row += child_rows; } // Parent column null mask and null count would be required for child column // to adjust its nullmask. 
auto type = out_buffers[parent_col_idx].type.id(); auto parent_null_count = static_cast<uint32_t>(out_buffers[parent_col_idx].null_count()); auto parent_valid_map = out_buffers[parent_col_idx].null_mask(); auto num_rows = out_buffers[parent_col_idx].size; for (uint32_t id = 0; id < p_col.num_children; id++) { const auto child_col_idx = index + id; _col_meta.parent_column_index[child_col_idx] = parent_col_idx; if (type == type_id::STRUCT) { parent_column_data[child_col_idx] = {parent_valid_map, parent_null_count}; // Number of rows in child will remain same as parent in case of struct column num_child_rows[child_col_idx] = num_rows; } else { parent_column_data[child_col_idx] = {nullptr, 0}; } } index += p_col.num_children; }); } std::string get_map_child_col_name(size_t const idx) { return (idx == 0) ? "key" : "value"; } std::unique_ptr<column> reader::impl::create_empty_column(const int32_t orc_col_id, column_name_info& schema_info, rmm::cuda_stream_view stream) { schema_info.name = _metadata->get_column_name(0, orc_col_id); // If the column type is orc::DECIMAL see if the user // desires it to be converted to float64 or not auto const decimal_as_float64 = should_convert_decimal_column_to_float( _decimal_cols_as_float, _metadata->per_file_metadata[0], orc_col_id); auto const type = to_type_id( _metadata->get_schema(orc_col_id), _use_np_dtypes, _timestamp_type.id(), decimal_as_float64); int32_t scale = 0; std::vector<std::unique_ptr<column>> child_columns; std::unique_ptr<column> out_col = nullptr; auto kind = _metadata->get_col_type(orc_col_id).kind; switch (kind) { case orc::LIST: schema_info.children.emplace_back("offsets"); schema_info.children.emplace_back(""); out_col = make_lists_column( 0, make_empty_column(data_type(type_id::INT32)), create_empty_column( _metadata->get_col_type(orc_col_id).subtypes[0], schema_info.children.back(), stream), 0, rmm::device_buffer{0, stream}, stream); break; case orc::MAP: { schema_info.children.emplace_back("offsets"); schema_info.children.emplace_back("struct"); const auto child_column_ids = _metadata->get_col_type(orc_col_id).subtypes; for (size_t idx = 0; idx < _metadata->get_col_type(orc_col_id).subtypes.size(); idx++) { auto& children_schema = schema_info.children.back().children; children_schema.emplace_back(""); child_columns.push_back(create_empty_column( child_column_ids[idx], schema_info.children.back().children.back(), stream)); auto name = get_map_child_col_name(idx); children_schema[idx].name = name; } auto struct_col = make_structs_column(0, std::move(child_columns), 0, rmm::device_buffer{0, stream}, stream); out_col = make_lists_column(0, make_empty_column(data_type(type_id::INT32)), std::move(struct_col), 0, rmm::device_buffer{0, stream}, stream); } break; case orc::STRUCT: for (const auto col : _metadata->get_col_type(orc_col_id).subtypes) { schema_info.children.emplace_back(""); child_columns.push_back(create_empty_column(col, schema_info.children.back(), stream)); } out_col = make_structs_column(0, std::move(child_columns), 0, rmm::device_buffer{0, stream}, stream); break; case orc::DECIMAL: if (type == type_id::DECIMAL64) { scale = -static_cast<int32_t>(_metadata->get_types()[orc_col_id].scale.value_or(0)); } out_col = make_empty_column(data_type(type, scale)); break; default: out_col = make_empty_column(data_type(type)); } return out_col; } // Adds child column buffers to parent column column_buffer&& reader::impl::assemble_buffer(const int32_t orc_col_id, std::vector<std::vector<column_buffer>>& col_buffers, const size_t level, 
rmm::cuda_stream_view stream) { auto const col_id = _col_meta.orc_col_map[level][orc_col_id]; auto& col_buffer = col_buffers[level][col_id]; col_buffer.name = _metadata->get_column_name(0, orc_col_id); auto kind = _metadata->get_col_type(orc_col_id).kind; switch (kind) { case orc::LIST: case orc::STRUCT: for (auto const& col : _metadata->get_col_type(orc_col_id).subtypes) { col_buffer.children.emplace_back(assemble_buffer(col, col_buffers, level + 1, stream)); } break; case orc::MAP: { std::vector<column_buffer> child_col_buffers; // Get child buffers for (size_t idx = 0; idx < _metadata->get_col_type(orc_col_id).subtypes.size(); idx++) { auto name = get_map_child_col_name(idx); auto col = _metadata->get_col_type(orc_col_id).subtypes[idx]; child_col_buffers.emplace_back(assemble_buffer(col, col_buffers, level + 1, stream)); child_col_buffers.back().name = name; } // Create a struct buffer auto num_rows = child_col_buffers[0].size; auto struct_buffer = column_buffer(cudf::data_type(type_id::STRUCT), num_rows, false, stream, _mr); struct_buffer.children = std::move(child_col_buffers); struct_buffer.name = "struct"; col_buffer.children.emplace_back(std::move(struct_buffer)); } break; default: break; } return std::move(col_buffer); } // creates columns along with schema information for each column void reader::impl::create_columns(std::vector<std::vector<column_buffer>>&& col_buffers, std::vector<std::unique_ptr<column>>& out_columns, std::vector<column_name_info>& schema_info, rmm::cuda_stream_view stream) { std::transform(_selected_columns[0].begin(), _selected_columns[0].end(), std::back_inserter(out_columns), [&](auto const col_meta) { schema_info.emplace_back(""); auto col_buffer = assemble_buffer(col_meta.id, col_buffers, 0, stream); return make_column(col_buffer, &schema_info.back(), stream, _mr); }); } reader::impl::impl(std::vector<std::unique_ptr<datasource>>&& sources, orc_reader_options const& options, rmm::mr::device_memory_resource* mr) : _mr(mr), _sources(std::move(sources)) { // Open and parse the source(s) dataset metadata _metadata = std::make_unique<aggregate_orc_metadata>(_sources); // Select only columns required by the options _selected_columns = _metadata->select_columns(options.get_columns(), _has_timestamp_column, _has_nested_column); // Override output timestamp resolution if requested if (options.get_timestamp_type().id() != type_id::EMPTY) { _timestamp_type = options.get_timestamp_type(); } // Enable or disable attempt to use row index for parsing _use_index = options.is_enabled_use_index(); // Enable or disable the conversion to numpy-compatible dtypes _use_np_dtypes = options.is_enabled_use_np_dtypes(); // Control decimals conversion (float64 or int64 with optional scale) _decimal_cols_as_float = options.get_decimal_cols_as_float(); } table_with_metadata reader::impl::read(size_type skip_rows, size_type num_rows, const std::vector<std::vector<size_type>>& stripes, rmm::cuda_stream_view stream) { CUDF_EXPECTS(skip_rows == 0 or (not _has_nested_column), "skip_rows is not supported by nested columns"); std::vector<std::unique_ptr<column>> out_columns; // buffer and stripe data are stored as per nesting level std::vector<std::vector<column_buffer>> out_buffers(_selected_columns.size()); std::vector<column_name_info> schema_info; std::vector<std::vector<rmm::device_buffer>> lvl_stripe_data(_selected_columns.size()); std::vector<std::vector<rmm::device_uvector<uint32_t>>> null_count_prefix_sums; table_metadata out_metadata; // There are no columns in the table if 
(_selected_columns.size() == 0) return {std::make_unique<table>(), std::move(out_metadata)}; // Select only stripes required (aka row groups) const auto selected_stripes = _metadata->select_stripes(stripes, skip_rows, num_rows); // Iterates through levels of nested columns, child column will be one level down // compared to parent column. for (size_t level = 0; level < _selected_columns.size(); level++) { auto& selected_columns = _selected_columns[level]; // Association between each ORC column and its cudf::column _col_meta.orc_col_map.emplace_back(_metadata->get_num_cols(), -1); std::vector<orc_column_meta> nested_col; bool is_data_empty = false; // Get a list of column data types std::vector<data_type> column_types; for (auto& col : selected_columns) { // If the column type is orc::DECIMAL see if the user // desires it to be converted to float64 or not auto const decimal_as_float64 = should_convert_decimal_column_to_float( _decimal_cols_as_float, _metadata->per_file_metadata[0], col.id); auto col_type = to_type_id( _metadata->get_col_type(col.id), _use_np_dtypes, _timestamp_type.id(), decimal_as_float64); CUDF_EXPECTS(col_type != type_id::EMPTY, "Unknown type"); // Remove this once we support Decimal128 data type CUDF_EXPECTS( (col_type != type_id::DECIMAL64) or (_metadata->get_col_type(col.id).precision <= 18), "Decimal data has precision > 18, Decimal64 data type doesn't support it."); if (col_type == type_id::DECIMAL64) { // sign of the scale is changed since cuDF follows c++ libraries like CNL // which uses negative scaling, but liborc and other libraries // follow positive scaling. auto const scale = -static_cast<int32_t>(_metadata->get_col_type(col.id).scale.value_or(0)); column_types.emplace_back(col_type, scale); } else { column_types.emplace_back(col_type); } // Map each ORC column to its column _col_meta.orc_col_map[level][col.id] = column_types.size() - 1; // TODO: Once MAP type is supported in cuDF, update this for MAP as well if (col_type == type_id::LIST or col_type == type_id::STRUCT) nested_col.emplace_back(col); } // If no rows or stripes to read, return empty columns if (num_rows <= 0 || selected_stripes.empty()) { std::transform(_selected_columns[0].begin(), _selected_columns[0].end(), std::back_inserter(out_columns), [&](auto const col_meta) { schema_info.emplace_back(""); return create_empty_column(col_meta.id, schema_info.back(), stream); }); break; } else { // Get the total number of stripes across all input files. 
size_t total_num_stripes = std::accumulate(selected_stripes.begin(), selected_stripes.end(), 0, [](size_t sum, auto& stripe_source_mapping) { return sum + stripe_source_mapping.stripe_info.size(); }); const auto num_columns = selected_columns.size(); cudf::detail::hostdevice_2dvector<gpu::ColumnDesc> chunks( total_num_stripes, num_columns, stream); memset(chunks.base_host_ptr(), 0, chunks.memory_size()); const bool use_index = (_use_index == true) && // Do stripes have row group index _metadata->is_row_grp_idx_present() && // Only use if we don't have much work with complete columns & stripes // TODO: Consider nrows, gpu, and tune the threshold (num_rows > _metadata->get_row_index_stride() && !(_metadata->get_row_index_stride() & 7) && _metadata->get_row_index_stride() > 0 && num_columns * total_num_stripes < 8 * 128) && // Only use if first row is aligned to a stripe boundary // TODO: Fix logic to handle unaligned rows (skip_rows == 0); // Logically view streams as columns std::vector<orc_stream_info> stream_info; null_count_prefix_sums.emplace_back(); null_count_prefix_sums.back().reserve(_selected_columns[level].size()); std::generate_n( std::back_inserter(null_count_prefix_sums.back()), _selected_columns[level].size(), [&]() { return cudf::detail::make_zeroed_device_uvector_async<uint32_t>(total_num_stripes, stream); }); // Tracker for eventually deallocating compressed and uncompressed data auto& stripe_data = lvl_stripe_data[level]; size_t stripe_start_row = 0; size_t num_dict_entries = 0; size_t num_rowgroups = 0; int stripe_idx = 0; std::vector<std::pair<std::future<size_t>, size_t>> read_tasks; for (auto const& stripe_source_mapping : selected_stripes) { // Iterate through the source files selected stripes for (auto const& stripe : stripe_source_mapping.stripe_info) { const auto stripe_info = stripe.first; const auto stripe_footer = stripe.second; auto stream_count = stream_info.size(); const auto total_data_size = gather_stream_info(stripe_idx, stripe_info, stripe_footer, _col_meta.orc_col_map[level], _metadata->get_types(), use_index, &num_dict_entries, chunks, stream_info, level == 0); if (total_data_size == 0) { CUDF_EXPECTS(stripe_info->indexLength == 0, "Invalid index rowgroup stream data"); // In case ROW GROUP INDEX is not present and all columns are structs with no null // stream, there is nothing to read at this level. 
auto fn_check_dtype = [](auto dtype) { return dtype.id() == type_id::STRUCT; }; CUDF_EXPECTS(std::all_of(column_types.begin(), column_types.end(), fn_check_dtype), "Expected streams data within stripe"); is_data_empty = true; } stripe_data.emplace_back(total_data_size, stream); auto dst_base = static_cast<uint8_t*>(stripe_data.back().data()); // Coalesce consecutive streams into one read while (not is_data_empty and stream_count < stream_info.size()) { const auto d_dst = dst_base + stream_info[stream_count].dst_pos; const auto offset = stream_info[stream_count].offset; auto len = stream_info[stream_count].length; stream_count++; while (stream_count < stream_info.size() && stream_info[stream_count].offset == offset + len) { len += stream_info[stream_count].length; stream_count++; } if (_metadata->per_file_metadata[stripe_source_mapping.source_idx] .source->is_device_read_preferred(len)) { read_tasks.push_back( std::make_pair(_metadata->per_file_metadata[stripe_source_mapping.source_idx] .source->device_read_async(offset, len, d_dst, stream), len)); } else { const auto buffer = _metadata->per_file_metadata[stripe_source_mapping.source_idx].source->host_read( offset, len); CUDF_EXPECTS(buffer->size() == len, "Unexpected discrepancy in bytes read."); CUDA_TRY(cudaMemcpyAsync( d_dst, buffer->data(), len, cudaMemcpyHostToDevice, stream.value())); stream.synchronize(); } } const auto num_rows_per_stripe = stripe_info->numberOfRows; const auto rowgroup_id = num_rowgroups; auto stripe_num_rowgroups = 0; if (use_index) { stripe_num_rowgroups = (num_rows_per_stripe + _metadata->get_row_index_stride() - 1) / _metadata->get_row_index_stride(); } // Update chunks to reference streams pointers for (size_t col_idx = 0; col_idx < num_columns; col_idx++) { auto& chunk = chunks[stripe_idx][col_idx]; // start row, number of rows in a each stripe and total number of rows // may change in lower levels of nesting chunk.start_row = (level == 0) ? stripe_start_row : _col_meta.child_start_row[stripe_idx * num_columns + col_idx]; chunk.num_rows = (level == 0) ? stripe_info->numberOfRows : _col_meta.num_child_rows_per_stripe[stripe_idx * num_columns + col_idx]; chunk.column_num_rows = (level == 0) ? num_rows : _col_meta.num_child_rows[col_idx]; chunk.parent_validity_info = (level == 0) ? column_validity_info{} : _col_meta.parent_column_data[col_idx]; chunk.parent_null_count_prefix_sums = (level == 0) ? nullptr : null_count_prefix_sums[level - 1][_col_meta.parent_column_index[col_idx]].data(); chunk.encoding_kind = stripe_footer->columns[selected_columns[col_idx].id].kind; chunk.type_kind = _metadata->per_file_metadata[stripe_source_mapping.source_idx] .ff.types[selected_columns[col_idx].id] .kind; // num_child_rows for a struct column will be same, for other nested types it will be // calculated. chunk.num_child_rows = (chunk.type_kind != orc::STRUCT) ? 0 : chunk.num_rows; auto const decimal_as_float64 = should_convert_decimal_column_to_float(_decimal_cols_as_float, _metadata->per_file_metadata[0], selected_columns[col_idx].id); chunk.decimal_scale = _metadata->per_file_metadata[stripe_source_mapping.source_idx] .ff.types[selected_columns[col_idx].id] .scale.value_or(0) | (decimal_as_float64 ? orc::gpu::orc_decimal2float64_scale : 0); chunk.rowgroup_id = rowgroup_id; chunk.dtype_len = (column_types[col_idx].id() == type_id::STRING) ? sizeof(string_index_pair) : ((column_types[col_idx].id() == type_id::LIST) or (column_types[col_idx].id() == type_id::STRUCT)) ? 
sizeof(int32_t) : cudf::size_of(column_types[col_idx]); chunk.num_rowgroups = stripe_num_rowgroups; if (chunk.type_kind == orc::TIMESTAMP) { chunk.ts_clock_rate = to_clockrate(_timestamp_type.id()); } if (not is_data_empty) { for (int k = 0; k < gpu::CI_NUM_STREAMS; k++) { chunk.streams[k] = dst_base + stream_info[chunk.strm_id[k]].dst_pos; } } } stripe_start_row += num_rows_per_stripe; num_rowgroups += stripe_num_rowgroups; stripe_idx++; } } for (auto& task : read_tasks) { CUDF_EXPECTS(task.first.get() == task.second, "Unexpected discrepancy in bytes read."); } // Process dataset chunk pages into output columns if (stripe_data.size() != 0) { auto row_groups = cudf::detail::hostdevice_2dvector<gpu::RowGroup>(num_rowgroups, num_columns, stream); if (level > 0 and row_groups.size().first) { cudf::host_span<gpu::RowGroup> row_groups_span(row_groups.base_host_ptr(), num_rowgroups * num_columns); auto& rw_grp_meta = _col_meta.rwgrp_meta; // Update start row and num rows per row group std::transform(rw_grp_meta.begin(), rw_grp_meta.end(), row_groups_span.begin(), rw_grp_meta.begin(), [&](auto meta, auto& row_grp) { row_grp.num_rows = meta.num_rows; row_grp.start_row = meta.start_row; return meta; }); } // Setup row group descriptors if using indexes if (_metadata->per_file_metadata[0].ps.compression != orc::NONE and not is_data_empty) { auto decomp_data = decompress_stripe_data(chunks, stripe_data, _metadata->per_file_metadata[0].decompressor.get(), stream_info, total_num_stripes, row_groups, _metadata->get_row_index_stride(), level == 0, stream); stripe_data.clear(); stripe_data.push_back(std::move(decomp_data)); } else { if (row_groups.size().first) { chunks.host_to_device(stream); row_groups.host_to_device(stream); gpu::ParseRowGroupIndex(row_groups.base_device_ptr(), nullptr, chunks.base_device_ptr(), num_columns, total_num_stripes, num_rowgroups, _metadata->get_row_index_stride(), level == 0, stream); } } // Setup table for converting timestamp columns from local to UTC time auto const tz_table = _has_timestamp_column ? build_timezone_transition_table( selected_stripes[0].stripe_info[0].second->writerTimezone, stream) : timezone_table{}; for (size_t i = 0; i < column_types.size(); ++i) { bool is_nullable = false; for (size_t j = 0; j < total_num_stripes; ++j) { if (chunks[j][i].strm_len[gpu::CI_PRESENT] != 0) { is_nullable = true; break; } } auto is_list_type = (column_types[i].id() == type_id::LIST); auto n_rows = (level == 0) ? 
num_rows : _col_meta.num_child_rows[i]; // For list column, offset column will be always size + 1 if (is_list_type) n_rows++; out_buffers[level].emplace_back(column_types[i], n_rows, is_nullable, stream, _mr); } if (not is_data_empty) { decode_stream_data(chunks, num_dict_entries, skip_rows, tz_table.view(), row_groups, _metadata->get_row_index_stride(), out_buffers[level], level, stream); } // Extract information to process nested child columns if (nested_col.size()) { if (not is_data_empty) { scan_null_counts(chunks, null_count_prefix_sums[level], stream); } row_groups.device_to_host(stream, true); aggregate_child_meta(chunks, row_groups, out_buffers[level], nested_col, level); } // ORC stores number of elements at each row, so we need to generate offsets from that if (nested_col.size()) { std::vector<list_buffer_data> buff_data; std::for_each( out_buffers[level].begin(), out_buffers[level].end(), [&buff_data](auto& out_buffer) { if (out_buffer.type.id() == type_id::LIST) { auto data = static_cast<size_type*>(out_buffer.data()); buff_data.emplace_back(list_buffer_data{data, out_buffer.size}); } }); if (buff_data.size()) { auto const dev_buff_data = cudf::detail::make_device_uvector_async(buff_data, stream); generate_offsets_for_list(dev_buff_data, stream); } } } } } // If out_columns is empty, then create columns from buffer. if (out_columns.empty()) { create_columns(std::move(out_buffers), out_columns, schema_info, stream); } // Return column names (must match order of returned columns) out_metadata.column_names.reserve(schema_info.size()); std::transform(schema_info.cbegin(), schema_info.cend(), std::back_inserter(out_metadata.column_names), [](auto info) { return info.name; }); out_metadata.schema_info = std::move(schema_info); for (const auto& meta : _metadata->per_file_metadata) { for (const auto& kv : meta.ff.metadata) { out_metadata.user_data.insert({kv.name, kv.value}); } } return {std::make_unique<table>(std::move(out_columns)), std::move(out_metadata)}; } // Forward to implementation reader::reader(std::vector<std::unique_ptr<cudf::io::datasource>>&& sources, orc_reader_options const& options, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { _impl = std::make_unique<impl>(std::move(sources), options, mr); } // Destructor within this translation unit reader::~reader() = default; // Forward to implementation table_with_metadata reader::read(orc_reader_options const& options, rmm::cuda_stream_view stream) { return _impl->read( options.get_skip_rows(), options.get_num_rows(), options.get_stripes(), stream); } } // namespace orc } // namespace detail } // namespace io } // namespace cudf
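// ---------------------------------------------------------------------------
// Illustrative, standalone sketch (not from the original file):
// generate_offsets_for_list above turns the per-row element counts that ORC
// stores for LIST columns into offsets by running an exclusive scan in place
// over each column buffer. This Thrust example reproduces that idea on a
// single hypothetical buffer; the counts are made-up data, and it mirrors the
// reader's allocation of size + 1 entries so the terminating offset fits.
// ---------------------------------------------------------------------------
#include <thrust/device_vector.h>
#include <thrust/scan.h>

#include <cstdio>
#include <vector>

int main()
{
  // Hypothetical per-row element counts for a 4-row LIST column,
  // plus one extra slot for the terminating offset.
  std::vector<int> h_counts{3, 0, 2, 5, 0};
  thrust::device_vector<int> buf(h_counts.begin(), h_counts.end());

  // In-place exclusive scan: counts -> offsets, as the reader does per buffer.
  thrust::exclusive_scan(buf.begin(), buf.end(), buf.begin());

  for (std::size_t i = 0; i < buf.size(); ++i) {
    std::printf("offset[%zu] = %d\n", i, static_cast<int>(buf[i]));
  }
  // Expected output: 0 3 3 5 10
  return 0;
}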
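// ---------------------------------------------------------------------------
// Illustrative, standalone sketch (not from the original file): when reading
// stripe data, the loop in reader::impl::read coalesces streams that are
// adjacent in the file (next.offset == current.offset + current.length) into
// a single device or host read. This host-only example shows just that
// coalescing step over a hypothetical stream list; offsets and lengths are
// made-up values.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <cstdio>
#include <vector>

struct stream_span {
  uint64_t offset;  // position of the stream in the file
  uint64_t length;  // stream length in bytes
};

int main()
{
  // Hypothetical streams: the first three are contiguous, the last one is not.
  std::vector<stream_span> streams{{100, 40}, {140, 8}, {148, 32}, {400, 16}};

  for (std::size_t i = 0; i < streams.size();) {
    uint64_t const offset = streams[i].offset;
    uint64_t len          = streams[i].length;
    ++i;
    // Merge every following stream that starts exactly where the previous ends.
    while (i < streams.size() && streams[i].offset == offset + len) {
      len += streams[i].length;
      ++i;
    }
    // The real reader issues one device_read_async/host_read per merged span.
    std::printf("read offset=%llu length=%llu\n",
                static_cast<unsigned long long>(offset),
                static_cast<unsigned long long>(len));
  }
  // Expected: one read covering [100, 180) and one covering [400, 416).
  return 0;
}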
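// ---------------------------------------------------------------------------
// Illustrative, standalone sketch (not from the original file):
// update_null_mask above re-expands a child column's validity to parent
// length, because ORC omits child rows whose struct parent is null. The real
// code operates on packed bitmasks with cudf's bit utilities; this simplified
// version uses plain uint8_t flags to show the same two steps:
// (1) copy_if the indices where the parent is valid, then (2) scatter the
// child's validity to those indices. All data is made up.
// (Requires nvcc --extended-lambda for the __device__ lambdas.)
// ---------------------------------------------------------------------------
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#include <thrust/for_each.h>
#include <thrust/iterator/counting_iterator.h>

#include <cstdint>
#include <cstdio>
#include <vector>

int main()
{
  // Parent validity over 6 rows; the child only stores the 4 parent-valid rows.
  std::vector<uint8_t> h_parent{1, 0, 1, 1, 0, 1};
  std::vector<uint8_t> h_child{1, 0, 1, 1};  // validity of the stored child rows

  thrust::device_vector<uint8_t> parent(h_parent.begin(), h_parent.end());
  thrust::device_vector<uint8_t> child(h_child.begin(), h_child.end());
  thrust::device_vector<int> dst_idx(child.size());
  thrust::device_vector<uint8_t> merged(parent.size(), 0);  // start "all null"

  uint8_t const* parent_ptr = parent.data().get();

  // Step 1: indices at which the parent has a valid value.
  thrust::copy_if(thrust::device,
                  thrust::make_counting_iterator(0),
                  thrust::make_counting_iterator(static_cast<int>(parent.size())),
                  dst_idx.begin(),
                  [parent_ptr] __device__(int idx) { return parent_ptr[idx] != 0; });

  // Step 2: scatter child validity to those indices, merging both masks.
  uint8_t const* child_ptr = child.data().get();
  int const* dst_ptr       = dst_idx.data().get();
  uint8_t* merged_ptr      = merged.data().get();
  thrust::for_each(thrust::device,
                   thrust::make_counting_iterator(0),
                   thrust::make_counting_iterator(static_cast<int>(child.size())),
                   [child_ptr, dst_ptr, merged_ptr] __device__(int i) {
                     if (child_ptr[i]) { merged_ptr[dst_ptr[i]] = 1; }
                   });

  for (std::size_t i = 0; i < merged.size(); ++i) {
    std::printf("row %zu valid=%d\n", i, static_cast<int>(merged[i]));
  }
  // Expected: 1 0 0 1 0 1 (rows 1 and 4 null from the parent, row 2 null from the child)
  return 0;
}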
c5df48e285dd8873e0c32cfa85eea428c4cdb1e5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel5_plus_4_right; int xdim0_update_halo_kernel5_plus_4_right_h = -1; __constant__ int ydim0_update_halo_kernel5_plus_4_right; int ydim0_update_halo_kernel5_plus_4_right_h = -1; __constant__ int xdim1_update_halo_kernel5_plus_4_right; int xdim1_update_halo_kernel5_plus_4_right_h = -1; __constant__ int ydim1_update_halo_kernel5_plus_4_right; int ydim1_update_halo_kernel5_plus_4_right_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel5_plus_4_right * (y) + \ xdim0_update_halo_kernel5_plus_4_right * \ ydim0_update_halo_kernel5_plus_4_right * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel5_plus_4_right * (y) + \ xdim1_update_halo_kernel5_plus_4_right * \ ydim1_update_halo_kernel5_plus_4_right * (z)) // user function __device__ inline void update_halo_kernel5_plus_4_right(double *vol_flux_z, double *mass_flux_z, const int *fields) { if (fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0, 0, 0)] = (vol_flux_z[OPS_ACC0(-4, 0, 0)]); if (fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0, 0, 0)] = (mass_flux_z[OPS_ACC1(-4, 0, 0)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel5_plus_4_right(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel5_plus_4_right + idx_z * 1 * 1 * xdim0_update_halo_kernel5_plus_4_right * ydim0_update_halo_kernel5_plus_4_right; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel5_plus_4_right + idx_z * 1 * 1 * xdim1_update_halo_kernel5_plus_4_right * ydim1_update_halo_kernel5_plus_4_right; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel5_plus_4_right(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel5_plus_4_right(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 134)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(134, "update_halo_kernel5_plus_4_right"); OPS_kernels[134].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = 
args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel5_plus_4_right_h || ydim0 != ydim0_update_halo_kernel5_plus_4_right_h || xdim1 != xdim1_update_halo_kernel5_plus_4_right_h || ydim1 != ydim1_update_halo_kernel5_plus_4_right_h) { hipMemcpyToSymbol(xdim0_update_halo_kernel5_plus_4_right, &xdim0, sizeof(int)); xdim0_update_halo_kernel5_plus_4_right_h = xdim0; hipMemcpyToSymbol(ydim0_update_halo_kernel5_plus_4_right, &ydim0, sizeof(int)); ydim0_update_halo_kernel5_plus_4_right_h = ydim0; hipMemcpyToSymbol(xdim1_update_halo_kernel5_plus_4_right, &xdim1, sizeof(int)); xdim1_update_halo_kernel5_plus_4_right_h = xdim1; hipMemcpyToSymbol(ydim1_update_halo_kernel5_plus_4_right, &ydim1, sizeof(int)); ydim1_update_halo_kernel5_plus_4_right_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[134].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel5_plus_4_right), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[134].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[134].mpi_time += t2 - t1; OPS_kernels[134].transfer += ops_compute_transfer(dim, start, end, &arg0); 
OPS_kernels[134].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
c5df48e285dd8873e0c32cfa85eea428c4cdb1e5.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel5_plus_4_right; int xdim0_update_halo_kernel5_plus_4_right_h = -1; __constant__ int ydim0_update_halo_kernel5_plus_4_right; int ydim0_update_halo_kernel5_plus_4_right_h = -1; __constant__ int xdim1_update_halo_kernel5_plus_4_right; int xdim1_update_halo_kernel5_plus_4_right_h = -1; __constant__ int ydim1_update_halo_kernel5_plus_4_right; int ydim1_update_halo_kernel5_plus_4_right_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #define OPS_ACC0(x, y, z) \ (x + xdim0_update_halo_kernel5_plus_4_right * (y) + \ xdim0_update_halo_kernel5_plus_4_right * \ ydim0_update_halo_kernel5_plus_4_right * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_update_halo_kernel5_plus_4_right * (y) + \ xdim1_update_halo_kernel5_plus_4_right * \ ydim1_update_halo_kernel5_plus_4_right * (z)) // user function __device__ inline void update_halo_kernel5_plus_4_right(double *vol_flux_z, double *mass_flux_z, const int *fields) { if (fields[FIELD_VOL_FLUX_Z] == 1) vol_flux_z[OPS_ACC0(0, 0, 0)] = (vol_flux_z[OPS_ACC0(-4, 0, 0)]); if (fields[FIELD_MASS_FLUX_Z] == 1) mass_flux_z[OPS_ACC1(0, 0, 0)] = (mass_flux_z[OPS_ACC1(-4, 0, 0)]); } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel5_plus_4_right(double *__restrict arg0, double *__restrict arg1, const int *__restrict arg2, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_update_halo_kernel5_plus_4_right + idx_z * 1 * 1 * xdim0_update_halo_kernel5_plus_4_right * ydim0_update_halo_kernel5_plus_4_right; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_update_halo_kernel5_plus_4_right + idx_z * 1 * 1 * xdim1_update_halo_kernel5_plus_4_right * ydim1_update_halo_kernel5_plus_4_right; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel5_plus_4_right(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel5_plus_4_right(char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { // Timing double t1, t2, c1, c2; ops_arg args[3] = {arg0, arg1, arg2}; #ifdef CHECKPOINTING if (!ops_checkpointing_before(args, 3, range, 134)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(134, "update_halo_kernel5_plus_4_right"); OPS_kernels[134].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = 
args[1].dat->size[1]; if (xdim0 != xdim0_update_halo_kernel5_plus_4_right_h || ydim0 != ydim0_update_halo_kernel5_plus_4_right_h || xdim1 != xdim1_update_halo_kernel5_plus_4_right_h || ydim1 != ydim1_update_halo_kernel5_plus_4_right_h) { cudaMemcpyToSymbol(xdim0_update_halo_kernel5_plus_4_right, &xdim0, sizeof(int)); xdim0_update_halo_kernel5_plus_4_right_h = xdim0; cudaMemcpyToSymbol(ydim0_update_halo_kernel5_plus_4_right, &ydim0, sizeof(int)); ydim0_update_halo_kernel5_plus_4_right_h = ydim0; cudaMemcpyToSymbol(xdim1_update_halo_kernel5_plus_4_right, &xdim1, sizeof(int)); xdim1_update_halo_kernel5_plus_4_right_h = xdim1; cudaMemcpyToSymbol(ydim1_update_halo_kernel5_plus_4_right, &ydim1, sizeof(int)); ydim1_update_halo_kernel5_plus_4_right_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d = 0; d < NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS * sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; // set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args, 3, range); if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[134].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_update_halo_kernel5_plus_4_right<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d, x_size, y_size, z_size); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[134].time += t1 - t2; } ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[1], range); if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[134].mpi_time += t2 - t1; OPS_kernels[134].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[134].transfer += ops_compute_transfer(dim, start, end, &arg1); } }
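// ---------------------------------------------------------------------------
// A minimal stand-alone CUDA sketch of the two patterns used by the generated
// pair above: the OPS_ACC-style macro that linearizes a relative (x,y,z)
// offset using row pitches held in __constant__ memory, and the host-side
// guard that refreshes those constants via cudaMemcpyToSymbol only when the
// dataset extents change. All names here (xdim_demo, ydim_demo,
// demo_plus_4_right) are hypothetical; this is not OPS API.
// ---------------------------------------------------------------------------
#include <cstdio>
#include <cuda_runtime.h>

__constant__ int xdim_demo;                       // padded x extent (pitch)
__constant__ int ydim_demo;                       // padded y extent
static int xdim_demo_h = -1, ydim_demo_h = -1;    // host-side cache

#define DEMO_ACC(x, y, z) \
  ((x) + xdim_demo * (y) + xdim_demo * ydim_demo * (z))

// copy each point from 4 cells to its left in x, like the halo kernel above
__global__ void demo_plus_4_right(double *a, int nx, int ny, int nz) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  int z = blockIdx.z * blockDim.z + threadIdx.z;
  if (x >= 4 && x < nx && y < ny && z < nz)
    a[DEMO_ACC(x, y, z)] = a[DEMO_ACC(x - 4, y, z)];
}

int main() {
  const int nx = 16, ny = 8, nz = 4;
  // refresh the __constant__ pitches only when they differ from the cached
  // values, similar to the guard in the host stub above
  if (nx != xdim_demo_h) { cudaMemcpyToSymbol(xdim_demo, &nx, sizeof(int)); xdim_demo_h = nx; }
  if (ny != ydim_demo_h) { cudaMemcpyToSymbol(ydim_demo, &ny, sizeof(int)); ydim_demo_h = ny; }

  double *a;
  cudaMalloc(&a, (size_t)nx * ny * nz * sizeof(double));
  cudaMemset(a, 0, (size_t)nx * ny * nz * sizeof(double));

  dim3 block(8, 8, 1);
  dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y, nz);
  demo_plus_4_right<<<grid, block>>>(a, nx, ny, nz);
  cudaDeviceSynchronize();

  printf("demo finished: %s\n", cudaGetErrorString(cudaGetLastError()));
  cudaFree(a);
  return 0;
}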
f6561e999ba1c479a9d5b89a1868828d0bd1100a.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

__global__ void macro_vars(double *f, double *rho, double *u, mystruct *param)
{
  int ex[Q] = { 0, 1, 0, -1, 0, 1, -1, -1, 1};
  int ey[Q] = { 0, 0, 1, 0, -1, 1, 1, -1, -1};
  int idx = threadIdx.x + blockIdx.x * blockDim.x, a;

  if (idx < param->N)
  {
    // MACROSCOPIC VARIABLES
    rho[idx] = 0.;
    if (param->comp == 0)
    {
      u[idx*DIM+0] = 0.;
      u[idx*DIM+1] = 0.;
      for (a = 0; a < Q; a++)
      {
        rho[idx]     += f[idx*Q + a];
        u[idx*DIM+0] += f[idx*Q + a] * ex[a];
        u[idx*DIM+1] += f[idx*Q + a] * ey[a];
      }
      u[idx*DIM+0] = u[idx*DIM+0] / rho[idx];
      u[idx*DIM+1] = u[idx*DIM+1] / rho[idx];
    }
    else
    {
      for (a = 0; a < 5; a++)
        rho[idx] += f[idx*5 + a];
    }
  }
}
f6561e999ba1c479a9d5b89a1868828d0bd1100a.cu
__global__ void macro_vars(double *f, double *rho, double *u, mystruct *param)
{
  int ex[Q] = { 0, 1, 0, -1, 0, 1, -1, -1, 1};
  int ey[Q] = { 0, 0, 1, 0, -1, 1, 1, -1, -1};
  int idx = threadIdx.x + blockIdx.x * blockDim.x, a;

  if (idx < param->N)
  {
    // MACROSCOPIC VARIABLES
    rho[idx] = 0.;
    if (param->comp == 0)
    {
      u[idx*DIM+0] = 0.;
      u[idx*DIM+1] = 0.;
      for (a = 0; a < Q; a++)
      {
        rho[idx]     += f[idx*Q + a];
        u[idx*DIM+0] += f[idx*Q + a] * ex[a];
        u[idx*DIM+1] += f[idx*Q + a] * ey[a];
      }
      u[idx*DIM+0] = u[idx*DIM+0] / rho[idx];
      u[idx*DIM+1] = u[idx*DIM+1] / rho[idx];
    }
    else
    {
      for (a = 0; a < 5; a++)
        rho[idx] += f[idx*5 + a];
    }
  }
}
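// ---------------------------------------------------------------------------
// Hypothetical host-side driver for the macro_vars kernel above. rho receives
// the D2Q9 zeroth moment (density) and u the first moment (velocity), with f
// laid out as N x Q and u as N x DIM. Note that the kernel dereferences
// param->N on the device, so param must point at device-accessible (e.g.
// managed) memory. Q, DIM and mystruct are normally defined elsewhere in that
// project; the definitions below are assumptions made only so this sketch is
// self-contained.
// ---------------------------------------------------------------------------
#include <cuda_runtime.h>

#ifndef Q
#define Q 9
#endif
#ifndef DIM
#define DIM 2
#endif

struct mystruct { int N; int comp; };   // assumed layout of the real struct

// forward declaration of the kernel defined in the file above
__global__ void macro_vars(double *f, double *rho, double *u, mystruct *param);

int main() {
  const int N = 256 * 256;              // number of lattice sites (example)
  double *f, *rho, *u;
  mystruct *param;
  cudaMalloc(&f,   (size_t)N * Q   * sizeof(double));
  cudaMalloc(&rho, (size_t)N       * sizeof(double));
  cudaMalloc(&u,   (size_t)N * DIM * sizeof(double));
  cudaMallocManaged(&param, sizeof(mystruct));   // device-dereferenceable
  param->N = N;
  param->comp = 0;                      // compute both rho and u

  int block = 256, grid = (N + block - 1) / block;
  macro_vars<<<grid, block>>>(f, rho, u, param);
  cudaDeviceSynchronize();

  cudaFree(f); cudaFree(rho); cudaFree(u); cudaFree(param);
  return 0;
}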
f934ba5cfa33ef5303af374afa6273fedf740936.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "brick-cuda.h" #include "head.h" #include "headcu.h" #define out(i, j) out_arr[j][i] #define in(i, j) in_arr[j][i] __global__ void arr_kernel(bElem *in_ptr, bElem *out_ptr, bElem *c) { auto in_arr = (bElem (*)[STRIDE]) in_ptr; auto out_arr = (bElem (*)[STRIDE]) out_ptr; #include "arrcusched.h" { #include "kernel.h" } } #undef out #undef in __global__ void brick_kernel(unsigned (*grid)[STRIDE/TILEI], Brick2D in, Brick2D out, bElem *c) { #include "bricusched.h" brick("kernel.py", BVEC, (TILEJ, TILEI), (BFOLD), b); } int main() { // allocations bElem *c = randomArray({49}); bElem *c_dev; copyToDevice({49}, c_dev, c); auto in_arr = randomArray({STRIDE, STRIDE}); bElem *in_dev; copyToDevice({STRIDE, STRIDE}, in_dev, in_arr); auto out_arr = zeroArray({STRIDE, STRIDE}); bElem *out_dev; copyToDevice({STRIDE, STRIDE}, out_dev, out_arr); { auto compute = [&]() -> void { dim3 block(N/TILEI, N/TILEJ), thread(_TILEI, _TILEJ); hipLaunchKernelGGL(( arr_kernel), dim3(block), dim3(thread) , 0, 0, in_dev, out_dev, c_dev); }; #ifndef TYPE #include "cutiming.h" #else compute(); #endif copyFromDevice({STRIDE, STRIDE}, out_arr, out_dev); } #if TYPE == 1 { unsigned *grid_ptr; unsigned bSize = TILEJ * TILEI; auto bInfo = init_grid<2>(grid_ptr, {STRIDE/TILEJ, STRIDE/TILEI}); unsigned *grid_dev; copyToDevice({STRIDE/TILEJ, STRIDE/TILEI}, grid_dev, grid_ptr); auto bStorage = BrickStorage::allocate(bInfo.nbricks, bSize * 2); Brick<Dim<TILEJ, TILEI>, Dim<BFOLD>> in_bri(&bInfo, &bStorage, 0); Brick<Dim<TILEJ, TILEI>, Dim<BFOLD>> out_bri(&bInfo, &bStorage, bSize); BrickInfo<2> *bInfo_dev; auto _bInfo_dev = movBrickInfo(bInfo, hipMemcpyHostToDevice); { unsigned size = sizeof(BrickInfo<2>); hipMalloc(&bInfo_dev, size); hipMemcpy(bInfo_dev, &_bInfo_dev, size, hipMemcpyHostToDevice); } copyBrick<2>({STRIDE, STRIDE}, in_arr, grid_ptr, in_bri); BrickStorage *bStorage_dev; BrickStorage _bStorage_dev = movBrickStorage(bStorage, hipMemcpyHostToDevice); { unsigned size = sizeof(BrickStorage); hipMalloc(&bStorage_dev, size); hipMemcpy(bStorage_dev, &_bStorage_dev, size, hipMemcpyHostToDevice); } auto compute = [&]() -> void { Brick2D bIn(bInfo_dev, &_bStorage_dev, 0); Brick2D bOut(bInfo_dev, &_bStorage_dev, bSize); bIn.bStorage = bStorage_dev; bOut.bStorage = bStorage_dev; auto grid = (unsigned (*)[STRIDE/TILEI]) grid_dev; dim3 block(N/TILEI, N/TILEJ), thread(32); hipLaunchKernelGGL(( brick_kernel), dim3(block), dim3(thread) , 0, 0, grid, bIn, bOut, c_dev); }; #include "cutiming.h" hipDeviceSynchronize(); hipMemcpy(bStorage.dat, _bStorage_dev.dat, bStorage.chunks * bStorage.step * sizeof(bElem), hipMemcpyDeviceToHost); if (!compareBrick<2>({STRIDE, STRIDE}, out_arr, grid_ptr, out_bri)) return 1; } #endif return 0; }
f934ba5cfa33ef5303af374afa6273fedf740936.cu
#include "brick-cuda.h" #include "head.h" #include "headcu.h" #define out(i, j) out_arr[j][i] #define in(i, j) in_arr[j][i] __global__ void arr_kernel(bElem *in_ptr, bElem *out_ptr, bElem *c) { auto in_arr = (bElem (*)[STRIDE]) in_ptr; auto out_arr = (bElem (*)[STRIDE]) out_ptr; #include "arrcusched.h" { #include "kernel.h" } } #undef out #undef in __global__ void brick_kernel(unsigned (*grid)[STRIDE/TILEI], Brick2D in, Brick2D out, bElem *c) { #include "bricusched.h" brick("kernel.py", BVEC, (TILEJ, TILEI), (BFOLD), b); } int main() { // allocations bElem *c = randomArray({49}); bElem *c_dev; copyToDevice({49}, c_dev, c); auto in_arr = randomArray({STRIDE, STRIDE}); bElem *in_dev; copyToDevice({STRIDE, STRIDE}, in_dev, in_arr); auto out_arr = zeroArray({STRIDE, STRIDE}); bElem *out_dev; copyToDevice({STRIDE, STRIDE}, out_dev, out_arr); { auto compute = [&]() -> void { dim3 block(N/TILEI, N/TILEJ), thread(_TILEI, _TILEJ); arr_kernel<<< block, thread >>>(in_dev, out_dev, c_dev); }; #ifndef TYPE #include "cutiming.h" #else compute(); #endif copyFromDevice({STRIDE, STRIDE}, out_arr, out_dev); } #if TYPE == 1 { unsigned *grid_ptr; unsigned bSize = TILEJ * TILEI; auto bInfo = init_grid<2>(grid_ptr, {STRIDE/TILEJ, STRIDE/TILEI}); unsigned *grid_dev; copyToDevice({STRIDE/TILEJ, STRIDE/TILEI}, grid_dev, grid_ptr); auto bStorage = BrickStorage::allocate(bInfo.nbricks, bSize * 2); Brick<Dim<TILEJ, TILEI>, Dim<BFOLD>> in_bri(&bInfo, &bStorage, 0); Brick<Dim<TILEJ, TILEI>, Dim<BFOLD>> out_bri(&bInfo, &bStorage, bSize); BrickInfo<2> *bInfo_dev; auto _bInfo_dev = movBrickInfo(bInfo, cudaMemcpyHostToDevice); { unsigned size = sizeof(BrickInfo<2>); cudaMalloc(&bInfo_dev, size); cudaMemcpy(bInfo_dev, &_bInfo_dev, size, cudaMemcpyHostToDevice); } copyBrick<2>({STRIDE, STRIDE}, in_arr, grid_ptr, in_bri); BrickStorage *bStorage_dev; BrickStorage _bStorage_dev = movBrickStorage(bStorage, cudaMemcpyHostToDevice); { unsigned size = sizeof(BrickStorage); cudaMalloc(&bStorage_dev, size); cudaMemcpy(bStorage_dev, &_bStorage_dev, size, cudaMemcpyHostToDevice); } auto compute = [&]() -> void { Brick2D bIn(bInfo_dev, &_bStorage_dev, 0); Brick2D bOut(bInfo_dev, &_bStorage_dev, bSize); bIn.bStorage = bStorage_dev; bOut.bStorage = bStorage_dev; auto grid = (unsigned (*)[STRIDE/TILEI]) grid_dev; dim3 block(N/TILEI, N/TILEJ), thread(32); brick_kernel<<< block, thread >>>(grid, bIn, bOut, c_dev); }; #include "cutiming.h" cudaDeviceSynchronize(); cudaMemcpy(bStorage.dat, _bStorage_dev.dat, bStorage.chunks * bStorage.step * sizeof(bElem), cudaMemcpyDeviceToHost); if (!compareBrick<2>({STRIDE, STRIDE}, out_arr, grid_ptr, out_bri)) return 1; } #endif return 0; }
d4bc86594a0804f86907742b6d2b245420c3db0b.hip
// !!! This is a file automatically generated by hipify!!! /************************************* * Matrix-Matrix product with CUBLAS * *************************************/ #include <stdio.h> #include <mkl_blas.h> #include <omp.h> #include "rocblas.h" /* Write here the name of the CUBLAS header file */ #define CUDA_SAFE_CALL( call ) { cuAssert((call), __FILE__, __LINE__); } inline void cuAssert( hipError_t err, const char *file, int line, bool abort=true) { if( hipSuccess != err ) { fprintf(stderr, "CUDA: error ocurred in %s %s %d\n", hipGetErrorString(err), file, line ); if( abort ) exit( err ); } } #define CUBLAS_SAFE_CALL( call ) { cublasAssert((call), __FILE__, __LINE__); } inline void cublasAssert( hipblasStatus_t err, const char *file, int line, bool abort=true) { if( HIPBLAS_STATUS_SUCCESS != err ) { fprintf(stderr, "CUBLAS: error ocurred in %s %s %d\n", err, file, line ); if( abort ) exit( err ); } } /* Matrices stored by columns: BLAS style */ #define A(i,j) A[ (i) + ((j)*(n)) ] #define B(i,j) B[ (i) + ((j)*(n)) ] #define C(i,j) C[ (i) + ((j)*(n)) ] #define h_C(i,j) h_C[ (i) + ((j)*(n)) ] #define h_C1(i,j) h_C1[ (i) + ((j)*(n)) ] #define h_C2(i,j) h_C2[ (i) + ((j)*(n)) ] #define het_C(i,j) het_C[ (i) + ((j)*(n)) ] #define d_A(i,j) d_A[ (j) + ((i)*(n)) ] int main( int argc, char *argv[] ) { int n, m, nm, m2, deviceCount, middle; float weigth; unsigned int i, j; if( argc < 3 ) { printf( "Usage: %s n weight\n", argv[0] ); exit( -EXIT_FAILURE ); } sscanf( argv[1],"%d",&n ); sscanf( argv[2],"%f",&weigth ); m = n * weigth; nm = n - m; // General matrices double *A = (double *) malloc( n * n * sizeof(double) ); double *B = (double *) malloc( n * n * sizeof(double) ); // Result matrices double *C = (double *) malloc( n * n * sizeof(double) ); // CPU execution double *h_C = (double *) malloc( n * n * sizeof(double) ); // GPU execution double *het_C = (double *) malloc( n * n * sizeof(double) ); // Heterogeneous execution // GPU matrices double *d_A, *d_B, *d_C; // Heterogenous matrices double *d_A1, *d_A2, *d_B1, *d_B2, *d_C1, *d_C2; double *h_C1 = (double *) malloc( n * m * sizeof(double) ); double *h_C2 = (double *) malloc( n * m * sizeof(double) ); printf( "%s: Generating two random matrices of size %dx%d...\n", argv[0], n, n ); for( i = 0; i < n; i++ ) { for( j = 0; j < n; j++ ) A( i, j ) = 2.0 * ( (double) rand() / RAND_MAX ) - 1.0; } for( i = 0; i < n; i++ ) { for( j = 0; j < n; j++ ) B( i, j ) = 2.0 * ( (double) rand() / RAND_MAX ) - 1.0; } /* STARTUP CUBLAS context */ hipblasHandle_t handle; CUBLAS_SAFE_CALL( hipblasCreate( &handle ) ); hipEvent_t start, stop; CUDA_SAFE_CALL( hipEventCreate( &start ) ); CUDA_SAFE_CALL( hipEventCreate( &stop ) ); CUDA_SAFE_CALL( hipGetDeviceCount( &deviceCount ) ); const char trans = 'N'; const double ONE = 1.0; const double ZERO = 0.0; // MKL execution (CPU) printf( "%s: C = A * B in CPU...\n", argv[0] ); CUDA_SAFE_CALL( hipEventRecord(start, NULL) ); dgemm( &trans, &trans, &n, &n, &n, &ONE, A, &n, B, &n, &ZERO, C, &n ); CUDA_SAFE_CALL( hipEventRecord(stop, NULL) ); CUDA_SAFE_CALL( hipEventSynchronize( stop ) ); float msecCPU = 0.0f; CUDA_SAFE_CALL( hipEventElapsedTime( &msecCPU, start, stop ) ); // CuBLAS execution (GPU) printf( "%s: C = A * B in GPU...\n", argv[0] ); CUDA_SAFE_CALL( hipMalloc( (void **) &d_A, n * n * sizeof(double) ) ); CUDA_SAFE_CALL( hipMalloc( (void **) &d_B, n * n * sizeof(double) ) ); CUDA_SAFE_CALL( hipMalloc( (void **) &d_C, n * n * sizeof(double) ) ); CUBLAS_SAFE_CALL( hipblasSetMatrix( n, n, sizeof(double), A, n, d_A, 
n ) ); CUBLAS_SAFE_CALL( hipblasSetMatrix( n, n, sizeof(double), B, n, d_B, n ) ); CUDA_SAFE_CALL( hipEventRecord(start, NULL) ); CUBLAS_SAFE_CALL( hipblasDgemm( handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, n, n, &ONE, d_A, n, d_B, n, &ZERO, d_C, n ) ); CUDA_SAFE_CALL( hipEventRecord( stop, NULL ) ); CUDA_SAFE_CALL( hipEventSynchronize( stop ) ); CUBLAS_SAFE_CALL( hipblasGetMatrix( n, n, sizeof(double), d_C, n, h_C, n ) ); float msecGPU = 0.0f; CUDA_SAFE_CALL( hipEventElapsedTime( &msecGPU, start, stop ) ); CUDA_SAFE_CALL( hipFree( d_B ) ); CUDA_SAFE_CALL( hipFree( d_C ) ); // Heterogeneous execution (CPU + GPU) printf( "%s: C = A * B in CPU + GPU...\n",argv[0] ); m2 = m/2; middle = ( nm + ( n - 1 ) ) / 2; CUDA_SAFE_CALL( hipEventRecord( start, NULL ) ); #pragma omp parallel sections { #pragma omp section { CUDA_SAFE_CALL( hipSetDevice( 0 ) ); CUDA_SAFE_CALL( hipMalloc( (void **) &d_A1, n * n * sizeof(double) ) ); CUDA_SAFE_CALL( hipMalloc( (void **) &d_B1, n * m2 * sizeof(double) ) ); CUDA_SAFE_CALL( hipMalloc( (void **) &d_C1, n * m2 * sizeof(double) ) ); CUBLAS_SAFE_CALL( hipblasSetMatrix( n, n, sizeof(double), A, n, d_A1, n ) ); CUBLAS_SAFE_CALL( hipblasSetMatrix( n, m2, sizeof(double), &B(0, nm), n, d_B1, n ) ); CUBLAS_SAFE_CALL( hipblasDgemm( handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m2, n, &ONE, d_A1, n, d_B1, n, &ZERO, d_C1, n ) ); } #pragma omp section { CUDA_SAFE_CALL( hipMalloc( (void **) &d_A2, n * n * sizeof(double) ) ); CUDA_SAFE_CALL( hipMalloc( (void **) &d_B2, n * m2 * sizeof(double) ) ); CUDA_SAFE_CALL( hipMalloc( (void **) &d_C2, n * m2 * sizeof(double) ) ); CUBLAS_SAFE_CALL( hipblasSetMatrix( n, n, sizeof(double), A, n, d_A2, n ) ); CUBLAS_SAFE_CALL( hipblasSetMatrix( n, m2, sizeof(double), &B(0, middle + 1), n, d_B2, n ) ); CUBLAS_SAFE_CALL( hipblasDgemm( handle, HIPBLAS_OP_N, HIPBLAS_OP_N, n, m2, n, &ONE, d_A2, n, d_B2, n, &ZERO, d_C2, n ) ); } #pragma omp section { dgemm( &trans, &trans, &n, &nm, &n, &ONE, A, &n, B, &n, &ZERO, het_C, &n ); } } CUDA_SAFE_CALL( hipEventRecord( stop, NULL ) ); CUDA_SAFE_CALL( hipEventSynchronize( stop ) ); CUDA_SAFE_CALL( hipSetDevice( 0 ) ); CUBLAS_SAFE_CALL( hipblasGetMatrix( n, m2, sizeof(double), d_C1, n, h_C1, n ) ); memcpy( &het_C(0, nm), h_C1, n * m2 * sizeof(double) ); CUDA_SAFE_CALL( hipSetDevice( 1 ) ); CUBLAS_SAFE_CALL( hipblasGetMatrix( n, m2, sizeof(double), d_C2, n, h_C2, n ) ); memcpy( &het_C(0, middle + 1), h_C2, n * m2 * sizeof(double) ); float msecCPUGPU = 0.0f; CUDA_SAFE_CALL( hipEventElapsedTime( &msecCPUGPU, start, stop ) ); int one = 1; int maxid = idamax( &n, C, &one ); double max = C[maxid]; double error = ZERO; for( j = 1; j < n; j++ ) { for( i = 1; i < n; i++ ) { double a = fabs( C( i, j ) - h_C( i, j ) ) / max; error = a > error ? a : error; } } printf( "Error CPU/GPU = %.3e\n",error ); one = 1; maxid = idamax( &n, C, &one ); max = C[maxid]; error = ZERO; for( j = 1; j < n; j++ ) { for( i = 1; i < n; i++ ) { double a = fabs( C( i, j ) - het_C( i, j ) ) / max; error = a > error ? 
a : error; } } printf( "Error CPU/CPU + GPU = %.3e\n",error ); double flops = 2.0 * (double) n * (double) n * (double) n; float gigaFlopsCPU = ( flops * 1.0e-9f ) / ( msecCPU / 1000.0f ); float gigaFlopsGPU = ( flops * 1.0e-9f ) / ( msecGPU / 1000.0f ); float gigaFlopsCPUGPU = ( flops * 1.0e-9f ) / ( msecCPUGPU / 1000.0f ); printf( "CPU time = %.2f msec.\n", msecCPU ); printf( "GPU time = %.2f msec.\n", msecGPU ); printf( "CPU + GPU time = %.2f msec.\n", msecCPUGPU ); printf( "GFlops CPU = %.2f \n", gigaFlopsCPU ); printf( "GFlops GPU = %.2f \n", gigaFlopsGPU ); printf( "GFlops CPU + GPU = %.2f \n", gigaFlopsCPUGPU ); // CPU matrices free( A ); free( B ); free( C ); //free( h_C ); free( het_C ); free( h_C2 ); //GPU matrices hipFree( d_A ); hipFree( d_A1 ); hipFree( d_A2 ); hipFree( d_B ); hipFree( d_C ); hipblasDestroy( handle ); }
d4bc86594a0804f86907742b6d2b245420c3db0b.cu
/************************************* * Matrix-Matrix product with CUBLAS * *************************************/ #include <stdio.h> #include <mkl_blas.h> #include <omp.h> #include "cublas_v2.h" /* Write here the name of the CUBLAS header file */ #define CUDA_SAFE_CALL( call ) { cuAssert((call), __FILE__, __LINE__); } inline void cuAssert( cudaError_t err, const char *file, int line, bool abort=true) { if( cudaSuccess != err ) { fprintf(stderr, "CUDA: error ocurred in %s %s %d\n", cudaGetErrorString(err), file, line ); if( abort ) exit( err ); } } #define CUBLAS_SAFE_CALL( call ) { cublasAssert((call), __FILE__, __LINE__); } inline void cublasAssert( cublasStatus_t err, const char *file, int line, bool abort=true) { if( CUBLAS_STATUS_SUCCESS != err ) { fprintf(stderr, "CUBLAS: error ocurred in %s %s %d\n", err, file, line ); if( abort ) exit( err ); } } /* Matrices stored by columns: BLAS style */ #define A(i,j) A[ (i) + ((j)*(n)) ] #define B(i,j) B[ (i) + ((j)*(n)) ] #define C(i,j) C[ (i) + ((j)*(n)) ] #define h_C(i,j) h_C[ (i) + ((j)*(n)) ] #define h_C1(i,j) h_C1[ (i) + ((j)*(n)) ] #define h_C2(i,j) h_C2[ (i) + ((j)*(n)) ] #define het_C(i,j) het_C[ (i) + ((j)*(n)) ] #define d_A(i,j) d_A[ (j) + ((i)*(n)) ] int main( int argc, char *argv[] ) { int n, m, nm, m2, deviceCount, middle; float weigth; unsigned int i, j; if( argc < 3 ) { printf( "Usage: %s n weight\n", argv[0] ); exit( -EXIT_FAILURE ); } sscanf( argv[1],"%d",&n ); sscanf( argv[2],"%f",&weigth ); m = n * weigth; nm = n - m; // General matrices double *A = (double *) malloc( n * n * sizeof(double) ); double *B = (double *) malloc( n * n * sizeof(double) ); // Result matrices double *C = (double *) malloc( n * n * sizeof(double) ); // CPU execution double *h_C = (double *) malloc( n * n * sizeof(double) ); // GPU execution double *het_C = (double *) malloc( n * n * sizeof(double) ); // Heterogeneous execution // GPU matrices double *d_A, *d_B, *d_C; // Heterogenous matrices double *d_A1, *d_A2, *d_B1, *d_B2, *d_C1, *d_C2; double *h_C1 = (double *) malloc( n * m * sizeof(double) ); double *h_C2 = (double *) malloc( n * m * sizeof(double) ); printf( "%s: Generating two random matrices of size %dx%d...\n", argv[0], n, n ); for( i = 0; i < n; i++ ) { for( j = 0; j < n; j++ ) A( i, j ) = 2.0 * ( (double) rand() / RAND_MAX ) - 1.0; } for( i = 0; i < n; i++ ) { for( j = 0; j < n; j++ ) B( i, j ) = 2.0 * ( (double) rand() / RAND_MAX ) - 1.0; } /* STARTUP CUBLAS context */ cublasHandle_t handle; CUBLAS_SAFE_CALL( cublasCreate( &handle ) ); cudaEvent_t start, stop; CUDA_SAFE_CALL( cudaEventCreate( &start ) ); CUDA_SAFE_CALL( cudaEventCreate( &stop ) ); CUDA_SAFE_CALL( cudaGetDeviceCount( &deviceCount ) ); const char trans = 'N'; const double ONE = 1.0; const double ZERO = 0.0; // MKL execution (CPU) printf( "%s: C = A * B in CPU...\n", argv[0] ); CUDA_SAFE_CALL( cudaEventRecord(start, NULL) ); dgemm( &trans, &trans, &n, &n, &n, &ONE, A, &n, B, &n, &ZERO, C, &n ); CUDA_SAFE_CALL( cudaEventRecord(stop, NULL) ); CUDA_SAFE_CALL( cudaEventSynchronize( stop ) ); float msecCPU = 0.0f; CUDA_SAFE_CALL( cudaEventElapsedTime( &msecCPU, start, stop ) ); // CuBLAS execution (GPU) printf( "%s: C = A * B in GPU...\n", argv[0] ); CUDA_SAFE_CALL( cudaMalloc( (void **) &d_A, n * n * sizeof(double) ) ); CUDA_SAFE_CALL( cudaMalloc( (void **) &d_B, n * n * sizeof(double) ) ); CUDA_SAFE_CALL( cudaMalloc( (void **) &d_C, n * n * sizeof(double) ) ); CUBLAS_SAFE_CALL( cublasSetMatrix( n, n, sizeof(double), A, n, d_A, n ) ); CUBLAS_SAFE_CALL( cublasSetMatrix( n, n, 
sizeof(double), B, n, d_B, n ) ); CUDA_SAFE_CALL( cudaEventRecord(start, NULL) ); CUBLAS_SAFE_CALL( cublasDgemm( handle, CUBLAS_OP_N, CUBLAS_OP_N, n, n, n, &ONE, d_A, n, d_B, n, &ZERO, d_C, n ) ); CUDA_SAFE_CALL( cudaEventRecord( stop, NULL ) ); CUDA_SAFE_CALL( cudaEventSynchronize( stop ) ); CUBLAS_SAFE_CALL( cublasGetMatrix( n, n, sizeof(double), d_C, n, h_C, n ) ); float msecGPU = 0.0f; CUDA_SAFE_CALL( cudaEventElapsedTime( &msecGPU, start, stop ) ); CUDA_SAFE_CALL( cudaFree( d_B ) ); CUDA_SAFE_CALL( cudaFree( d_C ) ); // Heterogeneous execution (CPU + GPU) printf( "%s: C = A * B in CPU + GPU...\n",argv[0] ); m2 = m/2; middle = ( nm + ( n - 1 ) ) / 2; CUDA_SAFE_CALL( cudaEventRecord( start, NULL ) ); #pragma omp parallel sections { #pragma omp section { CUDA_SAFE_CALL( cudaSetDevice( 0 ) ); CUDA_SAFE_CALL( cudaMalloc( (void **) &d_A1, n * n * sizeof(double) ) ); CUDA_SAFE_CALL( cudaMalloc( (void **) &d_B1, n * m2 * sizeof(double) ) ); CUDA_SAFE_CALL( cudaMalloc( (void **) &d_C1, n * m2 * sizeof(double) ) ); CUBLAS_SAFE_CALL( cublasSetMatrix( n, n, sizeof(double), A, n, d_A1, n ) ); CUBLAS_SAFE_CALL( cublasSetMatrix( n, m2, sizeof(double), &B(0, nm), n, d_B1, n ) ); CUBLAS_SAFE_CALL( cublasDgemm( handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m2, n, &ONE, d_A1, n, d_B1, n, &ZERO, d_C1, n ) ); } #pragma omp section { CUDA_SAFE_CALL( cudaMalloc( (void **) &d_A2, n * n * sizeof(double) ) ); CUDA_SAFE_CALL( cudaMalloc( (void **) &d_B2, n * m2 * sizeof(double) ) ); CUDA_SAFE_CALL( cudaMalloc( (void **) &d_C2, n * m2 * sizeof(double) ) ); CUBLAS_SAFE_CALL( cublasSetMatrix( n, n, sizeof(double), A, n, d_A2, n ) ); CUBLAS_SAFE_CALL( cublasSetMatrix( n, m2, sizeof(double), &B(0, middle + 1), n, d_B2, n ) ); CUBLAS_SAFE_CALL( cublasDgemm( handle, CUBLAS_OP_N, CUBLAS_OP_N, n, m2, n, &ONE, d_A2, n, d_B2, n, &ZERO, d_C2, n ) ); } #pragma omp section { dgemm( &trans, &trans, &n, &nm, &n, &ONE, A, &n, B, &n, &ZERO, het_C, &n ); } } CUDA_SAFE_CALL( cudaEventRecord( stop, NULL ) ); CUDA_SAFE_CALL( cudaEventSynchronize( stop ) ); CUDA_SAFE_CALL( cudaSetDevice( 0 ) ); CUBLAS_SAFE_CALL( cublasGetMatrix( n, m2, sizeof(double), d_C1, n, h_C1, n ) ); memcpy( &het_C(0, nm), h_C1, n * m2 * sizeof(double) ); CUDA_SAFE_CALL( cudaSetDevice( 1 ) ); CUBLAS_SAFE_CALL( cublasGetMatrix( n, m2, sizeof(double), d_C2, n, h_C2, n ) ); memcpy( &het_C(0, middle + 1), h_C2, n * m2 * sizeof(double) ); float msecCPUGPU = 0.0f; CUDA_SAFE_CALL( cudaEventElapsedTime( &msecCPUGPU, start, stop ) ); int one = 1; int maxid = idamax( &n, C, &one ); double max = C[maxid]; double error = ZERO; for( j = 1; j < n; j++ ) { for( i = 1; i < n; i++ ) { double a = fabs( C( i, j ) - h_C( i, j ) ) / max; error = a > error ? a : error; } } printf( "Error CPU/GPU = %.3e\n",error ); one = 1; maxid = idamax( &n, C, &one ); max = C[maxid]; error = ZERO; for( j = 1; j < n; j++ ) { for( i = 1; i < n; i++ ) { double a = fabs( C( i, j ) - het_C( i, j ) ) / max; error = a > error ? 
a : error; } } printf( "Error CPU/CPU + GPU = %.3e\n",error ); double flops = 2.0 * (double) n * (double) n * (double) n; float gigaFlopsCPU = ( flops * 1.0e-9f ) / ( msecCPU / 1000.0f ); float gigaFlopsGPU = ( flops * 1.0e-9f ) / ( msecGPU / 1000.0f ); float gigaFlopsCPUGPU = ( flops * 1.0e-9f ) / ( msecCPUGPU / 1000.0f ); printf( "CPU time = %.2f msec.\n", msecCPU ); printf( "GPU time = %.2f msec.\n", msecGPU ); printf( "CPU + GPU time = %.2f msec.\n", msecCPUGPU ); printf( "GFlops CPU = %.2f \n", gigaFlopsCPU ); printf( "GFlops GPU = %.2f \n", gigaFlopsGPU ); printf( "GFlops CPU + GPU = %.2f \n", gigaFlopsCPUGPU ); // CPU matrices free( A ); free( B ); free( C ); //free( h_C ); free( het_C ); free( h_C2 ); //GPU matrices cudaFree( d_A ); cudaFree( d_A1 ); cudaFree( d_A2 ); cudaFree( d_B ); cudaFree( d_C ); cublasDestroy( handle ); }
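/*
 * Two pitfalls worth flagging in the verification code above. First, the
 * cublasAssert handler prints the cublasStatus_t with %s, which expects a
 * string rather than an enum value. Second, the Fortran-style idamax from
 * mkl_blas.h returns a 1-based index and is called with length n, so C[maxid]
 * indexes past the intended element and only the first column of C is
 * scanned. A plain host-side alternative for the maximum relative error,
 * assuming the same column-major n-by-n layout, is sketched below
 * (max_relative_error is a hypothetical helper, not part of the program above).
 */
#include <math.h>

/* max_k |ref[k] - test[k]| / max_k |ref[k]| over an n-by-n column-major matrix */
static double max_relative_error(const double *ref, const double *test, int n) {
  double maxAbs = 0.0, maxErr = 0.0;
  for (long k = 0; k < (long)n * n; k++) {
    double a = fabs(ref[k]);
    if (a > maxAbs) maxAbs = a;
  }
  if (maxAbs == 0.0) maxAbs = 1.0;   /* avoid division by zero for an all-zero reference */
  for (long k = 0; k < (long)n * n; k++) {
    double e = fabs(ref[k] - test[k]) / maxAbs;
    if (e > maxErr) maxErr = e;
  }
  return maxErr;
}

/* usage, matching the printouts above:
 *   printf( "Error CPU/GPU = %.3e\n", max_relative_error( C, h_C, n ) );
 *   printf( "Error CPU/CPU + GPU = %.3e\n", max_relative_error( C, het_C, n ) );
 */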
da4bf3f7b794b9d386d027cc2a4d5c2b9f425eb2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef ATOMIC_GPUNUFFT_KERNELS_H #define ATOMIC_GPUNUFFT_KERNELS_H #include "gpuNUFFT_kernels.hpp" #include "../std_gpuNUFFT_kernels.cu" #include "cuda_utils.cuh" // convolve every data point on grid position -> controlled by threadIdx.x .y and .z // shared data holds grid values as software managed cache __global__ void convolutionKernel3( DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers, int N, int CACHE_SIZE ) { extern __shared__ DType shared_data[];//externally managed shared memory DType2* data_cache =(DType2*)&shared_data[0]; DType3* coord_cache =(DType3*) &shared_data[2*CACHE_SIZE]; __shared__ int sec; sec = blockIdx.x; //start convolution while (sec < N) { int ind, k, i, j; int imin, imax,jmin,jmax,kmin,kmax; DType dx_sqr, dy_sqr, dz_sqr, val, ix, jy, kz; __shared__ IndType3 center; center.x = sector_centers[sec * 3]; center.y = sector_centers[sec * 3 + 1]; center.z = sector_centers[sec * 3 + 2]; //Grid Points over threads, start position of data points of this sector __shared__ int data_off; data_off = sectors[sec]; int data_max = sectors[sec+1]; //init shared memory data cache int c_ind = threadIdx.x + blockDim.x * threadIdx.y + blockDim.x * blockDim.y* threadIdx.z; //load data points into shared mem while (c_ind < CACHE_SIZE && (data_off + c_ind) < data_max) { data_cache[c_ind] = data[data_off + c_ind]; coord_cache[c_ind].x = crds[c_ind + data_off]; coord_cache[c_ind].y = crds[c_ind + data_off+GI.data_count]; coord_cache[c_ind].z = crds[c_ind + data_off+2*GI.data_count]; c_ind += blockDim.x * blockDim.y*blockDim.z; } __syncthreads(); __shared__ int sector_ind_offset; sector_ind_offset = computeXYZ2Lin(center.x - GI.sector_offset,center.y - GI.sector_offset,center.z - GI.sector_offset,GI.gridDims); c_ind = 0; __shared__ int reload_count; reload_count = 0; while (data_off+c_ind < data_max) { if (c_ind >= (reload_count+1)*CACHE_SIZE) { __syncthreads(); /* int reload_ind = threadIdx.x + blockDim.x * threadIdx.y + blockDim.x * blockDim.y* threadIdx.z; //load next data points into shared mem while (reload_ind < CACHE_SIZE && (data_off + c_ind + reload_ind) < data_max) { data_cache[reload_ind] = data[data_off + c_ind + reload_ind]; coord_cache[reload_ind].x = crds[c_ind + data_off + reload_ind]; coord_cache[reload_ind].y = crds[c_ind + data_off+ reload_ind + GI.data_count]; coord_cache[reload_ind].z = crds[c_ind + data_off+ reload_ind + 2*GI.data_count]; reload_ind += blockDim.x * blockDim.y * blockDim.z; }*/ reload_count++; } __syncthreads(); // DType3 data_point; //datapoint shared in every thread DType3 data_point = coord_cache[c_ind - reload_count*CACHE_SIZE]; // set the boundaries of final dataset for gpuNUFFT this point ix = mapKSpaceToGrid(data_point.x,GI.gridDims.x,center.x,GI.sector_offset); set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius); jy = mapKSpaceToGrid(data_point.y,GI.gridDims.y,center.y,GI.sector_offset); set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius); kz = mapKSpaceToGrid(data_point.z,GI.gridDims.z,center.z,GI.sector_offset); set_minmax(&kz, &kmin, &kmax, GI.sector_pad_max, GI.kernel_radius); // grid this point onto the neighboring cartesian points for (k=threadIdx.z;k<=kmax; k += blockDim.z) { j=threadIdx.y; i=threadIdx.x; if ((k<=kmax && k>=kmin) && (j<=jmax && j>=jmin) && (i<=imax && i>=imin)) { kz = mapGridToKSpace(k,GI.gridDims.z,center.z,GI.sector_offset); dz_sqr = (kz - 
data_point.z)*GI.aniso_z_scale; dz_sqr *= dz_sqr; if (dz_sqr < GI.radiusSquared) { jy = mapGridToKSpace(j,GI.gridDims.y,center.y,GI.sector_offset); dy_sqr = (jy - data_point.y) * GI.aniso_y_scale; dy_sqr *= dy_sqr; if (dy_sqr < GI.radiusSquared) { ix = mapGridToKSpace(i,GI.gridDims.x,center.x,GI.sector_offset); dx_sqr = (ix - data_point.x)*GI.aniso_x_scale; dx_sqr *= dx_sqr; if (dx_sqr < GI.radiusSquared) { //get kernel value //Calculate Separable Filters val = KERNEL[(int) round(dz_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dy_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dx_sqr * GI.dist_multiplier)]; if (isOutlier(i,j,k,center.x,center.y,center.z,GI.gridDims,GI.sector_offset)) //calculate opposite index ind = computeXYZ2Lin(calculateOppositeIndex(i,center.x,GI.gridDims.x,GI.sector_offset), calculateOppositeIndex(j,center.y,GI.gridDims.y,GI.sector_offset), calculateOppositeIndex(k,center.z,GI.gridDims.z,GI.sector_offset), GI.gridDims); else ind = sector_ind_offset + computeXYZ2Lin(i,j,k,GI.gridDims);//index in output grid atomicAdd(&(gdata[ind].x),val * data_cache[c_ind-reload_count*CACHE_SIZE].x);//Re atomicAdd(&(gdata[ind].y),val * data_cache[c_ind-reload_count*CACHE_SIZE].y);//Im } // kernel bounds check x, spherical support } // kernel bounds check y, spherical support } //kernel bounds check z } //x,y,z bounds check }//for loop over z entries c_ind++; } //grid points per sector __syncthreads(); sec = sec + gridDim.x; }//sec < sector_count } __device__ void convolutionFunction4(int sec, int sec_max, int sec_offset, DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers) { int ind, k, i, j; __shared__ int max_dim, imin, imax,jmin,jmax,kmin,kmax; DType dx_sqr, dy_sqr, dz_sqr, val, ix, jy, kz; __shared__ IndType3 center; center.x = sector_centers[sec * 3]; center.y = sector_centers[sec * 3 + 1]; center.z = sector_centers[sec * 3 + 2]; __shared__ int sector_ind_offset; sector_ind_offset = computeXYZ2Lin(center.x - GI.sector_offset,center.y - GI.sector_offset,center.z - GI.sector_offset,GI.gridDims); __syncthreads(); //Grid Points over threads int data_cnt; data_cnt = sectors[sec]+sec_offset; max_dim = GI.sector_pad_max; int s_ind = getIndex(threadIdx.x,threadIdx.y,threadIdx.z,GI.sector_pad_width); while (data_cnt < sec_max) { __syncthreads(); __shared__ DType3 data_point; //datapoint shared in every thread data_point.x = crds[data_cnt]; data_point.y = crds[data_cnt +GI.data_count]; data_point.z = crds[data_cnt +2*GI.data_count]; __shared__ DType2 s_data; s_data.x = data[data_cnt].x; s_data.y = data[data_cnt].y; __syncthreads(); // set the boundaries of final dataset for gpuNUFFT this point ix = mapKSpaceToGrid(data_point.x,GI.gridDims.x,center.x,GI.sector_offset); set_minmax(&ix, &imin, &imax, max_dim, GI.kernel_radius); jy = mapKSpaceToGrid(data_point.y,GI.gridDims.y,center.y,GI.sector_offset); set_minmax(&jy, &jmin, &jmax, max_dim, GI.kernel_radius); kz = mapKSpaceToGrid(data_point.z,GI.gridDims.z,center.z,GI.sector_offset); set_minmax(&kz, &kmin, &kmax, max_dim, GI.kernel_radius); // grid this point onto the neighboring cartesian points for (k=threadIdx.z;k<=kmax; k += blockDim.z) { if (k<=kmax && k>=kmin) { kz = mapGridToKSpace(k,GI.gridDims.z,center.z,GI.sector_offset); // scale distance in z direction with x,y dimension dz_sqr = (kz - data_point.z)*GI.aniso_z_scale; dz_sqr *= dz_sqr; if (dz_sqr < GI.radiusSquared) { j=threadIdx.y; if (j<=jmax && j>=jmin) { jy = mapGridToKSpace(j,GI.gridDims.y,center.y,GI.sector_offset); dy_sqr = (jy - 
data_point.y) * GI.aniso_y_scale; dy_sqr *= dy_sqr; if (dy_sqr < GI.radiusSquared) { i=threadIdx.x; if (i<=imax && i>=imin) { ix = mapGridToKSpace(i,GI.gridDims.x,center.x,GI.sector_offset); dx_sqr = (ix - data_point.x)*GI.aniso_x_scale; dx_sqr *= dx_sqr; if (dx_sqr < GI.radiusSquared) { //get kernel value //Calculate Separable Filters val = KERNEL[(int) round(dz_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dy_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dx_sqr * GI.dist_multiplier)]; //each thread writes one position from shared mem to global mem if (isOutlier(i,j,k,center.x,center.y,center.z,GI.gridDims,GI.sector_offset)) //calculate opposite index ind = computeXYZ2Lin(calculateOppositeIndex(i,center.x,GI.gridDims.x,GI.sector_offset), calculateOppositeIndex(j,center.y,GI.gridDims.y,GI.sector_offset), calculateOppositeIndex(k,center.z,GI.gridDims.z,GI.sector_offset), GI.gridDims); else ind = sector_ind_offset + computeXYZ2Lin(i,j,k,GI.gridDims);//index in output grid atomicAdd(&(gdata[ind].x),val * s_data.x);//Re atomicAdd(&(gdata[ind].y),val * s_data.y);//Im } // kernel bounds check x, spherical support } // x }// kernel bounds check y, spherical support } // y } //kernel bounds check z } // z }//k, for loop over z entries __syncthreads(); data_cnt++; } //grid points per sector } __global__ void convolutionKernel4(DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers, int N ) { int sec; sec = blockIdx.x; //start convolution while (sec < N) { convolutionFunction4(sec,sectors[sec+1],0,data,crds,gdata,sectors,sector_centers); __syncthreads(); sec = sec + gridDim.x; }//sec < sector_count } __global__ void balancedConvolutionKernel4(DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType2* sector_processing_order, IndType* sector_centers, int N ) { int sec; int sec_cnt = blockIdx.x; //start convolution while (sec_cnt < N) { sec = sector_processing_order[sec_cnt].x; convolutionFunction4(sec,min(sectors[sec+1],sectors[sec]+sector_processing_order[sec_cnt].y+MAXIMUM_PAYLOAD),sector_processing_order[sec_cnt].y,data,crds,gdata,sectors,sector_centers); __syncthreads(); sec_cnt = sec_cnt + gridDim.x; }//sec < sector_count } // ---------------------------------------------------------------------------- // convolutionKernel: NUFFT^H kernel // // Performs the gpuNUFFT step by convolution of sample points with // interpolation function and resampling onto grid. Basic concept based on Zwart // et al. 
// // parameters: // * data : complex input sample points // * crds : coordinates of data points (x,y,z) // * gdata : output grid data // * sectors : mapping of sample indices according to each sector // * sector_centers : coordinates (x,y,z) of sector centers // * temp_gdata : temporary grid data // * N : number of threads __device__ void convolutionFunction2(int* sec, int sec_max, int sec_offset, DType2* sdata, DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers) { //init shared memory for (int s_ind=threadIdx.x;s_ind<GI.sector_dim; s_ind+= blockDim.x) { sdata[s_ind].x = 0.0f;//Re sdata[s_ind].y = 0.0f;//Im } __syncthreads(); //start convolution int ind, k, i, j, x, y, z; int imin, imax,jmin,jmax,kmin,kmax; DType dx_sqr, dy_sqr, dz_sqr, val, ix, jy, kz; __shared__ IndType3 center; center.x = sector_centers[sec[threadIdx.x] * 3]; center.y = sector_centers[sec[threadIdx.x] * 3 + 1]; center.z = sector_centers[sec[threadIdx.x] * 3 + 2]; //Grid Points over Threads int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x + sec_offset; //loop over all data points of the current sector, and check if grid position lies inside //affected region, if so, add data point weighted to grid position value while (data_cnt < sec_max) { DType3 data_point; //datapoint per thread data_point.x = crds[data_cnt]; data_point.y = crds[data_cnt +GI.data_count]; data_point.z = crds[data_cnt +2*GI.data_count]; // set the boundaries of final dataset for gpuNUFFT this point ix = mapKSpaceToGrid(data_point.x,GI.gridDims.x,center.x,GI.sector_offset); set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius); jy = mapKSpaceToGrid(data_point.y,GI.gridDims.y,center.y,GI.sector_offset); set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius); kz = mapKSpaceToGrid(data_point.z,GI.gridDims.z,center.z,GI.sector_offset); set_minmax(&kz, &kmin, &kmax, GI.sector_pad_max, GI.kernel_radius); // grid this point onto its cartesian points neighbors k =kmin; while (k<=kmax && k>=kmin) { kz = mapGridToKSpace(k,GI.gridDims.z,center.z,GI.sector_offset); dz_sqr = (kz - data_point.z)*GI.aniso_z_scale; dz_sqr *= dz_sqr; if (dz_sqr < GI.radiusSquared) { j=jmin; while (j<=jmax && j>=jmin) { jy = mapGridToKSpace(j,GI.gridDims.y,center.y,GI.sector_offset); dy_sqr = (jy - data_point.y) * GI.aniso_y_scale; dy_sqr *= dy_sqr; if (dy_sqr < GI.radiusSquared) { i= imin; while (i<=imax && i>=imin) { ix = mapGridToKSpace(i,GI.gridDims.x,center.x,GI.sector_offset); dx_sqr = (ix - data_point.x)*GI.aniso_x_scale; dx_sqr *= dx_sqr; if (dx_sqr < GI.radiusSquared) { //get kernel value //Calculate Separable Filters val = KERNEL[(int) round(dz_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dy_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dx_sqr * GI.dist_multiplier)]; ind = getIndex(i,j,k,GI.sector_pad_width); // multiply data by current kernel val // grid complex or scalar atomicAdd(&(sdata[ind].x),val * data[data_cnt].x); atomicAdd(&(sdata[ind].y),val * data[data_cnt].y); } // kernel bounds check x, spherical support i++; } // x } // kernel bounds check y, spherical support j++; } // y } //kernel bounds check z k++; } // z data_cnt = data_cnt + blockDim.x; } //grid points per sector //write shared data to output grid __syncthreads(); __shared__ int sector_ind_offset; sector_ind_offset = computeXYZ2Lin(center.x - GI.sector_offset,center.y - GI.sector_offset,center.z - GI.sector_offset,GI.gridDims); //each thread writes one position from shared mem to global mem for (int 
s_ind=threadIdx.x;s_ind<GI.sector_dim; s_ind += blockDim.x) { getCoordsFromIndex(s_ind,&x,&y,&z,GI.sector_pad_width); if (isOutlier(x,y,z,center.x,center.y,center.z,GI.gridDims,GI.sector_offset)) //calculate opposite index ind = computeXYZ2Lin(calculateOppositeIndex(x,center.x,GI.gridDims.x,GI.sector_offset), calculateOppositeIndex(y,center.y,GI.gridDims.y,GI.sector_offset), calculateOppositeIndex(z,center.z,GI.gridDims.z,GI.sector_offset), GI.gridDims); else ind = sector_ind_offset + computeXYZ2Lin(x,y,z,GI.gridDims);//index in output grid atomicAdd(&(gdata[ind].x),sdata[s_ind].x);//Re atomicAdd(&(gdata[ind].y),sdata[s_ind].y);//Im } } __global__ void convolutionKernel2(DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers, int N ) { extern __shared__ DType2 sdata[];//externally managed shared memory __shared__ int sec[THREAD_BLOCK_SIZE]; sec[threadIdx.x] = blockIdx.x; while (sec[threadIdx.x] < N) { __shared__ int data_max; data_max = sectors[sec[threadIdx.x]+1]; convolutionFunction2(sec,data_max,0,sdata,data,crds,gdata,sectors,sector_centers); __syncthreads(); sec[threadIdx.x] = sec[threadIdx.x]+ gridDim.x; }//sec < sector_count } __global__ void balancedConvolutionKernel2(DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType2* sector_processing_order, IndType* sector_centers, int N ) { extern __shared__ DType2 sdata[];//externally managed shared memory int sec_cnt = blockIdx.x; __shared__ int sec[THREAD_BLOCK_SIZE]; while (sec_cnt < N) { sec[threadIdx.x] = sector_processing_order[sec_cnt].x; __shared__ int data_max; data_max = min(sectors[sec[threadIdx.x]+1],sectors[sec[threadIdx.x]] + sector_processing_order[sec_cnt].y+MAXIMUM_PAYLOAD); convolutionFunction2(sec,data_max,sector_processing_order[sec_cnt].y,sdata,data,crds,gdata,sectors,sector_centers); __syncthreads(); sec_cnt = sec_cnt + gridDim.x; }//sec < sector_count } // ---------------------------------------------------------------------------- // convolutionKernel: NUFFT^H kernel // // Performs the gpuNUFFT step by convolution of sample points with // interpolation function and resampling onto grid. Basic concept based on Zwart // et al. 
// // parameters: // * data : complex input sample points // * crds : coordinates of data points (x,y,z) // * gdata : output grid data // * sectors : mapping of sample indices according to each sector // * sector_centers : coordinates (x,y,z) of sector centers // * temp_gdata : temporary grid data // * N : number of threads __device__ void convolutionFunction2D(DType2* sdata,int* sec, int sec_max, int sec_offset, DType2* data, DType* crds, CufftType* gdata,IndType* sectors, IndType* sector_centers) { //init shared memory for (int s_ind=threadIdx.x;s_ind<GI.sector_dim; s_ind+= blockDim.x) { for (int c = threadIdx.z; c < GI.n_coils_cc; c+= blockDim.z) { sdata[s_ind + c*GI.sector_dim].x = 0.0f;//Re sdata[s_ind + c*GI.sector_dim].y = 0.0f;//Im } } __syncthreads(); //start convolution int ind, i, j, x, y; int imin, imax,jmin,jmax; DType dx_sqr, dy_sqr, val, ix, jy; __shared__ IndType2 center; center.x = sector_centers[sec[threadIdx.x] * 2]; center.y = sector_centers[sec[threadIdx.x] * 2 + 1]; //Grid Points over Threads int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x + sec_offset; //loop over all data points of the current sector, and check if grid position lies inside //affected region, if so, add data point weighted to grid position value while (data_cnt < sec_max) { DType2 data_point; //datapoint per thread data_point.x = crds[data_cnt]; data_point.y = crds[data_cnt +GI.data_count]; // set the boundaries of final dataset for gpuNUFFT this point ix = mapKSpaceToGrid(data_point.x,GI.gridDims.x,center.x,GI.sector_offset); set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius); jy = mapKSpaceToGrid(data_point.y,GI.gridDims.y,center.y,GI.sector_offset); set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius); // grid this point onto its cartesian points neighbors j=jmin; while (j<=jmax && j>=jmin) { jy = mapGridToKSpace(j,GI.gridDims.y,center.y,GI.sector_offset); dy_sqr = (jy - data_point.y) * GI.aniso_y_scale; dy_sqr *= dy_sqr; if (dy_sqr < GI.radiusSquared) { i= imin; while (i<=imax && i>=imin) { ix = mapGridToKSpace(i,GI.gridDims.x,center.x,GI.sector_offset); dx_sqr = (ix - data_point.x)*GI.aniso_x_scale; dx_sqr *= dx_sqr; if (dx_sqr < GI.radiusSquared) { //get kernel value //Calculate Separable Filters val = KERNEL[(int) round(dy_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dx_sqr * GI.dist_multiplier)]; ind = getIndex2D(i,j,GI.sector_pad_width); // multiply data by current kernel val // grid complex or scalar for (int c = threadIdx.z; c < GI.n_coils_cc; c+= blockDim.z) { atomicAdd(&(sdata[ind + c * GI.sector_dim].x),val * data[data_cnt + c * GI.data_count].x); atomicAdd(&(sdata[ind + c * GI.sector_dim].y),val * data[data_cnt + c * GI.data_count].y); } } // kernel bounds check x, spherical support i++; } // x } // kernel bounds check y, spherical support j++; } // y data_cnt = data_cnt + blockDim.x; } //grid points per sector //write shared data to output grid __syncthreads(); //int sector_ind_offset = sec * GI.sector_dim; __shared__ int sector_ind_offset; sector_ind_offset = computeXY2Lin(center.x - GI.sector_offset,center.y - GI.sector_offset,GI.gridDims); //each thread writes one position from shared mem to global mem for (int s_ind=threadIdx.x;s_ind<GI.sector_dim; s_ind += blockDim.x) { getCoordsFromIndex2D(s_ind,&x,&y,GI.sector_pad_width); if (isOutlier2D(x,y,center.x,center.y,GI.gridDims,GI.sector_offset)) //calculate opposite index ind = computeXY2Lin(calculateOppositeIndex(x,center.x,GI.gridDims.x,GI.sector_offset), 
calculateOppositeIndex(y,center.y,GI.gridDims.y,GI.sector_offset), GI.gridDims); else ind = sector_ind_offset + computeXY2Lin(x,y,GI.gridDims);//index in output grid for (int c = threadIdx.z; c < GI.n_coils_cc; c+= blockDim.z) { atomicAdd(&(gdata[ind + c * GI.gridDims_count].x),sdata[s_ind + c * GI.sector_dim].x);//Re atomicAdd(&(gdata[ind + c * GI.gridDims_count].y),sdata[s_ind + c * GI.sector_dim].y);//Im sdata[s_ind + c * GI.sector_dim].x = 0.0f;//Re sdata[s_ind + c * GI.sector_dim].y = 0.0f;//Im } } __syncthreads(); } __global__ void convolutionKernel2D(DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers, int N ) { extern __shared__ DType2 sdata[];//externally managed shared memory __shared__ int sec[THREAD_BLOCK_SIZE]; sec[threadIdx.x] = blockIdx.x; while (sec[threadIdx.x] < N) { __shared__ int data_max; data_max = sectors[sec[threadIdx.x]+1]; convolutionFunction2D(sdata,sec,data_max,0,data,crds,gdata,sectors,sector_centers); __syncthreads(); sec[threadIdx.x] = sec[threadIdx.x]+ gridDim.x; }//sec < sector_count } __global__ void balancedConvolutionKernel2D(DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType2* sector_processing_order, IndType* sector_centers, int N ) { extern __shared__ DType2 sdata[];//externally managed shared memory int sec_cnt = blockIdx.x; __shared__ int sec[THREAD_BLOCK_SIZE]; while (sec_cnt < N) { sec[threadIdx.x] = sector_processing_order[sec_cnt].x; __shared__ int data_max; data_max = min(sectors[sec[threadIdx.x]+1],sectors[sec[threadIdx.x]] + sector_processing_order[sec_cnt].y + MAXIMUM_PAYLOAD); convolutionFunction2D(sdata,sec,data_max,sector_processing_order[sec_cnt].y,data,crds,gdata,sectors,sector_centers); __syncthreads(); sec_cnt = sec_cnt+ gridDim.x; }//sec < sector_count } // ---------------------------------------------------------------------------- // convolutionKernel: NUFFT^H kernel // // Performs the gpuNUFFT step by convolution of sample points with // interpolation function and resampling onto grid. Basic concept based on Zwart // et al. 
// // parameters: // * data : complex input sample points // * crds : coordinates of data points (x,y,z) // * gdata : output grid data // * sectors : mapping of sample indices according to each sector // * sector_centers : coordinates (x,y,z) of sector centers // * temp_gdata : temporary grid data // * N : number of threads __global__ void convolutionKernel( DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers, int N ) { int sec= blockIdx.x; //start convolution while (sec < N) { int ind, imin, imax, jmin, jmax,kmin,kmax, k, i, j; DType dx_sqr, dy_sqr, dz_sqr, val, ix, jy, kz; __shared__ IndType3 center; center.x = sector_centers[sec * 3]; center.y = sector_centers[sec * 3 + 1]; center.z = sector_centers[sec * 3 + 2]; //Grid Points over Threads int data_cnt = sectors[sec] + threadIdx.x; int data_max = sectors[sec+1]; __shared__ int sector_ind_offset; sector_ind_offset = computeXYZ2Lin(center.x - GI.sector_offset,center.y - GI.sector_offset,center.z - GI.sector_offset,GI.gridDims); while (data_cnt < data_max) { DType3 data_point; //datapoint per thread data_point.x = crds[data_cnt]; data_point.y = crds[data_cnt +GI.data_count]; data_point.z = crds[data_cnt +2*GI.data_count]; // set the boundaries of final dataset for gpuNUFFT this point ix = mapKSpaceToGrid(data_point.x,GI.gridDims.x,center.x,GI.sector_offset); set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius); jy = mapKSpaceToGrid(data_point.y,GI.gridDims.y,center.y,GI.sector_offset); set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius); kz = mapKSpaceToGrid(data_point.z,GI.gridDims.z,center.z,GI.sector_offset); set_minmax(&kz, &kmin, &kmax, GI.sector_pad_max, GI.kernel_radius); // convolve neighboring cartesian points to this data point k = kmin; while (k<=kmax && k>=kmin) { kz = mapGridToKSpace(k,GI.gridDims.z,center.z,GI.sector_offset); dz_sqr = (kz - data_point.z)*GI.aniso_z_scale; dz_sqr *= dz_sqr; if (dz_sqr < GI.radiusSquared) { j=jmin; while (j<=jmax && j>=jmin) { jy = mapGridToKSpace(j,GI.gridDims.y,center.y,GI.sector_offset); dy_sqr = (jy - data_point.y) * GI.aniso_y_scale; dy_sqr *= dy_sqr; if (dy_sqr < GI.radiusSquared) { i=imin; while (i<=imax && i>=imin) { ix = mapGridToKSpace(i,GI.gridDims.x,center.x,GI.sector_offset); dx_sqr = (ix - data_point.x)*GI.aniso_x_scale; dx_sqr *= dx_sqr; if (dx_sqr < GI.radiusSquared) { // get kernel value //Berechnung mit Separable Filters val = KERNEL[(int) round(dz_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dy_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dx_sqr * GI.dist_multiplier)]; if (isOutlier(i,j,k,center.x,center.y,center.z,GI.gridDims,GI.sector_offset)) //calculate opposite index ind = computeXYZ2Lin(calculateOppositeIndex(i,center.x,GI.gridDims.x,GI.sector_offset), calculateOppositeIndex(j,center.y,GI.gridDims.y,GI.sector_offset), calculateOppositeIndex(k,center.z,GI.gridDims.z,GI.sector_offset), GI.gridDims); else ind = sector_ind_offset + computeXYZ2Lin(i,j,k,GI.gridDims);//index in output grid atomicAdd(&(gdata[ind].x),val * data[data_cnt].x);//Re atomicAdd(&(gdata[ind].y),val * data[data_cnt].y);//Im }// kernel bounds check x, spherical support i++; } // x loop } // kernel bounds check y, spherical support j++; } // y loop } //kernel bounds check z k++; } // z loop data_cnt = data_cnt + blockDim.x; } //data points per sector __syncthreads(); sec = sec + gridDim.x; } //sector check } void performConvolution( DType2* data_d, DType* crds_d, CufftType* gdata_d, DType* kernel_d, IndType* sectors_d, IndType* 
sector_centers_d, gpuNUFFT::GpuNUFFTInfo* gi_host ) { #define CONVKERNEL2 #ifdef CONVKERNEL long shared_mem_size = (gi_host->sector_dim)*sizeof(DType2); dim3 block_dim(THREAD_BLOCK_SIZE); dim3 grid_dim(getOptimalGridDim(gi_host->sector_count,THREAD_BLOCK_SIZE)); if (gi_host->is2Dprocessing) hipLaunchKernelGGL(( convolutionKernel2D), dim3(grid_dim),dim3(block_dim),shared_mem_size, 0, data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count); else hipLaunchKernelGGL(( convolutionKernel), dim3(grid_dim),dim3(block_dim), 0, 0, data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count); #else #ifdef CONVKERNEL2 long shared_mem_size = (gi_host->sector_dim) * sizeof(DType2) * gi_host->n_coils_cc; int thread_size = THREAD_BLOCK_SIZE; dim3 block_dim(thread_size); dim3 grid_dim(getOptimalGridDim(gi_host->sector_count,1)); if (DEBUG) { printf("adjoint convolution requires %ld bytes of shared memory!\n",shared_mem_size); printf("grid dim %u, block dim %u \n",grid_dim.x, block_dim.x); } if (gi_host->is2Dprocessing) { dim3 block_dim(64, 1, DEFAULT_VALUE(gi_host->n_coils_cc > 4 ? 4 : gi_host->n_coils_cc)); hipLaunchKernelGGL(( convolutionKernel2D), dim3(grid_dim),dim3(block_dim),shared_mem_size, 0, data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count); } else { hipLaunchKernelGGL(( convolutionKernel2), dim3(grid_dim),dim3(block_dim),shared_mem_size, 0, data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count); } #else #ifdef CONVKERNEL4 // TODO tune param z dim // defines size of total shared mem used dim3 block_dim(gi_host->sector_pad_width,gi_host->sector_pad_width,3); long shared_mem_size = block_dim.x*block_dim.y*block_dim.z*sizeof(DType2); dim3 grid_dim(getOptimalGridDim(gi_host->sector_count,1)); if (DEBUG) { printf("adjoint convolution requires %ld bytes of shared memory!\n",shared_mem_size); printf("grid dim (%u,%u,%u), block dim (%u,%u,%u) \n",grid_dim.x,grid_dim.y,grid_dim.z, block_dim.x,block_dim.y,block_dim.z); } if (gi_host->is2Dprocessing) { shared_mem_size = (gi_host->sector_dim)*sizeof(DType2); int thread_size =THREAD_BLOCK_SIZE; dim3 block_dim(thread_size); dim3 grid_dim(getOptimalGridDim(gi_host->sector_count,1)); hipLaunchKernelGGL(( convolutionKernel2D), dim3(grid_dim),dim3(block_dim),shared_mem_size, 0, data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count); } else hipLaunchKernelGGL(( convolutionKernel4), dim3(grid_dim),dim3(block_dim), 0, 0, data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count); #else long cache_size = 176; long shared_mem_size = (2*cache_size + 3*cache_size)*sizeof(DType); dim3 block_dim(gi_host->sector_pad_width,gi_host->sector_pad_width,4); dim3 grid_dim(gi_host->sector_count); hipLaunchKernelGGL(( convolutionKernel3), dim3(grid_dim),dim3(block_dim),shared_mem_size, 0, data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count,cache_size); #endif #endif #endif if (DEBUG) printf("...finished with: %s\n", hipGetErrorString(hipGetLastError())); } void performConvolution( DType2* data_d, DType* crds_d, CufftType* gdata_d, DType* kernel_d, IndType* sectors_d, IndType2* sector_processing_order_d, IndType* sector_centers_d, gpuNUFFT::GpuNUFFTInfo* gi_host ) { long shared_mem_size = (gi_host->sector_dim)*sizeof(DType2) * gi_host->n_coils_cc; int thread_size =THREAD_BLOCK_SIZE; dim3 block_dim(thread_size); dim3 grid_dim(getOptimalGridDim(gi_host->sector_count,1)); if (DEBUG) { printf("adjoint convolution requires %ld bytes of shared memory!\n",shared_mem_size); 
printf("grid dim %u, block dim %u \n",grid_dim.x, block_dim.x); } if (gi_host->is2Dprocessing) { dim3 block_dim(64, 1, DEFAULT_VALUE(gi_host->n_coils_cc > 4 ? 4 : gi_host->n_coils_cc)); hipLaunchKernelGGL(( balancedConvolutionKernel2D), dim3(grid_dim),dim3(block_dim),shared_mem_size, 0, data_d,crds_d,gdata_d,sectors_d,sector_processing_order_d,sector_centers_d,gi_host->sectorsToProcess); } else { hipLaunchKernelGGL(( balancedConvolutionKernel2), dim3(grid_dim),dim3(block_dim),shared_mem_size, 0, data_d,crds_d,gdata_d,sectors_d,sector_processing_order_d,sector_centers_d,gi_host->sectorsToProcess); //dim3 block_dim(gi_host->sector_pad_width,gi_host->sector_pad_width,3); //dim3 grid_dim(getOptimalGridDim(gi_host->sector_count,block_dim.x*block_dim.y*block_dim.z)); //balancedConvolutionKernel4<<<grid_dim,block_dim>>>(data_d,crds_d,gdata_d,sectors_d,sector_processing_order_d,sector_centers_d,gi_host->sectorsToProcess); } if (DEBUG) printf("...finished with: %s\n", hipGetErrorString(hipGetLastError())); } // ---------------------------------------------------------------------------- // forwardConvolutionKernel: NUFFT kernel // // Performs the inverse gpuNUFFT step by convolution of grid points with // interpolation function and resampling onto trajectory. // // parameters: // * data : complex output sample points // * crds : coordinates of data points (x,y,z) // * gdata : input grid data // * sectors : mapping of sample indices according to each sector // * sector_centers : coordinates (x,y,z) of sector centers // * N : number of threads __global__ void forwardConvolutionKernel( CufftType* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers, int N) { extern __shared__ CufftType shared_out_data[];//externally managed shared memory __shared__ int sec[THREAD_BLOCK_SIZE]; sec[threadIdx.x]= blockIdx.x; //init shared memory shared_out_data[threadIdx.x].x = 0.0f;//Re shared_out_data[threadIdx.x].y = 0.0f;//Im __syncthreads(); //start convolution while (sec[threadIdx.x] < N) { int ind, imin, imax, jmin, jmax,kmin,kmax, k, i, j; DType dx_sqr, dy_sqr, dz_sqr, val, ix, jy, kz; __shared__ IndType3 center; center.x = sector_centers[sec[threadIdx.x] * 3]; center.y = sector_centers[sec[threadIdx.x] * 3 + 1]; center.z = sector_centers[sec[threadIdx.x] * 3 + 2]; //Grid Points over Threads int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x; __shared__ int data_max; data_max = sectors[sec[threadIdx.x]+1]; __shared__ int sector_ind_offset; sector_ind_offset = computeXYZ2Lin(center.x - GI.sector_offset,center.y - GI.sector_offset,center.z - GI.sector_offset,GI.gridDims); while (data_cnt < data_max) { DType3 data_point; //datapoint per thread data_point.x = crds[data_cnt]; data_point.y = crds[data_cnt +GI.data_count]; data_point.z = crds[data_cnt +2*GI.data_count]; // set the boundaries of final dataset for gpuNUFFT this point ix = mapKSpaceToGrid(data_point.x,GI.gridDims.x,center.x,GI.sector_offset); set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius); jy = mapKSpaceToGrid(data_point.y,GI.gridDims.y,center.y,GI.sector_offset); set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius); kz = mapKSpaceToGrid(data_point.z,GI.gridDims.z,center.z,GI.sector_offset); set_minmax(&kz, &kmin, &kmax, GI.sector_pad_max, GI.kernel_radius); // convolve neighboring cartesian points to this data point k = kmin; while (k<=kmax && k>=kmin) { kz = mapGridToKSpace(k,GI.gridDims.z,center.z,GI.sector_offset); dz_sqr = (kz - data_point.z)*GI.aniso_z_scale; dz_sqr *= dz_sqr; if (dz_sqr 
< GI.radiusSquared) { j=jmin; while (j<=jmax && j>=jmin) { jy = mapGridToKSpace(j,GI.gridDims.y,center.y,GI.sector_offset); dy_sqr = (jy - data_point.y) * GI.aniso_y_scale; dy_sqr *= dy_sqr; if (dy_sqr < GI.radiusSquared) { i=imin; while (i<=imax && i>=imin) { ix = mapGridToKSpace(i,GI.gridDims.x,center.x,GI.sector_offset); dx_sqr = (ix - data_point.x)*GI.aniso_x_scale; dx_sqr *= dx_sqr; if (dx_sqr < GI.radiusSquared) { // get kernel value // calc as separable filter val = KERNEL[(int) round(dz_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dy_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dx_sqr * GI.dist_multiplier)]; // multiply data by current kernel val // grid complex or scalar if (isOutlier(i,j,k,center.x,center.y,center.z,GI.gridDims,GI.sector_offset)) //calculate opposite index ind = computeXYZ2Lin(calculateOppositeIndex(i,center.x,GI.gridDims.x,GI.sector_offset), calculateOppositeIndex(j,center.y,GI.gridDims.y,GI.sector_offset), calculateOppositeIndex(k,center.z,GI.gridDims.z,GI.sector_offset), GI.gridDims); else ind = (sector_ind_offset + computeXYZ2Lin(i,j,k,GI.gridDims)); shared_out_data[threadIdx.x].x += gdata[ind].x * val; shared_out_data[threadIdx.x].y += gdata[ind].y * val; }// kernel bounds check x, spherical support i++; } // x loop } // kernel bounds check y, spherical support j++; } // y loop } //kernel bounds check z k++; } // z loop data[data_cnt].x = shared_out_data[threadIdx.x].x; data[data_cnt].y = shared_out_data[threadIdx.x].y; data_cnt = data_cnt + blockDim.x; shared_out_data[threadIdx.x].x = (DType)0.0;//Re shared_out_data[threadIdx.x].y = (DType)0.0;//Im } //data points per sector __syncthreads(); sec[threadIdx.x]= sec[threadIdx.x] + gridDim.x; } //sector check } __device__ void forwardConvolutionFunction2(int* sec, int sec_max, int sec_offset, DType2* sdata, CufftType* gdata_cache, DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers) { int ind, imin, imax, jmin, jmax,kmin,kmax, k, i, j; DType dx_sqr, dy_sqr, dz_sqr, val, ix, jy, kz; __shared__ IndType3 center; center.x = sector_centers[sec[threadIdx.x] * 3]; center.y = sector_centers[sec[threadIdx.x] * 3 + 1]; center.z = sector_centers[sec[threadIdx.x] * 3 + 2]; __shared__ int sector_ind_offset; sector_ind_offset = computeXYZ2Lin(center.x - GI.sector_offset,center.y - GI.sector_offset,center.z - GI.sector_offset,GI.gridDims); // init sector cache // preload sector grid data into cache for (int ind=threadIdx.x; ind<GI.sector_dim; ind+=blockDim.x) { int grid_index; getCoordsFromIndex(ind,&i,&j,&k,GI.sector_pad_width); if (isOutlier(i,j,k,center.x,center.y,center.z,GI.gridDims,GI.sector_offset)) //calculate opposite index grid_index = computeXYZ2Lin(calculateOppositeIndex(i,center.x,GI.gridDims.x,GI.sector_offset), calculateOppositeIndex(j,center.y,GI.gridDims.y,GI.sector_offset), calculateOppositeIndex(k,center.z,GI.gridDims.z,GI.sector_offset), GI.gridDims); else grid_index = (sector_ind_offset + computeXYZ2Lin(i,j,k,GI.gridDims)); gdata_cache[ind].x = gdata[grid_index].x; gdata_cache[ind].y = gdata[grid_index].y; } __syncthreads(); //Grid Points over Threads int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x + sec_offset; while (data_cnt < sec_max) { DType3 data_point; //datapoint per thread data_point.x = crds[data_cnt]; data_point.y = crds[data_cnt + GI.data_count]; data_point.z = crds[data_cnt + 2*GI.data_count]; // set the boundaries of final dataset for gpuNUFFT this point ix = mapKSpaceToGrid(data_point.x,GI.gridDims.x,center.x,GI.sector_offset); set_minmax(&ix, 
&imin, &imax, GI.sector_pad_max, GI.kernel_radius); jy = mapKSpaceToGrid(data_point.y,GI.gridDims.y,center.y,GI.sector_offset); set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius); kz = mapKSpaceToGrid(data_point.z,GI.gridDims.z,center.z,GI.sector_offset); set_minmax(&kz, &kmin, &kmax, GI.sector_pad_max, GI.kernel_radius); // convolve neighboring cartesian points to this data point k = kmin; while (k<=kmax && k>=kmin) { kz = mapGridToKSpace(k,GI.gridDims.z,center.z,GI.sector_offset); dz_sqr = (kz - data_point.z)*GI.aniso_z_scale; dz_sqr *= dz_sqr; if (dz_sqr < GI.radiusSquared) { j=jmin; while (j<=jmax && j>=jmin) { jy = mapGridToKSpace(j,GI.gridDims.y,center.y,GI.sector_offset); dy_sqr = (jy - data_point.y) * GI.aniso_y_scale; dy_sqr *= dy_sqr; if (dy_sqr < GI.radiusSquared) { i=imin; while (i<=imax && i>=imin) { ix = mapGridToKSpace(i,GI.gridDims.x,center.x,GI.sector_offset); dx_sqr = (ix - data_point.x)*GI.aniso_x_scale; dx_sqr *= dx_sqr; if (dx_sqr < GI.radiusSquared) { // get kernel value // calc as separable filter val = KERNEL[(int) round(dz_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dy_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dx_sqr * GI.dist_multiplier)]; ind = getIndex(i,j,k,GI.sector_pad_width); sdata[threadIdx.x].x += gdata_cache[ind].x * val; sdata[threadIdx.x].y += gdata_cache[ind].y * val; }// kernel bounds check x, spherical support i++; } // x loop } // kernel bounds check y, spherical support j++; } // y loop } //kernel bounds check z k++; } // z loop atomicAdd(&(data[data_cnt].x),sdata[threadIdx.x].x); atomicAdd(&(data[data_cnt].y),sdata[threadIdx.x].y); data_cnt = data_cnt + blockDim.x; sdata[threadIdx.x].x = (DType)0.0;//Re sdata[threadIdx.x].y = (DType)0.0;//Im } //data points per sector } // cached version of above kernel __global__ void forwardConvolutionKernel2(CufftType* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers, int N) { extern __shared__ CufftType shared[];//externally managed shared memory CufftType* shared_out_data =(CufftType*) &shared[0]; CufftType* gdata_cache =(CufftType*) &shared[blockDim.x]; __shared__ int sec[THREAD_BLOCK_SIZE]; sec[threadIdx.x]= blockIdx.x; //init shared memory shared_out_data[threadIdx.x].x = 0.0f;//Re shared_out_data[threadIdx.x].y = 0.0f;//Im __syncthreads(); //start convolution while (sec[threadIdx.x] < N) { __shared__ int data_max; data_max = sectors[sec[threadIdx.x]+1]; forwardConvolutionFunction2(sec,data_max,0,shared_out_data,gdata_cache,data,crds,gdata,sectors,sector_centers); __syncthreads(); sec[threadIdx.x]= sec[threadIdx.x] + gridDim.x; } //sector check } __global__ void balancedForwardConvolutionKernel2(CufftType* data, DType* crds, CufftType* gdata, IndType* sectors, IndType2* sector_processing_order, IndType* sector_centers, int N) { extern __shared__ CufftType shared[];//externally managed shared memory CufftType* shared_out_data =(CufftType*) &shared[0]; CufftType* gdata_cache =(CufftType*) &shared[blockDim.x]; int sec_cnt = blockIdx.x; __shared__ int sec[THREAD_BLOCK_SIZE]; //init shared memory shared_out_data[threadIdx.x].x = 0.0f;//Re shared_out_data[threadIdx.x].y = 0.0f;//Im __syncthreads(); //start convolution while (sec_cnt < N) { sec[threadIdx.x] = sector_processing_order[sec_cnt].x; __shared__ int data_max; data_max = min(sectors[sec[threadIdx.x]+1],sectors[sec[threadIdx.x]] + sector_processing_order[sec_cnt].y + MAXIMUM_PAYLOAD); 
forwardConvolutionFunction2(sec,data_max,sector_processing_order[sec_cnt].y,shared_out_data,gdata_cache,data,crds,gdata,sectors,sector_centers); __syncthreads(); sec_cnt = sec_cnt + gridDim.x; } //sector check } __global__ void forwardConvolutionKernel2D( CufftType* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers, int N) { extern __shared__ CufftType shared_out_data[];//externally managed shared memory __shared__ int sec[THREAD_BLOCK_SIZE]; sec[threadIdx.x]= blockIdx.x; //init shared memory for (int c=threadIdx.z; c < GI.n_coils_cc; c+=blockDim.z) { shared_out_data[threadIdx.x + c * blockDim.x].x = 0.0f;//Re shared_out_data[threadIdx.x + c * blockDim.x].y = 0.0f;//Im } __syncthreads(); //start convolution while (sec[threadIdx.x] < N) { int ind, imin, imax, jmin, jmax, i, j; DType dx_sqr, dy_sqr, val, ix, jy; __shared__ IndType2 center; center.x = sector_centers[sec[threadIdx.x] * 2]; center.y = sector_centers[sec[threadIdx.x] * 2 + 1]; //Grid Points over Threads int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x; __shared__ int data_max; data_max = sectors[sec[threadIdx.x]+1]; __shared__ int sector_ind_offset; sector_ind_offset = computeXY2Lin(center.x - GI.sector_offset,center.y - GI.sector_offset,GI.gridDims); while (data_cnt < data_max) { DType2 data_point; //datapoint per thread data_point.x = crds[data_cnt]; data_point.y = crds[data_cnt +GI.data_count]; // set the boundaries of final dataset for gpuNUFFT this point ix = mapKSpaceToGrid(data_point.x,GI.gridDims.x,center.x,GI.sector_offset); set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius); jy = mapKSpaceToGrid(data_point.y,GI.gridDims.y,center.y,GI.sector_offset); set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius); // convolve neighboring cartesian points to this data point j=jmin; while (j<=jmax && j>=jmin) { jy = mapGridToKSpace(j,GI.gridDims.y,center.y,GI.sector_offset); dy_sqr = (jy - data_point.y) * GI.aniso_y_scale; dy_sqr *= dy_sqr; if (dy_sqr < GI.radiusSquared) { i=imin; while (i<=imax && i>=imin) { ix = mapGridToKSpace(i,GI.gridDims.x,center.x,GI.sector_offset); dx_sqr = (ix - data_point.x)*GI.aniso_x_scale; dx_sqr *= dx_sqr; if (dx_sqr < GI.radiusSquared) { // get kernel value // calc as separable filter val = KERNEL[(int) round(dy_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dx_sqr * GI.dist_multiplier)]; // multiply data by current kernel val // grid complex or scalar if (isOutlier2D(i,j,center.x,center.y,GI.gridDims,GI.sector_offset)) //calculate opposite index ind = computeXY2Lin(calculateOppositeIndex(i,center.x,GI.gridDims.x,GI.sector_offset), calculateOppositeIndex(j,center.y,GI.gridDims.y,GI.sector_offset), GI.gridDims); else ind = (sector_ind_offset + computeXY2Lin(i,j,GI.gridDims)); for (int c=threadIdx.z; c < GI.n_coils_cc; c+=blockDim.z) { shared_out_data[threadIdx.x + c * blockDim.x].x += gdata[ind+ c*GI.gridDims_count].x * val; shared_out_data[threadIdx.x + c * blockDim.x].y += gdata[ind+ c*GI.gridDims_count].y * val; } }// kernel bounds check x, spherical support i++; } // x loop } // kernel bounds check y, spherical support j++; } // y loop for (int c=threadIdx.z; c < GI.n_coils_cc; c+=blockDim.z) { data[data_cnt + c*GI.data_count].x = shared_out_data[threadIdx.x + c * blockDim.x].x; data[data_cnt + c*GI.data_count].y = shared_out_data[threadIdx.x + c * blockDim.x].y; shared_out_data[threadIdx.x + c*blockDim.x].x = (DType)0.0;//Re shared_out_data[threadIdx.x + c*blockDim.x].y = (DType)0.0;//Im } data_cnt = data_cnt + blockDim.x; } //data 
points per sector __syncthreads(); sec[threadIdx.x]= sec[threadIdx.x] + gridDim.x; } //sector check } __device__ void forwardConvolutionFunction2D(int* sec, int sec_max, int sec_offset, DType2* sdata, CufftType* gdata_cache, DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers) { int ind, imin, imax, jmin, jmax, i, j; DType dx_sqr, dy_sqr, val, ix, jy; __shared__ IndType2 center; center.x = sector_centers[sec[threadIdx.x] * 2]; center.y = sector_centers[sec[threadIdx.x] * 2 + 1]; __shared__ int sector_ind_offset; sector_ind_offset = computeXY2Lin(center.x - GI.sector_offset,center.y - GI.sector_offset,GI.gridDims); // init sector cache // preload sector grid data into cache for (int ind=threadIdx.x; ind<GI.sector_dim; ind+=blockDim.x) { int grid_index; getCoordsFromIndex2D(ind,&i,&j,GI.sector_pad_width); // multiply data by current kernel val // grid complex or scalar if (isOutlier2D(i,j,center.x,center.y,GI.gridDims.x,GI.sector_offset)) //calculate opposite index grid_index = getIndex2D(calculateOppositeIndex(i,center.x,GI.gridDims.x,GI.sector_offset), calculateOppositeIndex(j,center.y,GI.gridDims.y,GI.sector_offset), GI.gridDims.x); else grid_index = (sector_ind_offset + getIndex2D(i,j,GI.gridDims.x)); for (int c=threadIdx.z; c < GI.n_coils_cc; c+=blockDim.z) { gdata_cache[ind + c*GI.sector_dim].x = gdata[grid_index + c*GI.gridDims_count].x; gdata_cache[ind + c*GI.sector_dim].y = gdata[grid_index + c*GI.gridDims_count].y; } } __syncthreads(); //Grid Points over Threads int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x + sec_offset; while (data_cnt < sec_max) { DType2 data_point; //datapoint per thread data_point.x = crds[data_cnt]; data_point.y = crds[data_cnt + GI.data_count]; // set the boundaries of final dataset for gpuNUFFT this point ix = mapKSpaceToGrid(data_point.x,GI.gridDims.x,center.x,GI.sector_offset); set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius); jy = mapKSpaceToGrid(data_point.y,GI.gridDims.y,center.y,GI.sector_offset); set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius); // convolve neighboring cartesian points to this data point j=jmin; while (j<=jmax && j>=jmin) { jy = mapGridToKSpace(j,GI.gridDims.y,center.y,GI.sector_offset); dy_sqr = (jy - data_point.y) * GI.aniso_y_scale; dy_sqr *= dy_sqr; if (dy_sqr < GI.radiusSquared) { i=imin; while (i<=imax && i>=imin) { ix = mapGridToKSpace(i,GI.gridDims.x,center.x,GI.sector_offset); dx_sqr = (ix - data_point.x)*GI.aniso_x_scale; dx_sqr *= dx_sqr; if (dx_sqr < GI.radiusSquared) { // get kernel value // calc as separable filter val = KERNEL[(int) round(dy_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dx_sqr * GI.dist_multiplier)]; ind = getIndex2D(i,j,GI.sector_pad_width); for (int c=threadIdx.z; c < GI.n_coils_cc; c+=blockDim.z) { sdata[threadIdx.x + c*blockDim.x].x += gdata_cache[ind + c*GI.sector_dim].x * val; sdata[threadIdx.x + c*blockDim.x].y += gdata_cache[ind + c*GI.sector_dim].y * val; } }// kernel bounds check x, spherical support i++; } // x loop } // kernel bounds check y, spherical support j++; } // y loop for (int c=threadIdx.z; c < GI.n_coils_cc; c+=blockDim.z) { atomicAdd(&(data[data_cnt + c*GI.data_count].x),sdata[threadIdx.x + c*blockDim.x].x); atomicAdd(&(data[data_cnt + c*GI.data_count].y),sdata[threadIdx.x + c*blockDim.x].y); sdata[threadIdx.x + c*blockDim.x].x = (DType)0.0;//Re sdata[threadIdx.x + c*blockDim.x].y = (DType)0.0;//Im } data_cnt = data_cnt + blockDim.x; } //data points per sector } //cached version of above kernel 
__global__ void forwardConvolutionKernel22D(CufftType* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers, int N) { extern __shared__ CufftType shared[];//externally managed shared memory CufftType* shared_out_data =(CufftType*) &shared[0]; CufftType* gdata_cache =(CufftType*) &shared[blockDim.x * GI.n_coils_cc]; __shared__ int sec[THREAD_BLOCK_SIZE]; sec[threadIdx.x]= blockIdx.x; //init shared memory for (int c=threadIdx.z; c < GI.n_coils_cc; c+=blockDim.z) { shared_out_data[threadIdx.x + c*blockDim.x].x = 0.0f;//Re shared_out_data[threadIdx.x + c*blockDim.x].y = 0.0f;//Im } __syncthreads(); //start convolution while (sec[threadIdx.x] < N) { __shared__ int data_max; data_max = sectors[sec[threadIdx.x]+1]; forwardConvolutionFunction2D(sec,data_max,0,shared_out_data,gdata_cache,data,crds,gdata,sectors,sector_centers); __syncthreads(); sec[threadIdx.x]= sec[threadIdx.x] + gridDim.x; } //sector check } __global__ void balancedForwardConvolutionKernel22D(CufftType* data, DType* crds, CufftType* gdata, IndType* sectors, IndType2* sector_processing_order, IndType* sector_centers, int N) { extern __shared__ CufftType shared[];//externally managed shared memory CufftType* shared_out_data =(CufftType*) &shared[0]; CufftType* gdata_cache =(CufftType*) &shared[blockDim.x * GI.n_coils_cc]; int sec_cnt= blockIdx.x; __shared__ int sec[THREAD_BLOCK_SIZE]; //init shared memory for (int c=threadIdx.z; c < GI.n_coils_cc; c+=blockDim.z) { shared_out_data[threadIdx.x + c * blockDim.x].x = 0.0f;//Re shared_out_data[threadIdx.x + c * blockDim.x].y = 0.0f;//Im } __syncthreads(); //start convolution while (sec_cnt < N) { sec[threadIdx.x] = sector_processing_order[sec_cnt].x; __shared__ int data_max; data_max = min(sectors[sec[threadIdx.x]+1],sectors[sec[threadIdx.x]] + sector_processing_order[sec_cnt].y+MAXIMUM_PAYLOAD); forwardConvolutionFunction2D(sec,data_max,sector_processing_order[sec_cnt].y,shared_out_data,gdata_cache,data,crds,gdata,sectors,sector_centers); __syncthreads(); sec_cnt = sec_cnt + gridDim.x; } //sector check } void performForwardConvolution( CufftType* data_d, DType* crds_d, CufftType* gdata_d, DType* kernel_d, IndType* sectors_d, IndType* sector_centers_d, gpuNUFFT::GpuNUFFTInfo* gi_host ) { // cached version proved to be // faster than non-cached version // even in 2d case bool useCache = true; if (!useCache) { int thread_size =THREAD_BLOCK_SIZE; long shared_mem_size = thread_size * sizeof(CufftType) * gi_host->n_coils_cc;//empiric dim3 block_dim(thread_size); dim3 grid_dim(getOptimalGridDim(gi_host->sector_count,thread_size)); if (DEBUG) printf("convolution requires %ld bytes of shared memory!\n",shared_mem_size); if (gi_host->is2Dprocessing) { dim3 block_dim(thread_size, 1, DEFAULT_VALUE(gi_host->n_coils_cc > 8 ? 
          8 : gi_host->n_coils_cc));
      hipLaunchKernelGGL(( forwardConvolutionKernel2D), dim3(grid_dim),dim3(block_dim),shared_mem_size, 0, data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count);
    }
    else
      hipLaunchKernelGGL(( forwardConvolutionKernel), dim3(grid_dim),dim3(block_dim),shared_mem_size, 0, data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count);
  }
  else
  {
    int thread_size = THREAD_BLOCK_SIZE;
    long shared_mem_size = (thread_size + gi_host->sector_dim) * gi_host->n_coils_cc * sizeof(CufftType);//empiric
    dim3 block_dim(thread_size);
    dim3 grid_dim(getOptimalGridDim(gi_host->sector_count,thread_size));
    if (DEBUG)
      printf("forward convolution requires %ld bytes of shared memory!\n",shared_mem_size);

    if (gi_host->is2Dprocessing)
    {
      dim3 block_dim(thread_size, 1, DEFAULT_VALUE(gi_host->n_coils_cc > 4 ? 2 : gi_host->n_coils_cc));
      hipLaunchKernelGGL(( forwardConvolutionKernel22D), dim3(grid_dim),dim3(block_dim),shared_mem_size, 0, data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count);
    }
    else
      hipLaunchKernelGGL(( forwardConvolutionKernel2), dim3(grid_dim),dim3(block_dim),shared_mem_size, 0, data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count);
  }
}

void performForwardConvolution( CufftType* data_d,
  DType* crds_d,
  CufftType* gdata_d,
  DType* kernel_d,
  IndType* sectors_d,
  IndType2* sector_processing_order_d,
  IndType* sector_centers_d,
  gpuNUFFT::GpuNUFFTInfo* gi_host
  )
{
  int thread_size =THREAD_BLOCK_SIZE;//empiric
  long shared_mem_size = (thread_size + gi_host->sector_dim ) * gi_host->n_coils_cc * sizeof(CufftType);
  dim3 block_dim(thread_size);
  dim3 grid_dim(getOptimalGridDim(gi_host->sector_count,thread_size));
  if (DEBUG)
    printf("balanced convolution requires %ld bytes of shared memory!\n",shared_mem_size);

  if (gi_host->is2Dprocessing)
  {
    dim3 block_dim(160, 1, DEFAULT_VALUE(gi_host->n_coils_cc > 4 ? 2 : gi_host->n_coils_cc));
    hipLaunchKernelGGL(( balancedForwardConvolutionKernel22D), dim3(grid_dim),dim3(block_dim),shared_mem_size, 0, data_d,crds_d,gdata_d,sectors_d,sector_processing_order_d,sector_centers_d,gi_host->sectorsToProcess);
  }
  else
    hipLaunchKernelGGL(( balancedForwardConvolutionKernel2), dim3(grid_dim),dim3(block_dim),shared_mem_size, 0, data_d,crds_d,gdata_d,sectors_d,sector_processing_order_d,sector_centers_d,gi_host->sectorsToProcess);
}
#endif
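// ---------------------------------------------------------------------------
// Illustrative sketch (not part of gpuNUFFT): the adjoint kernels above share
// one pattern: accumulate a sector's contributions in shared memory first,
// then flush the sector tile into the global grid with atomicAdd, because
// neighbouring sectors overlap by the kernel radius. The standalone toy below
// shows that pattern in isolation on a 1-D grid, with a trivial
// one-sample-per-cell mapping instead of the real interpolation kernel. Every
// name in it (toySectorGrid, SECTOR_WIDTH_TOY, nSectors, ...) is invented for
// this sketch and does not exist in the gpuNUFFT sources. It is wrapped in
// #if 0 so it never takes part in the regular build; compile it separately
// if you want to run it.
// ---------------------------------------------------------------------------
#if 0
#include <cstdio>
#include <vector>
#include <hip/hip_runtime.h>

#define SECTOR_WIDTH_TOY 32 // grid cells owned by one sector/block in this toy

__global__ void toySectorGrid(const float2* samples, const int* sectorStart,
                              float2* grid, int nSectors)
{
  __shared__ float2 tile[SECTOR_WIDTH_TOY];
  for (int sec = blockIdx.x; sec < nSectors; sec += gridDim.x)
  {
    // init the shared sector tile (cf. the sdata[] init loops above)
    for (int i = threadIdx.x; i < SECTOR_WIDTH_TOY; i += blockDim.x)
      tile[i] = make_float2(0.0f, 0.0f);
    __syncthreads();

    // accumulate this sector's samples into shared memory
    for (int s = sectorStart[sec] + threadIdx.x; s < sectorStart[sec + 1]; s += blockDim.x)
    {
      int cell = s - sectorStart[sec]; // toy mapping: i-th sample of the sector hits the i-th cell
      if (cell < SECTOR_WIDTH_TOY)
      {
        atomicAdd(&tile[cell].x, samples[s].x);
        atomicAdd(&tile[cell].y, samples[s].y);
      }
    }
    __syncthreads();

    // flush the tile to the global grid; atomicAdd because real sectors overlap
    for (int i = threadIdx.x; i < SECTOR_WIDTH_TOY; i += blockDim.x)
    {
      atomicAdd(&grid[sec * SECTOR_WIDTH_TOY + i].x, tile[i].x);
      atomicAdd(&grid[sec * SECTOR_WIDTH_TOY + i].y, tile[i].y);
    }
    __syncthreads();
  }
}

int main()
{
  const int nSectors = 4, nCells = nSectors * SECTOR_WIDTH_TOY;
  std::vector<float2> h_samples(nCells, make_float2(1.0f, -1.0f));
  std::vector<int> h_start(nSectors + 1);
  for (int s = 0; s <= nSectors; s++)
    h_start[s] = s * SECTOR_WIDTH_TOY;

  float2 *d_samples, *d_grid;
  int* d_start;
  hipMalloc((void**)&d_samples, nCells * sizeof(float2));
  hipMalloc((void**)&d_grid, nCells * sizeof(float2));
  hipMalloc((void**)&d_start, (nSectors + 1) * sizeof(int));
  hipMemcpy(d_samples, h_samples.data(), nCells * sizeof(float2), hipMemcpyHostToDevice);
  hipMemcpy(d_start, h_start.data(), (nSectors + 1) * sizeof(int), hipMemcpyHostToDevice);
  hipMemset(d_grid, 0, nCells * sizeof(float2));

  hipLaunchKernelGGL(toySectorGrid, dim3(2), dim3(64), 0, 0, d_samples, d_start, d_grid, nSectors);

  std::vector<float2> h_grid(nCells);
  hipMemcpy(h_grid.data(), d_grid, nCells * sizeof(float2), hipMemcpyDeviceToHost);
  printf("grid[0] = (%f, %f), expected (1, -1)\n", h_grid[0].x, h_grid[0].y);

  hipFree(d_samples);
  hipFree(d_grid);
  hipFree(d_start);
  return 0;
}
#endif
// Design note: the shared-memory staging above is the same choice made in
// convolutionFunction2/2D: most atomic traffic stays in fast shared memory and
// only one atomicAdd per written grid cell has to reach global memory.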
da4bf3f7b794b9d386d027cc2a4d5c2b9f425eb2.cu
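// CUDA counterpart of the hipified kernels above: atomic gpuNUFFT convolution
// kernels for the adjoint (NUFFT^H) gridding step and the forward (NUFFT)
// interpolation step, including shared-memory cached ("...Kernel2") and
// load-balanced ("balanced...") variants of both directions.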
#ifndef ATOMIC_GPUNUFFT_KERNELS_H #define ATOMIC_GPUNUFFT_KERNELS_H #include "gpuNUFFT_kernels.hpp" #include "../std_gpuNUFFT_kernels.cu" #include "cuda_utils.cuh" // convolve every data point on grid position -> controlled by threadIdx.x .y and .z // shared data holds grid values as software managed cache __global__ void convolutionKernel3( DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers, int N, int CACHE_SIZE ) { extern __shared__ DType shared_data[];//externally managed shared memory DType2* data_cache =(DType2*)&shared_data[0]; DType3* coord_cache =(DType3*) &shared_data[2*CACHE_SIZE]; __shared__ int sec; sec = blockIdx.x; //start convolution while (sec < N) { int ind, k, i, j; int imin, imax,jmin,jmax,kmin,kmax; DType dx_sqr, dy_sqr, dz_sqr, val, ix, jy, kz; __shared__ IndType3 center; center.x = sector_centers[sec * 3]; center.y = sector_centers[sec * 3 + 1]; center.z = sector_centers[sec * 3 + 2]; //Grid Points over threads, start position of data points of this sector __shared__ int data_off; data_off = sectors[sec]; int data_max = sectors[sec+1]; //init shared memory data cache int c_ind = threadIdx.x + blockDim.x * threadIdx.y + blockDim.x * blockDim.y* threadIdx.z; //load data points into shared mem while (c_ind < CACHE_SIZE && (data_off + c_ind) < data_max) { data_cache[c_ind] = data[data_off + c_ind]; coord_cache[c_ind].x = crds[c_ind + data_off]; coord_cache[c_ind].y = crds[c_ind + data_off+GI.data_count]; coord_cache[c_ind].z = crds[c_ind + data_off+2*GI.data_count]; c_ind += blockDim.x * blockDim.y*blockDim.z; } __syncthreads(); __shared__ int sector_ind_offset; sector_ind_offset = computeXYZ2Lin(center.x - GI.sector_offset,center.y - GI.sector_offset,center.z - GI.sector_offset,GI.gridDims); c_ind = 0; __shared__ int reload_count; reload_count = 0; while (data_off+c_ind < data_max) { if (c_ind >= (reload_count+1)*CACHE_SIZE) { __syncthreads(); /* int reload_ind = threadIdx.x + blockDim.x * threadIdx.y + blockDim.x * blockDim.y* threadIdx.z; //load next data points into shared mem while (reload_ind < CACHE_SIZE && (data_off + c_ind + reload_ind) < data_max) { data_cache[reload_ind] = data[data_off + c_ind + reload_ind]; coord_cache[reload_ind].x = crds[c_ind + data_off + reload_ind]; coord_cache[reload_ind].y = crds[c_ind + data_off+ reload_ind + GI.data_count]; coord_cache[reload_ind].z = crds[c_ind + data_off+ reload_ind + 2*GI.data_count]; reload_ind += blockDim.x * blockDim.y * blockDim.z; }*/ reload_count++; } __syncthreads(); // DType3 data_point; //datapoint shared in every thread DType3 data_point = coord_cache[c_ind - reload_count*CACHE_SIZE]; // set the boundaries of final dataset for gpuNUFFT this point ix = mapKSpaceToGrid(data_point.x,GI.gridDims.x,center.x,GI.sector_offset); set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius); jy = mapKSpaceToGrid(data_point.y,GI.gridDims.y,center.y,GI.sector_offset); set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius); kz = mapKSpaceToGrid(data_point.z,GI.gridDims.z,center.z,GI.sector_offset); set_minmax(&kz, &kmin, &kmax, GI.sector_pad_max, GI.kernel_radius); // grid this point onto the neighboring cartesian points for (k=threadIdx.z;k<=kmax; k += blockDim.z) { j=threadIdx.y; i=threadIdx.x; if ((k<=kmax && k>=kmin) && (j<=jmax && j>=jmin) && (i<=imax && i>=imin)) { kz = mapGridToKSpace(k,GI.gridDims.z,center.z,GI.sector_offset); dz_sqr = (kz - data_point.z)*GI.aniso_z_scale; dz_sqr *= dz_sqr; if (dz_sqr < GI.radiusSquared) { jy = 
mapGridToKSpace(j,GI.gridDims.y,center.y,GI.sector_offset); dy_sqr = (jy - data_point.y) * GI.aniso_y_scale; dy_sqr *= dy_sqr; if (dy_sqr < GI.radiusSquared) { ix = mapGridToKSpace(i,GI.gridDims.x,center.x,GI.sector_offset); dx_sqr = (ix - data_point.x)*GI.aniso_x_scale; dx_sqr *= dx_sqr; if (dx_sqr < GI.radiusSquared) { //get kernel value //Calculate Separable Filters val = KERNEL[(int) round(dz_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dy_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dx_sqr * GI.dist_multiplier)]; if (isOutlier(i,j,k,center.x,center.y,center.z,GI.gridDims,GI.sector_offset)) //calculate opposite index ind = computeXYZ2Lin(calculateOppositeIndex(i,center.x,GI.gridDims.x,GI.sector_offset), calculateOppositeIndex(j,center.y,GI.gridDims.y,GI.sector_offset), calculateOppositeIndex(k,center.z,GI.gridDims.z,GI.sector_offset), GI.gridDims); else ind = sector_ind_offset + computeXYZ2Lin(i,j,k,GI.gridDims);//index in output grid atomicAdd(&(gdata[ind].x),val * data_cache[c_ind-reload_count*CACHE_SIZE].x);//Re atomicAdd(&(gdata[ind].y),val * data_cache[c_ind-reload_count*CACHE_SIZE].y);//Im } // kernel bounds check x, spherical support } // kernel bounds check y, spherical support } //kernel bounds check z } //x,y,z bounds check }//for loop over z entries c_ind++; } //grid points per sector __syncthreads(); sec = sec + gridDim.x; }//sec < sector_count } __device__ void convolutionFunction4(int sec, int sec_max, int sec_offset, DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers) { int ind, k, i, j; __shared__ int max_dim, imin, imax,jmin,jmax,kmin,kmax; DType dx_sqr, dy_sqr, dz_sqr, val, ix, jy, kz; __shared__ IndType3 center; center.x = sector_centers[sec * 3]; center.y = sector_centers[sec * 3 + 1]; center.z = sector_centers[sec * 3 + 2]; __shared__ int sector_ind_offset; sector_ind_offset = computeXYZ2Lin(center.x - GI.sector_offset,center.y - GI.sector_offset,center.z - GI.sector_offset,GI.gridDims); __syncthreads(); //Grid Points over threads int data_cnt; data_cnt = sectors[sec]+sec_offset; max_dim = GI.sector_pad_max; int s_ind = getIndex(threadIdx.x,threadIdx.y,threadIdx.z,GI.sector_pad_width); while (data_cnt < sec_max) { __syncthreads(); __shared__ DType3 data_point; //datapoint shared in every thread data_point.x = crds[data_cnt]; data_point.y = crds[data_cnt +GI.data_count]; data_point.z = crds[data_cnt +2*GI.data_count]; __shared__ DType2 s_data; s_data.x = data[data_cnt].x; s_data.y = data[data_cnt].y; __syncthreads(); // set the boundaries of final dataset for gpuNUFFT this point ix = mapKSpaceToGrid(data_point.x,GI.gridDims.x,center.x,GI.sector_offset); set_minmax(&ix, &imin, &imax, max_dim, GI.kernel_radius); jy = mapKSpaceToGrid(data_point.y,GI.gridDims.y,center.y,GI.sector_offset); set_minmax(&jy, &jmin, &jmax, max_dim, GI.kernel_radius); kz = mapKSpaceToGrid(data_point.z,GI.gridDims.z,center.z,GI.sector_offset); set_minmax(&kz, &kmin, &kmax, max_dim, GI.kernel_radius); // grid this point onto the neighboring cartesian points for (k=threadIdx.z;k<=kmax; k += blockDim.z) { if (k<=kmax && k>=kmin) { kz = mapGridToKSpace(k,GI.gridDims.z,center.z,GI.sector_offset); // scale distance in z direction with x,y dimension dz_sqr = (kz - data_point.z)*GI.aniso_z_scale; dz_sqr *= dz_sqr; if (dz_sqr < GI.radiusSquared) { j=threadIdx.y; if (j<=jmax && j>=jmin) { jy = mapGridToKSpace(j,GI.gridDims.y,center.y,GI.sector_offset); dy_sqr = (jy - data_point.y) * GI.aniso_y_scale; dy_sqr *= dy_sqr; if (dy_sqr < GI.radiusSquared) { 
i=threadIdx.x; if (i<=imax && i>=imin) { ix = mapGridToKSpace(i,GI.gridDims.x,center.x,GI.sector_offset); dx_sqr = (ix - data_point.x)*GI.aniso_x_scale; dx_sqr *= dx_sqr; if (dx_sqr < GI.radiusSquared) { //get kernel value //Calculate Separable Filters val = KERNEL[(int) round(dz_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dy_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dx_sqr * GI.dist_multiplier)]; //each thread writes one position from shared mem to global mem if (isOutlier(i,j,k,center.x,center.y,center.z,GI.gridDims,GI.sector_offset)) //calculate opposite index ind = computeXYZ2Lin(calculateOppositeIndex(i,center.x,GI.gridDims.x,GI.sector_offset), calculateOppositeIndex(j,center.y,GI.gridDims.y,GI.sector_offset), calculateOppositeIndex(k,center.z,GI.gridDims.z,GI.sector_offset), GI.gridDims); else ind = sector_ind_offset + computeXYZ2Lin(i,j,k,GI.gridDims);//index in output grid atomicAdd(&(gdata[ind].x),val * s_data.x);//Re atomicAdd(&(gdata[ind].y),val * s_data.y);//Im } // kernel bounds check x, spherical support } // x }// kernel bounds check y, spherical support } // y } //kernel bounds check z } // z }//k, for loop over z entries __syncthreads(); data_cnt++; } //grid points per sector } __global__ void convolutionKernel4(DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers, int N ) { int sec; sec = blockIdx.x; //start convolution while (sec < N) { convolutionFunction4(sec,sectors[sec+1],0,data,crds,gdata,sectors,sector_centers); __syncthreads(); sec = sec + gridDim.x; }//sec < sector_count } __global__ void balancedConvolutionKernel4(DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType2* sector_processing_order, IndType* sector_centers, int N ) { int sec; int sec_cnt = blockIdx.x; //start convolution while (sec_cnt < N) { sec = sector_processing_order[sec_cnt].x; convolutionFunction4(sec,min(sectors[sec+1],sectors[sec]+sector_processing_order[sec_cnt].y+MAXIMUM_PAYLOAD),sector_processing_order[sec_cnt].y,data,crds,gdata,sectors,sector_centers); __syncthreads(); sec_cnt = sec_cnt + gridDim.x; }//sec < sector_count } // ---------------------------------------------------------------------------- // convolutionKernel: NUFFT^H kernel // // Performs the gpuNUFFT step by convolution of sample points with // interpolation function and resampling onto grid. Basic concept based on Zwart // et al. 
// // parameters: // * data : complex input sample points // * crds : coordinates of data points (x,y,z) // * gdata : output grid data // * sectors : mapping of sample indices according to each sector // * sector_centers : coordinates (x,y,z) of sector centers // * temp_gdata : temporary grid data // * N : number of threads __device__ void convolutionFunction2(int* sec, int sec_max, int sec_offset, DType2* sdata, DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers) { //init shared memory for (int s_ind=threadIdx.x;s_ind<GI.sector_dim; s_ind+= blockDim.x) { sdata[s_ind].x = 0.0f;//Re sdata[s_ind].y = 0.0f;//Im } __syncthreads(); //start convolution int ind, k, i, j, x, y, z; int imin, imax,jmin,jmax,kmin,kmax; DType dx_sqr, dy_sqr, dz_sqr, val, ix, jy, kz; __shared__ IndType3 center; center.x = sector_centers[sec[threadIdx.x] * 3]; center.y = sector_centers[sec[threadIdx.x] * 3 + 1]; center.z = sector_centers[sec[threadIdx.x] * 3 + 2]; //Grid Points over Threads int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x + sec_offset; //loop over all data points of the current sector, and check if grid position lies inside //affected region, if so, add data point weighted to grid position value while (data_cnt < sec_max) { DType3 data_point; //datapoint per thread data_point.x = crds[data_cnt]; data_point.y = crds[data_cnt +GI.data_count]; data_point.z = crds[data_cnt +2*GI.data_count]; // set the boundaries of final dataset for gpuNUFFT this point ix = mapKSpaceToGrid(data_point.x,GI.gridDims.x,center.x,GI.sector_offset); set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius); jy = mapKSpaceToGrid(data_point.y,GI.gridDims.y,center.y,GI.sector_offset); set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius); kz = mapKSpaceToGrid(data_point.z,GI.gridDims.z,center.z,GI.sector_offset); set_minmax(&kz, &kmin, &kmax, GI.sector_pad_max, GI.kernel_radius); // grid this point onto its cartesian points neighbors k =kmin; while (k<=kmax && k>=kmin) { kz = mapGridToKSpace(k,GI.gridDims.z,center.z,GI.sector_offset); dz_sqr = (kz - data_point.z)*GI.aniso_z_scale; dz_sqr *= dz_sqr; if (dz_sqr < GI.radiusSquared) { j=jmin; while (j<=jmax && j>=jmin) { jy = mapGridToKSpace(j,GI.gridDims.y,center.y,GI.sector_offset); dy_sqr = (jy - data_point.y) * GI.aniso_y_scale; dy_sqr *= dy_sqr; if (dy_sqr < GI.radiusSquared) { i= imin; while (i<=imax && i>=imin) { ix = mapGridToKSpace(i,GI.gridDims.x,center.x,GI.sector_offset); dx_sqr = (ix - data_point.x)*GI.aniso_x_scale; dx_sqr *= dx_sqr; if (dx_sqr < GI.radiusSquared) { //get kernel value //Calculate Separable Filters val = KERNEL[(int) round(dz_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dy_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dx_sqr * GI.dist_multiplier)]; ind = getIndex(i,j,k,GI.sector_pad_width); // multiply data by current kernel val // grid complex or scalar atomicAdd(&(sdata[ind].x),val * data[data_cnt].x); atomicAdd(&(sdata[ind].y),val * data[data_cnt].y); } // kernel bounds check x, spherical support i++; } // x } // kernel bounds check y, spherical support j++; } // y } //kernel bounds check z k++; } // z data_cnt = data_cnt + blockDim.x; } //grid points per sector //write shared data to output grid __syncthreads(); __shared__ int sector_ind_offset; sector_ind_offset = computeXYZ2Lin(center.x - GI.sector_offset,center.y - GI.sector_offset,center.z - GI.sector_offset,GI.gridDims); //each thread writes one position from shared mem to global mem for (int 
s_ind=threadIdx.x;s_ind<GI.sector_dim; s_ind += blockDim.x) { getCoordsFromIndex(s_ind,&x,&y,&z,GI.sector_pad_width); if (isOutlier(x,y,z,center.x,center.y,center.z,GI.gridDims,GI.sector_offset)) //calculate opposite index ind = computeXYZ2Lin(calculateOppositeIndex(x,center.x,GI.gridDims.x,GI.sector_offset), calculateOppositeIndex(y,center.y,GI.gridDims.y,GI.sector_offset), calculateOppositeIndex(z,center.z,GI.gridDims.z,GI.sector_offset), GI.gridDims); else ind = sector_ind_offset + computeXYZ2Lin(x,y,z,GI.gridDims);//index in output grid atomicAdd(&(gdata[ind].x),sdata[s_ind].x);//Re atomicAdd(&(gdata[ind].y),sdata[s_ind].y);//Im } } __global__ void convolutionKernel2(DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers, int N ) { extern __shared__ DType2 sdata[];//externally managed shared memory __shared__ int sec[THREAD_BLOCK_SIZE]; sec[threadIdx.x] = blockIdx.x; while (sec[threadIdx.x] < N) { __shared__ int data_max; data_max = sectors[sec[threadIdx.x]+1]; convolutionFunction2(sec,data_max,0,sdata,data,crds,gdata,sectors,sector_centers); __syncthreads(); sec[threadIdx.x] = sec[threadIdx.x]+ gridDim.x; }//sec < sector_count } __global__ void balancedConvolutionKernel2(DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType2* sector_processing_order, IndType* sector_centers, int N ) { extern __shared__ DType2 sdata[];//externally managed shared memory int sec_cnt = blockIdx.x; __shared__ int sec[THREAD_BLOCK_SIZE]; while (sec_cnt < N) { sec[threadIdx.x] = sector_processing_order[sec_cnt].x; __shared__ int data_max; data_max = min(sectors[sec[threadIdx.x]+1],sectors[sec[threadIdx.x]] + sector_processing_order[sec_cnt].y+MAXIMUM_PAYLOAD); convolutionFunction2(sec,data_max,sector_processing_order[sec_cnt].y,sdata,data,crds,gdata,sectors,sector_centers); __syncthreads(); sec_cnt = sec_cnt + gridDim.x; }//sec < sector_count } // ---------------------------------------------------------------------------- // convolutionKernel: NUFFT^H kernel // // Performs the gpuNUFFT step by convolution of sample points with // interpolation function and resampling onto grid. Basic concept based on Zwart // et al. 
// // parameters: // * data : complex input sample points // * crds : coordinates of data points (x,y,z) // * gdata : output grid data // * sectors : mapping of sample indices according to each sector // * sector_centers : coordinates (x,y,z) of sector centers // * temp_gdata : temporary grid data // * N : number of threads __device__ void convolutionFunction2D(DType2* sdata,int* sec, int sec_max, int sec_offset, DType2* data, DType* crds, CufftType* gdata,IndType* sectors, IndType* sector_centers) { //init shared memory for (int s_ind=threadIdx.x;s_ind<GI.sector_dim; s_ind+= blockDim.x) { for (int c = threadIdx.z; c < GI.n_coils_cc; c+= blockDim.z) { sdata[s_ind + c*GI.sector_dim].x = 0.0f;//Re sdata[s_ind + c*GI.sector_dim].y = 0.0f;//Im } } __syncthreads(); //start convolution int ind, i, j, x, y; int imin, imax,jmin,jmax; DType dx_sqr, dy_sqr, val, ix, jy; __shared__ IndType2 center; center.x = sector_centers[sec[threadIdx.x] * 2]; center.y = sector_centers[sec[threadIdx.x] * 2 + 1]; //Grid Points over Threads int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x + sec_offset; //loop over all data points of the current sector, and check if grid position lies inside //affected region, if so, add data point weighted to grid position value while (data_cnt < sec_max) { DType2 data_point; //datapoint per thread data_point.x = crds[data_cnt]; data_point.y = crds[data_cnt +GI.data_count]; // set the boundaries of final dataset for gpuNUFFT this point ix = mapKSpaceToGrid(data_point.x,GI.gridDims.x,center.x,GI.sector_offset); set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius); jy = mapKSpaceToGrid(data_point.y,GI.gridDims.y,center.y,GI.sector_offset); set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius); // grid this point onto its cartesian points neighbors j=jmin; while (j<=jmax && j>=jmin) { jy = mapGridToKSpace(j,GI.gridDims.y,center.y,GI.sector_offset); dy_sqr = (jy - data_point.y) * GI.aniso_y_scale; dy_sqr *= dy_sqr; if (dy_sqr < GI.radiusSquared) { i= imin; while (i<=imax && i>=imin) { ix = mapGridToKSpace(i,GI.gridDims.x,center.x,GI.sector_offset); dx_sqr = (ix - data_point.x)*GI.aniso_x_scale; dx_sqr *= dx_sqr; if (dx_sqr < GI.radiusSquared) { //get kernel value //Calculate Separable Filters val = KERNEL[(int) round(dy_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dx_sqr * GI.dist_multiplier)]; ind = getIndex2D(i,j,GI.sector_pad_width); // multiply data by current kernel val // grid complex or scalar for (int c = threadIdx.z; c < GI.n_coils_cc; c+= blockDim.z) { atomicAdd(&(sdata[ind + c * GI.sector_dim].x),val * data[data_cnt + c * GI.data_count].x); atomicAdd(&(sdata[ind + c * GI.sector_dim].y),val * data[data_cnt + c * GI.data_count].y); } } // kernel bounds check x, spherical support i++; } // x } // kernel bounds check y, spherical support j++; } // y data_cnt = data_cnt + blockDim.x; } //grid points per sector //write shared data to output grid __syncthreads(); //int sector_ind_offset = sec * GI.sector_dim; __shared__ int sector_ind_offset; sector_ind_offset = computeXY2Lin(center.x - GI.sector_offset,center.y - GI.sector_offset,GI.gridDims); //each thread writes one position from shared mem to global mem for (int s_ind=threadIdx.x;s_ind<GI.sector_dim; s_ind += blockDim.x) { getCoordsFromIndex2D(s_ind,&x,&y,GI.sector_pad_width); if (isOutlier2D(x,y,center.x,center.y,GI.gridDims,GI.sector_offset)) //calculate opposite index ind = computeXY2Lin(calculateOppositeIndex(x,center.x,GI.gridDims.x,GI.sector_offset), 
calculateOppositeIndex(y,center.y,GI.gridDims.y,GI.sector_offset), GI.gridDims); else ind = sector_ind_offset + computeXY2Lin(x,y,GI.gridDims);//index in output grid for (int c = threadIdx.z; c < GI.n_coils_cc; c+= blockDim.z) { atomicAdd(&(gdata[ind + c * GI.gridDims_count].x),sdata[s_ind + c * GI.sector_dim].x);//Re atomicAdd(&(gdata[ind + c * GI.gridDims_count].y),sdata[s_ind + c * GI.sector_dim].y);//Im sdata[s_ind + c * GI.sector_dim].x = 0.0f;//Re sdata[s_ind + c * GI.sector_dim].y = 0.0f;//Im } } __syncthreads(); } __global__ void convolutionKernel2D(DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers, int N ) { extern __shared__ DType2 sdata[];//externally managed shared memory __shared__ int sec[THREAD_BLOCK_SIZE]; sec[threadIdx.x] = blockIdx.x; while (sec[threadIdx.x] < N) { __shared__ int data_max; data_max = sectors[sec[threadIdx.x]+1]; convolutionFunction2D(sdata,sec,data_max,0,data,crds,gdata,sectors,sector_centers); __syncthreads(); sec[threadIdx.x] = sec[threadIdx.x]+ gridDim.x; }//sec < sector_count } __global__ void balancedConvolutionKernel2D(DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType2* sector_processing_order, IndType* sector_centers, int N ) { extern __shared__ DType2 sdata[];//externally managed shared memory int sec_cnt = blockIdx.x; __shared__ int sec[THREAD_BLOCK_SIZE]; while (sec_cnt < N) { sec[threadIdx.x] = sector_processing_order[sec_cnt].x; __shared__ int data_max; data_max = min(sectors[sec[threadIdx.x]+1],sectors[sec[threadIdx.x]] + sector_processing_order[sec_cnt].y + MAXIMUM_PAYLOAD); convolutionFunction2D(sdata,sec,data_max,sector_processing_order[sec_cnt].y,data,crds,gdata,sectors,sector_centers); __syncthreads(); sec_cnt = sec_cnt+ gridDim.x; }//sec < sector_count } // ---------------------------------------------------------------------------- // convolutionKernel: NUFFT^H kernel // // Performs the gpuNUFFT step by convolution of sample points with // interpolation function and resampling onto grid. Basic concept based on Zwart // et al. 
// // parameters: // * data : complex input sample points // * crds : coordinates of data points (x,y,z) // * gdata : output grid data // * sectors : mapping of sample indices according to each sector // * sector_centers : coordinates (x,y,z) of sector centers // * temp_gdata : temporary grid data // * N : number of threads __global__ void convolutionKernel( DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers, int N ) { int sec= blockIdx.x; //start convolution while (sec < N) { int ind, imin, imax, jmin, jmax,kmin,kmax, k, i, j; DType dx_sqr, dy_sqr, dz_sqr, val, ix, jy, kz; __shared__ IndType3 center; center.x = sector_centers[sec * 3]; center.y = sector_centers[sec * 3 + 1]; center.z = sector_centers[sec * 3 + 2]; //Grid Points over Threads int data_cnt = sectors[sec] + threadIdx.x; int data_max = sectors[sec+1]; __shared__ int sector_ind_offset; sector_ind_offset = computeXYZ2Lin(center.x - GI.sector_offset,center.y - GI.sector_offset,center.z - GI.sector_offset,GI.gridDims); while (data_cnt < data_max) { DType3 data_point; //datapoint per thread data_point.x = crds[data_cnt]; data_point.y = crds[data_cnt +GI.data_count]; data_point.z = crds[data_cnt +2*GI.data_count]; // set the boundaries of final dataset for gpuNUFFT this point ix = mapKSpaceToGrid(data_point.x,GI.gridDims.x,center.x,GI.sector_offset); set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius); jy = mapKSpaceToGrid(data_point.y,GI.gridDims.y,center.y,GI.sector_offset); set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius); kz = mapKSpaceToGrid(data_point.z,GI.gridDims.z,center.z,GI.sector_offset); set_minmax(&kz, &kmin, &kmax, GI.sector_pad_max, GI.kernel_radius); // convolve neighboring cartesian points to this data point k = kmin; while (k<=kmax && k>=kmin) { kz = mapGridToKSpace(k,GI.gridDims.z,center.z,GI.sector_offset); dz_sqr = (kz - data_point.z)*GI.aniso_z_scale; dz_sqr *= dz_sqr; if (dz_sqr < GI.radiusSquared) { j=jmin; while (j<=jmax && j>=jmin) { jy = mapGridToKSpace(j,GI.gridDims.y,center.y,GI.sector_offset); dy_sqr = (jy - data_point.y) * GI.aniso_y_scale; dy_sqr *= dy_sqr; if (dy_sqr < GI.radiusSquared) { i=imin; while (i<=imax && i>=imin) { ix = mapGridToKSpace(i,GI.gridDims.x,center.x,GI.sector_offset); dx_sqr = (ix - data_point.x)*GI.aniso_x_scale; dx_sqr *= dx_sqr; if (dx_sqr < GI.radiusSquared) { // get kernel value //Berechnung mit Separable Filters val = KERNEL[(int) round(dz_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dy_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dx_sqr * GI.dist_multiplier)]; if (isOutlier(i,j,k,center.x,center.y,center.z,GI.gridDims,GI.sector_offset)) //calculate opposite index ind = computeXYZ2Lin(calculateOppositeIndex(i,center.x,GI.gridDims.x,GI.sector_offset), calculateOppositeIndex(j,center.y,GI.gridDims.y,GI.sector_offset), calculateOppositeIndex(k,center.z,GI.gridDims.z,GI.sector_offset), GI.gridDims); else ind = sector_ind_offset + computeXYZ2Lin(i,j,k,GI.gridDims);//index in output grid atomicAdd(&(gdata[ind].x),val * data[data_cnt].x);//Re atomicAdd(&(gdata[ind].y),val * data[data_cnt].y);//Im }// kernel bounds check x, spherical support i++; } // x loop } // kernel bounds check y, spherical support j++; } // y loop } //kernel bounds check z k++; } // z loop data_cnt = data_cnt + blockDim.x; } //data points per sector __syncthreads(); sec = sec + gridDim.x; } //sector check } void performConvolution( DType2* data_d, DType* crds_d, CufftType* gdata_d, DType* kernel_d, IndType* sectors_d, IndType* 
sector_centers_d, gpuNUFFT::GpuNUFFTInfo* gi_host ) { #define CONVKERNEL2 #ifdef CONVKERNEL long shared_mem_size = (gi_host->sector_dim)*sizeof(DType2); dim3 block_dim(THREAD_BLOCK_SIZE); dim3 grid_dim(getOptimalGridDim(gi_host->sector_count,THREAD_BLOCK_SIZE)); if (gi_host->is2Dprocessing) convolutionKernel2D<<<grid_dim,block_dim,shared_mem_size>>>(data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count); else convolutionKernel<<<grid_dim,block_dim>>>(data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count); #else #ifdef CONVKERNEL2 long shared_mem_size = (gi_host->sector_dim) * sizeof(DType2) * gi_host->n_coils_cc; int thread_size = THREAD_BLOCK_SIZE; dim3 block_dim(thread_size); dim3 grid_dim(getOptimalGridDim(gi_host->sector_count,1)); if (DEBUG) { printf("adjoint convolution requires %ld bytes of shared memory!\n",shared_mem_size); printf("grid dim %u, block dim %u \n",grid_dim.x, block_dim.x); } if (gi_host->is2Dprocessing) { dim3 block_dim(64, 1, DEFAULT_VALUE(gi_host->n_coils_cc > 4 ? 4 : gi_host->n_coils_cc)); convolutionKernel2D<<<grid_dim,block_dim,shared_mem_size>>>(data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count); } else { convolutionKernel2<<<grid_dim,block_dim,shared_mem_size>>>(data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count); } #else #ifdef CONVKERNEL4 // TODO tune param z dim // defines size of total shared mem used dim3 block_dim(gi_host->sector_pad_width,gi_host->sector_pad_width,3); long shared_mem_size = block_dim.x*block_dim.y*block_dim.z*sizeof(DType2); dim3 grid_dim(getOptimalGridDim(gi_host->sector_count,1)); if (DEBUG) { printf("adjoint convolution requires %ld bytes of shared memory!\n",shared_mem_size); printf("grid dim (%u,%u,%u), block dim (%u,%u,%u) \n",grid_dim.x,grid_dim.y,grid_dim.z, block_dim.x,block_dim.y,block_dim.z); } if (gi_host->is2Dprocessing) { shared_mem_size = (gi_host->sector_dim)*sizeof(DType2); int thread_size =THREAD_BLOCK_SIZE; dim3 block_dim(thread_size); dim3 grid_dim(getOptimalGridDim(gi_host->sector_count,1)); convolutionKernel2D<<<grid_dim,block_dim,shared_mem_size>>>(data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count); } else convolutionKernel4<<<grid_dim,block_dim>>>(data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count); #else long cache_size = 176; long shared_mem_size = (2*cache_size + 3*cache_size)*sizeof(DType); dim3 block_dim(gi_host->sector_pad_width,gi_host->sector_pad_width,4); dim3 grid_dim(gi_host->sector_count); convolutionKernel3<<<grid_dim,block_dim,shared_mem_size>>>(data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count,cache_size); #endif #endif #endif if (DEBUG) printf("...finished with: %s\n", cudaGetErrorString(cudaGetLastError())); } void performConvolution( DType2* data_d, DType* crds_d, CufftType* gdata_d, DType* kernel_d, IndType* sectors_d, IndType2* sector_processing_order_d, IndType* sector_centers_d, gpuNUFFT::GpuNUFFTInfo* gi_host ) { long shared_mem_size = (gi_host->sector_dim)*sizeof(DType2) * gi_host->n_coils_cc; int thread_size =THREAD_BLOCK_SIZE; dim3 block_dim(thread_size); dim3 grid_dim(getOptimalGridDim(gi_host->sector_count,1)); if (DEBUG) { printf("adjoint convolution requires %ld bytes of shared memory!\n",shared_mem_size); printf("grid dim %u, block dim %u \n",grid_dim.x, block_dim.x); } if (gi_host->is2Dprocessing) { dim3 block_dim(64, 1, DEFAULT_VALUE(gi_host->n_coils_cc > 4 ? 
4 : gi_host->n_coils_cc)); balancedConvolutionKernel2D<<<grid_dim,block_dim,shared_mem_size>>>(data_d,crds_d,gdata_d,sectors_d,sector_processing_order_d,sector_centers_d,gi_host->sectorsToProcess); } else { balancedConvolutionKernel2<<<grid_dim,block_dim,shared_mem_size>>>(data_d,crds_d,gdata_d,sectors_d,sector_processing_order_d,sector_centers_d,gi_host->sectorsToProcess); //dim3 block_dim(gi_host->sector_pad_width,gi_host->sector_pad_width,3); //dim3 grid_dim(getOptimalGridDim(gi_host->sector_count,block_dim.x*block_dim.y*block_dim.z)); //balancedConvolutionKernel4<<<grid_dim,block_dim>>>(data_d,crds_d,gdata_d,sectors_d,sector_processing_order_d,sector_centers_d,gi_host->sectorsToProcess); } if (DEBUG) printf("...finished with: %s\n", cudaGetErrorString(cudaGetLastError())); } // ---------------------------------------------------------------------------- // forwardConvolutionKernel: NUFFT kernel // // Performs the inverse gpuNUFFT step by convolution of grid points with // interpolation function and resampling onto trajectory. // // parameters: // * data : complex output sample points // * crds : coordinates of data points (x,y,z) // * gdata : input grid data // * sectors : mapping of sample indices according to each sector // * sector_centers : coordinates (x,y,z) of sector centers // * N : number of threads __global__ void forwardConvolutionKernel( CufftType* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers, int N) { extern __shared__ CufftType shared_out_data[];//externally managed shared memory __shared__ int sec[THREAD_BLOCK_SIZE]; sec[threadIdx.x]= blockIdx.x; //init shared memory shared_out_data[threadIdx.x].x = 0.0f;//Re shared_out_data[threadIdx.x].y = 0.0f;//Im __syncthreads(); //start convolution while (sec[threadIdx.x] < N) { int ind, imin, imax, jmin, jmax,kmin,kmax, k, i, j; DType dx_sqr, dy_sqr, dz_sqr, val, ix, jy, kz; __shared__ IndType3 center; center.x = sector_centers[sec[threadIdx.x] * 3]; center.y = sector_centers[sec[threadIdx.x] * 3 + 1]; center.z = sector_centers[sec[threadIdx.x] * 3 + 2]; //Grid Points over Threads int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x; __shared__ int data_max; data_max = sectors[sec[threadIdx.x]+1]; __shared__ int sector_ind_offset; sector_ind_offset = computeXYZ2Lin(center.x - GI.sector_offset,center.y - GI.sector_offset,center.z - GI.sector_offset,GI.gridDims); while (data_cnt < data_max) { DType3 data_point; //datapoint per thread data_point.x = crds[data_cnt]; data_point.y = crds[data_cnt +GI.data_count]; data_point.z = crds[data_cnt +2*GI.data_count]; // set the boundaries of final dataset for gpuNUFFT this point ix = mapKSpaceToGrid(data_point.x,GI.gridDims.x,center.x,GI.sector_offset); set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius); jy = mapKSpaceToGrid(data_point.y,GI.gridDims.y,center.y,GI.sector_offset); set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius); kz = mapKSpaceToGrid(data_point.z,GI.gridDims.z,center.z,GI.sector_offset); set_minmax(&kz, &kmin, &kmax, GI.sector_pad_max, GI.kernel_radius); // convolve neighboring cartesian points to this data point k = kmin; while (k<=kmax && k>=kmin) { kz = mapGridToKSpace(k,GI.gridDims.z,center.z,GI.sector_offset); dz_sqr = (kz - data_point.z)*GI.aniso_z_scale; dz_sqr *= dz_sqr; if (dz_sqr < GI.radiusSquared) { j=jmin; while (j<=jmax && j>=jmin) { jy = mapGridToKSpace(j,GI.gridDims.y,center.y,GI.sector_offset); dy_sqr = (jy - data_point.y) * GI.aniso_y_scale; dy_sqr *= dy_sqr; if (dy_sqr < GI.radiusSquared) { 
i=imin; while (i<=imax && i>=imin) { ix = mapGridToKSpace(i,GI.gridDims.x,center.x,GI.sector_offset); dx_sqr = (ix - data_point.x)*GI.aniso_x_scale; dx_sqr *= dx_sqr; if (dx_sqr < GI.radiusSquared) { // get kernel value // calc as separable filter val = KERNEL[(int) round(dz_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dy_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dx_sqr * GI.dist_multiplier)]; // multiply data by current kernel val // grid complex or scalar if (isOutlier(i,j,k,center.x,center.y,center.z,GI.gridDims,GI.sector_offset)) //calculate opposite index ind = computeXYZ2Lin(calculateOppositeIndex(i,center.x,GI.gridDims.x,GI.sector_offset), calculateOppositeIndex(j,center.y,GI.gridDims.y,GI.sector_offset), calculateOppositeIndex(k,center.z,GI.gridDims.z,GI.sector_offset), GI.gridDims); else ind = (sector_ind_offset + computeXYZ2Lin(i,j,k,GI.gridDims)); shared_out_data[threadIdx.x].x += gdata[ind].x * val; shared_out_data[threadIdx.x].y += gdata[ind].y * val; }// kernel bounds check x, spherical support i++; } // x loop } // kernel bounds check y, spherical support j++; } // y loop } //kernel bounds check z k++; } // z loop data[data_cnt].x = shared_out_data[threadIdx.x].x; data[data_cnt].y = shared_out_data[threadIdx.x].y; data_cnt = data_cnt + blockDim.x; shared_out_data[threadIdx.x].x = (DType)0.0;//Re shared_out_data[threadIdx.x].y = (DType)0.0;//Im } //data points per sector __syncthreads(); sec[threadIdx.x]= sec[threadIdx.x] + gridDim.x; } //sector check } __device__ void forwardConvolutionFunction2(int* sec, int sec_max, int sec_offset, DType2* sdata, CufftType* gdata_cache, DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers) { int ind, imin, imax, jmin, jmax,kmin,kmax, k, i, j; DType dx_sqr, dy_sqr, dz_sqr, val, ix, jy, kz; __shared__ IndType3 center; center.x = sector_centers[sec[threadIdx.x] * 3]; center.y = sector_centers[sec[threadIdx.x] * 3 + 1]; center.z = sector_centers[sec[threadIdx.x] * 3 + 2]; __shared__ int sector_ind_offset; sector_ind_offset = computeXYZ2Lin(center.x - GI.sector_offset,center.y - GI.sector_offset,center.z - GI.sector_offset,GI.gridDims); // init sector cache // preload sector grid data into cache for (int ind=threadIdx.x; ind<GI.sector_dim; ind+=blockDim.x) { int grid_index; getCoordsFromIndex(ind,&i,&j,&k,GI.sector_pad_width); if (isOutlier(i,j,k,center.x,center.y,center.z,GI.gridDims,GI.sector_offset)) //calculate opposite index grid_index = computeXYZ2Lin(calculateOppositeIndex(i,center.x,GI.gridDims.x,GI.sector_offset), calculateOppositeIndex(j,center.y,GI.gridDims.y,GI.sector_offset), calculateOppositeIndex(k,center.z,GI.gridDims.z,GI.sector_offset), GI.gridDims); else grid_index = (sector_ind_offset + computeXYZ2Lin(i,j,k,GI.gridDims)); gdata_cache[ind].x = gdata[grid_index].x; gdata_cache[ind].y = gdata[grid_index].y; } __syncthreads(); //Grid Points over Threads int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x + sec_offset; while (data_cnt < sec_max) { DType3 data_point; //datapoint per thread data_point.x = crds[data_cnt]; data_point.y = crds[data_cnt + GI.data_count]; data_point.z = crds[data_cnt + 2*GI.data_count]; // set the boundaries of final dataset for gpuNUFFT this point ix = mapKSpaceToGrid(data_point.x,GI.gridDims.x,center.x,GI.sector_offset); set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius); jy = mapKSpaceToGrid(data_point.y,GI.gridDims.y,center.y,GI.sector_offset); set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius); kz = 
mapKSpaceToGrid(data_point.z,GI.gridDims.z,center.z,GI.sector_offset); set_minmax(&kz, &kmin, &kmax, GI.sector_pad_max, GI.kernel_radius); // convolve neighboring cartesian points to this data point k = kmin; while (k<=kmax && k>=kmin) { kz = mapGridToKSpace(k,GI.gridDims.z,center.z,GI.sector_offset); dz_sqr = (kz - data_point.z)*GI.aniso_z_scale; dz_sqr *= dz_sqr; if (dz_sqr < GI.radiusSquared) { j=jmin; while (j<=jmax && j>=jmin) { jy = mapGridToKSpace(j,GI.gridDims.y,center.y,GI.sector_offset); dy_sqr = (jy - data_point.y) * GI.aniso_y_scale; dy_sqr *= dy_sqr; if (dy_sqr < GI.radiusSquared) { i=imin; while (i<=imax && i>=imin) { ix = mapGridToKSpace(i,GI.gridDims.x,center.x,GI.sector_offset); dx_sqr = (ix - data_point.x)*GI.aniso_x_scale; dx_sqr *= dx_sqr; if (dx_sqr < GI.radiusSquared) { // get kernel value // calc as separable filter val = KERNEL[(int) round(dz_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dy_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dx_sqr * GI.dist_multiplier)]; ind = getIndex(i,j,k,GI.sector_pad_width); sdata[threadIdx.x].x += gdata_cache[ind].x * val; sdata[threadIdx.x].y += gdata_cache[ind].y * val; }// kernel bounds check x, spherical support i++; } // x loop } // kernel bounds check y, spherical support j++; } // y loop } //kernel bounds check z k++; } // z loop atomicAdd(&(data[data_cnt].x),sdata[threadIdx.x].x); atomicAdd(&(data[data_cnt].y),sdata[threadIdx.x].y); data_cnt = data_cnt + blockDim.x; sdata[threadIdx.x].x = (DType)0.0;//Re sdata[threadIdx.x].y = (DType)0.0;//Im } //data points per sector } // cached version of above kernel __global__ void forwardConvolutionKernel2(CufftType* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers, int N) { extern __shared__ CufftType shared[];//externally managed shared memory CufftType* shared_out_data =(CufftType*) &shared[0]; CufftType* gdata_cache =(CufftType*) &shared[blockDim.x]; __shared__ int sec[THREAD_BLOCK_SIZE]; sec[threadIdx.x]= blockIdx.x; //init shared memory shared_out_data[threadIdx.x].x = 0.0f;//Re shared_out_data[threadIdx.x].y = 0.0f;//Im __syncthreads(); //start convolution while (sec[threadIdx.x] < N) { __shared__ int data_max; data_max = sectors[sec[threadIdx.x]+1]; forwardConvolutionFunction2(sec,data_max,0,shared_out_data,gdata_cache,data,crds,gdata,sectors,sector_centers); __syncthreads(); sec[threadIdx.x]= sec[threadIdx.x] + gridDim.x; } //sector check } __global__ void balancedForwardConvolutionKernel2(CufftType* data, DType* crds, CufftType* gdata, IndType* sectors, IndType2* sector_processing_order, IndType* sector_centers, int N) { extern __shared__ CufftType shared[];//externally managed shared memory CufftType* shared_out_data =(CufftType*) &shared[0]; CufftType* gdata_cache =(CufftType*) &shared[blockDim.x]; int sec_cnt = blockIdx.x; __shared__ int sec[THREAD_BLOCK_SIZE]; //init shared memory shared_out_data[threadIdx.x].x = 0.0f;//Re shared_out_data[threadIdx.x].y = 0.0f;//Im __syncthreads(); //start convolution while (sec_cnt < N) { sec[threadIdx.x] = sector_processing_order[sec_cnt].x; __shared__ int data_max; data_max = min(sectors[sec[threadIdx.x]+1],sectors[sec[threadIdx.x]] + sector_processing_order[sec_cnt].y + MAXIMUM_PAYLOAD); forwardConvolutionFunction2(sec,data_max,sector_processing_order[sec_cnt].y,shared_out_data,gdata_cache,data,crds,gdata,sectors,sector_centers); __syncthreads(); sec_cnt = sec_cnt + gridDim.x; } //sector check } __global__ void forwardConvolutionKernel2D( CufftType* data, DType* crds, CufftType* gdata, IndType* 
sectors, IndType* sector_centers, int N) { extern __shared__ CufftType shared_out_data[];//externally managed shared memory __shared__ int sec[THREAD_BLOCK_SIZE]; sec[threadIdx.x]= blockIdx.x; //init shared memory for (int c=threadIdx.z; c < GI.n_coils_cc; c+=blockDim.z) { shared_out_data[threadIdx.x + c * blockDim.x].x = 0.0f;//Re shared_out_data[threadIdx.x + c * blockDim.x].y = 0.0f;//Im } __syncthreads(); //start convolution while (sec[threadIdx.x] < N) { int ind, imin, imax, jmin, jmax, i, j; DType dx_sqr, dy_sqr, val, ix, jy; __shared__ IndType2 center; center.x = sector_centers[sec[threadIdx.x] * 2]; center.y = sector_centers[sec[threadIdx.x] * 2 + 1]; //Grid Points over Threads int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x; __shared__ int data_max; data_max = sectors[sec[threadIdx.x]+1]; __shared__ int sector_ind_offset; sector_ind_offset = computeXY2Lin(center.x - GI.sector_offset,center.y - GI.sector_offset,GI.gridDims); while (data_cnt < data_max) { DType2 data_point; //datapoint per thread data_point.x = crds[data_cnt]; data_point.y = crds[data_cnt +GI.data_count]; // set the boundaries of final dataset for gpuNUFFT this point ix = mapKSpaceToGrid(data_point.x,GI.gridDims.x,center.x,GI.sector_offset); set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius); jy = mapKSpaceToGrid(data_point.y,GI.gridDims.y,center.y,GI.sector_offset); set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius); // convolve neighboring cartesian points to this data point j=jmin; while (j<=jmax && j>=jmin) { jy = mapGridToKSpace(j,GI.gridDims.y,center.y,GI.sector_offset); dy_sqr = (jy - data_point.y) * GI.aniso_y_scale; dy_sqr *= dy_sqr; if (dy_sqr < GI.radiusSquared) { i=imin; while (i<=imax && i>=imin) { ix = mapGridToKSpace(i,GI.gridDims.x,center.x,GI.sector_offset); dx_sqr = (ix - data_point.x)*GI.aniso_x_scale; dx_sqr *= dx_sqr; if (dx_sqr < GI.radiusSquared) { // get kernel value // calc as separable filter val = KERNEL[(int) round(dy_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dx_sqr * GI.dist_multiplier)]; // multiply data by current kernel val // grid complex or scalar if (isOutlier2D(i,j,center.x,center.y,GI.gridDims,GI.sector_offset)) //calculate opposite index ind = computeXY2Lin(calculateOppositeIndex(i,center.x,GI.gridDims.x,GI.sector_offset), calculateOppositeIndex(j,center.y,GI.gridDims.y,GI.sector_offset), GI.gridDims); else ind = (sector_ind_offset + computeXY2Lin(i,j,GI.gridDims)); for (int c=threadIdx.z; c < GI.n_coils_cc; c+=blockDim.z) { shared_out_data[threadIdx.x + c * blockDim.x].x += gdata[ind+ c*GI.gridDims_count].x * val; shared_out_data[threadIdx.x + c * blockDim.x].y += gdata[ind+ c*GI.gridDims_count].y * val; } }// kernel bounds check x, spherical support i++; } // x loop } // kernel bounds check y, spherical support j++; } // y loop for (int c=threadIdx.z; c < GI.n_coils_cc; c+=blockDim.z) { data[data_cnt + c*GI.data_count].x = shared_out_data[threadIdx.x + c * blockDim.x].x; data[data_cnt + c*GI.data_count].y = shared_out_data[threadIdx.x + c * blockDim.x].y; shared_out_data[threadIdx.x + c*blockDim.x].x = (DType)0.0;//Re shared_out_data[threadIdx.x + c*blockDim.x].y = (DType)0.0;//Im } data_cnt = data_cnt + blockDim.x; } //data points per sector __syncthreads(); sec[threadIdx.x]= sec[threadIdx.x] + gridDim.x; } //sector check } __device__ void forwardConvolutionFunction2D(int* sec, int sec_max, int sec_offset, DType2* sdata, CufftType* gdata_cache, DType2* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers) 
{ int ind, imin, imax, jmin, jmax, i, j; DType dx_sqr, dy_sqr, val, ix, jy; __shared__ IndType2 center; center.x = sector_centers[sec[threadIdx.x] * 2]; center.y = sector_centers[sec[threadIdx.x] * 2 + 1]; __shared__ int sector_ind_offset; sector_ind_offset = computeXY2Lin(center.x - GI.sector_offset,center.y - GI.sector_offset,GI.gridDims); // init sector cache // preload sector grid data into cache for (int ind=threadIdx.x; ind<GI.sector_dim; ind+=blockDim.x) { int grid_index; getCoordsFromIndex2D(ind,&i,&j,GI.sector_pad_width); // multiply data by current kernel val // grid complex or scalar if (isOutlier2D(i,j,center.x,center.y,GI.gridDims.x,GI.sector_offset)) //calculate opposite index grid_index = getIndex2D(calculateOppositeIndex(i,center.x,GI.gridDims.x,GI.sector_offset), calculateOppositeIndex(j,center.y,GI.gridDims.y,GI.sector_offset), GI.gridDims.x); else grid_index = (sector_ind_offset + getIndex2D(i,j,GI.gridDims.x)); for (int c=threadIdx.z; c < GI.n_coils_cc; c+=blockDim.z) { gdata_cache[ind + c*GI.sector_dim].x = gdata[grid_index + c*GI.gridDims_count].x; gdata_cache[ind + c*GI.sector_dim].y = gdata[grid_index + c*GI.gridDims_count].y; } } __syncthreads(); //Grid Points over Threads int data_cnt = sectors[sec[threadIdx.x]] + threadIdx.x + sec_offset; while (data_cnt < sec_max) { DType2 data_point; //datapoint per thread data_point.x = crds[data_cnt]; data_point.y = crds[data_cnt + GI.data_count]; // set the boundaries of final dataset for gpuNUFFT this point ix = mapKSpaceToGrid(data_point.x,GI.gridDims.x,center.x,GI.sector_offset); set_minmax(&ix, &imin, &imax, GI.sector_pad_max, GI.kernel_radius); jy = mapKSpaceToGrid(data_point.y,GI.gridDims.y,center.y,GI.sector_offset); set_minmax(&jy, &jmin, &jmax, GI.sector_pad_max, GI.kernel_radius); // convolve neighboring cartesian points to this data point j=jmin; while (j<=jmax && j>=jmin) { jy = mapGridToKSpace(j,GI.gridDims.y,center.y,GI.sector_offset); dy_sqr = (jy - data_point.y) * GI.aniso_y_scale; dy_sqr *= dy_sqr; if (dy_sqr < GI.radiusSquared) { i=imin; while (i<=imax && i>=imin) { ix = mapGridToKSpace(i,GI.gridDims.x,center.x,GI.sector_offset); dx_sqr = (ix - data_point.x)*GI.aniso_x_scale; dx_sqr *= dx_sqr; if (dx_sqr < GI.radiusSquared) { // get kernel value // calc as separable filter val = KERNEL[(int) round(dy_sqr * GI.dist_multiplier)] * KERNEL[(int) round(dx_sqr * GI.dist_multiplier)]; ind = getIndex2D(i,j,GI.sector_pad_width); for (int c=threadIdx.z; c < GI.n_coils_cc; c+=blockDim.z) { sdata[threadIdx.x + c*blockDim.x].x += gdata_cache[ind + c*GI.sector_dim].x * val; sdata[threadIdx.x + c*blockDim.x].y += gdata_cache[ind + c*GI.sector_dim].y * val; } }// kernel bounds check x, spherical support i++; } // x loop } // kernel bounds check y, spherical support j++; } // y loop for (int c=threadIdx.z; c < GI.n_coils_cc; c+=blockDim.z) { atomicAdd(&(data[data_cnt + c*GI.data_count].x),sdata[threadIdx.x + c*blockDim.x].x); atomicAdd(&(data[data_cnt + c*GI.data_count].y),sdata[threadIdx.x + c*blockDim.x].y); sdata[threadIdx.x + c*blockDim.x].x = (DType)0.0;//Re sdata[threadIdx.x + c*blockDim.x].y = (DType)0.0;//Im } data_cnt = data_cnt + blockDim.x; } //data points per sector } //cached version of above kernel __global__ void forwardConvolutionKernel22D(CufftType* data, DType* crds, CufftType* gdata, IndType* sectors, IndType* sector_centers, int N) { extern __shared__ CufftType shared[];//externally managed shared memory CufftType* shared_out_data =(CufftType*) &shared[0]; CufftType* gdata_cache =(CufftType*) 
&shared[blockDim.x * GI.n_coils_cc]; __shared__ int sec[THREAD_BLOCK_SIZE]; sec[threadIdx.x]= blockIdx.x; //init shared memory for (int c=threadIdx.z; c < GI.n_coils_cc; c+=blockDim.z) { shared_out_data[threadIdx.x + c*blockDim.x].x = 0.0f;//Re shared_out_data[threadIdx.x + c*blockDim.x].y = 0.0f;//Im } __syncthreads(); //start convolution while (sec[threadIdx.x] < N) { __shared__ int data_max; data_max = sectors[sec[threadIdx.x]+1]; forwardConvolutionFunction2D(sec,data_max,0,shared_out_data,gdata_cache,data,crds,gdata,sectors,sector_centers); __syncthreads(); sec[threadIdx.x]= sec[threadIdx.x] + gridDim.x; } //sector check } __global__ void balancedForwardConvolutionKernel22D(CufftType* data, DType* crds, CufftType* gdata, IndType* sectors, IndType2* sector_processing_order, IndType* sector_centers, int N) { extern __shared__ CufftType shared[];//externally managed shared memory CufftType* shared_out_data =(CufftType*) &shared[0]; CufftType* gdata_cache =(CufftType*) &shared[blockDim.x * GI.n_coils_cc]; int sec_cnt= blockIdx.x; __shared__ int sec[THREAD_BLOCK_SIZE]; //init shared memory for (int c=threadIdx.z; c < GI.n_coils_cc; c+=blockDim.z) { shared_out_data[threadIdx.x + c * blockDim.x].x = 0.0f;//Re shared_out_data[threadIdx.x + c * blockDim.x].y = 0.0f;//Im } __syncthreads(); //start convolution while (sec_cnt < N) { sec[threadIdx.x] = sector_processing_order[sec_cnt].x; __shared__ int data_max; data_max = min(sectors[sec[threadIdx.x]+1],sectors[sec[threadIdx.x]] + sector_processing_order[sec_cnt].y+MAXIMUM_PAYLOAD); forwardConvolutionFunction2D(sec,data_max,sector_processing_order[sec_cnt].y,shared_out_data,gdata_cache,data,crds,gdata,sectors,sector_centers); __syncthreads(); sec_cnt = sec_cnt + gridDim.x; } //sector check } void performForwardConvolution( CufftType* data_d, DType* crds_d, CufftType* gdata_d, DType* kernel_d, IndType* sectors_d, IndType* sector_centers_d, gpuNUFFT::GpuNUFFTInfo* gi_host ) { // cached version proved to be // faster than non-cached version // even in 2d case bool useCache = true; if (!useCache) { int thread_size =THREAD_BLOCK_SIZE; long shared_mem_size = thread_size * sizeof(CufftType) * gi_host->n_coils_cc;//empiric dim3 block_dim(thread_size); dim3 grid_dim(getOptimalGridDim(gi_host->sector_count,thread_size)); if (DEBUG) printf("convolution requires %ld bytes of shared memory!\n",shared_mem_size); if (gi_host->is2Dprocessing) { dim3 block_dim(thread_size, 1, DEFAULT_VALUE(gi_host->n_coils_cc > 8 ? 8 : gi_host->n_coils_cc)); forwardConvolutionKernel2D<<<grid_dim,block_dim,shared_mem_size>>>(data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count); } else forwardConvolutionKernel<<<grid_dim,block_dim,shared_mem_size>>>(data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count); } else { int thread_size = THREAD_BLOCK_SIZE; long shared_mem_size = (thread_size + gi_host->sector_dim) * gi_host->n_coils_cc * sizeof(CufftType);//empiric dim3 block_dim(thread_size); dim3 grid_dim(getOptimalGridDim(gi_host->sector_count,thread_size)); if (DEBUG) printf("forward convolution requires %ld bytes of shared memory!\n",shared_mem_size); if (gi_host->is2Dprocessing) { dim3 block_dim(thread_size, 1, DEFAULT_VALUE(gi_host->n_coils_cc > 4 ? 
2 : gi_host->n_coils_cc)); forwardConvolutionKernel22D<<<grid_dim,block_dim,shared_mem_size>>>(data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count); } else forwardConvolutionKernel2<<<grid_dim,block_dim,shared_mem_size>>>(data_d,crds_d,gdata_d,sectors_d,sector_centers_d,gi_host->sector_count); } } void performForwardConvolution( CufftType* data_d, DType* crds_d, CufftType* gdata_d, DType* kernel_d, IndType* sectors_d, IndType2* sector_processing_order_d, IndType* sector_centers_d, gpuNUFFT::GpuNUFFTInfo* gi_host ) { int thread_size =THREAD_BLOCK_SIZE;//empiric long shared_mem_size = (thread_size + gi_host->sector_dim ) * gi_host->n_coils_cc * sizeof(CufftType); dim3 block_dim(thread_size); dim3 grid_dim(getOptimalGridDim(gi_host->sector_count,thread_size)); if (DEBUG) printf("balanced convolution requires %ld bytes of shared memory!\n",shared_mem_size); if (gi_host->is2Dprocessing) { dim3 block_dim(160, 1, DEFAULT_VALUE(gi_host->n_coils_cc > 4 ? 2 : gi_host->n_coils_cc)); balancedForwardConvolutionKernel22D<<<grid_dim,block_dim,shared_mem_size>>>(data_d,crds_d,gdata_d,sectors_d,sector_processing_order_d,sector_centers_d,gi_host->sectorsToProcess); } else balancedForwardConvolutionKernel2<<<grid_dim,block_dim,shared_mem_size>>>(data_d,crds_d,gdata_d,sectors_d,sector_processing_order_d,sector_centers_d,gi_host->sectorsToProcess); } #endif
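A minimal host-side sketch (not part of gpuNUFFT) of the dynamic shared-memory budget that the cached forward-convolution launchers above request per block: (thread_size + sector_dim) * n_coils_cc * sizeof(CufftType). The block size, padded sector volume and coil count below are illustrative assumptions, and CufftType is assumed to be an 8-byte float2.

#include <cstdio>

// Mirrors: (thread_size + gi_host->sector_dim) * gi_host->n_coils_cc * sizeof(CufftType)
static long cachedConvSharedMemBytes(int threads_per_block, int sector_dim,
                                     int n_coils_cc) {
  return static_cast<long>(threads_per_block + sector_dim) * n_coils_cc * 8L;
}

int main() {
  const int threads_per_block = 256;   // e.g. THREAD_BLOCK_SIZE (assumed value)
  const int sector_dim = 10 * 10 * 10; // padded sector volume (illustrative)
  const int n_coils_cc = 2;            // coils processed concurrently (illustrative)
  long bytes = cachedConvSharedMemBytes(threads_per_block, sector_dim, n_coils_cc);
  std::printf("cached convolution needs %ld bytes of shared memory per block\n", bytes);
  if (bytes > 48 * 1024)
    std::printf("exceeds the default 48 KB limit; reduce n_coils_cc or the sector size\n");
  return 0;
}

This is the same quantity the DEBUG printf in performForwardConvolution reports, so it can be checked before a launch fails with an out-of-resources error.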
8688b7a4dbdf3e8664eb575c4ffdf796bcf59da8.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "test_utils.h" #include <functions/sigmoid.cuh> #include <gtest/gtest.h> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> namespace MLCommon { namespace Functions { template <typename T> struct SigmoidInputs { T tolerance; int len; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const SigmoidInputs<T>& dims) { return os; } template <typename T> class SigmoidTest : public ::testing::TestWithParam<SigmoidInputs<T>> { protected: SigmoidTest() : data(0, stream), result(0, stream), result_ref(0, stream) {} void SetUp() override { params = ::testing::TestWithParam<SigmoidInputs<T>>::GetParam(); int len = params.len; RAFT_CUDA_TRY(hipStreamCreate(&stream)); data.resize(len, stream); T data_h[params.len] = {2.1, -4.5, -0.34, 10.0}; raft::update_device(data.data(), data_h, len, stream); result.resize(len, stream); result_ref.resize(len, stream); T result_ref_h[params.len] = {0.89090318, 0.01098694, 0.41580948, 0.9999546}; raft::update_device(result_ref.data(), result_ref_h, len, stream); sigmoid(result.data(), data.data(), len, stream); RAFT_CUDA_TRY(hipStreamDestroy(stream)); } protected: hipStream_t stream = 0; SigmoidInputs<T> params; rmm::device_uvector<T> data, result, result_ref; }; const std::vector<SigmoidInputs<float>> inputsf2 = {{0.001f, 4}}; const std::vector<SigmoidInputs<double>> inputsd2 = {{0.001, 4}}; typedef SigmoidTest<float> SigmoidTestValF; TEST_P(SigmoidTestValF, Result) { ASSERT_TRUE(MLCommon::devArrMatch(result_ref.data(), result.data(), params.len, MLCommon::CompareApproxAbs<float>(params.tolerance))); } typedef SigmoidTest<double> SigmoidTestValD; TEST_P(SigmoidTestValD, Result) { ASSERT_TRUE(MLCommon::devArrMatch(result_ref.data(), result.data(), params.len, MLCommon::CompareApproxAbs<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(SigmoidTests, SigmoidTestValF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(SigmoidTests, SigmoidTestValD, ::testing::ValuesIn(inputsd2)); } // end namespace Functions } // end namespace MLCommon
8688b7a4dbdf3e8664eb575c4ffdf796bcf59da8.cu
/* * Copyright (c) 2020-2022, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "test_utils.h" #include <functions/sigmoid.cuh> #include <gtest/gtest.h> #include <raft/util/cuda_utils.cuh> #include <raft/util/cudart_utils.hpp> #include <rmm/device_uvector.hpp> namespace MLCommon { namespace Functions { template <typename T> struct SigmoidInputs { T tolerance; int len; }; template <typename T> ::std::ostream& operator<<(::std::ostream& os, const SigmoidInputs<T>& dims) { return os; } template <typename T> class SigmoidTest : public ::testing::TestWithParam<SigmoidInputs<T>> { protected: SigmoidTest() : data(0, stream), result(0, stream), result_ref(0, stream) {} void SetUp() override { params = ::testing::TestWithParam<SigmoidInputs<T>>::GetParam(); int len = params.len; RAFT_CUDA_TRY(cudaStreamCreate(&stream)); data.resize(len, stream); T data_h[params.len] = {2.1, -4.5, -0.34, 10.0}; raft::update_device(data.data(), data_h, len, stream); result.resize(len, stream); result_ref.resize(len, stream); T result_ref_h[params.len] = {0.89090318, 0.01098694, 0.41580948, 0.9999546}; raft::update_device(result_ref.data(), result_ref_h, len, stream); sigmoid(result.data(), data.data(), len, stream); RAFT_CUDA_TRY(cudaStreamDestroy(stream)); } protected: cudaStream_t stream = 0; SigmoidInputs<T> params; rmm::device_uvector<T> data, result, result_ref; }; const std::vector<SigmoidInputs<float>> inputsf2 = {{0.001f, 4}}; const std::vector<SigmoidInputs<double>> inputsd2 = {{0.001, 4}}; typedef SigmoidTest<float> SigmoidTestValF; TEST_P(SigmoidTestValF, Result) { ASSERT_TRUE(MLCommon::devArrMatch(result_ref.data(), result.data(), params.len, MLCommon::CompareApproxAbs<float>(params.tolerance))); } typedef SigmoidTest<double> SigmoidTestValD; TEST_P(SigmoidTestValD, Result) { ASSERT_TRUE(MLCommon::devArrMatch(result_ref.data(), result.data(), params.len, MLCommon::CompareApproxAbs<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(SigmoidTests, SigmoidTestValF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(SigmoidTests, SigmoidTestValD, ::testing::ValuesIn(inputsd2)); } // end namespace Functions } // end namespace MLCommon
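For reference, the expected values in result_ref_h of the test above correspond to the logistic function 1/(1+exp(-x)) applied to data_h. The host-only sketch below reproduces them, assuming the sigmoid primitive under test implements that standard formula.

#include <cmath>
#include <cstdio>

int main() {
  // Same inputs as data_h in the SigmoidTest fixture.
  const double x[4] = {2.1, -4.5, -0.34, 10.0};
  for (int i = 0; i < 4; ++i) {
    double s = 1.0 / (1.0 + std::exp(-x[i]));  // logistic sigmoid
    std::printf("sigmoid(% .2f) = %.8f\n", x[i], s);
  }
  return 0;
}

The printed values match result_ref_h (0.89090318, 0.01098694, 0.41580948, 0.9999546) to the test's 0.001 tolerance.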
539b55f9056cc3b41cbef237e2d8bac9651af216.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <ATen/native/hip/UniqueCub.cuh> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/hip/cub.cuh> namespace at { namespace native { namespace internal { namespace { template <typename scalar_t> __global__ void adjacent_difference_kernel( int64_t n, const scalar_t* input, int* output) { CUDA_KERNEL_LOOP(i, n) { output[i] = i > 0 ? input[i] != input[i - 1] : 0; } } __global__ void scatter_kernel( int64_t n, const int64_t* input, const int64_t* indices, int64_t* output) { CUDA_KERNEL_LOOP(i, n) { output[indices[i]] = input[i]; } } // A variation of compute_unique (defined in Unique.cu) that doesn't allow // customizing equal and not_equal (CUB doesn't allow them). template <typename scalar_t> std::tuple<Tensor, Tensor, Tensor, int64_t> compute_unique( const Tensor& sorted, const Tensor& sorted_indices, const bool return_inverse, const bool return_counts, const bool consecutive) { int64_t num_inp = sorted.numel(); TORCH_CHECK( num_inp <= INT_MAX, "num_inp ", num_inp, " is too big to for CUB"); auto options = sorted.options().dtype(kLong); const scalar_t* data = sorted.data_ptr<scalar_t>(); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); // inverse indices Tensor inverse_indices; if (!return_inverse || num_inp == 0) { inverse_indices = at::empty({0}, options); } else { inverse_indices = at::empty({num_inp}, options); Tensor inv_loc = consecutive ? at::empty({num_inp}, options.dtype(kInt)) : inverse_indices; int* inv_loc_ptr = static_cast<int*>(inv_loc.data_ptr()); const dim3 block = dim3(::min(static_cast<int64_t>(cuda::getApplyBlock().x), num_inp)); dim3 grid; int curDevice = -1; hipGetDevice(&curDevice); cuda::getApplyGrid(num_inp, grid, curDevice); hipLaunchKernelGGL(( adjacent_difference_kernel<scalar_t>) , dim3(grid), dim3(block), 0, stream, num_inp, data, inv_loc_ptr); C10_HIP_KERNEL_LAUNCH_CHECK(); Tensor inv_loc_out = consecutive ? inverse_indices : at::empty({num_inp}, options); CUB_WRAPPER( hipcub::DeviceScan::InclusiveSum, inv_loc_ptr, inv_loc_out.data_ptr<int64_t>(), num_inp, stream); if (!consecutive) { TORCH_INTERNAL_ASSERT( sorted_indices.defined(), "return_inverse is set to true, but sorted_indices is undefined. Send a bug report!"); hipLaunchKernelGGL(( scatter_kernel), dim3(grid), dim3(block), 0, stream, num_inp, inv_loc_out.data_ptr<int64_t>(), sorted_indices.data_ptr<int64_t>(), inverse_indices.data_ptr<int64_t>()); C10_HIP_KERNEL_LAUNCH_CHECK(); } } // unique and count Tensor data_out = at::empty({num_inp}, sorted.options()); Tensor counts = at::empty({0}, options); Tensor length = at::empty({1}, options); int64_t num_out; if (!return_counts) { cuda::cub::unique(data, data_out.data_ptr<scalar_t>(), length.data_ptr<int64_t>(), num_inp); num_out = length.item<int64_t>(); } else { counts.resize_(num_inp); CUB_WRAPPER( hipcub::DeviceRunLengthEncode::Encode, data, data_out.data_ptr<scalar_t>(), counts.data_ptr<int64_t>(), length.data_ptr<int64_t>(), num_inp, stream); num_out = length.item<int64_t>(); counts.resize_(num_out); } return std::tuple<Tensor, Tensor, Tensor, int64_t>( data_out, inverse_indices, counts, num_out); } } // namespace // This function (and compute_unique above) are defined in a separate file from // Unique.cu because for now ATen/cuda/cub.cuh can't be used together with // thrust in the same compilation unit. 
template <typename scalar_t> std::tuple<Tensor, Tensor, Tensor> unique_cuda_template( const Tensor& self, const bool consecutive, const bool return_inverse, const bool return_counts) { hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); auto options = self.options().dtype(kLong); int64_t num_inp = self.numel(); Tensor sorted; Tensor self_c = self.contiguous(); if (consecutive) { sorted = self_c; } else { sorted = at::empty({num_inp}, self.options()); } scalar_t* sorted_data = sorted.data_ptr<scalar_t>(); Tensor sorted_indices; if (!return_inverse) { if (!consecutive) { cuda::cub::sort_keys(self_c.data_ptr<scalar_t>(), sorted_data, num_inp); } } else { if (!consecutive) { Tensor range = at::arange(0, num_inp, options); sorted_indices = at::empty({num_inp}, options); cuda::cub::sort_pairs( self_c.data_ptr<scalar_t>(), sorted_data, range.data_ptr<int64_t>(), sorted_indices.data_ptr<int64_t>(), num_inp); } } Tensor output, inverse_indices, counts; int64_t num_out; std::tie(output, inverse_indices, counts, num_out) = compute_unique<scalar_t>( sorted, sorted_indices, return_inverse, return_counts, consecutive); output.resize_(num_out); if (return_inverse) { inverse_indices.resize_(self.sizes()); } return std::tuple<Tensor, Tensor, Tensor>(output, inverse_indices, counts); } #define INSTANTIATE_UNIQUE_CUDA_TEMPLATE(TYPE) \ template std::tuple<Tensor, Tensor, Tensor> unique_cuda_template<TYPE>( \ const Tensor& self, \ const bool consecutive, \ const bool return_inverse, \ const bool return_counts) INSTANTIATE_UNIQUE_CUDA_TEMPLATE(uint8_t); INSTANTIATE_UNIQUE_CUDA_TEMPLATE(int8_t); INSTANTIATE_UNIQUE_CUDA_TEMPLATE(double); INSTANTIATE_UNIQUE_CUDA_TEMPLATE(float); INSTANTIATE_UNIQUE_CUDA_TEMPLATE(int32_t); INSTANTIATE_UNIQUE_CUDA_TEMPLATE(int64_t); INSTANTIATE_UNIQUE_CUDA_TEMPLATE(int16_t); INSTANTIATE_UNIQUE_CUDA_TEMPLATE(bool); INSTANTIATE_UNIQUE_CUDA_TEMPLATE(at::Half); #undef INSTANTIATE } // namespace internal } // namespace native } // namespace at
539b55f9056cc3b41cbef237e2d8bac9651af216.cu
#include <ATen/native/cuda/UniqueCub.cuh> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/cuda/cub.cuh> namespace at { namespace native { namespace internal { namespace { template <typename scalar_t> __global__ void adjacent_difference_kernel( int64_t n, const scalar_t* input, int* output) { CUDA_KERNEL_LOOP(i, n) { output[i] = i > 0 ? input[i] != input[i - 1] : 0; } } __global__ void scatter_kernel( int64_t n, const int64_t* input, const int64_t* indices, int64_t* output) { CUDA_KERNEL_LOOP(i, n) { output[indices[i]] = input[i]; } } // A variation of compute_unique (defined in Unique.cu) that doesn't allow // customizing equal and not_equal (CUB doesn't allow them). template <typename scalar_t> std::tuple<Tensor, Tensor, Tensor, int64_t> compute_unique( const Tensor& sorted, const Tensor& sorted_indices, const bool return_inverse, const bool return_counts, const bool consecutive) { int64_t num_inp = sorted.numel(); TORCH_CHECK( num_inp <= INT_MAX, "num_inp ", num_inp, " is too big to for CUB"); auto options = sorted.options().dtype(kLong); const scalar_t* data = sorted.data_ptr<scalar_t>(); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); // inverse indices Tensor inverse_indices; if (!return_inverse || num_inp == 0) { inverse_indices = at::empty({0}, options); } else { inverse_indices = at::empty({num_inp}, options); Tensor inv_loc = consecutive ? at::empty({num_inp}, options.dtype(kInt)) : inverse_indices; int* inv_loc_ptr = static_cast<int*>(inv_loc.data_ptr()); const dim3 block = dim3(std::min(static_cast<int64_t>(cuda::getApplyBlock().x), num_inp)); dim3 grid; int curDevice = -1; cudaGetDevice(&curDevice); cuda::getApplyGrid(num_inp, grid, curDevice); adjacent_difference_kernel<scalar_t> <<<grid, block, 0, stream>>>(num_inp, data, inv_loc_ptr); C10_CUDA_KERNEL_LAUNCH_CHECK(); Tensor inv_loc_out = consecutive ? inverse_indices : at::empty({num_inp}, options); CUB_WRAPPER( cub::DeviceScan::InclusiveSum, inv_loc_ptr, inv_loc_out.data_ptr<int64_t>(), num_inp, stream); if (!consecutive) { TORCH_INTERNAL_ASSERT( sorted_indices.defined(), "return_inverse is set to true, but sorted_indices is undefined. Send a bug report!"); scatter_kernel<<<grid, block, 0, stream>>>( num_inp, inv_loc_out.data_ptr<int64_t>(), sorted_indices.data_ptr<int64_t>(), inverse_indices.data_ptr<int64_t>()); C10_CUDA_KERNEL_LAUNCH_CHECK(); } } // unique and count Tensor data_out = at::empty({num_inp}, sorted.options()); Tensor counts = at::empty({0}, options); Tensor length = at::empty({1}, options); int64_t num_out; if (!return_counts) { cuda::cub::unique(data, data_out.data_ptr<scalar_t>(), length.data_ptr<int64_t>(), num_inp); num_out = length.item<int64_t>(); } else { counts.resize_(num_inp); CUB_WRAPPER( cub::DeviceRunLengthEncode::Encode, data, data_out.data_ptr<scalar_t>(), counts.data_ptr<int64_t>(), length.data_ptr<int64_t>(), num_inp, stream); num_out = length.item<int64_t>(); counts.resize_(num_out); } return std::tuple<Tensor, Tensor, Tensor, int64_t>( data_out, inverse_indices, counts, num_out); } } // namespace // This function (and compute_unique above) are defined in a separate file from // Unique.cu because for now ATen/cuda/cub.cuh can't be used together with // thrust in the same compilation unit. 
template <typename scalar_t> std::tuple<Tensor, Tensor, Tensor> unique_cuda_template( const Tensor& self, const bool consecutive, const bool return_inverse, const bool return_counts) { cudaStream_t stream = at::cuda::getCurrentCUDAStream(); auto options = self.options().dtype(kLong); int64_t num_inp = self.numel(); Tensor sorted; Tensor self_c = self.contiguous(); if (consecutive) { sorted = self_c; } else { sorted = at::empty({num_inp}, self.options()); } scalar_t* sorted_data = sorted.data_ptr<scalar_t>(); Tensor sorted_indices; if (!return_inverse) { if (!consecutive) { cuda::cub::sort_keys(self_c.data_ptr<scalar_t>(), sorted_data, num_inp); } } else { if (!consecutive) { Tensor range = at::arange(0, num_inp, options); sorted_indices = at::empty({num_inp}, options); cuda::cub::sort_pairs( self_c.data_ptr<scalar_t>(), sorted_data, range.data_ptr<int64_t>(), sorted_indices.data_ptr<int64_t>(), num_inp); } } Tensor output, inverse_indices, counts; int64_t num_out; std::tie(output, inverse_indices, counts, num_out) = compute_unique<scalar_t>( sorted, sorted_indices, return_inverse, return_counts, consecutive); output.resize_(num_out); if (return_inverse) { inverse_indices.resize_(self.sizes()); } return std::tuple<Tensor, Tensor, Tensor>(output, inverse_indices, counts); } #define INSTANTIATE_UNIQUE_CUDA_TEMPLATE(TYPE) \ template std::tuple<Tensor, Tensor, Tensor> unique_cuda_template<TYPE>( \ const Tensor& self, \ const bool consecutive, \ const bool return_inverse, \ const bool return_counts) INSTANTIATE_UNIQUE_CUDA_TEMPLATE(uint8_t); INSTANTIATE_UNIQUE_CUDA_TEMPLATE(int8_t); INSTANTIATE_UNIQUE_CUDA_TEMPLATE(double); INSTANTIATE_UNIQUE_CUDA_TEMPLATE(float); INSTANTIATE_UNIQUE_CUDA_TEMPLATE(int32_t); INSTANTIATE_UNIQUE_CUDA_TEMPLATE(int64_t); INSTANTIATE_UNIQUE_CUDA_TEMPLATE(int16_t); INSTANTIATE_UNIQUE_CUDA_TEMPLATE(bool); INSTANTIATE_UNIQUE_CUDA_TEMPLATE(at::Half); #undef INSTANTIATE } // namespace internal } // namespace native } // namespace at
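A host-side simulation (a sketch, not the CUDA path) of how compute_unique above derives inverse indices: adjacent_difference_kernel flags the start of each new value in the sorted data, the CUB inclusive sum turns the flags into per-position unique ids, and scatter_kernel writes each id back to the element's original position. The sample values below are illustrative.

#include <algorithm>
#include <cstdio>
#include <numeric>
#include <vector>

int main() {
  // Unsorted input and the permutation that sorts it (what sort_pairs produces).
  std::vector<int> self = {3, 1, 3, 2, 1};
  std::vector<int> order(self.size());
  std::iota(order.begin(), order.end(), 0);
  std::stable_sort(order.begin(), order.end(),
                   [&](int a, int b) { return self[a] < self[b]; });

  std::vector<int> sorted(self.size());
  for (size_t i = 0; i < self.size(); ++i) sorted[i] = self[order[i]];

  // adjacent_difference_kernel: 1 where a new unique value starts (0 at i == 0).
  std::vector<int> flags(self.size());
  for (size_t i = 0; i < self.size(); ++i)
    flags[i] = (i > 0 && sorted[i] != sorted[i - 1]) ? 1 : 0;

  // DeviceScan::InclusiveSum: flags -> per-position unique ids.
  std::vector<int> ids(self.size());
  std::partial_sum(flags.begin(), flags.end(), ids.begin());

  // scatter_kernel: write each id back to the element's original position.
  std::vector<int> inverse(self.size());
  for (size_t i = 0; i < self.size(); ++i) inverse[order[i]] = ids[i];

  for (size_t i = 0; i < self.size(); ++i)
    std::printf("self[%zu]=%d -> inverse index %d\n", i, self[i], inverse[i]);
  return 0;
}

For self = {3, 1, 3, 2, 1} this yields inverse = {2, 0, 2, 1, 0} against the unique values {1, 2, 3}, which is the mapping the consecutive=false path builds on the device.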
72208e83dd60213be02508c9395249f6079913e3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <math.h> // Kernel function with grid-stride loop to add the elements of two arrays __global__ void add(int n, float *x, float *y) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } // Monolithic kernel function to add the elements of two arrays __global__ void add2(int n, float *x, float *y) { int index = blockIdx.x * blockDim.x + threadIdx.x; // int stride = blockDim.x * gridDim.x; if (index < n) y[index] = x[index] + y[index]; } int main(void) { int N = 1<<20; // 2^20 elements float *x, *y; // Allocate unified memory - accessible from CPU or GPU hipMallocManaged(&x, N*sizeof(float)); hipMallocManaged(&y, N*sizeof(float)); // Initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on GPU int blockSize = 256; int numBlocks = (N + blockSize - 1)/blockSize; hipLaunchKernelGGL(( add), dim3(numBlocks), dim3(blockSize), 0, 0, N, x, y); // Wait for GPU to finish before accessing on host hipDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error is: " << maxError << std::endl; // Free memory hipFree (x); hipFree (y); return 0; }
72208e83dd60213be02508c9395249f6079913e3.cu
#include <iostream> #include <math.h> // Kernel function with grid-stride loop to add the elements of two arrays __global__ void add(int n, float *x, float *y) { int index = blockIdx.x * blockDim.x + threadIdx.x; int stride = blockDim.x * gridDim.x; for (int i = index; i < n; i += stride) y[i] = x[i] + y[i]; } // Monolithic kernel function to add the elements of two arrays __global__ void add2(int n, float *x, float *y) { int index = blockIdx.x * blockDim.x + threadIdx.x; // int stride = blockDim.x * gridDim.x; if (index < n) y[index] = x[index] + y[index]; } int main(void) { int N = 1<<20; // 2^20 elements float *x, *y; // Allocate unified memory - accessible from CPU or GPU cudaMallocManaged(&x, N*sizeof(float)); cudaMallocManaged(&y, N*sizeof(float)); // Initialize x and y arrays on the host for (int i = 0; i < N; i++) { x[i] = 1.0f; y[i] = 2.0f; } // Run kernel on 1M elements on GPU int blockSize = 256; int numBlocks = (N + blockSize - 1)/blockSize; add<<<numBlocks, blockSize>>>(N, x, y); // Wait for GPU to finish before accessing on host cudaDeviceSynchronize(); // Check for errors (all values should be 3.0f) float maxError = 0.0f; for (int i = 0; i < N; i++) maxError = fmax(maxError, fabs(y[i]-3.0f)); std::cout << "Max error is: " << maxError << std::endl; // Free memory cudaFree (x); cudaFree (y); return 0; }
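A short sketch contrasting the two kernels in the pair above: the grid-stride add() covers any N with whatever grid it is given, while the monolithic add2() must be launched with at least ceil(N/blockSize) blocks or it silently skips elements. The SM-count-based grid size below is a common heuristic, not something the original file uses; the kernel is redefined here only so the sketch is self-contained.

#include <cstdio>
#include <cuda_runtime.h>

// Same grid-stride pattern as add() above, redefined for a standalone example.
__global__ void addStride(int n, const float *x, float *y) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
       i += blockDim.x * gridDim.x)
    y[i] += x[i];
}

int main() {
  const int N = 1 << 20;
  float *x, *y;
  cudaMallocManaged(&x, N * sizeof(float));
  cudaMallocManaged(&y, N * sizeof(float));
  for (int i = 0; i < N; ++i) { x[i] = 1.0f; y[i] = 2.0f; }

  // A grid sized to a small multiple of the SM count keeps the device busy
  // for any N, because the loop strides by blockDim.x * gridDim.x.
  int device = 0, numSMs = 0;
  cudaGetDevice(&device);
  cudaDeviceGetAttribute(&numSMs, cudaDevAttrMultiProcessorCount, device);
  addStride<<<4 * numSMs, 256>>>(N, x, y);
  cudaDeviceSynchronize();

  std::printf("y[0] = %f (expected 3.0)\n", y[0]);
  cudaFree(x);
  cudaFree(y);
  return 0;
}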
652dd7616a1ca06f23b8d09bd70fe9cd6b83fc4d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "sparse_matrix_nnz_impl.cuh" template <typename T> __global__ void SparseMatrixNNZ(const size_t size, const T *input, int32_t *output) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { output[pos] = input[pos + 1] - (pos == 0 ? 0 : input[pos]); } return; } template <typename T> void CalSparseMatrixNNZ(const size_t size, const T *input, int32_t *output, const uint32_t &device_id, hipStream_t cuda_stream) { hipLaunchKernelGGL(( SparseMatrixNNZ), dim3(CUDA_BLOCKS(device_id, size)), dim3(CUDA_THREADS(device_id)), 0, cuda_stream, size, input, output); return; } template CUDA_LIB_EXPORT void CalSparseMatrixNNZ<int32_t>(const size_t size, const int32_t *input, int32_t *output, const uint32_t &device_id, hipStream_t cuda_stream); template CUDA_LIB_EXPORT void CalSparseMatrixNNZ<int64_t>(const size_t size, const int64_t *input, int32_t *output, const uint32_t &device_id, hipStream_t cuda_stream);
652dd7616a1ca06f23b8d09bd70fe9cd6b83fc4d.cu
/** * Copyright 2022 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "sparse_matrix_nnz_impl.cuh" template <typename T> __global__ void SparseMatrixNNZ(const size_t size, const T *input, int32_t *output) { for (size_t pos = blockIdx.x * blockDim.x + threadIdx.x; pos < size; pos += blockDim.x * gridDim.x) { output[pos] = input[pos + 1] - (pos == 0 ? 0 : input[pos]); } return; } template <typename T> void CalSparseMatrixNNZ(const size_t size, const T *input, int32_t *output, const uint32_t &device_id, cudaStream_t cuda_stream) { SparseMatrixNNZ<<<CUDA_BLOCKS(device_id, size), CUDA_THREADS(device_id), 0, cuda_stream>>>(size, input, output); return; } template CUDA_LIB_EXPORT void CalSparseMatrixNNZ<int32_t>(const size_t size, const int32_t *input, int32_t *output, const uint32_t &device_id, cudaStream_t cuda_stream); template CUDA_LIB_EXPORT void CalSparseMatrixNNZ<int64_t>(const size_t size, const int64_t *input, int32_t *output, const uint32_t &device_id, cudaStream_t cuda_stream);
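A host-side sketch of what the SparseMatrixNNZ kernel above computes: given a CSR-style row-pointer array with a leading zero, it returns the per-row nonzero count as the difference of adjacent pointers. The rowptr values below are illustrative.

#include <cstdio>
#include <vector>

int main() {
  // CSR row pointers for a 4-row matrix; nnz of row i is rowptr[i+1] - rowptr[i].
  std::vector<int> rowptr = {0, 2, 2, 5, 7};
  const int rows = static_cast<int>(rowptr.size()) - 1;

  std::vector<int> nnz(rows);
  for (int i = 0; i < rows; ++i) {
    // The kernel evaluates input[pos + 1] - (pos == 0 ? 0 : input[pos]); with a
    // leading zero in rowptr this equals the plain difference rowptr[i+1] - rowptr[i].
    nnz[i] = rowptr[i + 1] - (i == 0 ? 0 : rowptr[i]);
  }

  for (int i = 0; i < rows; ++i)
    std::printf("row %d has %d nonzeros\n", i, nnz[i]);
  return 0;
}

With the leading zero present, the pos == 0 special case in the kernel and the plain adjacent difference produce the same counts (here 2, 0, 3, 2).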
575f71264635bda5f55973300a39d096ddc440eb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 Stanford, NVIDIA, Facebook * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::dense(const Tensor& input, int outDim, ActiMode activation, bool use_bias, const Op* shared_op, Initializer* kernel_initializer, Initializer* bias_initializer, const char *name) { if (kernel_initializer == NULL) { int seed = std::rand(); kernel_initializer = new GlorotUniform(seed); } if (bias_initializer == NULL) { bias_initializer = new ZeroInitializer(); } Linear *li = new Linear(*this, input, outDim, activation, use_bias, shared_op, kernel_initializer, bias_initializer, name); layers.push_back(li); return li->outputs[0]; } Linear::Linear(FFModel& model, const Tensor& _input, int out_dim, ActiMode _activation, bool _use_bias, const Op* shared_op, Initializer* _kernel_initializer, Initializer* _bias_initializer, const char* name) : Op(model, OP_LINEAR, shared_op, name, _input), in_channels(_input.adim[0]), out_channels(out_dim), activation(_activation), use_bias(_use_bias), kernel_initializer(_kernel_initializer), bias_initializer(_bias_initializer) { numInputs = 1; numOutputs = 1; outputs[0].numDim = _input.numDim; for (int i = 1; i < outputs[0].numDim; i++) outputs[0].adim[i] = _input.adim[i]; outputs[0].adim[0] = out_dim; weights[0].numDim = 2; weights[0].adim[0] = in_channels; weights[0].adim[1] = out_channels; numWeights = 1; if (use_bias) { weights[1].numDim = 1; weights[1].adim[0] = out_channels; numWeights = 2; } } void Linear::create_weights(FFModel& model) { int dim = inputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ { \ create_weights_with_dim<DIM>(model); \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: { // Unsupported dim assert(false); } } } template<int NDIM> void Linear::create_weights_with_dim(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, pcname)); #ifdef FF_USE_NCCL ParameterSyncType comm_type = ParameterSyncType::NCCL; #else ParameterSyncType comm_type = ParameterSyncType::PS; #endif // Create kernel tensor { const int dims[2] = {out_channels, in_channels}; weights[0] = model.create_linear_weight<2, NDIM>(this, dims, DT_FLOAT, kernel_initializer, true/*create_grad*/, comm_type); } // Create bias tensor if (use_bias) { const int dims[1] = {out_channels}; weights[1] = model.create_linear_weight<1, NDIM>(this, dims, DT_FLOAT, bias_initializer, true/*create_grad*/, comm_type); assert(numWeights == 2); } else { assert(numWeights == 1); } } void Linear::create_output_and_partition(FFModel& model) { int dim = inputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ { \ create_output_and_partition_with_dim<DIM>(model); \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: { // Unsupported dim for ElementWiseBinary operator assert(false); } } } template<int NDIM> void 
Linear::create_output_and_partition_with_dim(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, pcname)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is); int num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1; int num_par_n = part_rect.hi[NDIM-1] - part_rect.lo[NDIM-1] + 1; int in_dim = inputs[0].adim[0]; assert(in_dim == in_channels); int batch_size = inputs[0].adim[NDIM-1]; { int dims[NDIM]; for (int i = 0; i < NDIM; i++) dims[i] = outputs[0].adim[NDIM-1-i]; outputs[0] = model.create_tensor<NDIM>(dims, DT_FLOAT, this); outputs[0].owner_op = this; outputs[0].owner_idx = 0; } // Compute partition bound for input Rect<NDIM> input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part.get_index_partition()); // Create replica tensor if (num_par_c > 1) { { Rect<NDIM> extent; for (int i = 1; i < NDIM; i++) { extent.lo[i] = 0; assert(outputs[0].adim[i] % (part_rect.hi[i] - part_rect.lo[i] + 1) == 0); extent.hi[i] = outputs[0].adim[i] / (part_rect.hi[i] - part_rect.lo[i] + 1) - 1; } extent.lo[0] = 0; extent.hi[0] = in_dim-1; Transform<NDIM, NDIM> transform; for (int i = 0; i < NDIM; i++) for (int j = 0; j < NDIM; j++) transform[i][j] = 0; for (int i = 1; i < NDIM; i++) transform[i][i] = extent.hi[i] + 1; IndexPartition ip = runtime->create_partition_by_restriction( ctx, inputs[0].region.get_index_space(), task_is, transform, extent); assert(runtime->is_index_partition_complete(ctx, ip)); input_lps[0] = runtime->get_logical_partition( ctx, inputs[0].region, ip); } if (model.config.computationMode == COMP_MODE_TRAINING) { if (NDIM==1) { const int dims[2] = {num_par_c, in_dim}; replica = model.create_linear_replica<2>(dims, (IndexSpaceT<NDIM>)task_is, DT_FLOAT); } else if (NDIM==2) { const int dims[3] = {num_par_c, batch_size, in_dim}; replica = model.create_linear_replica<3>(dims, (IndexSpaceT<NDIM>)task_is, DT_FLOAT); } else if (NDIM==3) { const int dims[4] = {num_par_c, batch_size, inputs[0].adim[1], in_dim}; replica = model.create_linear_replica<4>(dims, (IndexSpaceT<NDIM>)task_is, DT_FLOAT); } else { assert(false && "Unsupported dimension for parallelizing Linear operators" " using the parameter dim."); } // Backward use the same ip as inputs[0] input_grad_lps[0] = inputs[0].part_grad; { IndexSpaceT<NDIM> input_task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(input_rect)); Rect<NDIM+1> extent; for (int i = 0; i < NDIM; i++) { extent.lo[i] = 0; assert(inputs[0].adim[i] % (input_rect.hi[i] - input_rect.lo[i] + 1) == 0); extent.hi[i] = inputs[0].adim[i] / (input_rect.hi[i] - input_rect.lo[i] + 1) - 1; } extent.lo[NDIM] = 0; extent.hi[NDIM] = num_par_c - 1; Transform<NDIM+1, NDIM> transform; for (int i = 0; i < NDIM+1; i++) for (int j = 0; j < NDIM; j++) transform[i][j] = 0; for (int i = 0; i < NDIM; i++) transform[i][i] = inputs[0].adim[i] / (input_rect.hi[i] - input_rect.lo[i] + 1); IndexPartition ip = runtime->create_partition_by_restriction( ctx, replica.region_grad.get_index_space(), input_task_is, transform, extent); assert(runtime->is_index_partition_disjoint(ctx, ip)); assert(runtime->is_index_partition_complete(ctx, ip)); // Note we use replica.part to save how to partition the replica // to compute input_grad_lps replica.part = runtime->get_logical_partition( ctx, replica.region_grad, ip); } } // if COMP_MODE_TRAINING } else { // when num_par_c == 1 if (input_rect == 
part_rect) { input_lps[0] = inputs[0].part; if (model.config.computationMode == COMP_MODE_TRAINING) { input_grad_lps[0] = inputs[0].part_grad; } } else { Rect<NDIM> extent; for (int i = 0; i < NDIM; i++) { extent.lo[i] = 0; assert(inputs[0].adim[i] % (part_rect.hi[i] - part_rect.lo[i] + 1) == 0); extent.hi[i] = inputs[0].adim[i] / (part_rect.hi[i] - part_rect.lo[i] + 1) - 1; } Transform<NDIM, NDIM> transform; for (int i = 0; i < NDIM; i++) for (int j = 0; j < NDIM; j++) { transform[i][j] = 0; if (i==j) transform[i][j] = extent.hi[i] + 1; } IndexPartition ip = runtime->create_partition_by_restriction( ctx, inputs[0].region.get_index_space(), task_is, transform, extent); assert(runtime->is_index_partition_disjoint(ctx, ip)); assert(runtime->is_index_partition_complete(ctx, ip)); input_lps[0] = runtime->get_logical_partition( ctx, inputs[0].region, ip); if (model.config.computationMode == COMP_MODE_TRAINING) { input_grad_lps[0] = runtime->get_logical_partition( ctx, inputs[0].region_grad, ip); } } } } /* regions[0](O): output regions[1](I): kernel regions[2](I): bias */ OpMeta* Linear::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { Domain out_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); switch (out_domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ return init_task_with_dim<DIM>(task, regions, ctx, runtime); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } return NULL; } bool Linear::use_cudnn_activation(ActiMode mode) { switch (mode) { case AC_MODE_RELU: case AC_MODE_SIGMOID: case AC_MODE_TANH: return true; } return false; } template<int NDIM> OpMeta* Linear::init_task_with_dim(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == task->regions.size()); assert(regions.size() == 2 || regions.size() == 3); const Linear* linear = (Linear*) task->args; FFHandler handle = *((const FFHandler*) task->local_args); //TensorAccessorR<float, 2> acc_input( // regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, NDIM> acc_output( regions[0], task->regions[0], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 2> acc_kernel( regions[1], task->regions[1], FID_DATA, ctx, runtime); // TensorAccessorR<float, 1> acc_bias( // regions[3], task->regions[3], FID_DATA, ctx, runtime); //int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int in_dim = acc_kernel.rect.hi[0] - acc_kernel.rect.lo[0] + 1; int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int batch_size = acc_output.rect.volume() / out_dim; printf("init linear (input): in_dim(%d) out_dim(%d) batch_size(%d)\n", in_dim, out_dim, batch_size); LinearMeta* m = new LinearMeta(handle, batch_size); m->activation = linear->activation; m->use_bias = linear->use_bias; m->profiling = linear->profiling; m->trainableInputs[0] = linear->trainableInputs[0]; std::strcpy(m->op_name, linear->name); if (use_cudnn_activation(m->activation)) { cudnnActivationMode_t mode; switch (linear->activation) { case AC_MODE_RELU: mode = CUDNN_ACTIVATION_RELU; break; case AC_MODE_SIGMOID: mode = CUDNN_ACTIVATION_SIGMOID; break; default: // Unsupported activation mode assert(false); } checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, mode, CUDNN_PROPAGATE_NAN, 0.0)); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, out_dim, 1, 1)); } return m; } void Linear::init(const FFModel& 
ff) { int dim = outputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ return init_with_dim<DIM>(ff); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } template<int NDIM> void Linear::init_with_dim(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<NDIM> rect = runtime->get_index_space_domain(ctx, task_is); ParallelConfig pc; std::string pcname = name; ff.config.find_parallel_config(NDIM, pcname, pc); int idx = 0; for (PointInRectIterator<NDIM> it(rect); it(); it++) { FFHandler handle = ff.handlers[pc.device_ids[idx++]]; #ifdef FF_USE_NCCL handle.ncclComm = pc.nccl_comms[idx-1]; #endif argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); } IndexLauncher launcher(LINEAR_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(Linear)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); //launcher.add_region_requirement( // RegionRequirement(input_lps[0], 0/*projection id*/, // READ_ONLY, EXCLUSIVE, inputs[0].region)); //launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(1, FID_DATA); // launcher.add_region_requirement( // RegionRequirement(weights[1].part, 0/*projection id*/, // READ_ONLY, EXCLUSIVE, weights[1].region)); // launcher.add_field(3, FID_DATA); if (ff.config.computationMode == COMP_MODE_TRAINING) { // Add inputs[0].region_grad to avoid Legion warning //launcher.add_region_requirement( // RegionRequirement(input_grad_lps[0], 0/*projection id*/, // WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad)); //launcher.add_field(2, FID_DATA); } FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); idx = 0; for (PointInRectIterator<NDIM> it(rect); it(); it++) { meta[idx++] = fm.get_result<OpMeta*>(*it); } } /*static*/ void Linear::forward_kernel(const LinearMeta* m, const float* input_ptr, float* output_ptr, const float* kernel_ptr, const float* bias_ptr, int in_dim, int out_dim, int batch_size, hipStream_t stream) { checkCUDA(hipblasSetStream(m->handle.blas, stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); float alpha = 1.0f, beta = 0.0f; checkCUDA(hipblasSgemm(m->handle.blas, HIPBLAS_OP_T, HIPBLAS_OP_N, out_dim, batch_size, in_dim, &alpha, kernel_ptr, in_dim, input_ptr, in_dim, &beta, output_ptr, out_dim)); // use_bias = True if (bias_ptr != NULL) { checkCUDA(hipblasSgemm(m->handle.blas, HIPBLAS_OP_T, HIPBLAS_OP_N, out_dim, batch_size, 1, &alpha, bias_ptr, 1, m->one_ptr, 1, &alpha, output_ptr, out_dim)); } if (use_cudnn_activation(m->activation)) { checkCUDNN(cudnnActivationForward(m->handle.dnn, m->actiDesc, &alpha, m->outputTensor, output_ptr, &beta, m->outputTensor, output_ptr)); } else if (m->activation == AC_MODE_GELU) { size_t elements = (size_t)out_dim * (size_t) batch_size; constexpr float B = 0.7978845608028654f; // sqrt(2.0/M_PI) constexpr float C = 0.035677408136300125f; // 0.044715 * sqrt(2.0/M_PI) hipLaunchKernelGGL(( gelu_forward_kernel), dim3(GET_BLOCKS(elements)), dim3(CUDA_NUM_THREADS), 0, 0, elements, B, C, output_ptr); } else if (m->activation == AC_MODE_NONE) { // Do nothing } else { assert(false && "Unsupported activation for Linear"); } } __host__ void Linear::forward_task(const Task 
*task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { Domain in_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); switch (in_domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ return forward_task_with_dim<DIM>(task, regions, ctx, runtime); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } /* regions[0](I); input regions[1](O): output regions[2](I): kernel regions[3](I): bias */ template<int NDIM> void Linear::forward_task_with_dim(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { //Linear* linear = (Linear*) task->args; const LinearMeta* m = *((LinearMeta**) task->local_args); assert(regions.size() == (3 + int(m->use_bias))); assert(task->regions.size() == (3 + int(m->use_bias))); TensorAccessorR<float, NDIM> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, NDIM> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 2> acc_kernel( regions[2], task->regions[2], FID_DATA, ctx, runtime); int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int batch_size = acc_output.rect.volume() / out_dim; assert(acc_output.rect.volume() == out_dim * batch_size); assert(acc_input.rect.volume() == in_dim * batch_size); assert(acc_kernel.rect.volume() == in_dim * out_dim); const float* acc_bias_ptr = NULL; if (m->use_bias) { TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); assert(acc_bias.rect.volume() == out_dim); acc_bias_ptr = acc_bias.ptr; } hipStream_t stream; checkCUDA(get_legion_stream(&stream)); hipEvent_t t_start, t_end; if (m->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start, stream); } Linear::forward_kernel(m, acc_input.ptr, acc_output.ptr, acc_kernel.ptr, acc_bias_ptr, in_dim, out_dim, batch_size, stream); if (m->profiling) { hipEventRecord(t_end, stream); checkCUDA(hipEventSynchronize(t_end)); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); hipEventDestroy(t_start); hipEventDestroy(t_end); printf("%s [Linear] forward time = %.2lfms\n", m->op_name, elapsed); //print_tensor<NDIM, float>(acc_input.ptr, acc_input.rect, "[Linear:forward:input]"); //print_tensor<2, float>(acc_kernel.ptr, acc_kernel.rect, "[Linear:forward:kernel]"); //print_tensor<1, float>(acc_bias.ptr, acc_bias.rect, "[Linear:forward:bias]"); //print_tensor<NDIM, float>(acc_output.ptr, acc_output.rect, "[Linear:forward:output]"); } } void Linear::forward(const FFModel& ff) { int dim = outputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ return forward_with_dim<DIM>(ff); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } template<int NDIM> void Linear::forward_with_dim(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<NDIM> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<NDIM> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(LINEAR_FWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, 
inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); if (use_bias) { launcher.add_region_requirement( RegionRequirement(weights[1].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[1].region)); launcher.add_field(3, FID_DATA); } runtime->execute_index_space(ctx, launcher); } __global__ void sigmoid_backward(float *grad_ptr, const float *output, int n) { CUDA_KERNEL_LOOP(i, n) { grad_ptr[i] = grad_ptr[i] * output[i] * (1 - output[i]); } } /*static*/ void Linear::backward_kernel(const LinearMeta* m, const float* input_ptr, float* input_grad_ptr, const float* output_ptr, float* output_grad_ptr, const float* kernel_ptr, float* kernel_grad_ptr, float* bias_grad_ptr, int in_dim, int out_dim, int batch_size, hipStream_t stream) { checkCUDA(hipblasSetStream(m->handle.blas, stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); float alpha = 1.0f; int output_size = out_dim * batch_size; if (m->activation == AC_MODE_RELU) { hipLaunchKernelGGL(( reluBackward), dim3(GET_BLOCKS(output_size)), dim3(CUDA_NUM_THREADS), 0, stream, output_grad_ptr, output_ptr, output_size); } else if (m->activation == AC_MODE_SIGMOID) { hipLaunchKernelGGL(( sigmoid_backward), dim3(GET_BLOCKS(output_size)), dim3(CUDA_NUM_THREADS), 0, stream, output_grad_ptr, output_ptr, output_size); } else { // TODO: only support relu and sigmoid for now assert(m->activation == AC_MODE_NONE); } // Compute weight gradiant // NOTE: we use alpha=1 for kernel_grad to accumulate gradients checkCUDA(hipblasSgemm(m->handle.blas, HIPBLAS_OP_N, HIPBLAS_OP_T, in_dim, out_dim, batch_size, &alpha, input_ptr, in_dim, output_grad_ptr, out_dim, &alpha, kernel_grad_ptr, in_dim)); // Compute bias gradiant // NOTE: we use alpha=1 for bias_grad to accumulate gradients // use_bias = True if (bias_grad_ptr != NULL) { checkCUDA(hipblasSgemv(m->handle.blas, HIPBLAS_OP_N, out_dim, batch_size, &alpha, output_grad_ptr, out_dim, m->one_ptr, 1, &alpha, bias_grad_ptr, 1)); } // Compute data gradiant // NOTE: we use alpha=1 for input_grad to accumulate gradients if (input_grad_ptr != NULL) { checkCUDA(hipblasSgemm(m->handle.blas, HIPBLAS_OP_N, HIPBLAS_OP_N, in_dim, batch_size, out_dim, &alpha, kernel_ptr, in_dim, output_grad_ptr, out_dim, &alpha, input_grad_ptr, in_dim)); } } void Linear::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { Domain in_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); switch (in_domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ return backward_task_with_dim<DIM>(task, regions, ctx, runtime); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } /* regions[0](I): input regions[1](I/O): replica_grad or input_grad regions[2](I): output regions[3](I/O): output_grad regions[4](I): filter regions[5](I/O): filter_grad regions[6](I/O): bias_grad */ template<int NDIM> __host__ void Linear::backward_task_with_dim(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { //Linear* linear = (Linear*) task->args; const LinearMeta* m = *((LinearMeta**) task->local_args); assert(regions.size() == (5 + int(m->trainableInputs[0]) + int(m->use_bias))); 
assert(task->regions.size() == (5 + int(m->trainableInputs[0]) + int(m->use_bias))); float* input_grad = NULL; size_t rid = 0; TensorAccessorR<float, NDIM> acc_input( regions[rid], task->regions[rid], FID_DATA, ctx, runtime); rid++; if (m->trainableInputs[0]) { Domain domain = runtime->get_index_space_domain( ctx, task->regions[rid].region.get_index_space()); if (domain.get_dim() == NDIM+1) { assert(domain.get_volume() == acc_input.rect.volume()); input_grad = helperGetTensorPointerWO<float>( regions[rid], task->regions[rid], FID_DATA, ctx, runtime); } else { TensorAccessorW<float, NDIM> acc_replica_grad( regions[rid], task->regions[rid], FID_DATA, ctx, runtime, true/*readOutput*/); assert(acc_replica_grad.rect.volume() == acc_input.rect.volume()); input_grad = acc_replica_grad.ptr; } rid++; } TensorAccessorR<float, NDIM> acc_output( regions[rid], task->regions[rid], FID_DATA, ctx, runtime); rid++; TensorAccessorW<float, NDIM> acc_output_grad( regions[rid], task->regions[rid], FID_DATA, ctx, runtime, true/*readOutput*/); rid++; TensorAccessorR<float, 2> acc_kernel( regions[rid], task->regions[rid], FID_DATA, ctx, runtime); rid++; TensorAccessorW<float, 2> acc_kernel_grad( regions[rid], task->regions[rid], FID_DATA, ctx, runtime, true/*readOutput*/); rid++; // make sure the sizes match int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int batch_size = acc_output.rect.volume() / out_dim; assert(acc_output.rect.volume() == out_dim * batch_size); assert(acc_output_grad.rect.volume() == out_dim * batch_size); assert(acc_kernel.rect.volume() == in_dim * out_dim); assert(acc_kernel_grad.rect.volume() == in_dim * out_dim); float* acc_bias_grad_ptr = NULL; if (m->use_bias) { TensorAccessorW<float, 1> acc_bias_grad( regions[rid], task->regions[rid], FID_DATA, ctx, runtime, true/*readOutput*/); rid++; assert(acc_bias_grad.rect.volume() == out_dim); acc_bias_grad_ptr = static_cast<float*>(acc_bias_grad.ptr); } assert(rid == regions.size()); hipStream_t stream; checkCUDA(get_legion_stream(&stream)); hipEvent_t t_start, t_end; if (m->profiling) { hipEventCreate(&t_start); hipEventCreate(&t_end); hipEventRecord(t_start, stream); } Linear::backward_kernel(m, acc_input.ptr, input_grad, acc_output.ptr, acc_output_grad.ptr, acc_kernel.ptr, acc_kernel_grad.ptr, acc_bias_grad_ptr, in_dim, out_dim, batch_size, stream); if (m->profiling) { hipEventRecord(t_end, stream); checkCUDA(hipEventSynchronize(t_end)); float elapsed = 0; checkCUDA(hipEventElapsedTime(&elapsed, t_start, t_end)); hipEventDestroy(t_start); hipEventDestroy(t_end); printf("Linear backward time = %.2lfms\n", elapsed); //print_tensor<NDIM, float>(acc_output_grad.ptr, acc_output_grad.rect, "[Linear:backward:output_grad]"); //print_tensor<2, float>(acc_kernel_grad.ptr, acc_kernel_grad.rect, "[Linear:backward:kernel_grad]"); //print_tensor<1, float>(acc_bias_grad.ptr, acc_bias_grad.rect, "[Linear:backward:bias_grad]"); //print_tensor<2, float>(input_grad, acc_input.rect, "[Linear:backward:input_grad]"); } } void Linear::backward2_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { Domain in_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); switch (in_domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ return backward2_task_with_dim<DIM>(task, regions, ctx, runtime); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } /* regions[0](I/O): input_grad regions[1](I): 
replicas */ template<int NDIM> __host__ void Linear::backward2_task_with_dim(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { //const LinearMeta* m = *((LinearMeta**) task->local_args); TensorAccessorW<float, NDIM> acc_input_grad( regions[0], task->regions[0], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorR<float, 3> acc_replica( regions[1], task->regions[1], FID_DATA, ctx, runtime); assert(acc_input_grad.rect.hi[0] == acc_replica.rect.hi[0]); assert(acc_input_grad.rect.lo[0] == acc_replica.rect.lo[0]); assert(acc_input_grad.rect.hi[1] == acc_replica.rect.hi[1]); assert(acc_input_grad.rect.lo[1] == acc_replica.rect.lo[1]); hipStream_t stream; checkCUDA(get_legion_stream(&stream)); int num_replica = acc_replica.rect.hi[NDIM] - acc_replica.rect.lo[NDIM] + 1; const float *replica_ptr = acc_replica.ptr; for (int i = 0; i < num_replica; i++) { size_t num_elements = acc_input_grad.rect.volume(); hipLaunchKernelGGL(( apply_add_with_scale), dim3(GET_BLOCKS(num_elements)), dim3(CUDA_NUM_THREADS), 0, stream, acc_input_grad.ptr, replica_ptr, num_elements, 1.0f); replica_ptr += acc_input_grad.rect.volume(); } } void Linear::backward(const FFModel& ff) { int dim = outputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ return backward_with_dim<DIM>(ff); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } template<int NDIM> void Linear::backward_with_dim(const FFModel& ff) { Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; { ArgumentMap argmap; Rect<NDIM> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<NDIM> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(LINEAR_BWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); int rid = 0; // regions[0](I): input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(rid++, FID_DATA); // regions[1](I/O): replica_grad if (trainableInputs[0]) { if (replica.region_grad != LogicalRegion::NO_REGION) { launcher.add_region_requirement( RegionRequirement(replica.part_grad, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, replica.region_grad)); launcher.add_field(rid++, FID_DATA); } else { launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(rid++, FID_DATA); } } // regions[2](I): output launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(rid++, FID_DATA); // regions[3](I/O): output_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(rid++, FID_DATA); // regions[4](I): filter launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(rid++, FID_DATA); // regions[5](I/O): filter_grad launcher.add_region_requirement( RegionRequirement(weights[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[0].region_grad)); launcher.add_field(rid++, FID_DATA); if (use_bias) { // regions[6](I/O): bias_grad launcher.add_region_requirement( 
RegionRequirement(weights[1].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[1].region_grad)); launcher.add_field(rid++, FID_DATA); } runtime->execute_index_space(ctx, launcher); } if (replica.region_grad != LogicalRegion::NO_REGION && trainableInputs[0]) { // We aggregate parameters from replica tensor to input tensor // Note we use input's task_is to reduce extra data transfers ArgumentMap argmap; Rect<2> input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part_grad.get_index_partition()); IndexSpaceT<2> input_task_is = IndexSpaceT<2>(ff.get_task_is(input_rect)); // If we are the first layer, our input uses data parallel and does // not have an owner std::string input_pcname = ""; if (inputs[0].owner_op != NULL) input_pcname = std::string(inputs[0].owner_op->name); IndexLauncher launcher(LINEAR_BWD2_TASK_ID, input_task_is, TaskArgument(this, sizeof(Linear)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(input_pcname)); launcher.add_region_requirement( RegionRequirement(inputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(0, FID_DATA); // Note that replica.part save's a partition of replica.region_grad launcher.add_region_requirement( RegionRequirement(replica.part, 0/*partition id*/, READ_ONLY, EXCLUSIVE, replica.region_grad)); launcher.add_field(1, FID_DATA); runtime->execute_index_space(ctx, launcher); } } /* __host__ Parameter* Linear::get_parameter(int index) { if (index == 0) { return &weights[0]; } else if (index == 1){ return &weights[1]; } else { assert(0); return NULL; } } */ __host__ void Linear::print_layer(const FFModel& ff) { printf("linear layer\n"); Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; RegionRequirement kernel_req(weights[0].region, READ_WRITE, EXCLUSIVE, weights[0].region); kernel_req.add_field(FID_DATA); InlineLauncher kernel_launcher(kernel_req); PhysicalRegion kernel_region = runtime->map_region(ctx, kernel_launcher); kernel_region.wait_until_valid(); RegionRequirement bias_req(weights[1].region, READ_WRITE, EXCLUSIVE, weights[1].region); bias_req.add_field(FID_DATA); InlineLauncher bias_launcher(bias_req); PhysicalRegion bias_region = runtime->map_region(ctx, bias_launcher); bias_region.wait_until_valid(); TensorAccessorW<float, 2> acc_kernel(kernel_region, kernel_req, FID_DATA, ctx, runtime, true); TensorAccessorW<float, 1> acc_bias(bias_region, bias_req, FID_DATA, ctx, runtime, true); const float *kernel_ptr = acc_kernel.ptr; const float *bias_ptr = acc_bias.ptr; size_t kernel_size = acc_kernel.rect.volume(); int kernel_dim1 = acc_kernel.rect.hi[0] - acc_kernel.rect.lo[0] + 1; int kernel_dim2 = acc_kernel.rect.hi[1] - acc_kernel.rect.lo[1] + 1; size_t bias_size = acc_bias.rect.volume(); printf("kernel, %p, %zu, [%d, %d]\n", kernel_ptr, kernel_size, kernel_dim1, kernel_dim2); printf("bias, %p, %zu\n", bias_ptr, bias_size); for (int i = 0; i < bias_size; i++) { printf("%f ", bias_ptr[i]); } printf("\n"); for (int i = 0; i < kernel_size; i++) { printf("%f ", kernel_ptr[i]); } printf("\n"); runtime->unmap_region(ctx, kernel_region); runtime->unmap_region(ctx, bias_region); } LinearMeta::LinearMeta(FFHandler handler, int batch_size) : OpMeta(handler) { // Allocate an all-one's vector float* dram_one_ptr = (float *) malloc(sizeof(float) * batch_size); for (int i = 0; i < batch_size; i++) dram_one_ptr[i] = 1.0f; float* fb_one_ptr; checkCUDA(hipMalloc(&fb_one_ptr, sizeof(float) * batch_size)); 
checkCUDA(hipMemcpy(fb_one_ptr, dram_one_ptr, sizeof(float) * batch_size, hipMemcpyHostToDevice)); one_ptr = (const float*) fb_one_ptr; // Allocate descriptors checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc)); checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor)); } bool Linear::measure_operator_cost(Simulator* sim, const ParallelConfig& pc, CostMetrics& cost_metrics) { Tensor sub_output, sub_input; if (!outputs[0].get_output_sub_tensor(pc, sub_output, OP_LINEAR)) return false; if (!inputs[0].get_input_sub_tensor(pc, sub_input, OP_LINEAR)) return false; int input_c = sub_input.adim[0]; int input_n = sub_input.get_volume() / input_c; int output_c = sub_output.adim[0]; int output_n = sub_output.get_volume() / output_c; LinearMeta* m = sim->linear_meta; m->activation = activation; if (use_cudnn_activation(m->activation)) { cudnnActivationMode_t mode; switch (activation) { case AC_MODE_RELU: mode = CUDNN_ACTIVATION_RELU; break; case AC_MODE_SIGMOID: mode = CUDNN_ACTIVATION_SIGMOID; break; default: // Unsupported activation mode assert(false); } checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, mode, CUDNN_PROPAGATE_NAN, 0.0)); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, output_n, output_c, 1, 1)); } // allocate tensors in simulator sim->free_all(); float* input_ptr = (float*)sim->allocate(sub_input.get_volume(), DT_FLOAT); assert(input_ptr != NULL); float *output_ptr = (float*)sim->allocate(sub_output.get_volume(), DT_FLOAT); assert(output_ptr != NULL); float* kernel_ptr = (float*)sim->allocate((size_t)output_c * input_c, DT_FLOAT); assert(kernel_ptr != NULL); float* bias_ptr = (float*)sim->allocate(output_c, DT_FLOAT); assert(bias_ptr != NULL); hipStream_t stream; checkCUDA(get_legion_stream(&stream)); std::function<void()> forward, backward; forward = [&] { forward_kernel(m, input_ptr, output_ptr, kernel_ptr, bias_ptr, input_c, output_c, input_n, stream); }; if (sim->computationMode == COMP_MODE_TRAINING) { float* input_grad_ptr = NULL; if (trainableInputs[0]) { input_grad_ptr = (float*)sim->allocate(sub_input.get_volume(), DT_FLOAT); } float *output_grad_ptr = (float*)sim->allocate(sub_output.get_volume(), DT_FLOAT); float* kernel_grad_ptr = (float*)sim->allocate((size_t)output_c * input_c, DT_FLOAT); float* bias_grad_ptr = (float*)sim->allocate(output_c, DT_FLOAT); assert(bias_grad_ptr != NULL); backward = [&] { backward_kernel(m, input_ptr, input_grad_ptr, output_ptr, output_grad_ptr, kernel_ptr, kernel_grad_ptr, bias_grad_ptr, input_c, output_c, input_n, stream); }; } inner_measure_operator_cost(sim, forward, backward, cost_metrics); if (sim->computationMode == COMP_MODE_TRAINING) { printf("[Measure Linear] name(%s) in(%d %d) out(%d %d) forward_time(%.4lf) backward_time(%.4lf)\n", name, input_n, input_c, output_n, output_c, cost_metrics.forward_time, cost_metrics.backward_time); } else { printf("[Measure Linear] name(%s) in(%d %d) out(%d %d) forward_time(%.4lf)\n", name, input_n, input_c, output_n, output_c, cost_metrics.forward_time); } return true; } ParallelConfig Linear::get_random_parallel_config(const FFModel& ff) const { if (!ff.config.enable_parameter_parallel) return Op::get_random_parallel_config(ff); std::vector<int> batch_candidates; std::vector<int> channel_candidates; int batch = outputs[0].adim[outputs[0].numDim-1]; int channel = outputs[0].adim[0]; int total_devices = ff.config.workersPerNode * ff.config.numNodes; for (int i = 1; i <= ff.config.workersPerNode; i++) if (channel % i == 0) for (int j = 1; i * j <= 
total_devices; j++) if (batch % j == 0) { batch_candidates.push_back(j); channel_candidates.push_back(i); } assert(batch_candidates.size() > 0); int idx = std::rand() % batch_candidates.size(); int num_par_c = channel_candidates[idx]; int num_par_b = batch_candidates[idx]; ParallelConfig pc; pc.device_type = ParallelConfig::GPU; pc.nDims = outputs[0].numDim; pc.dim[0] = num_par_c; pc.dim[pc.nDims-1] = num_par_b; for (int i = 1; i < pc.nDims - 1; i++) pc.dim[i] = 1; int start_idx = std::rand() % (total_devices - num_par_c * num_par_b + 1); start_idx = start_idx - start_idx % num_par_c; for (int i = 0; i < num_par_c * num_par_b; i++) pc.device_ids[i] = start_idx + i; return pc; } bool Linear::is_valid_parallel_config(const FFModel& ff, const ParallelConfig& pc) const { if (!ff.config.enable_parameter_parallel) return Op::is_valid_parallel_config(ff, pc); // Support data and parameter parallel if (pc.nDims != outputs[0].numDim) return false; for (int i = 1; i < pc.nDims-1; i++) if (pc.dim[i] != 1) return false; return true; }
575f71264635bda5f55973300a39d096ddc440eb.cu
/* Copyright 2020 Stanford, NVIDIA, Facebook * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "model.h" #include "cuda_helper.h" Tensor FFModel::dense(const Tensor& input, int outDim, ActiMode activation, bool use_bias, const Op* shared_op, Initializer* kernel_initializer, Initializer* bias_initializer, const char *name) { if (kernel_initializer == NULL) { int seed = std::rand(); kernel_initializer = new GlorotUniform(seed); } if (bias_initializer == NULL) { bias_initializer = new ZeroInitializer(); } Linear *li = new Linear(*this, input, outDim, activation, use_bias, shared_op, kernel_initializer, bias_initializer, name); layers.push_back(li); return li->outputs[0]; } Linear::Linear(FFModel& model, const Tensor& _input, int out_dim, ActiMode _activation, bool _use_bias, const Op* shared_op, Initializer* _kernel_initializer, Initializer* _bias_initializer, const char* name) : Op(model, OP_LINEAR, shared_op, name, _input), in_channels(_input.adim[0]), out_channels(out_dim), activation(_activation), use_bias(_use_bias), kernel_initializer(_kernel_initializer), bias_initializer(_bias_initializer) { numInputs = 1; numOutputs = 1; outputs[0].numDim = _input.numDim; for (int i = 1; i < outputs[0].numDim; i++) outputs[0].adim[i] = _input.adim[i]; outputs[0].adim[0] = out_dim; weights[0].numDim = 2; weights[0].adim[0] = in_channels; weights[0].adim[1] = out_channels; numWeights = 1; if (use_bias) { weights[1].numDim = 1; weights[1].adim[0] = out_channels; numWeights = 2; } } void Linear::create_weights(FFModel& model) { int dim = inputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ { \ create_weights_with_dim<DIM>(model); \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: { // Unsupported dim assert(false); } } } template<int NDIM> void Linear::create_weights_with_dim(FFModel& model) { // Retrive the task indexspace for the op std::string pcname = name; task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, pcname)); #ifdef FF_USE_NCCL ParameterSyncType comm_type = ParameterSyncType::NCCL; #else ParameterSyncType comm_type = ParameterSyncType::PS; #endif // Create kernel tensor { const int dims[2] = {out_channels, in_channels}; weights[0] = model.create_linear_weight<2, NDIM>(this, dims, DT_FLOAT, kernel_initializer, true/*create_grad*/, comm_type); } // Create bias tensor if (use_bias) { const int dims[1] = {out_channels}; weights[1] = model.create_linear_weight<1, NDIM>(this, dims, DT_FLOAT, bias_initializer, true/*create_grad*/, comm_type); assert(numWeights == 2); } else { assert(numWeights == 1); } } void Linear::create_output_and_partition(FFModel& model) { int dim = inputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ { \ create_output_and_partition_with_dim<DIM>(model); \ break; \ } LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: { // Unsupported dim for ElementWiseBinary operator assert(false); } } } template<int NDIM> void Linear::create_output_and_partition_with_dim(FFModel& model) { // Retrive the task indexspace for the op std::string 
pcname = name; task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(NDIM, pcname)); Context ctx = model.config.lg_ctx; Runtime* runtime = model.config.lg_hlr; Rect<NDIM> part_rect = runtime->get_index_space_domain(ctx, task_is); int num_par_c = part_rect.hi[0] - part_rect.lo[0] + 1; int num_par_n = part_rect.hi[NDIM-1] - part_rect.lo[NDIM-1] + 1; int in_dim = inputs[0].adim[0]; assert(in_dim == in_channels); int batch_size = inputs[0].adim[NDIM-1]; { int dims[NDIM]; for (int i = 0; i < NDIM; i++) dims[i] = outputs[0].adim[NDIM-1-i]; outputs[0] = model.create_tensor<NDIM>(dims, DT_FLOAT, this); outputs[0].owner_op = this; outputs[0].owner_idx = 0; } // Compute partition bound for input Rect<NDIM> input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part.get_index_partition()); // Create replica tensor if (num_par_c > 1) { { Rect<NDIM> extent; for (int i = 1; i < NDIM; i++) { extent.lo[i] = 0; assert(outputs[0].adim[i] % (part_rect.hi[i] - part_rect.lo[i] + 1) == 0); extent.hi[i] = outputs[0].adim[i] / (part_rect.hi[i] - part_rect.lo[i] + 1) - 1; } extent.lo[0] = 0; extent.hi[0] = in_dim-1; Transform<NDIM, NDIM> transform; for (int i = 0; i < NDIM; i++) for (int j = 0; j < NDIM; j++) transform[i][j] = 0; for (int i = 1; i < NDIM; i++) transform[i][i] = extent.hi[i] + 1; IndexPartition ip = runtime->create_partition_by_restriction( ctx, inputs[0].region.get_index_space(), task_is, transform, extent); assert(runtime->is_index_partition_complete(ctx, ip)); input_lps[0] = runtime->get_logical_partition( ctx, inputs[0].region, ip); } if (model.config.computationMode == COMP_MODE_TRAINING) { if (NDIM==1) { const int dims[2] = {num_par_c, in_dim}; replica = model.create_linear_replica<2>(dims, (IndexSpaceT<NDIM>)task_is, DT_FLOAT); } else if (NDIM==2) { const int dims[3] = {num_par_c, batch_size, in_dim}; replica = model.create_linear_replica<3>(dims, (IndexSpaceT<NDIM>)task_is, DT_FLOAT); } else if (NDIM==3) { const int dims[4] = {num_par_c, batch_size, inputs[0].adim[1], in_dim}; replica = model.create_linear_replica<4>(dims, (IndexSpaceT<NDIM>)task_is, DT_FLOAT); } else { assert(false && "Unsupported dimension for parallelizing Linear operators" " using the parameter dim."); } // Backward use the same ip as inputs[0] input_grad_lps[0] = inputs[0].part_grad; { IndexSpaceT<NDIM> input_task_is = IndexSpaceT<NDIM>(model.get_or_create_task_is(input_rect)); Rect<NDIM+1> extent; for (int i = 0; i < NDIM; i++) { extent.lo[i] = 0; assert(inputs[0].adim[i] % (input_rect.hi[i] - input_rect.lo[i] + 1) == 0); extent.hi[i] = inputs[0].adim[i] / (input_rect.hi[i] - input_rect.lo[i] + 1) - 1; } extent.lo[NDIM] = 0; extent.hi[NDIM] = num_par_c - 1; Transform<NDIM+1, NDIM> transform; for (int i = 0; i < NDIM+1; i++) for (int j = 0; j < NDIM; j++) transform[i][j] = 0; for (int i = 0; i < NDIM; i++) transform[i][i] = inputs[0].adim[i] / (input_rect.hi[i] - input_rect.lo[i] + 1); IndexPartition ip = runtime->create_partition_by_restriction( ctx, replica.region_grad.get_index_space(), input_task_is, transform, extent); assert(runtime->is_index_partition_disjoint(ctx, ip)); assert(runtime->is_index_partition_complete(ctx, ip)); // Note we use replica.part to save how to partition the replica // to compute input_grad_lps replica.part = runtime->get_logical_partition( ctx, replica.region_grad, ip); } } // if COMP_MODE_TRAINING } else { // when num_par_c == 1 if (input_rect == part_rect) { input_lps[0] = inputs[0].part; if (model.config.computationMode == COMP_MODE_TRAINING) { input_grad_lps[0] = 
inputs[0].part_grad; } } else { Rect<NDIM> extent; for (int i = 0; i < NDIM; i++) { extent.lo[i] = 0; assert(inputs[0].adim[i] % (part_rect.hi[i] - part_rect.lo[i] + 1) == 0); extent.hi[i] = inputs[0].adim[i] / (part_rect.hi[i] - part_rect.lo[i] + 1) - 1; } Transform<NDIM, NDIM> transform; for (int i = 0; i < NDIM; i++) for (int j = 0; j < NDIM; j++) { transform[i][j] = 0; if (i==j) transform[i][j] = extent.hi[i] + 1; } IndexPartition ip = runtime->create_partition_by_restriction( ctx, inputs[0].region.get_index_space(), task_is, transform, extent); assert(runtime->is_index_partition_disjoint(ctx, ip)); assert(runtime->is_index_partition_complete(ctx, ip)); input_lps[0] = runtime->get_logical_partition( ctx, inputs[0].region, ip); if (model.config.computationMode == COMP_MODE_TRAINING) { input_grad_lps[0] = runtime->get_logical_partition( ctx, inputs[0].region_grad, ip); } } } } /* regions[0](O): output regions[1](I): kernel regions[2](I): bias */ OpMeta* Linear::init_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime* runtime) { Domain out_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); switch (out_domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ return init_task_with_dim<DIM>(task, regions, ctx, runtime); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } return NULL; } bool Linear::use_cudnn_activation(ActiMode mode) { switch (mode) { case AC_MODE_RELU: case AC_MODE_SIGMOID: case AC_MODE_TANH: return true; } return false; } template<int NDIM> OpMeta* Linear::init_task_with_dim(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { assert(regions.size() == task->regions.size()); assert(regions.size() == 2 || regions.size() == 3); const Linear* linear = (Linear*) task->args; FFHandler handle = *((const FFHandler*) task->local_args); //TensorAccessorR<float, 2> acc_input( // regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, NDIM> acc_output( regions[0], task->regions[0], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 2> acc_kernel( regions[1], task->regions[1], FID_DATA, ctx, runtime); // TensorAccessorR<float, 1> acc_bias( // regions[3], task->regions[3], FID_DATA, ctx, runtime); //int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int in_dim = acc_kernel.rect.hi[0] - acc_kernel.rect.lo[0] + 1; int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int batch_size = acc_output.rect.volume() / out_dim; printf("init linear (input): in_dim(%d) out_dim(%d) batch_size(%d)\n", in_dim, out_dim, batch_size); LinearMeta* m = new LinearMeta(handle, batch_size); m->activation = linear->activation; m->use_bias = linear->use_bias; m->profiling = linear->profiling; m->trainableInputs[0] = linear->trainableInputs[0]; std::strcpy(m->op_name, linear->name); if (use_cudnn_activation(m->activation)) { cudnnActivationMode_t mode; switch (linear->activation) { case AC_MODE_RELU: mode = CUDNN_ACTIVATION_RELU; break; case AC_MODE_SIGMOID: mode = CUDNN_ACTIVATION_SIGMOID; break; default: // Unsupported activation mode assert(false); } checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, mode, CUDNN_PROPAGATE_NAN, 0.0)); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, batch_size, out_dim, 1, 1)); } return m; } void Linear::init(const FFModel& ff) { int dim = outputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ return init_with_dim<DIM>(ff); 
LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } template<int NDIM> void Linear::init_with_dim(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<NDIM> rect = runtime->get_index_space_domain(ctx, task_is); ParallelConfig pc; std::string pcname = name; ff.config.find_parallel_config(NDIM, pcname, pc); int idx = 0; for (PointInRectIterator<NDIM> it(rect); it(); it++) { FFHandler handle = ff.handlers[pc.device_ids[idx++]]; #ifdef FF_USE_NCCL handle.ncclComm = pc.nccl_comms[idx-1]; #endif argmap.set_point(*it, TaskArgument(&handle, sizeof(FFHandler))); } IndexLauncher launcher(LINEAR_INIT_TASK_ID, task_is, TaskArgument(this, sizeof(Linear)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); //launcher.add_region_requirement( // RegionRequirement(input_lps[0], 0/*projection id*/, // READ_ONLY, EXCLUSIVE, inputs[0].region)); //launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(1, FID_DATA); // launcher.add_region_requirement( // RegionRequirement(weights[1].part, 0/*projection id*/, // READ_ONLY, EXCLUSIVE, weights[1].region)); // launcher.add_field(3, FID_DATA); if (ff.config.computationMode == COMP_MODE_TRAINING) { // Add inputs[0].region_grad to avoid Legion warning //launcher.add_region_requirement( // RegionRequirement(input_grad_lps[0], 0/*projection id*/, // WRITE_ONLY, EXCLUSIVE, inputs[0].region_grad)); //launcher.add_field(2, FID_DATA); } FutureMap fm = runtime->execute_index_space(ctx, launcher); fm.wait_all_results(); idx = 0; for (PointInRectIterator<NDIM> it(rect); it(); it++) { meta[idx++] = fm.get_result<OpMeta*>(*it); } } /*static*/ void Linear::forward_kernel(const LinearMeta* m, const float* input_ptr, float* output_ptr, const float* kernel_ptr, const float* bias_ptr, int in_dim, int out_dim, int batch_size, cudaStream_t stream) { checkCUDA(cublasSetStream(m->handle.blas, stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); float alpha = 1.0f, beta = 0.0f; checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_T, CUBLAS_OP_N, out_dim, batch_size, in_dim, &alpha, kernel_ptr, in_dim, input_ptr, in_dim, &beta, output_ptr, out_dim)); // use_bias = True if (bias_ptr != NULL) { checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_T, CUBLAS_OP_N, out_dim, batch_size, 1, &alpha, bias_ptr, 1, m->one_ptr, 1, &alpha, output_ptr, out_dim)); } if (use_cudnn_activation(m->activation)) { checkCUDNN(cudnnActivationForward(m->handle.dnn, m->actiDesc, &alpha, m->outputTensor, output_ptr, &beta, m->outputTensor, output_ptr)); } else if (m->activation == AC_MODE_GELU) { size_t elements = (size_t)out_dim * (size_t) batch_size; constexpr float B = 0.7978845608028654f; // sqrt(2.0/M_PI) constexpr float C = 0.035677408136300125f; // 0.044715 * sqrt(2.0/M_PI) gelu_forward_kernel<<<GET_BLOCKS(elements), CUDA_NUM_THREADS>>>( elements, B, C, output_ptr); } else if (m->activation == AC_MODE_NONE) { // Do nothing } else { assert(false && "Unsupported activation for Linear"); } } __host__ void Linear::forward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { Domain in_domain = runtime->get_index_space_domain( ctx, 
task->regions[0].region.get_index_space()); switch (in_domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ return forward_task_with_dim<DIM>(task, regions, ctx, runtime); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } /* regions[0](I); input regions[1](O): output regions[2](I): kernel regions[3](I): bias */ template<int NDIM> void Linear::forward_task_with_dim(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { //Linear* linear = (Linear*) task->args; const LinearMeta* m = *((LinearMeta**) task->local_args); assert(regions.size() == (3 + int(m->use_bias))); assert(task->regions.size() == (3 + int(m->use_bias))); TensorAccessorR<float, NDIM> acc_input( regions[0], task->regions[0], FID_DATA, ctx, runtime); TensorAccessorW<float, NDIM> acc_output( regions[1], task->regions[1], FID_DATA, ctx, runtime, false/*readOutput*/); TensorAccessorR<float, 2> acc_kernel( regions[2], task->regions[2], FID_DATA, ctx, runtime); int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int batch_size = acc_output.rect.volume() / out_dim; assert(acc_output.rect.volume() == out_dim * batch_size); assert(acc_input.rect.volume() == in_dim * batch_size); assert(acc_kernel.rect.volume() == in_dim * out_dim); const float* acc_bias_ptr = NULL; if (m->use_bias) { TensorAccessorR<float, 1> acc_bias( regions[3], task->regions[3], FID_DATA, ctx, runtime); assert(acc_bias.rect.volume() == out_dim); acc_bias_ptr = acc_bias.ptr; } cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); cudaEvent_t t_start, t_end; if (m->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start, stream); } Linear::forward_kernel(m, acc_input.ptr, acc_output.ptr, acc_kernel.ptr, acc_bias_ptr, in_dim, out_dim, batch_size, stream); if (m->profiling) { cudaEventRecord(t_end, stream); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("%s [Linear] forward time = %.2lfms\n", m->op_name, elapsed); //print_tensor<NDIM, float>(acc_input.ptr, acc_input.rect, "[Linear:forward:input]"); //print_tensor<2, float>(acc_kernel.ptr, acc_kernel.rect, "[Linear:forward:kernel]"); //print_tensor<1, float>(acc_bias.ptr, acc_bias.rect, "[Linear:forward:bias]"); //print_tensor<NDIM, float>(acc_output.ptr, acc_output.rect, "[Linear:forward:output]"); } } void Linear::forward(const FFModel& ff) { int dim = outputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ return forward_with_dim<DIM>(ff); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } template<int NDIM> void Linear::forward_with_dim(const FFModel& ff) { ArgumentMap argmap; Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; Rect<NDIM> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<NDIM> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(LINEAR_FWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(0, FID_DATA); launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, 
WRITE_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(1, FID_DATA); launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(2, FID_DATA); if (use_bias) { launcher.add_region_requirement( RegionRequirement(weights[1].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[1].region)); launcher.add_field(3, FID_DATA); } runtime->execute_index_space(ctx, launcher); } __global__ void sigmoid_backward(float *grad_ptr, const float *output, int n) { CUDA_KERNEL_LOOP(i, n) { grad_ptr[i] = grad_ptr[i] * output[i] * (1 - output[i]); } } /*static*/ void Linear::backward_kernel(const LinearMeta* m, const float* input_ptr, float* input_grad_ptr, const float* output_ptr, float* output_grad_ptr, const float* kernel_ptr, float* kernel_grad_ptr, float* bias_grad_ptr, int in_dim, int out_dim, int batch_size, cudaStream_t stream) { checkCUDA(cublasSetStream(m->handle.blas, stream)); checkCUDNN(cudnnSetStream(m->handle.dnn, stream)); float alpha = 1.0f; int output_size = out_dim * batch_size; if (m->activation == AC_MODE_RELU) { reluBackward<<<GET_BLOCKS(output_size), CUDA_NUM_THREADS, 0, stream>>>( output_grad_ptr, output_ptr, output_size); } else if (m->activation == AC_MODE_SIGMOID) { sigmoid_backward<<<GET_BLOCKS(output_size), CUDA_NUM_THREADS, 0, stream>>>( output_grad_ptr, output_ptr, output_size); } else { // TODO: only support relu and sigmoid for now assert(m->activation == AC_MODE_NONE); } // Compute weight gradiant // NOTE: we use alpha=1 for kernel_grad to accumulate gradients checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_N, CUBLAS_OP_T, in_dim, out_dim, batch_size, &alpha, input_ptr, in_dim, output_grad_ptr, out_dim, &alpha, kernel_grad_ptr, in_dim)); // Compute bias gradiant // NOTE: we use alpha=1 for bias_grad to accumulate gradients // use_bias = True if (bias_grad_ptr != NULL) { checkCUDA(cublasSgemv(m->handle.blas, CUBLAS_OP_N, out_dim, batch_size, &alpha, output_grad_ptr, out_dim, m->one_ptr, 1, &alpha, bias_grad_ptr, 1)); } // Compute data gradiant // NOTE: we use alpha=1 for input_grad to accumulate gradients if (input_grad_ptr != NULL) { checkCUDA(cublasSgemm(m->handle.blas, CUBLAS_OP_N, CUBLAS_OP_N, in_dim, batch_size, out_dim, &alpha, kernel_ptr, in_dim, output_grad_ptr, out_dim, &alpha, input_grad_ptr, in_dim)); } } void Linear::backward_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { Domain in_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); switch (in_domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ return backward_task_with_dim<DIM>(task, regions, ctx, runtime); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } /* regions[0](I): input regions[1](I/O): replica_grad or input_grad regions[2](I): output regions[3](I/O): output_grad regions[4](I): filter regions[5](I/O): filter_grad regions[6](I/O): bias_grad */ template<int NDIM> __host__ void Linear::backward_task_with_dim(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { //Linear* linear = (Linear*) task->args; const LinearMeta* m = *((LinearMeta**) task->local_args); assert(regions.size() == (5 + int(m->trainableInputs[0]) + int(m->use_bias))); assert(task->regions.size() == (5 + int(m->trainableInputs[0]) + int(m->use_bias))); float* input_grad = NULL; size_t rid = 0; TensorAccessorR<float, NDIM> acc_input( regions[rid], task->regions[rid], FID_DATA, ctx, runtime); 
rid++; if (m->trainableInputs[0]) { Domain domain = runtime->get_index_space_domain( ctx, task->regions[rid].region.get_index_space()); if (domain.get_dim() == NDIM+1) { assert(domain.get_volume() == acc_input.rect.volume()); input_grad = helperGetTensorPointerWO<float>( regions[rid], task->regions[rid], FID_DATA, ctx, runtime); } else { TensorAccessorW<float, NDIM> acc_replica_grad( regions[rid], task->regions[rid], FID_DATA, ctx, runtime, true/*readOutput*/); assert(acc_replica_grad.rect.volume() == acc_input.rect.volume()); input_grad = acc_replica_grad.ptr; } rid++; } TensorAccessorR<float, NDIM> acc_output( regions[rid], task->regions[rid], FID_DATA, ctx, runtime); rid++; TensorAccessorW<float, NDIM> acc_output_grad( regions[rid], task->regions[rid], FID_DATA, ctx, runtime, true/*readOutput*/); rid++; TensorAccessorR<float, 2> acc_kernel( regions[rid], task->regions[rid], FID_DATA, ctx, runtime); rid++; TensorAccessorW<float, 2> acc_kernel_grad( regions[rid], task->regions[rid], FID_DATA, ctx, runtime, true/*readOutput*/); rid++; // make sure the sizes match int in_dim = acc_input.rect.hi[0] - acc_input.rect.lo[0] + 1; int out_dim = acc_output.rect.hi[0] - acc_output.rect.lo[0] + 1; int batch_size = acc_output.rect.volume() / out_dim; assert(acc_output.rect.volume() == out_dim * batch_size); assert(acc_output_grad.rect.volume() == out_dim * batch_size); assert(acc_kernel.rect.volume() == in_dim * out_dim); assert(acc_kernel_grad.rect.volume() == in_dim * out_dim); float* acc_bias_grad_ptr = NULL; if (m->use_bias) { TensorAccessorW<float, 1> acc_bias_grad( regions[rid], task->regions[rid], FID_DATA, ctx, runtime, true/*readOutput*/); rid++; assert(acc_bias_grad.rect.volume() == out_dim); acc_bias_grad_ptr = static_cast<float*>(acc_bias_grad.ptr); } assert(rid == regions.size()); cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); cudaEvent_t t_start, t_end; if (m->profiling) { cudaEventCreate(&t_start); cudaEventCreate(&t_end); cudaEventRecord(t_start, stream); } Linear::backward_kernel(m, acc_input.ptr, input_grad, acc_output.ptr, acc_output_grad.ptr, acc_kernel.ptr, acc_kernel_grad.ptr, acc_bias_grad_ptr, in_dim, out_dim, batch_size, stream); if (m->profiling) { cudaEventRecord(t_end, stream); checkCUDA(cudaEventSynchronize(t_end)); float elapsed = 0; checkCUDA(cudaEventElapsedTime(&elapsed, t_start, t_end)); cudaEventDestroy(t_start); cudaEventDestroy(t_end); printf("Linear backward time = %.2lfms\n", elapsed); //print_tensor<NDIM, float>(acc_output_grad.ptr, acc_output_grad.rect, "[Linear:backward:output_grad]"); //print_tensor<2, float>(acc_kernel_grad.ptr, acc_kernel_grad.rect, "[Linear:backward:kernel_grad]"); //print_tensor<1, float>(acc_bias_grad.ptr, acc_bias_grad.rect, "[Linear:backward:bias_grad]"); //print_tensor<2, float>(input_grad, acc_input.rect, "[Linear:backward:input_grad]"); } } void Linear::backward2_task(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { Domain in_domain = runtime->get_index_space_domain( ctx, task->regions[0].region.get_index_space()); switch (in_domain.get_dim()) { #define DIMFUNC(DIM) \ case DIM: \ return backward2_task_with_dim<DIM>(task, regions, ctx, runtime); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } /* regions[0](I/O): input_grad regions[1](I): replicas */ template<int NDIM> __host__ void Linear::backward2_task_with_dim(const Task *task, const std::vector<PhysicalRegion> &regions, Context ctx, Runtime *runtime) { //const LinearMeta* m = *((LinearMeta**) 
task->local_args); TensorAccessorW<float, NDIM> acc_input_grad( regions[0], task->regions[0], FID_DATA, ctx, runtime, true/*readOutput*/); TensorAccessorR<float, 3> acc_replica( regions[1], task->regions[1], FID_DATA, ctx, runtime); assert(acc_input_grad.rect.hi[0] == acc_replica.rect.hi[0]); assert(acc_input_grad.rect.lo[0] == acc_replica.rect.lo[0]); assert(acc_input_grad.rect.hi[1] == acc_replica.rect.hi[1]); assert(acc_input_grad.rect.lo[1] == acc_replica.rect.lo[1]); cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); int num_replica = acc_replica.rect.hi[NDIM] - acc_replica.rect.lo[NDIM] + 1; const float *replica_ptr = acc_replica.ptr; for (int i = 0; i < num_replica; i++) { size_t num_elements = acc_input_grad.rect.volume(); apply_add_with_scale<<<GET_BLOCKS(num_elements), CUDA_NUM_THREADS, 0, stream>>>( acc_input_grad.ptr, replica_ptr, num_elements, 1.0f); replica_ptr += acc_input_grad.rect.volume(); } } void Linear::backward(const FFModel& ff) { int dim = outputs[0].numDim; switch (dim) { #define DIMFUNC(DIM) \ case DIM: \ return backward_with_dim<DIM>(ff); LEGION_FOREACH_N(DIMFUNC) #undef DIMFUNC default: assert(false); } } template<int NDIM> void Linear::backward_with_dim(const FFModel& ff) { Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; { ArgumentMap argmap; Rect<NDIM> rect = runtime->get_index_space_domain(ctx, task_is); int idx = 0; for (PointInRectIterator<NDIM> it(rect); it(); it++) { OpMeta* mp = meta[idx++]; argmap.set_point(*it, TaskArgument(&mp, sizeof(OpMeta*))); } IndexLauncher launcher(LINEAR_BWD_TASK_ID, task_is, TaskArgument(NULL, 0), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(std::string(name))); int rid = 0; // regions[0](I): input launcher.add_region_requirement( RegionRequirement(input_lps[0], 0/*projection id*/, READ_ONLY, EXCLUSIVE, inputs[0].region)); launcher.add_field(rid++, FID_DATA); // regions[1](I/O): replica_grad if (trainableInputs[0]) { if (replica.region_grad != LogicalRegion::NO_REGION) { launcher.add_region_requirement( RegionRequirement(replica.part_grad, 0/*projection id*/, WRITE_ONLY, EXCLUSIVE, replica.region_grad)); launcher.add_field(rid++, FID_DATA); } else { launcher.add_region_requirement( RegionRequirement(input_grad_lps[0], 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(rid++, FID_DATA); } } // regions[2](I): output launcher.add_region_requirement( RegionRequirement(outputs[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, outputs[0].region)); launcher.add_field(rid++, FID_DATA); // regions[3](I/O): output_grad launcher.add_region_requirement( RegionRequirement(outputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, outputs[0].region_grad)); launcher.add_field(rid++, FID_DATA); // regions[4](I): filter launcher.add_region_requirement( RegionRequirement(weights[0].part, 0/*projection id*/, READ_ONLY, EXCLUSIVE, weights[0].region)); launcher.add_field(rid++, FID_DATA); // regions[5](I/O): filter_grad launcher.add_region_requirement( RegionRequirement(weights[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[0].region_grad)); launcher.add_field(rid++, FID_DATA); if (use_bias) { // regions[6](I/O): bias_grad launcher.add_region_requirement( RegionRequirement(weights[1].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, weights[1].region_grad)); launcher.add_field(rid++, FID_DATA); } runtime->execute_index_space(ctx, launcher); } if (replica.region_grad != LogicalRegion::NO_REGION && trainableInputs[0]) 
{ // We aggregate parameters from replica tensor to input tensor // Note we use input's task_is to reduce extra data transfers ArgumentMap argmap; Rect<2> input_rect = runtime->get_index_partition_color_space( ctx, inputs[0].part_grad.get_index_partition()); IndexSpaceT<2> input_task_is = IndexSpaceT<2>(ff.get_task_is(input_rect)); // If we are the first layer, our input uses data parallel and does // not have an owner std::string input_pcname = ""; if (inputs[0].owner_op != NULL) input_pcname = std::string(inputs[0].owner_op->name); IndexLauncher launcher(LINEAR_BWD2_TASK_ID, input_task_is, TaskArgument(this, sizeof(Linear)), argmap, Predicate::TRUE_PRED, false/*must*/, 0/*mapper_id*/, FFConfig::get_hash_id(input_pcname)); launcher.add_region_requirement( RegionRequirement(inputs[0].part_grad, 0/*projection id*/, READ_WRITE, EXCLUSIVE, inputs[0].region_grad)); launcher.add_field(0, FID_DATA); // Note that replica.part save's a partition of replica.region_grad launcher.add_region_requirement( RegionRequirement(replica.part, 0/*partition id*/, READ_ONLY, EXCLUSIVE, replica.region_grad)); launcher.add_field(1, FID_DATA); runtime->execute_index_space(ctx, launcher); } } /* __host__ Parameter* Linear::get_parameter(int index) { if (index == 0) { return &weights[0]; } else if (index == 1){ return &weights[1]; } else { assert(0); return NULL; } } */ __host__ void Linear::print_layer(const FFModel& ff) { printf("linear layer\n"); Context ctx = ff.config.lg_ctx; Runtime* runtime = ff.config.lg_hlr; RegionRequirement kernel_req(weights[0].region, READ_WRITE, EXCLUSIVE, weights[0].region); kernel_req.add_field(FID_DATA); InlineLauncher kernel_launcher(kernel_req); PhysicalRegion kernel_region = runtime->map_region(ctx, kernel_launcher); kernel_region.wait_until_valid(); RegionRequirement bias_req(weights[1].region, READ_WRITE, EXCLUSIVE, weights[1].region); bias_req.add_field(FID_DATA); InlineLauncher bias_launcher(bias_req); PhysicalRegion bias_region = runtime->map_region(ctx, bias_launcher); bias_region.wait_until_valid(); TensorAccessorW<float, 2> acc_kernel(kernel_region, kernel_req, FID_DATA, ctx, runtime, true); TensorAccessorW<float, 1> acc_bias(bias_region, bias_req, FID_DATA, ctx, runtime, true); const float *kernel_ptr = acc_kernel.ptr; const float *bias_ptr = acc_bias.ptr; size_t kernel_size = acc_kernel.rect.volume(); int kernel_dim1 = acc_kernel.rect.hi[0] - acc_kernel.rect.lo[0] + 1; int kernel_dim2 = acc_kernel.rect.hi[1] - acc_kernel.rect.lo[1] + 1; size_t bias_size = acc_bias.rect.volume(); printf("kernel, %p, %zu, [%d, %d]\n", kernel_ptr, kernel_size, kernel_dim1, kernel_dim2); printf("bias, %p, %zu\n", bias_ptr, bias_size); for (int i = 0; i < bias_size; i++) { printf("%f ", bias_ptr[i]); } printf("\n"); for (int i = 0; i < kernel_size; i++) { printf("%f ", kernel_ptr[i]); } printf("\n"); runtime->unmap_region(ctx, kernel_region); runtime->unmap_region(ctx, bias_region); } LinearMeta::LinearMeta(FFHandler handler, int batch_size) : OpMeta(handler) { // Allocate an all-one's vector float* dram_one_ptr = (float *) malloc(sizeof(float) * batch_size); for (int i = 0; i < batch_size; i++) dram_one_ptr[i] = 1.0f; float* fb_one_ptr; checkCUDA(cudaMalloc(&fb_one_ptr, sizeof(float) * batch_size)); checkCUDA(cudaMemcpy(fb_one_ptr, dram_one_ptr, sizeof(float) * batch_size, cudaMemcpyHostToDevice)); one_ptr = (const float*) fb_one_ptr; // Allocate descriptors checkCUDNN(cudnnCreateActivationDescriptor(&actiDesc)); checkCUDNN(cudnnCreateTensorDescriptor(&outputTensor)); } bool 
Linear::measure_operator_cost(Simulator* sim, const ParallelConfig& pc, CostMetrics& cost_metrics) { Tensor sub_output, sub_input; if (!outputs[0].get_output_sub_tensor(pc, sub_output, OP_LINEAR)) return false; if (!inputs[0].get_input_sub_tensor(pc, sub_input, OP_LINEAR)) return false; int input_c = sub_input.adim[0]; int input_n = sub_input.get_volume() / input_c; int output_c = sub_output.adim[0]; int output_n = sub_output.get_volume() / output_c; LinearMeta* m = sim->linear_meta; m->activation = activation; if (use_cudnn_activation(m->activation)) { cudnnActivationMode_t mode; switch (activation) { case AC_MODE_RELU: mode = CUDNN_ACTIVATION_RELU; break; case AC_MODE_SIGMOID: mode = CUDNN_ACTIVATION_SIGMOID; break; default: // Unsupported activation mode assert(false); } checkCUDNN(cudnnSetActivationDescriptor(m->actiDesc, mode, CUDNN_PROPAGATE_NAN, 0.0)); checkCUDNN(cudnnSetTensor4dDescriptor(m->outputTensor, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, output_n, output_c, 1, 1)); } // allocate tensors in simulator sim->free_all(); float* input_ptr = (float*)sim->allocate(sub_input.get_volume(), DT_FLOAT); assert(input_ptr != NULL); float *output_ptr = (float*)sim->allocate(sub_output.get_volume(), DT_FLOAT); assert(output_ptr != NULL); float* kernel_ptr = (float*)sim->allocate((size_t)output_c * input_c, DT_FLOAT); assert(kernel_ptr != NULL); float* bias_ptr = (float*)sim->allocate(output_c, DT_FLOAT); assert(bias_ptr != NULL); cudaStream_t stream; checkCUDA(get_legion_stream(&stream)); std::function<void()> forward, backward; forward = [&] { forward_kernel(m, input_ptr, output_ptr, kernel_ptr, bias_ptr, input_c, output_c, input_n, stream); }; if (sim->computationMode == COMP_MODE_TRAINING) { float* input_grad_ptr = NULL; if (trainableInputs[0]) { input_grad_ptr = (float*)sim->allocate(sub_input.get_volume(), DT_FLOAT); } float *output_grad_ptr = (float*)sim->allocate(sub_output.get_volume(), DT_FLOAT); float* kernel_grad_ptr = (float*)sim->allocate((size_t)output_c * input_c, DT_FLOAT); float* bias_grad_ptr = (float*)sim->allocate(output_c, DT_FLOAT); assert(bias_grad_ptr != NULL); backward = [&] { backward_kernel(m, input_ptr, input_grad_ptr, output_ptr, output_grad_ptr, kernel_ptr, kernel_grad_ptr, bias_grad_ptr, input_c, output_c, input_n, stream); }; } inner_measure_operator_cost(sim, forward, backward, cost_metrics); if (sim->computationMode == COMP_MODE_TRAINING) { printf("[Measure Linear] name(%s) in(%d %d) out(%d %d) forward_time(%.4lf) backward_time(%.4lf)\n", name, input_n, input_c, output_n, output_c, cost_metrics.forward_time, cost_metrics.backward_time); } else { printf("[Measure Linear] name(%s) in(%d %d) out(%d %d) forward_time(%.4lf)\n", name, input_n, input_c, output_n, output_c, cost_metrics.forward_time); } return true; } ParallelConfig Linear::get_random_parallel_config(const FFModel& ff) const { if (!ff.config.enable_parameter_parallel) return Op::get_random_parallel_config(ff); std::vector<int> batch_candidates; std::vector<int> channel_candidates; int batch = outputs[0].adim[outputs[0].numDim-1]; int channel = outputs[0].adim[0]; int total_devices = ff.config.workersPerNode * ff.config.numNodes; for (int i = 1; i <= ff.config.workersPerNode; i++) if (channel % i == 0) for (int j = 1; i * j <= total_devices; j++) if (batch % j == 0) { batch_candidates.push_back(j); channel_candidates.push_back(i); } assert(batch_candidates.size() > 0); int idx = std::rand() % batch_candidates.size(); int num_par_c = channel_candidates[idx]; int num_par_b = batch_candidates[idx]; 
ParallelConfig pc; pc.device_type = ParallelConfig::GPU; pc.nDims = outputs[0].numDim; pc.dim[0] = num_par_c; pc.dim[pc.nDims-1] = num_par_b; for (int i = 1; i < pc.nDims - 1; i++) pc.dim[i] = 1; int start_idx = std::rand() % (total_devices - num_par_c * num_par_b + 1); start_idx = start_idx - start_idx % num_par_c; for (int i = 0; i < num_par_c * num_par_b; i++) pc.device_ids[i] = start_idx + i; return pc; } bool Linear::is_valid_parallel_config(const FFModel& ff, const ParallelConfig& pc) const { if (!ff.config.enable_parameter_parallel) return Op::is_valid_parallel_config(ff, pc); // Support data and parameter parallel if (pc.nDims != outputs[0].numDim) return false; for (int i = 1; i < pc.nDims-1; i++) if (pc.dim[i] != 1) return false; return true; }
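Note on the GELU branch in both copies of Linear::forward_kernel above: the AC_MODE_GELU path launches a gelu_forward_kernel that is defined elsewhere in the code base, passing it B = sqrt(2/pi) and C = 0.044715 * sqrt(2/pi). Those constants match the usual tanh approximation of GELU, so the kernel presumably computes something like the sketch below. The body is an assumption inferred from the constants and the call site, not code taken from this file; only the signature is pinned down by the launch `gelu_forward_kernel<<<GET_BLOCKS(elements), CUDA_NUM_THREADS>>>(elements, B, C, output_ptr)`.

// Assumed element-wise GELU (tanh approximation), in place on `input`,
// consistent with the B and C constants passed from forward_kernel:
//   gelu(x) ~= 0.5 * x * (1 + tanh(B*x + C*x^3))
__global__ void gelu_forward_kernel(size_t size, float B, float C, float *input)
{
    CUDA_KERNEL_LOOP(i, size)
    {
        const float x = input[i];
        input[i] = 0.5f * x * (1.0f + tanhf(B * x + C * x * x * x));
    }
}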
8440d68500427fde742ab3290289e85248b4a757.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// This is the REAL "hello world" for CUDA!
// It takes the string "Hello ", prints it, then passes it to CUDA with an array
// of offsets. Then the offsets are added in parallel to produce the string "World!"
// By Ingemar Ragnemalm 2010

#include <stdio.h>
#include <stdlib.h> // needed for EXIT_SUCCESS

const int N = 7;
const int blocksize = 7;

__global__ void hello(char *a, int *b)
{
    a[threadIdx.x] += b[threadIdx.x];
}

int main()
{
    char a[N] = "Hello ";
    int b[N] = {15, 10, 6, 0, -11, 1, 0};

    char *ad;
    int *bd;
    const int csize = N*sizeof(char);
    const int isize = N*sizeof(int);

    printf("%s", a);

    hipMalloc( (void**)&ad, csize );
    hipMalloc( (void**)&bd, isize );
    hipMemcpy( ad, a, csize, hipMemcpyHostToDevice );
    hipMemcpy( bd, b, isize, hipMemcpyHostToDevice );

    dim3 dimBlock( blocksize, 1 );
    dim3 dimGrid( 1, 1 );
    hipLaunchKernelGGL(( hello), dim3(dimGrid), dim3(dimBlock), 0, 0, ad, bd);
    hipMemcpy( a, ad, csize, hipMemcpyDeviceToHost );
    hipFree( ad );

    printf("%s\n", a);
    return EXIT_SUCCESS;
}
8440d68500427fde742ab3290289e85248b4a757.cu
// This is the REAL "hello world" for CUDA!
// It takes the string "Hello ", prints it, then passes it to CUDA with an array
// of offsets. Then the offsets are added in parallel to produce the string "World!"
// By Ingemar Ragnemalm 2010

#include <stdio.h>
#include <stdlib.h> // needed for EXIT_SUCCESS

const int N = 7;
const int blocksize = 7;

__global__ void hello(char *a, int *b)
{
    a[threadIdx.x] += b[threadIdx.x];
}

int main()
{
    char a[N] = "Hello ";
    int b[N] = {15, 10, 6, 0, -11, 1, 0};

    char *ad;
    int *bd;
    const int csize = N*sizeof(char);
    const int isize = N*sizeof(int);

    printf("%s", a);

    cudaMalloc( (void**)&ad, csize );
    cudaMalloc( (void**)&bd, isize );
    cudaMemcpy( ad, a, csize, cudaMemcpyHostToDevice );
    cudaMemcpy( bd, b, isize, cudaMemcpyHostToDevice );

    dim3 dimBlock( blocksize, 1 );
    dim3 dimGrid( 1, 1 );
    hello<<<dimGrid, dimBlock>>>(ad, bd);
    cudaMemcpy( a, ad, csize, cudaMemcpyDeviceToHost );
    cudaFree( ad );

    printf("%s\n", a);
    return EXIT_SUCCESS;
}
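The hello-world pair above isolates the one construct hipify actually rewrites in this file: the triple-chevron kernel launch. As a quick reference, the sketch below shows the general mapping; the names myKernel, grid, block, shmemBytes, stream and d_data are placeholders for illustration, not identifiers from the files above.

#include <hip/hip_runtime.h>

// Placeholder kernel, only here to make the launch example self-contained.
__global__ void myKernel(int *data) { data[threadIdx.x] += 1; }

void launch_example(int *d_data, hipStream_t stream)
{
    dim3 grid(1), block(32);
    size_t shmemBytes = 0;

    // CUDA source form (what a .cu file contains, and what hipify rewrites):
    //     myKernel<<<grid, block, shmemBytes, stream>>>(d_data);
    // HIP form emitted by hipify:
    hipLaunchKernelGGL(myKernel, grid, block, shmemBytes, stream, d_data);
}

When the CUDA launch omits the shared-memory size and stream, as in hello<<<dimGrid, dimBlock>>>(ad, bd) above, hipify fills in 0 and 0 for those two arguments, which is exactly what the converted launch in the .hip version shows.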
d7a526897faac3bebf113f9dae9c93e21a9e1919.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void add(int *result, int *num1, int *num2){
    *result = *num1 + *num2;
}
d7a526897faac3bebf113f9dae9c93e21a9e1919.cu
#include "includes.h" __global__ void add(int *result, int *num1, int *num2){ *result = *num1 + *num2; }
8d44b1a0d6aac585a5cf63c53498546ff0584c60.hip
// !!! This is a file automatically generated by hipify!!! /* Include standard C/C++ */ #include <iostream> /* support pour le format PAM */ #include "pamalign.h" // CUDA runtime #include <hip/hip_runtime.h> // Helper functions and utilities to work with CUDA #include <helper_cuda.h> #define RADIUS 1 /* Dfinition de l'oprateur += pour le type ushort4 en ignorant la dernire composante */ inline __device__ void operator+=(ushort4 &a, const ushort4 b) { a.x += b.x; a.y += b.y; a.z += b.z; } /* Dfinition de l'oprateur / entre les type ushort4 et ushort en ignorant la dernire composante */ inline __device__ ushort4 operator/(const ushort4 &a, const ushort b) { return make_ushort4(a.x / b, a.y / b, a.z / b, 0); } // Kernel de traitement d'image __global__ void filtre_device(ushort4 *dst, const ushort4 *src, const uint width, const uint height) { // A COMPLETER } int main(int argc, char **argv) { const char imgname[] = "image.pam"; const char imgsave[] = "saved.pam"; const char refname[] = "reference.pam"; int pam_status; imgInfo img, ref; /* Chargement de l'image source */ if (pam_status = load_pam(imgname, &img)) { return pam_status; } printf("Image %s charge (%u-canaux), %ux%ux%u\n", imgname, img.channels, img.width, img.height, img.depth); ushort4 *d_Src, *d_Dst; size_t memSize = img.data_size; // Allocation de la mmoire sur la GPU checkCudaErrors(hipMalloc(&d_Src, memSize)); checkCudaErrors(hipMalloc(&d_Dst, memSize)); // Copie de l'image dans la mmoire GPU checkCudaErrors(hipMemcpy(d_Src, img.data, memSize, hipMemcpyHostToDevice)); dim3 blockSize; blockSize.x = 32; blockSize.y = 32; dim3 gridSize; gridSize.x = ceil((float)img.width/blockSize.x); gridSize.y = ceil((float)img.height/blockSize.y); // Lancement du kernel hipLaunchKernelGGL(( filtre_device), dim3(gridSize), dim3(blockSize), 0, 0, d_Dst, d_Src, img.width, img.height); checkCudaErrors(hipPeekAtLastError()); checkCudaErrors(hipDeviceSynchronize()); // Copie de l'image traite checkCudaErrors(hipMemcpy(img.data, d_Dst, memSize, hipMemcpyDeviceToHost)); // Verification du rsultat if (RADIUS == 1) { // Chargement de l'image de rfrence if (pam_status = load_pam(refname, &ref)) { return pam_status; } // Comparaison if (memcmp(img.data, ref.data, img.data_size)) { printf("Erreur de traitement\n"); save_pam(imgsave, &img); } else printf("Traitement correct\n"); } // Timing float runtime; hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); const int nb = 5; for (int i=0;i <nb; i++) hipLaunchKernelGGL(( filtre_device), dim3(gridSize), dim3(blockSize), 0, 0, d_Dst, d_Src, img.width, img.height); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&runtime, start, stop); runtime /= (float) nb; printf("Temps de traitement: %g (ms)\n", runtime); hipEventDestroy(stop); hipEventDestroy(start); hipFree(d_Dst); hipFree(d_Src); /* Sauvegarde de l'image traite */ if (pam_status = save_pam(imgsave, &img)) { return pam_status; } printf("Image %s sauve (%u-canaux), %ux%ux%u\n", imgsave, img.channels, img.width, img.height, img.depth); }
8d44b1a0d6aac585a5cf63c53498546ff0584c60.cu
/* Include standard C/C++ */ #include <iostream> /* support pour le format PAM */ #include "pamalign.h" // CUDA runtime #include <cuda_runtime.h> // Helper functions and utilities to work with CUDA #include <helper_cuda.h> #define RADIUS 1 /* Définition de l'opérateur += pour le type ushort4 en ignorant la dernière composante */ inline __device__ void operator+=(ushort4 &a, const ushort4 b) { a.x += b.x; a.y += b.y; a.z += b.z; } /* Définition de l'opérateur / entre les type ushort4 et ushort en ignorant la dernière composante */ inline __device__ ushort4 operator/(const ushort4 &a, const ushort b) { return make_ushort4(a.x / b, a.y / b, a.z / b, 0); } // Kernel de traitement d'image __global__ void filtre_device(ushort4 *dst, const ushort4 *src, const uint width, const uint height) { // A COMPLETER } int main(int argc, char **argv) { const char imgname[] = "image.pam"; const char imgsave[] = "saved.pam"; const char refname[] = "reference.pam"; int pam_status; imgInfo img, ref; /* Chargement de l'image source */ if (pam_status = load_pam(imgname, &img)) { return pam_status; } printf("Image %s chargée (%u-canaux), %ux%ux%u\n", imgname, img.channels, img.width, img.height, img.depth); ushort4 *d_Src, *d_Dst; size_t memSize = img.data_size; // Allocation de la mémoire sur la GPU checkCudaErrors(cudaMalloc(&d_Src, memSize)); checkCudaErrors(cudaMalloc(&d_Dst, memSize)); // Copie de l'image dans la mémoire GPU checkCudaErrors(cudaMemcpy(d_Src, img.data, memSize, cudaMemcpyHostToDevice)); dim3 blockSize; blockSize.x = 32; blockSize.y = 32; dim3 gridSize; gridSize.x = ceil((float)img.width/blockSize.x); gridSize.y = ceil((float)img.height/blockSize.y); // Lancement du kernel filtre_device<<<gridSize, blockSize>>>(d_Dst, d_Src, img.width, img.height); checkCudaErrors(cudaPeekAtLastError()); checkCudaErrors(cudaDeviceSynchronize()); // Copie de l'image traitée checkCudaErrors(cudaMemcpy(img.data, d_Dst, memSize, cudaMemcpyDeviceToHost)); // Verification du résultat if (RADIUS == 1) { // Chargement de l'image de référence if (pam_status = load_pam(refname, &ref)) { return pam_status; } // Comparaison if (memcmp(img.data, ref.data, img.data_size)) { printf("Erreur de traitement\n"); save_pam(imgsave, &img); } else printf("Traitement correct\n"); } // Timing float runtime; cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); const int nb = 5; for (int i=0;i <nb; i++) filtre_device<<<gridSize, blockSize>>>(d_Dst, d_Src, img.width, img.height); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&runtime, start, stop); runtime /= (float) nb; printf("Temps de traitement: %g (ms)\n", runtime); cudaEventDestroy(stop); cudaEventDestroy(start); cudaFree(d_Dst); cudaFree(d_Src); /* Sauvegarde de l'image traitée */ if (pam_status = save_pam(imgsave, &img)) { return pam_status; } printf("Image %s sauvée (%u-canaux), %ux%ux%u\n", imgsave, img.channels, img.width, img.height, img.depth); }
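In both versions of this exercise the body of filtre_device is left empty (// A COMPLETER). The sketch below is one plausible completion, assuming the intended operation is a mean (box) filter of radius RADIUS over row-major packed pixels, with border pixels copied through unchanged and channel values small enough (e.g. 8-bit data widened to ushort) that the ushort accumulation cannot overflow. It reuses the operator+= and operator/ overloads defined in the file and is not necessarily the exact filter used to generate reference.pam; the device code is the same for the CUDA and HIP variants.

// Hypothetical completion sketch for filtre_device: (2*RADIUS+1)^2 box (mean) filter.
__global__ void filtre_device(ushort4 *dst, const ushort4 *src,
                              const uint width, const uint height)
{
    const uint x = blockIdx.x * blockDim.x + threadIdx.x;
    const uint y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) return;

    // Border pixels lack a full neighbourhood: copy them through unchanged.
    if (x < RADIUS || y < RADIUS || x >= width - RADIUS || y >= height - RADIUS) {
        dst[y * width + x] = src[y * width + x];
        return;
    }

    ushort4 acc = make_ushort4(0, 0, 0, 0);
    for (int dy = -RADIUS; dy <= RADIUS; ++dy)
        for (int dx = -RADIUS; dx <= RADIUS; ++dx)
            acc += src[(y + dy) * width + (x + dx)];   // operator+= defined above

    const ushort count = (2 * RADIUS + 1) * (2 * RADIUS + 1);
    ushort4 out = acc / count;                         // operator/ defined above
    out.w = src[y * width + x].w;                      // preserve the 4th channel
    dst[y * width + x] = out;
}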
d97a7316659e07fb6723e874f3eca17b2e44ba15.hip
// !!! This is a file automatically generated by hipify!!! // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef PADDLE_WITH_HIP // HIP not support cusolver #include "paddle/phi/backends/dynload/cusolver.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/common/memory_utils.h" #include "paddle/phi/kernels/impl/lu_kernel_impl.h" #include "paddle/phi/kernels/lu_kernel.h" namespace phi { template <typename T> void cusolver_bufferSize(const hipsolverDnHandle_t& cusolverH, int m, int n, T* d_A, int lda, int* lwork); template <typename T> void cusolver_getrf(const hipsolverDnHandle_t& cusolverH, int m, int n, T* d_A, int lda, T* d_work, int* d_Ipiv, int* d_info); template <> void cusolver_bufferSize<float>(const hipsolverDnHandle_t& cusolverH, int m, int n, float* d_A, int lda, int* lwork) { PADDLE_ENFORCE_GPU_SUCCESS( dynload::hipsolverDnSgetrf_bufferSize(cusolverH, m, n, d_A, lda, lwork)); } template <> void cusolver_bufferSize<double>(const hipsolverDnHandle_t& cusolverH, int m, int n, double* d_A, int lda, int* lwork) { PADDLE_ENFORCE_GPU_SUCCESS( dynload::hipsolverDnDgetrf_bufferSize(cusolverH, m, n, d_A, lda, lwork)); } template <> void cusolver_getrf<float>(const hipsolverDnHandle_t& cusolverH, int m, int n, float* d_A, int lda, float* d_work, int* d_Ipiv, int* d_info) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnSgetrf( cusolverH, m, n, d_A, lda, d_work, d_Ipiv, d_info)); } template <> void cusolver_getrf<double>(const hipsolverDnHandle_t& cusolverH, int m, int n, double* d_A, int lda, double* d_work, int* d_Ipiv, int* d_info) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::hipsolverDnDgetrf( cusolverH, m, n, d_A, lda, d_work, d_Ipiv, d_info)); } template <typename T, typename Context> void lu_decomposed_kernel(const Context& dev_ctx, int m, int n, T* d_A, int lda, int* d_Ipiv, int* d_info) { /* step 1: get cusolver handle*/ auto cusolverH = dev_ctx.cusolver_dn_handle(); /* step 2: query working space of getrf */ int lwork; cusolver_bufferSize(cusolverH, m, n, d_A, lda, &lwork); auto work_buff = phi::memory_utils::Alloc( dev_ctx.GetPlace(), lwork * sizeof(T), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); T* d_work = reinterpret_cast<T*>(work_buff->ptr()); /* step 3: LU factorization */ if (d_Ipiv) { cusolver_getrf(cusolverH, m, n, d_A, lda, d_work, d_Ipiv, d_info); } else { cusolver_getrf(cusolverH, m, n, d_A, lda, d_work, NULL, d_info); } PADDLE_ENFORCE_GPU_SUCCESS(hipDeviceSynchronize()); } template <typename T, typename Context> void LUKernel(const Context& dev_ctx, const DenseTensor& x, bool pivot, DenseTensor* out, DenseTensor* pivots, DenseTensor* infos) { const int64_t kMaxBlockDim = 512; *out = Transpose2DTo6D<Context, T>(dev_ctx, x); auto outdims = out->dims(); auto outrank = outdims.size(); int m = static_cast<int>(outdims[outrank - 1]); int n = static_cast<int>(outdims[outrank - 2]); int lda = ::max(1, m); if (pivot) { auto ipiv_dims = 
phi::slice_ddim(outdims, 0, outrank - 1); ipiv_dims[outrank - 2] = ::min(m, n); pivots->Resize(ipiv_dims); } dev_ctx.template Alloc<int>(pivots); auto ipiv_data = pivots->data<int>(); auto info_dims = phi::slice_ddim(outdims, 0, outrank - 2); if (info_dims.size() == 0) { info_dims = phi::make_ddim({1}); } infos->Resize(info_dims); dev_ctx.template Alloc<int>(infos); auto info_data = infos->data<int>(); auto batchsize = product(info_dims); batchsize = ::max(static_cast<int>(batchsize), 1); dev_ctx.template Alloc<T>(out); auto out_data = out->data<T>(); for (int b = 0; b < batchsize; b++) { auto out_data_item = &out_data[b * m * n]; int* info_data_item = &info_data[b]; if (pivot) { auto ipiv_data_item = &ipiv_data[b * ::min(m, n)]; lu_decomposed_kernel( dev_ctx, m, n, out_data_item, lda, ipiv_data_item, info_data_item); } else { lu_decomposed_kernel( dev_ctx, m, n, out_data_item, lda, NULL, info_data_item); } } *out = Transpose2DTo6D<Context, T>(dev_ctx, *out); } } // namespace phi PD_REGISTER_KERNEL(lu, // cuda_only GPU, ALL_LAYOUT, phi::LUKernel, float, double) { kernel->OutputAt(1).SetDataType(phi::DataType::INT32); kernel->OutputAt(2).SetDataType(phi::DataType::INT32); } #endif // not PADDLE_WITH_HIP
d97a7316659e07fb6723e874f3eca17b2e44ba15.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #ifndef PADDLE_WITH_HIP // HIP not support cusolver #include "paddle/phi/backends/dynload/cusolver.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" #include "paddle/phi/common/memory_utils.h" #include "paddle/phi/kernels/impl/lu_kernel_impl.h" #include "paddle/phi/kernels/lu_kernel.h" namespace phi { template <typename T> void cusolver_bufferSize(const cusolverDnHandle_t& cusolverH, int m, int n, T* d_A, int lda, int* lwork); template <typename T> void cusolver_getrf(const cusolverDnHandle_t& cusolverH, int m, int n, T* d_A, int lda, T* d_work, int* d_Ipiv, int* d_info); template <> void cusolver_bufferSize<float>(const cusolverDnHandle_t& cusolverH, int m, int n, float* d_A, int lda, int* lwork) { PADDLE_ENFORCE_GPU_SUCCESS( dynload::cusolverDnSgetrf_bufferSize(cusolverH, m, n, d_A, lda, lwork)); } template <> void cusolver_bufferSize<double>(const cusolverDnHandle_t& cusolverH, int m, int n, double* d_A, int lda, int* lwork) { PADDLE_ENFORCE_GPU_SUCCESS( dynload::cusolverDnDgetrf_bufferSize(cusolverH, m, n, d_A, lda, lwork)); } template <> void cusolver_getrf<float>(const cusolverDnHandle_t& cusolverH, int m, int n, float* d_A, int lda, float* d_work, int* d_Ipiv, int* d_info) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnSgetrf( cusolverH, m, n, d_A, lda, d_work, d_Ipiv, d_info)); } template <> void cusolver_getrf<double>(const cusolverDnHandle_t& cusolverH, int m, int n, double* d_A, int lda, double* d_work, int* d_Ipiv, int* d_info) { PADDLE_ENFORCE_GPU_SUCCESS(dynload::cusolverDnDgetrf( cusolverH, m, n, d_A, lda, d_work, d_Ipiv, d_info)); } template <typename T, typename Context> void lu_decomposed_kernel(const Context& dev_ctx, int m, int n, T* d_A, int lda, int* d_Ipiv, int* d_info) { /* step 1: get cusolver handle*/ auto cusolverH = dev_ctx.cusolver_dn_handle(); /* step 2: query working space of getrf */ int lwork; cusolver_bufferSize(cusolverH, m, n, d_A, lda, &lwork); auto work_buff = phi::memory_utils::Alloc( dev_ctx.GetPlace(), lwork * sizeof(T), phi::Stream(reinterpret_cast<phi::StreamId>(dev_ctx.stream()))); T* d_work = reinterpret_cast<T*>(work_buff->ptr()); /* step 3: LU factorization */ if (d_Ipiv) { cusolver_getrf(cusolverH, m, n, d_A, lda, d_work, d_Ipiv, d_info); } else { cusolver_getrf(cusolverH, m, n, d_A, lda, d_work, NULL, d_info); } PADDLE_ENFORCE_GPU_SUCCESS(cudaDeviceSynchronize()); } template <typename T, typename Context> void LUKernel(const Context& dev_ctx, const DenseTensor& x, bool pivot, DenseTensor* out, DenseTensor* pivots, DenseTensor* infos) { const int64_t kMaxBlockDim = 512; *out = Transpose2DTo6D<Context, T>(dev_ctx, x); auto outdims = out->dims(); auto outrank = outdims.size(); int m = static_cast<int>(outdims[outrank - 1]); int n = static_cast<int>(outdims[outrank - 2]); int lda = std::max(1, m); if (pivot) { auto ipiv_dims = phi::slice_ddim(outdims, 0, outrank - 1); ipiv_dims[outrank - 2] 
= std::min(m, n); pivots->Resize(ipiv_dims); } dev_ctx.template Alloc<int>(pivots); auto ipiv_data = pivots->data<int>(); auto info_dims = phi::slice_ddim(outdims, 0, outrank - 2); if (info_dims.size() == 0) { info_dims = phi::make_ddim({1}); } infos->Resize(info_dims); dev_ctx.template Alloc<int>(infos); auto info_data = infos->data<int>(); auto batchsize = product(info_dims); batchsize = std::max(static_cast<int>(batchsize), 1); dev_ctx.template Alloc<T>(out); auto out_data = out->data<T>(); for (int b = 0; b < batchsize; b++) { auto out_data_item = &out_data[b * m * n]; int* info_data_item = &info_data[b]; if (pivot) { auto ipiv_data_item = &ipiv_data[b * std::min(m, n)]; lu_decomposed_kernel( dev_ctx, m, n, out_data_item, lda, ipiv_data_item, info_data_item); } else { lu_decomposed_kernel( dev_ctx, m, n, out_data_item, lda, NULL, info_data_item); } } *out = Transpose2DTo6D<Context, T>(dev_ctx, *out); } } // namespace phi PD_REGISTER_KERNEL(lu, // cuda_only GPU, ALL_LAYOUT, phi::LUKernel, float, double) { kernel->OutputAt(1).SetDataType(phi::DataType::INT32); kernel->OutputAt(2).SetDataType(phi::DataType::INT32); } #endif // not PADDLE_WITH_HIP
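The wrappers above reduce to the standard three-step cusolverDn getrf flow: create a handle, query the workspace size, then factorize. The standalone sketch below illustrates that flow directly against cuSOLVER for a single float matrix; it is not Paddle code, omits the batching and optional-pivot handling of LUKernel, and skips error checking for brevity.

// Standalone illustration of the cusolverDn getrf workflow (single 3x3 float matrix).
#include <cstdio>
#include <cuda_runtime.h>
#include <cusolverDn.h>

int main() {
  const int m = 3, n = 3, lda = m;
  // Column-major 3x3 matrix.
  float h_A[lda * n] = {4, 2, 1,  2, 5, 3,  1, 3, 6};

  float *d_A; int *d_ipiv, *d_info;
  cudaMalloc((void**)&d_A, sizeof(h_A));
  cudaMalloc((void**)&d_ipiv, sizeof(int) * n);
  cudaMalloc((void**)&d_info, sizeof(int));
  cudaMemcpy(d_A, h_A, sizeof(h_A), cudaMemcpyHostToDevice);

  // Step 1: create the cuSOLVER dense handle.
  cusolverDnHandle_t handle;
  cusolverDnCreate(&handle);

  // Step 2: query the workspace size and allocate it on the device.
  int lwork = 0;
  cusolverDnSgetrf_bufferSize(handle, m, n, d_A, lda, &lwork);
  float *d_work;
  cudaMalloc((void**)&d_work, sizeof(float) * lwork);

  // Step 3: LU factorization with partial pivoting; d_info reports singularity.
  cusolverDnSgetrf(handle, m, n, d_A, lda, d_work, d_ipiv, d_info);
  cudaDeviceSynchronize();

  int h_info = 0;
  cudaMemcpy(&h_info, d_info, sizeof(int), cudaMemcpyDeviceToHost);
  printf("getrf info = %d (0 means success)\n", h_info);

  cusolverDnDestroy(handle);
  cudaFree(d_work); cudaFree(d_A); cudaFree(d_ipiv); cudaFree(d_info);
  return 0;
}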
301cb955106962529df0a6ce320f20ce3497af1c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cstdint> __global__ void computePatchGraph( const float inPred[][DATAZSIZE][DATAYSIZE][DATAXSIZE], const float inCons[][DATAYSIZE][DATAXSIZE][NSZ][NSY][NSX], float affGraph[], const unsigned pairsIDs[], const uint64_t numPairs, int offset) { uint64_t id1 = blockIdx.x*blockDim.x + threadIdx.x; if(id1 >= numPairs) return; id1 += offset; int idz = pairsIDs[id1*6]; int idy = pairsIDs[id1*6+1]; int idx = pairsIDs[id1*6+2]; int idz2 = pairsIDs[id1*6+3]; int idy2 = pairsIDs[id1*6+4]; int idx2 = pairsIDs[id1*6+5]; uint32_t rnd = uint32_t(idz)*uint32_t(idz2)* uint32_t(idy)*uint32_t(idy2)* uint32_t(idx)*uint32_t(idx2); unsigned int mid = int((PSX*PSY*PSZ)/2); unsigned const PSXH = int(PSX/2); unsigned const PSYH = int(PSY/2); unsigned const PSZH = int(PSZ/2); float acc = 0.0f; unsigned int fgCnt = 0; // iterate over all pixel in patch for(int pz1 = 0; pz1 < PSZ; pz1++) { for(int py1 = 0; py1 < PSY; py1++) { for(int px1 = 0; px1 < PSX; px1++) { const int z1 = idz+pz1-PSZH; const int y1 = idy+py1-PSYH; const int x1 = idx+px1-PSXH; if(inPred[mid][z1][y1][x1] <= TH) { continue; } int po1 = px1 + PSX * py1 + PSX * PSY * pz1; // if pred affinity in patch smaller than threshold, continue if(inPred[po1][idz][idy][idx] <= TH) { continue; } for(int pz2 = 0; pz2 < PSZ; pz2++) { for(int py2 = 0; py2 < PSY; py2++) { for(int px2 = 0; px2 < PSX; px2++) { const int z2 = idz2+pz2-PSZH; const int y2 = idy2+py2-PSYH; const int x2 = idx2+px2-PSXH; if(inPred[mid][z2][y2][x2] <= TH) { continue; } int po2 = px2 + PSX * py2 + PSX * PSY * pz2; if(inPred[po2][idz2][idy2][idx2] <= TH) { continue; } int gz1 = x1 + DATAXSIZE * y1 + DATAXSIZE * DATAYSIZE * z1; // bug? was idx2+py2 int gz2 = x2 + DATAXSIZE * y2 + DATAXSIZE * DATAYSIZE * (z2); // intersection if (abs(int(x1-idx2)) <= PSXH && abs(int(y1-idy2)) <= PSYH && abs(int(z1-idz2)) <= PSZH && abs(int(x2-idx)) <= PSXH && abs(int(y2-idy)) <= PSYH && abs(int(z2-idz)) <= PSZH) { rnd = rnd*1103515245U; float rndT = rnd/4294967296.0f; if (rndT > 0.2) continue; } if(gz1 <= gz2) { int zo = idz2+pz2-idz-pz1; int yo = idy2+py2-idy-py1; int xo = idx2+px2-idx-px1; zo += PSZ-1; yo += PSY-1; xo += PSX-1; if(zo < 0 || zo >= 2*PSZ || yo < 0 || yo >= 2*PSY || xo < 0 || xo >= 2*PSX) continue; // float v3 = inCons[zo][yo][xo][z1][y1][x1]; float v3 = inCons[z1][y1][x1][zo][yo][xo]; acc += v3; fgCnt += 1; } else if(gz2 < gz1) { int zo = idz+pz1-idz2-pz2; int yo = idy+py1-idy2-py2; int xo = idx+px1-idx2-px2; zo += PSZ-1; yo += PSY-1; xo += PSX-1; if(zo < 0 || zo >= 2*PSZ || yo < 0 || yo >= 2*PSY || xo < 0 || xo >= 2*PSX) continue; // float v3 = inCons[zo][yo][xo][z2][y2][x2]; float v3 = inCons[z2][y2][x2][zo][yo][xo]; acc += v3; fgCnt += 1; } } } } } } } #ifdef NORM_PATCH_AFFINITY affGraph[id1] = acc/float(max(1, fgCnt)); #else affGraph[id1] = acc; #endif } #ifdef MAIN_PATCHGRAPH #include "verySimpleArgParse.h" #include "cuda_vote_instances.h" int main(int argc, char *argv[]) { std::string affinitiesFileName = getAndCheckArg(argc, argv, "--affinities"); std::string consensusFileName = getAndCheckArg(argc, argv, "--consensus");; std::string selPatchesFileName = getAndCheckArg(argc, argv, "--selected_patches"); std::string patchAffGraphFileName = getAndCheckArg(argc, argv, "--affGraph"); predAff_t *inPredAffinitiesGPU = allocLoadPred(affinitiesFileName); consensus_t *inConsensusGPU = allocLoadConsensus(consensusFileName); unsigned *pairIDsGPU = nullptr; unsigned numPatchPairs = 
allocLoadFgCover(selPatchesFileName, pairIDsGPU); float* patchAffGraphGPU = allocInitPatchAffGraph(numPatchPairs); computePatchAffGraph(patchAffGraphFileName, inPredAffinitiesGPU, inConsensusGPU, patchAffGraphGPU, pairIDsGPU, numPatchPairs); return 0; } #endif
301cb955106962529df0a6ce320f20ce3497af1c.cu
#include <cstdint> __global__ void computePatchGraph( const float inPred[][DATAZSIZE][DATAYSIZE][DATAXSIZE], const float inCons[][DATAYSIZE][DATAXSIZE][NSZ][NSY][NSX], float affGraph[], const unsigned pairsIDs[], const uint64_t numPairs, int offset) { uint64_t id1 = blockIdx.x*blockDim.x + threadIdx.x; if(id1 >= numPairs) return; id1 += offset; int idz = pairsIDs[id1*6]; int idy = pairsIDs[id1*6+1]; int idx = pairsIDs[id1*6+2]; int idz2 = pairsIDs[id1*6+3]; int idy2 = pairsIDs[id1*6+4]; int idx2 = pairsIDs[id1*6+5]; uint32_t rnd = uint32_t(idz)*uint32_t(idz2)* uint32_t(idy)*uint32_t(idy2)* uint32_t(idx)*uint32_t(idx2); unsigned int mid = int((PSX*PSY*PSZ)/2); unsigned const PSXH = int(PSX/2); unsigned const PSYH = int(PSY/2); unsigned const PSZH = int(PSZ/2); float acc = 0.0f; unsigned int fgCnt = 0; // iterate over all pixel in patch for(int pz1 = 0; pz1 < PSZ; pz1++) { for(int py1 = 0; py1 < PSY; py1++) { for(int px1 = 0; px1 < PSX; px1++) { const int z1 = idz+pz1-PSZH; const int y1 = idy+py1-PSYH; const int x1 = idx+px1-PSXH; if(inPred[mid][z1][y1][x1] <= TH) { continue; } int po1 = px1 + PSX * py1 + PSX * PSY * pz1; // if pred affinity in patch smaller than threshold, continue if(inPred[po1][idz][idy][idx] <= TH) { continue; } for(int pz2 = 0; pz2 < PSZ; pz2++) { for(int py2 = 0; py2 < PSY; py2++) { for(int px2 = 0; px2 < PSX; px2++) { const int z2 = idz2+pz2-PSZH; const int y2 = idy2+py2-PSYH; const int x2 = idx2+px2-PSXH; if(inPred[mid][z2][y2][x2] <= TH) { continue; } int po2 = px2 + PSX * py2 + PSX * PSY * pz2; if(inPred[po2][idz2][idy2][idx2] <= TH) { continue; } int gz1 = x1 + DATAXSIZE * y1 + DATAXSIZE * DATAYSIZE * z1; // bug? was idx2+py2 int gz2 = x2 + DATAXSIZE * y2 + DATAXSIZE * DATAYSIZE * (z2); // intersection if (abs(int(x1-idx2)) <= PSXH && abs(int(y1-idy2)) <= PSYH && abs(int(z1-idz2)) <= PSZH && abs(int(x2-idx)) <= PSXH && abs(int(y2-idy)) <= PSYH && abs(int(z2-idz)) <= PSZH) { rnd = rnd*1103515245U; float rndT = rnd/4294967296.0f; if (rndT > 0.2) continue; } if(gz1 <= gz2) { int zo = idz2+pz2-idz-pz1; int yo = idy2+py2-idy-py1; int xo = idx2+px2-idx-px1; zo += PSZ-1; yo += PSY-1; xo += PSX-1; if(zo < 0 || zo >= 2*PSZ || yo < 0 || yo >= 2*PSY || xo < 0 || xo >= 2*PSX) continue; // float v3 = inCons[zo][yo][xo][z1][y1][x1]; float v3 = inCons[z1][y1][x1][zo][yo][xo]; acc += v3; fgCnt += 1; } else if(gz2 < gz1) { int zo = idz+pz1-idz2-pz2; int yo = idy+py1-idy2-py2; int xo = idx+px1-idx2-px2; zo += PSZ-1; yo += PSY-1; xo += PSX-1; if(zo < 0 || zo >= 2*PSZ || yo < 0 || yo >= 2*PSY || xo < 0 || xo >= 2*PSX) continue; // float v3 = inCons[zo][yo][xo][z2][y2][x2]; float v3 = inCons[z2][y2][x2][zo][yo][xo]; acc += v3; fgCnt += 1; } } } } } } } #ifdef NORM_PATCH_AFFINITY affGraph[id1] = acc/float(max(1, fgCnt)); #else affGraph[id1] = acc; #endif } #ifdef MAIN_PATCHGRAPH #include "verySimpleArgParse.h" #include "cuda_vote_instances.h" int main(int argc, char *argv[]) { std::string affinitiesFileName = getAndCheckArg(argc, argv, "--affinities"); std::string consensusFileName = getAndCheckArg(argc, argv, "--consensus");; std::string selPatchesFileName = getAndCheckArg(argc, argv, "--selected_patches"); std::string patchAffGraphFileName = getAndCheckArg(argc, argv, "--affGraph"); predAff_t *inPredAffinitiesGPU = allocLoadPred(affinitiesFileName); consensus_t *inConsensusGPU = allocLoadConsensus(consensusFileName); unsigned *pairIDsGPU = nullptr; unsigned numPatchPairs = allocLoadFgCover(selPatchesFileName, pairIDsGPU); float* patchAffGraphGPU = 
allocInitPatchAffGraph(numPatchPairs); computePatchAffGraph(patchAffGraphFileName, inPredAffinitiesGPU, inConsensusGPU, patchAffGraphGPU, pairIDsGPU, numPatchPairs); return 0; } #endif
7c8e85b7f482935200d7f8b4abc36a63d521ce0f.hip
// !!! This is a file automatically generated by hipify!!! #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/hip/HIPBlas.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/HIPApplyUtils.cuh> #include <ATen/native/hip/vol2col.cuh> namespace at { namespace native { namespace { static inline void conv_transpose3d_shape_check( const Tensor& input, const Tensor& grad_output, const Tensor& weight, const Tensor& bias, int kernel_depth, int kernel_width, int kernel_height, int stride_depth, int stride_width, int stride_height, int padding_depth, int padding_width, int padding_height, int dilation_depth, int dilation_width, int dilation_height, int output_padding_depth, int output_padding_width, int output_padding_height, int weight_nullable) { TORCH_CHECK( input.numel() != 0 && (input.dim() == 4 || input.dim() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input, but got: ", input.sizes()); TORCH_CHECK( stride_depth > 0 && stride_width > 0 && stride_height > 0, "stride should be greater than zero, but got stride_depth: ", stride_depth, " stride_height: ", stride_height, " stride_width: ", stride_width); TORCH_CHECK( dilation_depth > 0 && dilation_width > 0 && dilation_height > 0, "dilation should be greater than zero, but got dilation_depth: ", dilation_depth, ", dilation_height: ", dilation_height, ", dilation_width: ", dilation_width); TORCH_CHECK( (output_padding_depth < stride_depth || output_padding_depth < dilation_depth) && (output_padding_width < stride_width || output_padding_width < dilation_width) && (output_padding_height < stride_height || output_padding_height < dilation_height), "output padding must be smaller than either stride or dilation,", " but got output_padding_depth: ", output_padding_depth, " output_padding_height: ", output_padding_height, " output_padding_width: ", output_padding_width, " stride_depth: ", stride_depth, " stride_height: ", stride_height, " stride_width: ", stride_width, " dilation_depth: ", dilation_depth, " dilation_height: ", dilation_height, " dilation_width: ", dilation_width); // number of input & output planes and kernel size is indirectly defined by // the weight tensor if (weight.defined()) { TORCH_CHECK( weight.numel() != 0 && weight.dim() == 5, "non-empty 5D (n_output_plane x n_input_plane ", "x kernel_depth x kernel_height x kernel_width) tensor ", "expected for weight, but got: ", weight.sizes()); if (bias.defined()) { check_dim_size(bias, 1, 0, weight.size(1)); } } else if (!weight_nullable) { AT_ERROR("weight tensor is expected to be non-nullable"); } int ndim = input.dim(); int dimf = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (ndim == 5) { dimf++; dimd++; dimh++; dimw++; } if (weight.defined()) { const int64_t n_input_plane = weight.size(0); check_dim_size(input, ndim, dimf, n_input_plane); } int64_t input_width = input.size(dimw); int64_t input_height = input.size(dimh); int64_t input_depth = input.size(dimd); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; if (output_depth < 1 || output_width < 1 || output_height < 1) { 
AT_ERROR( "Given input size per channel: (", input_depth, " x ", input_height, " x ", input_width, "). Calculated output size per channel: (", output_depth, " x ", output_height, " x ", output_width, "). Output size is too small"); } if (grad_output.defined()) { if (weight.defined()) { const int64_t n_output_plane = weight.size(1); check_dim_size(grad_output, ndim, dimf, n_output_plane); } else if (bias.defined()) { const int64_t n_output_plane = bias.size(0); check_dim_size(grad_output, ndim, dimf, n_output_plane); } check_dim_size(grad_output, ndim, dimd, output_depth); check_dim_size(grad_output, ndim, dimh, output_height); check_dim_size(grad_output, ndim, dimw, output_width); } } void conv_transpose3d_out_cuda_template( Tensor& output, const Tensor& input_, const Tensor& weight_, IntArrayRef kernel_size, const Tensor& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor& finput, Tensor& fgrad_input) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; Tensor columns = finput; Tensor ones = fgrad_input; int n_input_plane = weight_.size(0); int n_output_plane = weight_.size(1); TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2}, weight_arg{weight_, "weight", 3}, bias_arg{bias, "bias", 4}, columns_arg{columns, "columns", 5}, ones_arg{ones, "ones", 6}; checkAllSameGPU( "conv_transpose3d_out_cuda", {input_arg, output_arg, weight_arg, bias_arg, columns_arg, ones_arg}); conv_transpose3d_shape_check( input_, Tensor(), weight_, bias, kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 0); TORCH_CHECK( !bias.defined() || bias.is_contiguous(), "bias tensor has to be contiguous"); Tensor input = input_.contiguous(); Tensor weight = weight_.contiguous(); int is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * 
(kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Resize output output.resize_( {batch_size, n_output_plane, output_depth, output_height, output_width}); // Resize temporary columns columns.resize_({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}); // Define a buffer of ones, for bias accumulation // Note: this buffer can be shared with other modules, it only ever gets // increased, and always contains ones. if (ones.dim() != 3 || ones.size(0) * ones.size(1) * ones.size(2) < output_depth * output_height * output_width) { // Resize plane and fill with ones... ones.resize_({output_depth, output_height, output_width}); ones.fill_(1); } AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "conv_transpose3d_out_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; // Helpers Tensor input_n; Tensor output_n; // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per output: input_n = input.select(0, elt); output_n = output.select(0, elt); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4); int64_t n = columns.size(1); int64_t k = weight.size(0); // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) at::cuda::blas::gemm<scalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), 'n', 't', n, m, k, static_cast<scalar_t>(1), input_n.data<scalar_t>(), n, weight.data<scalar_t>(), m, static_cast<scalar_t>(0), columns.data<scalar_t>(), n); // Unpack columns back into input: at::native::col2vol<scalar_t, accscalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), columns.data<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, output_n.data<scalar_t>()); // Do Bias after: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m_ = n_output_plane; int64_t n_ = output_depth * output_height * output_width; int64_t k_ = 1; // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) if (bias.defined()) { at::cuda::blas::gemm<scalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), 't', 'n', n_, m_, k_, static_cast<scalar_t>(1), ones.data<scalar_t>(), k_, bias.data<scalar_t>(), k_, static_cast<scalar_t>(1), output_n.data<scalar_t>(), n_); } } // Resize output if (is_batch) { output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {n_input_plane, input_depth, input_height, input_width}); } }); } void conv_transpose3d_backward_out_cuda_template( const Tensor& input_, const Tensor& grad_output_, Tensor& grad_input, const Tensor& weight_, const Tensor& finput, const Tensor& fgrad_input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation 
equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); Tensor grad_columns = finput; int n_input_plane = weight_.size(0); int n_output_plane = weight_.size(1); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; TensorArg input_arg{input_, "input", 1}, grad_output_arg{grad_output_, "grad_output", 2}, weight_arg{weight_, "weight", 3}, grad_columns_arg{grad_columns, "grad_columns", 4}, grad_input_arg{grad_input, "grad_input", 5}; checkAllSameGPU( "conv_transpose3d_backward_out_cuda", {input_arg, grad_output_arg, weight_arg, grad_columns_arg, grad_input_arg}); conv_transpose3d_shape_check( input_, grad_output_, weight_, Tensor(), kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 0); Tensor input = input_.contiguous(); Tensor grad_output = grad_output_.contiguous(); Tensor weight = weight_.contiguous(); bool is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); grad_output.resize_({1, grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Resize output grad_input.resize_( {batch_size, n_input_plane, input_depth, input_height, input_width}); // Resize temporary columns grad_columns.resize_( {n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "conv_transpose3d_backward_out_cuda", [&] { // Helpers Tensor grad_input_n; Tensor grad_output_n; // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per sample: grad_input_n = grad_input.select(0, elt); grad_output_n = grad_output.select(0, elt); // Extract columns: at::native::vol2col<scalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_output_n.data<scalar_t>(), n_output_plane, output_depth, output_height, 
output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, grad_columns.data<scalar_t>()); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = weight.size(0); int64_t n = grad_columns.size(1); int64_t k = weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4); // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) at::cuda::blas::gemm<scalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), 'n', 'n', n, m, k, static_cast<scalar_t>(1), grad_columns.data<scalar_t>(), n, weight.data<scalar_t>(), k, static_cast<scalar_t>(0), grad_input_n.data<scalar_t>(), n); } // Resize output if (is_batch) { grad_output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {n_input_plane, input_depth, input_height, input_width}); grad_input.resize_( {n_input_plane, input_depth, input_height, input_width}); } }); } void conv_transpose3d_acc_grad_parameters_cuda( const Tensor& input_, const Tensor& grad_output_, Tensor& grad_weight, Tensor& grad_bias, const Tensor& finput, const Tensor& fgrad_input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, int scale_) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; Tensor columns = finput; Tensor ones = fgrad_input; TensorArg input_arg{input_, "input", 1}, grad_output_arg{grad_output_, "grad_output", 2}, grad_weight_arg{grad_weight, "grad_weight", 3}, grad_bias_arg{grad_bias, "grad_bias", 4}, columns_arg{columns, "columns", 5}, ones_arg{ones, "ones", 6}; checkAllSameGPU( "conv_transpose3d_acc_grad_parameters_cuda", {input_arg, grad_output_arg, grad_weight_arg, grad_bias_arg, columns_arg, ones_arg}); conv_transpose3d_shape_check( input_, grad_output_, grad_weight, grad_bias, kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 1); int n_output_plane; if (grad_weight.defined()) { n_output_plane = grad_weight.size(1); } else if (grad_bias.defined()) { n_output_plane = grad_bias.size(0); } else { return; } if 
(grad_weight.defined()) { TORCH_CHECK( grad_weight.is_contiguous(), "grad_weight needs to be contiguous"); } if (grad_bias.defined()) { TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous"); TORCH_CHECK(ones.is_contiguous(), "ones needs to be contiguous"); } Tensor input = input_.contiguous(); Tensor grad_output = grad_output_.contiguous(); bool is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); grad_output.resize_({1, grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Define a buffer of ones, for bias accumulation if (ones.dim() != 3 || ones.size(0) * ones.size(1) * ones.size(2) < output_depth * output_height * output_width) { // Resize plane and fill with ones... ones.resize_({output_depth, output_height, output_width}); ones.fill_(1); } // Resize temporary columns columns.resize_({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "conv_transpose3d_acc_grad_parameters_cuda", [&] { // Helpers Tensor input_n; Tensor grad_output_n; scalar_t scale = static_cast<scalar_t>(scale_); // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per output: grad_output_n = grad_output.select(0, elt); // Do Weight: if (grad_weight.defined()) { // Matrix mulitply per output: input_n = input.select(0, elt); // Extract columns: at::native::vol2col<scalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_output_n.data<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, columns.data<scalar_t>()); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t n = columns.size(0); // n_output_plane * kt * kh * kw int64_t m = input_n.size(0); // n_input_plane int64_t k = columns.size(1); // input_height * input_width // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) at::cuda::blas::gemm<scalar_t>( at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), 't', 'n', n, m, k, scale, columns.data<scalar_t>(), k, input_n.data<scalar_t>(), k, static_cast<scalar_t>(1), grad_weight.data<scalar_t>(), n); } // Do Bias: if (grad_bias.defined()) { // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m_ = n_output_plane; int64_t k_ = output_depth * output_height * output_width; // Do GEMV (note: this is a bit confusing because gemv assumes // column-major matrices) at::cuda::blas::gemv<scalar_t>( 
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), 't', k_, m_, scale, grad_output_n.data<scalar_t>(), k_, ones.data<scalar_t>(), 1, static_cast<scalar_t>(1), grad_bias.data<scalar_t>(), 1); } } // Resize if (is_batch) { grad_output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {input.size(1), input_depth, input_height, input_width}); } }); } } // namespace Tensor& conv_transpose3d_out_cuda( Tensor& output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const Tensor& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { Tensor finput = at::empty_like(input); Tensor fgrad = at::empty_like(input); conv_transpose3d_out_cuda_template( output, input, weight, kernel_size, bias, stride, padding, output_padding, dilation, finput, fgrad); return output; } Tensor conv_transpose3d_cuda( const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const Tensor& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { Tensor output = at::empty_like(input); Tensor finput = at::empty_like(input); Tensor fgrad = at::empty_like(input); conv_transpose3d_out_cuda_template( output, input, weight, kernel_size, bias, stride, padding, output_padding, dilation, finput, fgrad); return output; } std::tuple<Tensor&, Tensor&, Tensor&> conv_transpose3d_backward_out_cuda( Tensor& grad_input, Tensor& grad_weight, Tensor& grad_bias, const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor& finput, const Tensor& fgrad) { if (grad_input.defined()) { conv_transpose3d_backward_out_cuda_template( input, grad_output, grad_input, weight, finput, fgrad, kernel_size, stride, padding, output_padding, dilation); } if (grad_weight.defined()) { grad_weight.resize_(weight.sizes()); grad_weight.zero_(); } if (grad_bias.defined()) { grad_bias.resize_({weight.size(1)}); grad_bias.zero_(); } if (grad_weight.defined() || grad_bias.defined()) { conv_transpose3d_acc_grad_parameters_cuda( input, grad_output, grad_weight, grad_bias, finput, fgrad, kernel_size, stride, padding, output_padding, dilation, 1); } return std::tuple<Tensor&, Tensor&, Tensor&>( grad_input, grad_weight, grad_bias); } std::tuple<Tensor, Tensor, Tensor> conv_transpose3d_backward_cuda( const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor& finput, const Tensor& fgrad, std::array<bool, 3> output_mask) { Tensor grad_input; Tensor grad_weight; Tensor grad_bias; if (output_mask[0]) { grad_input = at::empty({0}, grad_output.options()); } else { grad_input = Tensor(); } if (output_mask[1]) { grad_weight = at::empty({0}, grad_output.options()); } else { grad_weight = Tensor(); } if (output_mask[2]) { grad_bias = at::empty({0}, grad_output.options()); } else { grad_bias = Tensor(); } if (grad_input.defined()) { conv_transpose3d_backward_out_cuda_template( input, grad_output, grad_input, weight, finput, fgrad, kernel_size, stride, padding, output_padding, dilation); } if (grad_weight.defined()) { grad_weight.resize_(weight.sizes()); grad_weight.zero_(); } if (grad_bias.defined()) { grad_bias.resize_({weight.size(1)}); grad_bias.zero_(); } if (grad_weight.defined() || grad_bias.defined()) { conv_transpose3d_acc_grad_parameters_cuda( input, 
grad_output, grad_weight, grad_bias, finput, fgrad, kernel_size, stride, padding, output_padding, dilation, 1); } return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias); } } // namespace native } // namespace at
7c8e85b7f482935200d7f8b4abc36a63d521ce0f.cu
#include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/NativeFunctions.h> #include <ATen/TensorUtils.h> #include <ATen/Utils.h> #include <ATen/cuda/CUDABlas.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/CUDAApplyUtils.cuh> #include <ATen/native/cuda/vol2col.cuh> namespace at { namespace native { namespace { static inline void conv_transpose3d_shape_check( const Tensor& input, const Tensor& grad_output, const Tensor& weight, const Tensor& bias, int kernel_depth, int kernel_width, int kernel_height, int stride_depth, int stride_width, int stride_height, int padding_depth, int padding_width, int padding_height, int dilation_depth, int dilation_width, int dilation_height, int output_padding_depth, int output_padding_width, int output_padding_height, int weight_nullable) { TORCH_CHECK( input.numel() != 0 && (input.dim() == 4 || input.dim() == 5), "non-empty 4D or 5D (batch mode) tensor expected for input, but got: ", input.sizes()); TORCH_CHECK( stride_depth > 0 && stride_width > 0 && stride_height > 0, "stride should be greater than zero, but got stride_depth: ", stride_depth, " stride_height: ", stride_height, " stride_width: ", stride_width); TORCH_CHECK( dilation_depth > 0 && dilation_width > 0 && dilation_height > 0, "dilation should be greater than zero, but got dilation_depth: ", dilation_depth, ", dilation_height: ", dilation_height, ", dilation_width: ", dilation_width); TORCH_CHECK( (output_padding_depth < stride_depth || output_padding_depth < dilation_depth) && (output_padding_width < stride_width || output_padding_width < dilation_width) && (output_padding_height < stride_height || output_padding_height < dilation_height), "output padding must be smaller than either stride or dilation,", " but got output_padding_depth: ", output_padding_depth, " output_padding_height: ", output_padding_height, " output_padding_width: ", output_padding_width, " stride_depth: ", stride_depth, " stride_height: ", stride_height, " stride_width: ", stride_width, " dilation_depth: ", dilation_depth, " dilation_height: ", dilation_height, " dilation_width: ", dilation_width); // number of input & output planes and kernel size is indirectly defined by // the weight tensor if (weight.defined()) { TORCH_CHECK( weight.numel() != 0 && weight.dim() == 5, "non-empty 5D (n_output_plane x n_input_plane ", "x kernel_depth x kernel_height x kernel_width) tensor ", "expected for weight, but got: ", weight.sizes()); if (bias.defined()) { check_dim_size(bias, 1, 0, weight.size(1)); } } else if (!weight_nullable) { AT_ERROR("weight tensor is expected to be non-nullable"); } int ndim = input.dim(); int dimf = 0; int dimd = 1; int dimh = 2; int dimw = 3; if (ndim == 5) { dimf++; dimd++; dimh++; dimw++; } if (weight.defined()) { const int64_t n_input_plane = weight.size(0); check_dim_size(input, ndim, dimf, n_input_plane); } int64_t input_width = input.size(dimw); int64_t input_height = input.size(dimh); int64_t input_depth = input.size(dimd); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; if (output_depth < 1 || output_width < 1 || output_height < 1) { AT_ERROR( "Given input size per channel: (", input_depth, 
" x ", input_height, " x ", input_width, "). Calculated output size per channel: (", output_depth, " x ", output_height, " x ", output_width, "). Output size is too small"); } if (grad_output.defined()) { if (weight.defined()) { const int64_t n_output_plane = weight.size(1); check_dim_size(grad_output, ndim, dimf, n_output_plane); } else if (bias.defined()) { const int64_t n_output_plane = bias.size(0); check_dim_size(grad_output, ndim, dimf, n_output_plane); } check_dim_size(grad_output, ndim, dimd, output_depth); check_dim_size(grad_output, ndim, dimh, output_height); check_dim_size(grad_output, ndim, dimw, output_width); } } void conv_transpose3d_out_cuda_template( Tensor& output, const Tensor& input_, const Tensor& weight_, IntArrayRef kernel_size, const Tensor& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, Tensor& finput, Tensor& fgrad_input) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; Tensor columns = finput; Tensor ones = fgrad_input; int n_input_plane = weight_.size(0); int n_output_plane = weight_.size(1); TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2}, weight_arg{weight_, "weight", 3}, bias_arg{bias, "bias", 4}, columns_arg{columns, "columns", 5}, ones_arg{ones, "ones", 6}; checkAllSameGPU( "conv_transpose3d_out_cuda", {input_arg, output_arg, weight_arg, bias_arg, columns_arg, ones_arg}); conv_transpose3d_shape_check( input_, Tensor(), weight_, bias, kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 0); TORCH_CHECK( !bias.defined() || bias.is_contiguous(), "bias tensor has to be contiguous"); Tensor input = input_.contiguous(); Tensor weight = weight_.contiguous(); int is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t 
output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Resize output output.resize_( {batch_size, n_output_plane, output_depth, output_height, output_width}); // Resize temporary columns columns.resize_({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}); // Define a buffer of ones, for bias accumulation // Note: this buffer can be shared with other modules, it only ever gets // increased, and always contains ones. if (ones.dim() != 3 || ones.size(0) * ones.size(1) * ones.size(2) < output_depth * output_height * output_width) { // Resize plane and fill with ones... ones.resize_({output_depth, output_height, output_width}); ones.fill_(1); } AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "conv_transpose3d_out_cuda", [&] { using accscalar_t = at::acc_type<scalar_t, true>; // Helpers Tensor input_n; Tensor output_n; // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per output: input_n = input.select(0, elt); output_n = output.select(0, elt); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4); int64_t n = columns.size(1); int64_t k = weight.size(0); // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) at::cuda::blas::gemm<scalar_t>( at::cuda::getCurrentCUDAStream(), 'n', 't', n, m, k, static_cast<scalar_t>(1), input_n.data<scalar_t>(), n, weight.data<scalar_t>(), m, static_cast<scalar_t>(0), columns.data<scalar_t>(), n); // Unpack columns back into input: at::native::col2vol<scalar_t, accscalar_t>( at::cuda::getCurrentCUDAStream(), columns.data<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, output_n.data<scalar_t>()); // Do Bias after: // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m_ = n_output_plane; int64_t n_ = output_depth * output_height * output_width; int64_t k_ = 1; // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) if (bias.defined()) { at::cuda::blas::gemm<scalar_t>( at::cuda::getCurrentCUDAStream(), 't', 'n', n_, m_, k_, static_cast<scalar_t>(1), ones.data<scalar_t>(), k_, bias.data<scalar_t>(), k_, static_cast<scalar_t>(1), output_n.data<scalar_t>(), n_); } } // Resize output if (is_batch) { output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {n_input_plane, input_depth, input_height, input_width}); } }); } void conv_transpose3d_backward_out_cuda_template( const Tensor& input_, const Tensor& grad_output_, Tensor& grad_input, const Tensor& weight_, const Tensor& finput, const Tensor& fgrad_input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding 
equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); Tensor grad_columns = finput; int n_input_plane = weight_.size(0); int n_output_plane = weight_.size(1); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; TensorArg input_arg{input_, "input", 1}, grad_output_arg{grad_output_, "grad_output", 2}, weight_arg{weight_, "weight", 3}, grad_columns_arg{grad_columns, "grad_columns", 4}, grad_input_arg{grad_input, "grad_input", 5}; checkAllSameGPU( "conv_transpose3d_backward_out_cuda", {input_arg, grad_output_arg, weight_arg, grad_columns_arg, grad_input_arg}); conv_transpose3d_shape_check( input_, grad_output_, weight_, Tensor(), kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 0); Tensor input = input_.contiguous(); Tensor grad_output = grad_output_.contiguous(); Tensor weight = weight_.contiguous(); bool is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); grad_output.resize_({1, grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Resize output grad_input.resize_( {batch_size, n_input_plane, input_depth, input_height, input_width}); // Resize temporary columns grad_columns.resize_( {n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "conv_transpose3d_backward_out_cuda", [&] { // Helpers Tensor grad_input_n; Tensor grad_output_n; // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per sample: grad_input_n = grad_input.select(0, elt); grad_output_n = grad_output.select(0, elt); // Extract columns: at::native::vol2col<scalar_t>( at::cuda::getCurrentCUDAStream(), grad_output_n.data<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, 
padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, grad_columns.data<scalar_t>()); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m = weight.size(0); int64_t n = grad_columns.size(1); int64_t k = weight.size(1) * weight.size(2) * weight.size(3) * weight.size(4); // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) at::cuda::blas::gemm<scalar_t>( at::cuda::getCurrentCUDAStream(), 'n', 'n', n, m, k, static_cast<scalar_t>(1), grad_columns.data<scalar_t>(), n, weight.data<scalar_t>(), k, static_cast<scalar_t>(0), grad_input_n.data<scalar_t>(), n); } // Resize output if (is_batch) { grad_output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {n_input_plane, input_depth, input_height, input_width}); grad_input.resize_( {n_input_plane, input_depth, input_height, input_width}); } }); } void conv_transpose3d_acc_grad_parameters_cuda( const Tensor& input_, const Tensor& grad_output_, Tensor& grad_weight, Tensor& grad_bias, const Tensor& finput, const Tensor& fgrad_input, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, int scale_) { TORCH_CHECK( kernel_size.size() == 3, "It is expected kernel_size equals to 3, but got size ", kernel_size.size()); TORCH_CHECK( dilation.size() == 3, "It is expected dilation equals to 3, but got size ", dilation.size()); TORCH_CHECK( padding.size() == 3, "It is expected padding equals to 3, but got size ", padding.size()); TORCH_CHECK( stride.size() == 3, "It is expected stride equals to 3, but got size ", stride.size()); TORCH_CHECK( output_padding.size() == 3, "It is expected stride equals to 3, but got size ", output_padding.size()); int64_t kernel_depth = kernel_size[0]; int64_t kernel_height = kernel_size[1]; int64_t kernel_width = kernel_size[2]; int64_t dilation_depth = dilation[0]; int64_t dilation_height = dilation[1]; int64_t dilation_width = dilation[2]; int64_t padding_depth = padding[0]; int64_t padding_height = padding[1]; int64_t padding_width = padding[2]; int64_t stride_depth = stride[0]; int64_t stride_height = stride[1]; int64_t stride_width = stride[2]; int64_t output_padding_depth = output_padding[0]; int64_t output_padding_height = output_padding[1]; int64_t output_padding_width = output_padding[2]; Tensor columns = finput; Tensor ones = fgrad_input; TensorArg input_arg{input_, "input", 1}, grad_output_arg{grad_output_, "grad_output", 2}, grad_weight_arg{grad_weight, "grad_weight", 3}, grad_bias_arg{grad_bias, "grad_bias", 4}, columns_arg{columns, "columns", 5}, ones_arg{ones, "ones", 6}; checkAllSameGPU( "conv_transpose3d_acc_grad_parameters_cuda", {input_arg, grad_output_arg, grad_weight_arg, grad_bias_arg, columns_arg, ones_arg}); conv_transpose3d_shape_check( input_, grad_output_, grad_weight, grad_bias, kernel_depth, kernel_width, kernel_height, stride_depth, stride_width, stride_height, padding_depth, padding_width, padding_height, dilation_depth, dilation_width, dilation_height, output_padding_depth, output_padding_width, output_padding_height, 1); int n_output_plane; if (grad_weight.defined()) { n_output_plane = grad_weight.size(1); } else if (grad_bias.defined()) { n_output_plane = grad_bias.size(0); } else { return; } if (grad_weight.defined()) { TORCH_CHECK( grad_weight.is_contiguous(), "grad_weight needs to be contiguous"); } if (grad_bias.defined()) { 
TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous"); TORCH_CHECK(ones.is_contiguous(), "ones needs to be contiguous"); } Tensor input = input_.contiguous(); Tensor grad_output = grad_output_.contiguous(); bool is_batch = false; if (input.dim() == 4) { // Force batch is_batch = true; input.resize_( {1, input.size(0), input.size(1), input.size(2), input.size(3)}); grad_output.resize_({1, grad_output.size(0), grad_output.size(1), grad_output.size(2), grad_output.size(3)}); } int64_t input_width = input.size(4); int64_t input_height = input.size(3); int64_t input_depth = input.size(2); int64_t output_depth = (input_depth - 1) * stride_depth - 2 * padding_depth + (dilation_depth * (kernel_depth - 1) + 1) + output_padding_depth; int64_t output_height = (input_height - 1) * stride_height - 2 * padding_height + (dilation_height * (kernel_height - 1) + 1) + output_padding_height; int64_t output_width = (input_width - 1) * stride_width - 2 * padding_width + (dilation_width * (kernel_width - 1) + 1) + output_padding_width; // Batch size + input planes int64_t batch_size = input.size(0); // Define a buffer of ones, for bias accumulation if (ones.dim() != 3 || ones.size(0) * ones.size(1) * ones.size(2) < output_depth * output_height * output_width) { // Resize plane and fill with ones... ones.resize_({output_depth, output_height, output_width}); ones.fill_(1); } // Resize temporary columns columns.resize_({n_output_plane * kernel_width * kernel_height * kernel_depth, input_depth * input_height * input_width}); AT_DISPATCH_FLOATING_TYPES_AND_HALF( input.scalar_type(), "conv_transpose3d_acc_grad_parameters_cuda", [&] { // Helpers Tensor input_n; Tensor grad_output_n; scalar_t scale = static_cast<scalar_t>(scale_); // For each elt in batch, do: for (int elt = 0; elt < batch_size; elt++) { // Matrix mulitply per output: grad_output_n = grad_output.select(0, elt); // Do Weight: if (grad_weight.defined()) { // Matrix mulitply per output: input_n = input.select(0, elt); // Extract columns: at::native::vol2col<scalar_t>( at::cuda::getCurrentCUDAStream(), grad_output_n.data<scalar_t>(), n_output_plane, output_depth, output_height, output_width, input_depth, input_height, input_width, kernel_depth, kernel_height, kernel_width, padding_depth, padding_height, padding_width, stride_depth, stride_height, stride_width, dilation_depth, dilation_height, dilation_width, columns.data<scalar_t>()); // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t n = columns.size(0); // n_output_plane * kt * kh * kw int64_t m = input_n.size(0); // n_input_plane int64_t k = columns.size(1); // input_height * input_width // Do GEMM (note: this is a bit confusing because gemm assumes // column-major matrices) at::cuda::blas::gemm<scalar_t>( at::cuda::getCurrentCUDAStream(), 't', 'n', n, m, k, scale, columns.data<scalar_t>(), k, input_n.data<scalar_t>(), k, static_cast<scalar_t>(1), grad_weight.data<scalar_t>(), n); } // Do Bias: if (grad_bias.defined()) { // M,N,K are dims of matrix A and B // (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm) int64_t m_ = n_output_plane; int64_t k_ = output_depth * output_height * output_width; // Do GEMV (note: this is a bit confusing because gemv assumes // column-major matrices) at::cuda::blas::gemv<scalar_t>( at::cuda::getCurrentCUDAStream(), 't', k_, m_, scale, grad_output_n.data<scalar_t>(), k_, ones.data<scalar_t>(), 1, static_cast<scalar_t>(1), grad_bias.data<scalar_t>(), 1); } } // Resize if (is_batch) { 
grad_output.resize_( {n_output_plane, output_depth, output_height, output_width}); input.resize_( {input.size(1), input_depth, input_height, input_width}); } }); } } // namespace Tensor& conv_transpose3d_out_cuda( Tensor& output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const Tensor& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { Tensor finput = at::empty_like(input); Tensor fgrad = at::empty_like(input); conv_transpose3d_out_cuda_template( output, input, weight, kernel_size, bias, stride, padding, output_padding, dilation, finput, fgrad); return output; } Tensor conv_transpose3d_cuda( const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, const Tensor& bias, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation) { Tensor output = at::empty_like(input); Tensor finput = at::empty_like(input); Tensor fgrad = at::empty_like(input); conv_transpose3d_out_cuda_template( output, input, weight, kernel_size, bias, stride, padding, output_padding, dilation, finput, fgrad); return output; } std::tuple<Tensor&, Tensor&, Tensor&> conv_transpose3d_backward_out_cuda( Tensor& grad_input, Tensor& grad_weight, Tensor& grad_bias, const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor& finput, const Tensor& fgrad) { if (grad_input.defined()) { conv_transpose3d_backward_out_cuda_template( input, grad_output, grad_input, weight, finput, fgrad, kernel_size, stride, padding, output_padding, dilation); } if (grad_weight.defined()) { grad_weight.resize_(weight.sizes()); grad_weight.zero_(); } if (grad_bias.defined()) { grad_bias.resize_({weight.size(1)}); grad_bias.zero_(); } if (grad_weight.defined() || grad_bias.defined()) { conv_transpose3d_acc_grad_parameters_cuda( input, grad_output, grad_weight, grad_bias, finput, fgrad, kernel_size, stride, padding, output_padding, dilation, 1); } return std::tuple<Tensor&, Tensor&, Tensor&>( grad_input, grad_weight, grad_bias); } std::tuple<Tensor, Tensor, Tensor> conv_transpose3d_backward_cuda( const Tensor& grad_output, const Tensor& input, const Tensor& weight, IntArrayRef kernel_size, IntArrayRef stride, IntArrayRef padding, IntArrayRef output_padding, IntArrayRef dilation, const Tensor& finput, const Tensor& fgrad, std::array<bool, 3> output_mask) { Tensor grad_input; Tensor grad_weight; Tensor grad_bias; if (output_mask[0]) { grad_input = at::empty({0}, grad_output.options()); } else { grad_input = Tensor(); } if (output_mask[1]) { grad_weight = at::empty({0}, grad_output.options()); } else { grad_weight = Tensor(); } if (output_mask[2]) { grad_bias = at::empty({0}, grad_output.options()); } else { grad_bias = Tensor(); } if (grad_input.defined()) { conv_transpose3d_backward_out_cuda_template( input, grad_output, grad_input, weight, finput, fgrad, kernel_size, stride, padding, output_padding, dilation); } if (grad_weight.defined()) { grad_weight.resize_(weight.sizes()); grad_weight.zero_(); } if (grad_bias.defined()) { grad_bias.resize_({weight.size(1)}); grad_bias.zero_(); } if (grad_weight.defined() || grad_bias.defined()) { conv_transpose3d_acc_grad_parameters_cuda( input, grad_output, grad_weight, grad_bias, finput, fgrad, kernel_size, stride, padding, output_padding, dilation, 1); } return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias); } } // namespace native } // 
namespace at
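The forward, backward and grad-parameter paths above all size their tensors from the same transposed-convolution formula, output = (input - 1) * stride - 2 * padding + (dilation * (kernel - 1) + 1) + output_padding, applied independently to depth, height and width. The following is a minimal host-side sketch of that per-dimension computation, for illustration only; the helper name conv_transpose_out_size is hypothetical and not part of ATen.

#include <cstdint>
#include <cstdio>

// Per-dimension output size of a transposed convolution, matching the
// expressions used for output_depth/height/width in the code above.
static int64_t conv_transpose_out_size(int64_t input, int64_t kernel,
                                       int64_t stride, int64_t padding,
                                       int64_t output_padding, int64_t dilation) {
  return (input - 1) * stride - 2 * padding +
         (dilation * (kernel - 1) + 1) + output_padding;
}

int main() {
  // input depth 8, kernel 3, stride 2, padding 1, output_padding 1, dilation 1
  // -> (8 - 1) * 2 - 2 + 3 + 1 = 16
  std::printf("%lld\n", (long long)conv_transpose_out_size(8, 3, 2, 1, 1, 1));
  return 0;
}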
60e551e686c30b8abc293b29da338505f1d75fbb.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Caio Henrique Silva Ramos - NUSP 9292991
//Julio Kenji Ueda - NUSP 9298281
#include <stdio.h>
#include <limits.h>
#include "reduction.h"

__global__ void reduction(int *structure, int *result, int N) {
  //Shared vector for min's
  extern __shared__ int sdata[];
  int i = threadIdx.x;
  int tid = blockIdx.x*blockDim.x+threadIdx.x;
  int min = INT_MAX;
  while(tid < N) {
    min = imin(min, structure[tid]);
    tid += blockDim.x*gridDim.x;
  }
  sdata[i] = min;
  __syncthreads();
  //Compute the min's
  int s = blockDim.x/2;
  while(s != 0) {
    if(i < s) {
      sdata[i] = imin(sdata[i], sdata[i+s]);
    }
    __syncthreads();
    s /= 2;
  }
  //Save the results
  if(i == 0) result[blockIdx.x] = sdata[0];
}
60e551e686c30b8abc293b29da338505f1d75fbb.cu
//Caio Henrique Silva Ramos - NUSP 9292991
//Julio Kenji Ueda - NUSP 9298281
#include <stdio.h>
#include <limits.h>
#include "reduction.h"

__global__ void reduction(int *structure, int *result, int N) {
  //Shared vector for min's
  extern __shared__ int sdata[];
  int i = threadIdx.x;
  int tid = blockIdx.x*blockDim.x+threadIdx.x;
  int min = INT_MAX;
  while(tid < N) {
    min = imin(min, structure[tid]);
    tid += blockDim.x*gridDim.x;
  }
  sdata[i] = min;
  __syncthreads();
  //Compute the min's
  int s = blockDim.x/2;
  while(s != 0) {
    if(i < s) {
      sdata[i] = imin(sdata[i], sdata[i+s]);
    }
    __syncthreads();
    s /= 2;
  }
  //Save the results
  if(i == 0) result[blockIdx.x] = sdata[0];
}
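The kernel above produces only one partial minimum per block, so a second reduction stage is still required. Below is a minimal host-side driver sketch for that two-stage pattern, written as if appended to the same .cu file so that the reduction kernel, the existing <limits.h> include and the imin helper from reduction.h are in scope. The names find_min, THREADS and BLOCKS are illustrative and not taken from the original project.

// Illustrative host driver (not part of the original file): launches the
// block-level min reduction, then finishes the reduction on the host.
static int find_min(const int *h_data, int N) {
  const int THREADS = 256;   // the tree step assumes a power-of-two block size
  const int BLOCKS  = 64;    // the grid-stride loop in the kernel covers any N
  int *d_data = nullptr, *d_partial = nullptr;
  cudaMalloc(&d_data, N * sizeof(int));
  cudaMalloc(&d_partial, BLOCKS * sizeof(int));
  cudaMemcpy(d_data, h_data, N * sizeof(int), cudaMemcpyHostToDevice);

  // Dynamic shared memory: one int per thread, as required by extern sdata[].
  reduction<<<BLOCKS, THREADS, THREADS * sizeof(int)>>>(d_data, d_partial, N);

  int h_partial[BLOCKS];
  cudaMemcpy(h_partial, d_partial, BLOCKS * sizeof(int), cudaMemcpyDeviceToHost);

  int result = INT_MAX;
  for (int i = 0; i < BLOCKS; ++i)   // second stage: reduce the per-block minima
    result = (h_partial[i] < result) ? h_partial[i] : result;

  cudaFree(d_data);
  cudaFree(d_partial);
  return result;
}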
c5fa25c7661ec8a12df8685d86806100b2fb7608.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cudaDefs.h> #include <time.h> #include <math.h> #include <benchmark.h> hipError_t error = hipSuccess; hipDeviceProp_t deviceProp = hipDeviceProp_t(); constexpr unsigned int N = 1 << 15; constexpr unsigned int MEMSIZE = N * sizeof(unsigned int); constexpr unsigned int NO_LOOPS = 100; constexpr unsigned int TPB = 256; constexpr unsigned int GRID_SIZE = (N + TPB - 1) / TPB; constexpr unsigned int NO_TEST_PHASES = 10; void fillData(unsigned int *data, const unsigned int length) { for (unsigned int i=0; i<length; i++) { data[i]= 1; } } void printData(const unsigned int *data, const unsigned int length) { if (data ==0) return; for (unsigned int i=0; i<length; i++) { printf("%u ", data[i]); } } __global__ void kernel(const unsigned int *a, const unsigned int *b, const unsigned int length, unsigned int *c) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int jump = gridDim.x * blockDim.x; //TODO: thread block loop while (tid < length) { c[tid] = a[tid] + b[tid]; tid += jump; } } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Tests 1. - single stream, async calling </summary> //////////////////////////////////////////////////////////////////////////////////////////////////// void test1() { unsigned int *a, *b, *c; unsigned int *da, *db, *dc; // paged-locked allocation hipHostMalloc((void**)&a, NO_LOOPS * MEMSIZE,hipHostMallocDefault); hipHostMalloc((void**)&b, NO_LOOPS * MEMSIZE,hipHostMallocDefault); hipHostMalloc((void**)&c, NO_LOOPS * MEMSIZE,hipHostMallocDefault); fillData(a, NO_LOOPS * N); fillData(b, NO_LOOPS * N); // Data chunks on GPU hipMalloc( (void**)&da, MEMSIZE ); hipMalloc( (void**)&db, MEMSIZE ); hipMalloc( (void**)&dc, MEMSIZE ); //TODO: create stream hipStream_t stream; hipStreamCreate(&stream); auto lambda = [&]() { unsigned int dataOffset = 0; for (int i = 0; i < NO_LOOPS; i++) { //TODO: copy a->da, b->db hipMemcpyAsync(da, &a[dataOffset], MEMSIZE, hipMemcpyKind::hipMemcpyHostToDevice, stream); hipMemcpyAsync(db, &b[dataOffset], MEMSIZE, hipMemcpyKind::hipMemcpyHostToDevice, stream); //TODO: run the kernel in the stream dim3 grid(GRID_SIZE, 1, 1); dim3 block(TPB, 1, 1); hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block), 0, stream, da, db, N, dc); //TODO: copy dc->c hipMemcpyAsync(&c[dataOffset], dc, MEMSIZE, hipMemcpyKind::hipMemcpyDeviceToHost, stream); dataOffset += N; } }; float gpuTime = GPUTIME(NO_TEST_PHASES, lambda()); hipStreamSynchronize(stream); // wait for stream to finish hipStreamDestroy(stream); hipDeviceSynchronize(); printf("\x1B[93m[GPU time] %s: %f ms\033[0m\n", __PRETTY_FUNCTION__, gpuTime); //printData(c, 100); hipFree(da); hipFree(db); hipFree(dc); hipHostFree(a); hipHostFree(b); hipHostFree(c); } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Tests 2. 
- two streams - depth first approach </summary> //////////////////////////////////////////////////////////////////////////////////////////////////// void test2() { unsigned int *a, *b, *c; unsigned int *da0, *db0, *dc0; unsigned int *da1, *db1, *dc1; // paged-locked allocation hipHostMalloc((void**)&a, NO_LOOPS * MEMSIZE,hipHostMallocDefault); hipHostMalloc((void**)&b, NO_LOOPS * MEMSIZE,hipHostMallocDefault); hipHostMalloc((void**)&c, NO_LOOPS * MEMSIZE,hipHostMallocDefault); fillData(a, NO_LOOPS * N); fillData(b, NO_LOOPS * N); // Data chunks on GPU hipMalloc( (void**)&da0, MEMSIZE ); hipMalloc( (void**)&db0, MEMSIZE ); hipMalloc( (void**)&dc0, MEMSIZE ); hipMalloc( (void**)&da1, MEMSIZE ); hipMalloc( (void**)&db1, MEMSIZE ); hipMalloc( (void**)&dc1, MEMSIZE ); //create stream hipStream_t stream0, stream1; hipStreamCreate(&stream0); hipStreamCreate(&stream1); auto lambda = [&]() { unsigned int dataOffset = 0; int stream_id = 0; for (int i = 0; i < NO_LOOPS; i++) { //TODO: copy a->da, b->db auto da = stream_id == 0 ? da0 : da1; auto db = stream_id == 0 ? db0 : db1; auto dc = stream_id == 0 ? dc0 : dc1; auto stream = stream_id == 0 ? stream0 : stream1; hipMemcpyAsync(da, &a[dataOffset], MEMSIZE, hipMemcpyKind::hipMemcpyHostToDevice, stream); hipMemcpyAsync(db, &b[dataOffset], MEMSIZE, hipMemcpyKind::hipMemcpyHostToDevice, stream); //TODO: run the kernel in the stream dim3 grid(GRID_SIZE, 1, 1); dim3 block(TPB, 1, 1); hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block), 0, stream, da, db, N, dc); //TODO: copy dc->c hipMemcpyAsync(&c[dataOffset], dc, MEMSIZE, hipMemcpyKind::hipMemcpyDeviceToHost, stream); dataOffset += N; stream_id = stream_id == 0 ? 1 : 0; } }; float gpuTime = GPUTIME(NO_TEST_PHASES, lambda()); hipStreamSynchronize(stream0); // wait for stream to finish hipStreamSynchronize(stream1); // wait for stream to finish hipStreamDestroy(stream0); hipStreamDestroy(stream1); hipDeviceSynchronize(); printf("\x1B[93m[GPU time] %s: %f ms\033[0m\n", __PRETTY_FUNCTION__, gpuTime); //printData(c, 100); hipFree(da0); hipFree(db0); hipFree(dc0); hipFree(da1); hipFree(db1); hipFree(dc1); hipHostFree(a); hipHostFree(b); hipHostFree(c); } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Tests 3. 
- two streams - breadth first approach</summary> //////////////////////////////////////////////////////////////////////////////////////////////////// void test3() { unsigned int *a, *b, *c; unsigned int *da0, *db0, *dc0; unsigned int *da1, *db1, *dc1; // paged-locked allocation hipHostMalloc((void**)&a, NO_LOOPS * MEMSIZE,hipHostMallocDefault); hipHostMalloc((void**)&b, NO_LOOPS * MEMSIZE,hipHostMallocDefault); hipHostMalloc((void**)&c, NO_LOOPS * MEMSIZE,hipHostMallocDefault); fillData(a, NO_LOOPS * N); fillData(b, NO_LOOPS * N); // Data chunks on GPU hipMalloc( (void**)&da0, MEMSIZE ); hipMalloc( (void**)&db0, MEMSIZE ); hipMalloc( (void**)&dc0, MEMSIZE ); hipMalloc( (void**)&da1, MEMSIZE ); hipMalloc( (void**)&db1, MEMSIZE ); hipMalloc( (void**)&dc1, MEMSIZE ); //create stream hipStream_t stream0, stream1; hipStreamCreate(&stream0); hipStreamCreate(&stream1); auto lambda = [&]() { unsigned int dataOffset0 = 0; unsigned int dataOffset1 = N; for (int i = 0; i < NO_LOOPS; i += 2) { hipMemcpyAsync(da0, &a[dataOffset0], MEMSIZE, hipMemcpyKind::hipMemcpyHostToDevice, stream0); hipMemcpyAsync(da1, &a[dataOffset1], MEMSIZE, hipMemcpyKind::hipMemcpyHostToDevice, stream1); hipMemcpyAsync(db0, &b[dataOffset0], MEMSIZE, hipMemcpyKind::hipMemcpyHostToDevice, stream0); hipMemcpyAsync(db1, &b[dataOffset1], MEMSIZE, hipMemcpyKind::hipMemcpyHostToDevice, stream1); //TODO: run the kernel in the stream dim3 grid(GRID_SIZE, 1, 1); dim3 block(TPB, 1, 1); hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block), 0, stream0, da0, db0, N, dc0); hipLaunchKernelGGL(( kernel), dim3(grid), dim3(block), 0, stream1, da1, db1, N, dc1); //TODO: copy dc->c hipMemcpyAsync(&c[dataOffset0], dc0, MEMSIZE, hipMemcpyKind::hipMemcpyDeviceToHost, stream0); hipMemcpyAsync(&c[dataOffset1], dc1, MEMSIZE, hipMemcpyKind::hipMemcpyDeviceToHost, stream1); dataOffset0 += 2*N; dataOffset1 += 2*N; } }; float gpuTime = GPUTIME(NO_TEST_PHASES, lambda()); hipStreamSynchronize(stream0); // wait for stream to finish hipStreamSynchronize(stream1); // wait for stream to finish hipStreamDestroy(stream0); hipStreamDestroy(stream1); hipDeviceSynchronize(); printf("\x1B[93m[GPU time] %s: %f ms\033[0m\n", __PRETTY_FUNCTION__, gpuTime); //printData(c, 100); hipFree(da0); hipFree(db0); hipFree(dc0); hipFree(da1); hipFree(db1); hipFree(dc1); hipHostFree(a); hipHostFree(b); hipHostFree(c); } int main(int argc, char *argv[]) { initializeCUDA(deviceProp); test1(); test2(); test3(); return 0; }
c5fa25c7661ec8a12df8685d86806100b2fb7608.cu
#include <cudaDefs.h> #include <time.h> #include <math.h> #include <benchmark.h> cudaError_t error = cudaSuccess; cudaDeviceProp deviceProp = cudaDeviceProp(); constexpr unsigned int N = 1 << 15; constexpr unsigned int MEMSIZE = N * sizeof(unsigned int); constexpr unsigned int NO_LOOPS = 100; constexpr unsigned int TPB = 256; constexpr unsigned int GRID_SIZE = (N + TPB - 1) / TPB; constexpr unsigned int NO_TEST_PHASES = 10; void fillData(unsigned int *data, const unsigned int length) { for (unsigned int i=0; i<length; i++) { data[i]= 1; } } void printData(const unsigned int *data, const unsigned int length) { if (data ==0) return; for (unsigned int i=0; i<length; i++) { printf("%u ", data[i]); } } __global__ void kernel(const unsigned int *a, const unsigned int *b, const unsigned int length, unsigned int *c) { unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int jump = gridDim.x * blockDim.x; //TODO: thread block loop while (tid < length) { c[tid] = a[tid] + b[tid]; tid += jump; } } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Tests 1. - single stream, async calling </summary> //////////////////////////////////////////////////////////////////////////////////////////////////// void test1() { unsigned int *a, *b, *c; unsigned int *da, *db, *dc; // paged-locked allocation cudaHostAlloc((void**)&a, NO_LOOPS * MEMSIZE,cudaHostAllocDefault); cudaHostAlloc((void**)&b, NO_LOOPS * MEMSIZE,cudaHostAllocDefault); cudaHostAlloc((void**)&c, NO_LOOPS * MEMSIZE,cudaHostAllocDefault); fillData(a, NO_LOOPS * N); fillData(b, NO_LOOPS * N); // Data chunks on GPU cudaMalloc( (void**)&da, MEMSIZE ); cudaMalloc( (void**)&db, MEMSIZE ); cudaMalloc( (void**)&dc, MEMSIZE ); //TODO: create stream cudaStream_t stream; cudaStreamCreate(&stream); auto lambda = [&]() { unsigned int dataOffset = 0; for (int i = 0; i < NO_LOOPS; i++) { //TODO: copy a->da, b->db cudaMemcpyAsync(da, &a[dataOffset], MEMSIZE, cudaMemcpyKind::cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(db, &b[dataOffset], MEMSIZE, cudaMemcpyKind::cudaMemcpyHostToDevice, stream); //TODO: run the kernel in the stream dim3 grid(GRID_SIZE, 1, 1); dim3 block(TPB, 1, 1); kernel<<<grid, block, 0, stream>>>(da, db, N, dc); //TODO: copy dc->c cudaMemcpyAsync(&c[dataOffset], dc, MEMSIZE, cudaMemcpyKind::cudaMemcpyDeviceToHost, stream); dataOffset += N; } }; float gpuTime = GPUTIME(NO_TEST_PHASES, lambda()); cudaStreamSynchronize(stream); // wait for stream to finish cudaStreamDestroy(stream); cudaDeviceSynchronize(); printf("\x1B[93m[GPU time] %s: %f ms\033[0m\n", __PRETTY_FUNCTION__, gpuTime); //printData(c, 100); cudaFree(da); cudaFree(db); cudaFree(dc); cudaFreeHost(a); cudaFreeHost(b); cudaFreeHost(c); } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Tests 2. 
- two streams - depth first approach </summary> //////////////////////////////////////////////////////////////////////////////////////////////////// void test2() { unsigned int *a, *b, *c; unsigned int *da0, *db0, *dc0; unsigned int *da1, *db1, *dc1; // paged-locked allocation cudaHostAlloc((void**)&a, NO_LOOPS * MEMSIZE,cudaHostAllocDefault); cudaHostAlloc((void**)&b, NO_LOOPS * MEMSIZE,cudaHostAllocDefault); cudaHostAlloc((void**)&c, NO_LOOPS * MEMSIZE,cudaHostAllocDefault); fillData(a, NO_LOOPS * N); fillData(b, NO_LOOPS * N); // Data chunks on GPU cudaMalloc( (void**)&da0, MEMSIZE ); cudaMalloc( (void**)&db0, MEMSIZE ); cudaMalloc( (void**)&dc0, MEMSIZE ); cudaMalloc( (void**)&da1, MEMSIZE ); cudaMalloc( (void**)&db1, MEMSIZE ); cudaMalloc( (void**)&dc1, MEMSIZE ); //create stream cudaStream_t stream0, stream1; cudaStreamCreate(&stream0); cudaStreamCreate(&stream1); auto lambda = [&]() { unsigned int dataOffset = 0; int stream_id = 0; for (int i = 0; i < NO_LOOPS; i++) { //TODO: copy a->da, b->db auto da = stream_id == 0 ? da0 : da1; auto db = stream_id == 0 ? db0 : db1; auto dc = stream_id == 0 ? dc0 : dc1; auto stream = stream_id == 0 ? stream0 : stream1; cudaMemcpyAsync(da, &a[dataOffset], MEMSIZE, cudaMemcpyKind::cudaMemcpyHostToDevice, stream); cudaMemcpyAsync(db, &b[dataOffset], MEMSIZE, cudaMemcpyKind::cudaMemcpyHostToDevice, stream); //TODO: run the kernel in the stream dim3 grid(GRID_SIZE, 1, 1); dim3 block(TPB, 1, 1); kernel<<<grid, block, 0, stream>>>(da, db, N, dc); //TODO: copy dc->c cudaMemcpyAsync(&c[dataOffset], dc, MEMSIZE, cudaMemcpyKind::cudaMemcpyDeviceToHost, stream); dataOffset += N; stream_id = stream_id == 0 ? 1 : 0; } }; float gpuTime = GPUTIME(NO_TEST_PHASES, lambda()); cudaStreamSynchronize(stream0); // wait for stream to finish cudaStreamSynchronize(stream1); // wait for stream to finish cudaStreamDestroy(stream0); cudaStreamDestroy(stream1); cudaDeviceSynchronize(); printf("\x1B[93m[GPU time] %s: %f ms\033[0m\n", __PRETTY_FUNCTION__, gpuTime); //printData(c, 100); cudaFree(da0); cudaFree(db0); cudaFree(dc0); cudaFree(da1); cudaFree(db1); cudaFree(dc1); cudaFreeHost(a); cudaFreeHost(b); cudaFreeHost(c); } //////////////////////////////////////////////////////////////////////////////////////////////////// /// <summary> Tests 3. 
- two streams - breadth first approach</summary> //////////////////////////////////////////////////////////////////////////////////////////////////// void test3() { unsigned int *a, *b, *c; unsigned int *da0, *db0, *dc0; unsigned int *da1, *db1, *dc1; // paged-locked allocation cudaHostAlloc((void**)&a, NO_LOOPS * MEMSIZE,cudaHostAllocDefault); cudaHostAlloc((void**)&b, NO_LOOPS * MEMSIZE,cudaHostAllocDefault); cudaHostAlloc((void**)&c, NO_LOOPS * MEMSIZE,cudaHostAllocDefault); fillData(a, NO_LOOPS * N); fillData(b, NO_LOOPS * N); // Data chunks on GPU cudaMalloc( (void**)&da0, MEMSIZE ); cudaMalloc( (void**)&db0, MEMSIZE ); cudaMalloc( (void**)&dc0, MEMSIZE ); cudaMalloc( (void**)&da1, MEMSIZE ); cudaMalloc( (void**)&db1, MEMSIZE ); cudaMalloc( (void**)&dc1, MEMSIZE ); //create stream cudaStream_t stream0, stream1; cudaStreamCreate(&stream0); cudaStreamCreate(&stream1); auto lambda = [&]() { unsigned int dataOffset0 = 0; unsigned int dataOffset1 = N; for (int i = 0; i < NO_LOOPS; i += 2) { cudaMemcpyAsync(da0, &a[dataOffset0], MEMSIZE, cudaMemcpyKind::cudaMemcpyHostToDevice, stream0); cudaMemcpyAsync(da1, &a[dataOffset1], MEMSIZE, cudaMemcpyKind::cudaMemcpyHostToDevice, stream1); cudaMemcpyAsync(db0, &b[dataOffset0], MEMSIZE, cudaMemcpyKind::cudaMemcpyHostToDevice, stream0); cudaMemcpyAsync(db1, &b[dataOffset1], MEMSIZE, cudaMemcpyKind::cudaMemcpyHostToDevice, stream1); //TODO: run the kernel in the stream dim3 grid(GRID_SIZE, 1, 1); dim3 block(TPB, 1, 1); kernel<<<grid, block, 0, stream0>>>(da0, db0, N, dc0); kernel<<<grid, block, 0, stream1>>>(da1, db1, N, dc1); //TODO: copy dc->c cudaMemcpyAsync(&c[dataOffset0], dc0, MEMSIZE, cudaMemcpyKind::cudaMemcpyDeviceToHost, stream0); cudaMemcpyAsync(&c[dataOffset1], dc1, MEMSIZE, cudaMemcpyKind::cudaMemcpyDeviceToHost, stream1); dataOffset0 += 2*N; dataOffset1 += 2*N; } }; float gpuTime = GPUTIME(NO_TEST_PHASES, lambda()); cudaStreamSynchronize(stream0); // wait for stream to finish cudaStreamSynchronize(stream1); // wait for stream to finish cudaStreamDestroy(stream0); cudaStreamDestroy(stream1); cudaDeviceSynchronize(); printf("\x1B[93m[GPU time] %s: %f ms\033[0m\n", __PRETTY_FUNCTION__, gpuTime); //printData(c, 100); cudaFree(da0); cudaFree(db0); cudaFree(dc0); cudaFree(da1); cudaFree(db1); cudaFree(dc1); cudaFreeHost(a); cudaFreeHost(b); cudaFreeHost(c); } int main(int argc, char *argv[]) { initializeCUDA(deviceProp); test1(); test2(); test3(); return 0; }
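One caveat about the benchmarks above: GPUTIME comes from benchmark.h, which is not part of this listing, and the lambdas only enqueue asynchronous work. The sketch below shows how one pipelined pass could instead be timed with plain CUDA events; it assumes it is appended to the same .cu file (so kernel, N, MEMSIZE, NO_LOOPS, GRID_SIZE and TPB are in scope) and that a, b and c were allocated with cudaHostAlloc as in test1(), since cudaMemcpyAsync only overlaps with pinned host memory. The function name time_one_pass is illustrative.

// Illustrative event-based timing of one single-stream pass (not part of the
// original file). Recording both events in the stream measures GPU completion,
// not just host-side enqueue time.
static float time_one_pass(cudaStream_t stream,
                           unsigned int *a, unsigned int *b, unsigned int *c,
                           unsigned int *da, unsigned int *db, unsigned int *dc) {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start, stream);
  unsigned int dataOffset = 0;
  for (int i = 0; i < NO_LOOPS; i++) {
    cudaMemcpyAsync(da, &a[dataOffset], MEMSIZE, cudaMemcpyHostToDevice, stream);
    cudaMemcpyAsync(db, &b[dataOffset], MEMSIZE, cudaMemcpyHostToDevice, stream);
    kernel<<<GRID_SIZE, TPB, 0, stream>>>(da, db, N, dc);
    cudaMemcpyAsync(&c[dataOffset], dc, MEMSIZE, cudaMemcpyDeviceToHost, stream);
    dataOffset += N;
  }
  cudaEventRecord(stop, stream);
  cudaEventSynchronize(stop);   // blocks until all preceding work in the stream is done

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return ms;
}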
ae5bd3dff5cb64cfab52fc5c648b0dd36cc156e4.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <io/utilities/block_utils.cuh> #include "orc_common.h" #include "orc_gpu.h" #define LOG2_BYTESTREAM_BFRSZ 13 // Must be able to handle 512x 8-byte values #define BYTESTREAM_BFRSZ (1 << LOG2_BYTESTREAM_BFRSZ) #define BYTESTREAM_BFRMASK32 ((BYTESTREAM_BFRSZ - 1) >> 2) // TODO: Should be more efficient with 512 threads per block and circular queue for values #define LOG2_NWARPS 5 // Log2 of number of warps per threadblock #define LOG2_NTHREADS (LOG2_NWARPS + 5) #define NWARPS (1 << LOG2_NWARPS) #define NTHREADS (1 << LOG2_NTHREADS) #define ROWDEC_BFRSZ \ (NTHREADS + 128) // Add some margin to look ahead to future rows in case there are many zeroes #define IS_RLEv1(encoding_mode) ((encoding_mode) < DIRECT_V2) #define IS_RLEv2(encoding_mode) ((encoding_mode) >= DIRECT_V2) #define IS_DICTIONARY(encoding_mode) ((encoding_mode)&1) namespace cudf { namespace io { namespace orc { namespace gpu { static __device__ __constant__ int64_t kORCTimeToUTC = 1420070400; // Seconds from January 1st, 1970 to January 1st, 2015 struct int128_s { uint64_t lo; int64_t hi; }; struct orc_bytestream_s { const uint8_t *base; uint32_t pos; uint32_t len; uint32_t fill_pos; uint32_t fill_count; union { uint8_t u8[BYTESTREAM_BFRSZ]; uint32_t u32[BYTESTREAM_BFRSZ >> 2]; uint2 u64[BYTESTREAM_BFRSZ >> 3]; } buf; }; struct orc_rlev1_state_s { uint32_t num_runs; uint32_t num_vals; int32_t run_data[NWARPS * 12]; // (delta << 24) | (count << 16) | (first_val) }; struct orc_rlev2_state_s { uint32_t num_runs; uint32_t num_vals; union { uint32_t u32[NWARPS]; uint64_t u64[NWARPS]; } baseval; uint16_t m2_pw_byte3[NWARPS]; int64_t delta[NWARPS]; uint16_t runs_loc[NTHREADS]; }; struct orc_byterle_state_s { uint32_t num_runs; uint32_t num_vals; uint32_t runs_loc[NWARPS]; uint32_t runs_pos[NWARPS]; }; struct orc_rowdec_state_s { uint32_t nz_count; uint32_t last_row[NWARPS]; uint32_t row[ROWDEC_BFRSZ]; // 0=skip, >0: row position relative to cur_row }; struct orc_strdict_state_s { uint2 *local_dict; uint32_t dict_pos; uint32_t dict_len; }; struct orc_nulldec_state_s { uint32_t row; uint32_t null_count[NWARPS]; }; struct orc_datadec_state_s { uint32_t cur_row; // starting row of current batch uint32_t end_row; // ending row of this chunk (start_row + num_rows) uint32_t max_vals; // max # of non-zero values to decode in this batch uint32_t nrows; // # of rows in current batch (up to NTHREADS) uint32_t buffered_count; // number of buffered values in the secondary data stream uint32_t tz_num_entries; // number of entries in timezone table uint32_t tz_dst_cycle; // number of entries in timezone daylight savings cycle int64_t first_tz_transition; // first transition in timezone table int64_t last_tz_transition; // last transition in timezone table int64_t utc_epoch; // kORCTimeToUTC - gmtOffset RowGroup index; }; struct orcdec_state_s { ColumnDesc chunk; orc_bytestream_s 
bs; orc_bytestream_s bs2; int is_string; union { orc_strdict_state_s dict; orc_nulldec_state_s nulls; orc_datadec_state_s data; } top; union { orc_rlev1_state_s rlev1; orc_rlev2_state_s rlev2; orc_byterle_state_s rle8; orc_rowdec_state_s rowdec; } u; union { uint8_t u8[NTHREADS * 8]; uint32_t u32[NTHREADS * 2]; int32_t i32[NTHREADS * 2]; uint64_t u64[NTHREADS]; int64_t i64[NTHREADS]; } vals; }; /** * @brief Initializes byte stream, modifying length and start position to keep the read pointer *8-byte aligned Assumes that the address range [start_address & ~7, (start_address + len - 1) | 7] *is valid * * @param[in] bs Byte stream input * @param[in] base Pointer to raw byte stream data * @param[in] len Stream length in bytes * **/ static __device__ void bytestream_init(volatile orc_bytestream_s *bs, const uint8_t *base, uint32_t len) { uint32_t pos = static_cast<uint32_t>(7 & reinterpret_cast<size_t>(base)); bs->base = base - pos; bs->pos = (len > 0) ? pos : 0; bs->len = (len + pos + 7) & ~7; bs->fill_pos = 0; bs->fill_count = min(bs->len, BYTESTREAM_BFRSZ) >> 3; } /** * @brief Increment the read position, returns number of 64-bit slots to fill * * @param[in] bs Byte stream input * @param[in] bytes_consumed Number of bytes that were consumed * **/ static __device__ void bytestream_flush_bytes(volatile orc_bytestream_s *bs, uint32_t bytes_consumed) { uint32_t pos = bs->pos; uint32_t len = bs->len; uint32_t pos_new = min(pos + bytes_consumed, len); bs->pos = pos_new; pos = min(pos + BYTESTREAM_BFRSZ, len); pos_new = min(pos_new + BYTESTREAM_BFRSZ, len); bs->fill_pos = pos; bs->fill_count = (pos_new >> 3) - (pos >> 3); } /** * @brief Refill the byte stream buffer * * @param[in] bs Byte stream input * @param[in] t thread id * **/ static __device__ void bytestream_fill(orc_bytestream_s *bs, int t) { int count = bs->fill_count; if (t < count) { int pos8 = (bs->fill_pos >> 3) + t; bs->buf.u64[pos8 & ((BYTESTREAM_BFRSZ >> 3) - 1)] = (reinterpret_cast<const uint2 *>(bs->base))[pos8]; } } /** * @brief Read a byte from the byte stream (byte aligned) * * @param[in] bs Byte stream input * @param[in] pos Position in byte stream * @return byte * **/ inline __device__ uint8_t bytestream_readbyte(volatile orc_bytestream_s *bs, int pos) { return bs->buf.u8[pos & (BYTESTREAM_BFRSZ - 1)]; } /** * @brief Read 32 bits from a byte stream (little endian, byte aligned) * * @param[in] bs Byte stream input * @param[in] pos Position in byte stream * @result bits * **/ inline __device__ uint32_t bytestream_readu32(volatile orc_bytestream_s *bs, int pos) { uint32_t a = bs->buf.u32[(pos & (BYTESTREAM_BFRSZ - 1)) >> 2]; uint32_t b = bs->buf.u32[((pos + 4) & (BYTESTREAM_BFRSZ - 1)) >> 2]; return __funnelshift_r(a, b, (pos & 3) * 8); } /** * @brief Read 64 bits from a byte stream (little endian, byte aligned) * * @param[in] bs Byte stream input * @param[in] pos Position in byte stream * @param[in] numbits number of bits * @return bits * **/ inline __device__ uint64_t bytestream_readu64(volatile orc_bytestream_s *bs, int pos) { uint32_t a = bs->buf.u32[(pos & (BYTESTREAM_BFRSZ - 1)) >> 2]; uint32_t b = bs->buf.u32[((pos + 4) & (BYTESTREAM_BFRSZ - 1)) >> 2]; uint32_t c = bs->buf.u32[((pos + 8) & (BYTESTREAM_BFRSZ - 1)) >> 2]; uint32_t lo32 = __funnelshift_r(a, b, (pos & 3) * 8); uint32_t hi32 = __funnelshift_r(b, c, (pos & 3) * 8); uint64_t v = hi32; v <<= 32; v |= lo32; return v; } /** * @brief Read up to 32-bits from a byte stream (big endian) * * @param[in] bs Byte stream input * @param[in] bitpos Position in byte stream * 
@param[in] numbits number of bits * @return decoded value * **/ inline __device__ uint32_t bytestream_readbits(volatile orc_bytestream_s *bs, int bitpos, uint32_t numbits) { int idx = bitpos >> 5; uint32_t a = __byte_perm(bs->buf.u32[(idx + 0) & BYTESTREAM_BFRMASK32], 0, 0x0123); uint32_t b = __byte_perm(bs->buf.u32[(idx + 1) & BYTESTREAM_BFRMASK32], 0, 0x0123); return __funnelshift_l(b, a, bitpos & 0x1f) >> (32 - numbits); } /** * @brief Read up to 64-bits from a byte stream (big endian) * * @param[in] bs Byte stream input * @param[in] bitpos Position in byte stream * @param[in] numbits number of bits * @return decoded value * **/ inline __device__ uint64_t bytestream_readbits64(volatile orc_bytestream_s *bs, int bitpos, uint32_t numbits) { int idx = bitpos >> 5; uint32_t a = __byte_perm(bs->buf.u32[(idx + 0) & BYTESTREAM_BFRMASK32], 0, 0x0123); uint32_t b = __byte_perm(bs->buf.u32[(idx + 1) & BYTESTREAM_BFRMASK32], 0, 0x0123); uint32_t c = __byte_perm(bs->buf.u32[(idx + 2) & BYTESTREAM_BFRMASK32], 0, 0x0123); uint32_t hi32 = __funnelshift_l(b, a, bitpos & 0x1f); uint32_t lo32 = __funnelshift_l(c, b, bitpos & 0x1f); uint64_t v = hi32; v <<= 32; v |= lo32; v >>= (64 - numbits); return v; } /** * @brief Decode a big-endian unsigned 32-bit value * * @param[in] bs Byte stream input * @param[in] bitpos Position in byte stream * @param[in] numbits number of bits * @param[out] result decoded value * **/ inline __device__ void bytestream_readbe(volatile orc_bytestream_s *bs, int bitpos, uint32_t numbits, uint32_t &result) { result = bytestream_readbits(bs, bitpos, numbits); } /** * @brief Decode a big-endian signed 32-bit value * * @param[in] bs Byte stream input * @param[in] bitpos Position in byte stream * @param[in] numbits number of bits * @param[out] result decoded value * **/ inline __device__ void bytestream_readbe(volatile orc_bytestream_s *bs, int bitpos, uint32_t numbits, int32_t &result) { uint32_t u = bytestream_readbits(bs, bitpos, numbits); result = (int32_t)((u >> 1u) ^ -(int32_t)(u & 1)); } /** * @brief Decode a big-endian unsigned 64-bit value * * @param[in] bs Byte stream input * @param[in] bitpos Position in byte stream * @param[in] numbits number of bits * @param[out] result decoded value * **/ inline __device__ void bytestream_readbe(volatile orc_bytestream_s *bs, int bitpos, uint32_t numbits, uint64_t &result) { result = bytestream_readbits64(bs, bitpos, numbits); } /** * @brief Decode a big-endian signed 64-bit value * * @param[in] bs Byte stream input * @param[in] bitpos Position in byte stream * @param[in] numbits number of bits * @param[out] result decoded value * **/ inline __device__ void bytestream_readbe(volatile orc_bytestream_s *bs, int bitpos, uint32_t numbits, int64_t &result) { uint64_t u = bytestream_readbits64(bs, bitpos, numbits); result = (int64_t)((u >> 1u) ^ -(int64_t)(u & 1)); } /** * @brief Return the length of a base-128 varint * * @param[in] bs Byte stream input * @param[in] pos Position in circular byte stream buffer * @return length of varint in bytes **/ template <class T> inline __device__ uint32_t varint_length(volatile orc_bytestream_s *bs, int pos) { if (bytestream_readbyte(bs, pos) > 0x7f) { uint32_t next32 = bytestream_readu32(bs, pos + 1); uint32_t zbit = __ffs((~next32) & 0x80808080); if (sizeof(T) <= 4 || zbit) { return 1 + (zbit >> 3); // up to 5x7 bits } else { next32 = bytestream_readu32(bs, pos + 5); zbit = __ffs((~next32) & 0x80808080); if (zbit) { return 5 + (zbit >> 3); // up to 9x7 bits } else if ((sizeof(T) <= 8) || 
(bytestream_readbyte(bs, pos + 9) <= 0x7f)) { return 10; // up to 70 bits } else { uint64_t next64 = bytestream_readu64(bs, pos + 10); zbit = __ffsll((~next64) & 0x8080808080808080ull); if (zbit) { return 10 + (zbit >> 3); // Up to 18x7 bits (126) } else { return 19; // Up to 19x7 bits (133) } } } } else { return 1; } } /** * @brief Decodes a base-128 varint * * @param[in] bs Byte stream input * @param[in] pos Position in circular byte stream buffer * @param[in] result Unpacked value * @return new position in byte stream buffer **/ template <class T> inline __device__ int decode_base128_varint(volatile orc_bytestream_s *bs, int pos, T &result) { uint32_t v = bytestream_readbyte(bs, pos++); if (v > 0x7f) { uint32_t b = bytestream_readbyte(bs, pos++); v = (v & 0x7f) | (b << 7); if (b > 0x7f) { b = bytestream_readbyte(bs, pos++); v = (v & 0x3fff) | (b << 14); if (b > 0x7f) { b = bytestream_readbyte(bs, pos++); v = (v & 0x1fffff) | (b << 21); if (b > 0x7f) { b = bytestream_readbyte(bs, pos++); v = (v & 0x0fffffff) | (b << 28); if (sizeof(T) > 4) { uint32_t lo = v; uint64_t hi; v = b >> 4; if (b > 0x7f) { b = bytestream_readbyte(bs, pos++); v = (v & 7) | (b << 3); if (b > 0x7f) { b = bytestream_readbyte(bs, pos++); v = (v & 0x3ff) | (b << 10); if (b > 0x7f) { b = bytestream_readbyte(bs, pos++); v = (v & 0x1ffff) | (b << 17); if (b > 0x7f) { b = bytestream_readbyte(bs, pos++); v = (v & 0xffffff) | (b << 24); if (b > 0x7f) { pos++; // last bit is redundant (extra byte implies bit63 is 1) } } } } } hi = v; hi <<= 32; result = hi | lo; return pos; } } } } } result = v; return pos; } /** * @brief Decodes a signed int128 encoded as base-128 varint (used for decimals) **/ inline __device__ int128_s decode_varint128(volatile orc_bytestream_s *bs, int pos) { uint32_t b = bytestream_readbyte(bs, pos++); int64_t sign_mask = -(int32_t)(b & 1); uint64_t v = (b >> 1) & 0x3f; uint32_t bitpos = 6; uint64_t lo = v; uint64_t hi = 0; while (b > 0x7f && bitpos < 128) { b = bytestream_readbyte(bs, pos++); v |= ((uint64_t)(b & 0x7f)) << (bitpos & 0x3f); if (bitpos == 62) { // 6 + 7 * 8 = 62 lo = v; v = (b & 0x7f) >> 2; // 64 - 62 } bitpos += 7; } if (bitpos >= 64) { hi = v; } else { lo = v; } return {(uint64_t)(lo ^ sign_mask), (int64_t)(hi ^ sign_mask)}; } /** * @brief Decodes an unsigned 32-bit varint **/ inline __device__ int decode_varint(volatile orc_bytestream_s *bs, int pos, uint32_t &result) { uint32_t u; pos = decode_base128_varint<uint32_t>(bs, pos, u); result = u; return pos; } /** * @brief Decodes an unsigned 64-bit varint **/ inline __device__ int decode_varint(volatile orc_bytestream_s *bs, int pos, uint64_t &result) { uint64_t u; pos = decode_base128_varint<uint64_t>(bs, pos, u); result = u; return pos; } /** * @brief Signed version of 32-bit decode_varint **/ inline __device__ int decode_varint(volatile orc_bytestream_s *bs, int pos, int32_t &result) { uint32_t u; pos = decode_base128_varint<uint32_t>(bs, pos, u); result = (int32_t)((u >> 1u) ^ -(int32_t)(u & 1)); return pos; } /** * @brief Signed version of 64-bit decode_varint **/ inline __device__ int decode_varint(volatile orc_bytestream_s *bs, int pos, int64_t &result) { uint64_t u; pos = decode_base128_varint<uint64_t>(bs, pos, u); result = (int64_t)((u >> 1u) ^ -(int64_t)(u & 1)); return pos; } /** * @brief In-place conversion from lengths to positions * * @param[in] vals input values * @param[in] numvals number of values * @param[in] t thread id * * @return number of values decoded **/ template <class T> inline __device__ void 
lengths_to_positions(volatile T *vals, uint32_t numvals, unsigned int t) { for (uint32_t n = 1; n < numvals; n <<= 1) { __syncthreads(); if ((t & n) && (t < numvals)) vals[t] += vals[(t & ~n) | (n - 1)]; } } /** * @brief ORC Integer RLEv1 decoding * * @param[in] bs input byte stream * @param[in] rle RLE state * @param[in] vals buffer for output values (uint32_t, int32_t, uint64_t or int64_t) * @param[in] maxvals maximum number of values to decode * @param[in] t thread id * * @return number of values decoded **/ template <class T> static __device__ uint32_t Integer_RLEv1( orc_bytestream_s *bs, volatile orc_rlev1_state_s *rle, volatile T *vals, uint32_t maxvals, int t) { uint32_t numvals, numruns; if (t == 0) { uint32_t maxpos = min(bs->len, bs->pos + (BYTESTREAM_BFRSZ - 8u)); uint32_t lastpos = bs->pos; numvals = numruns = 0; // Find the length and start location of each run while (numvals < maxvals && numruns < NWARPS * 12) { uint32_t pos = lastpos; uint32_t n = bytestream_readbyte(bs, pos++); if (n <= 0x7f) { // Run int32_t delta; n = n + 3; if (numvals + n > maxvals) break; delta = bytestream_readbyte(bs, pos++); vals[numvals] = pos & 0xffff; pos += varint_length<T>(bs, pos); if (pos > maxpos) break; rle->run_data[numruns++] = (delta << 24) | (n << 16) | numvals; numvals += n; } else { // Literals uint32_t i; n = 0x100 - n; if (numvals + n > maxvals) break; i = 0; do { vals[numvals + i] = pos & 0xffff; pos += varint_length<T>(bs, pos); } while (++i < n); if (pos > maxpos) break; numvals += n; } lastpos = pos; } rle->num_runs = numruns; rle->num_vals = numvals; bytestream_flush_bytes(bs, lastpos - bs->pos); } __syncthreads(); // Expand the runs numruns = rle->num_runs; if (numruns > 0) { int r = t >> 5; int tr = t & 0x1f; for (uint32_t run = r; run < numruns; run += NWARPS) { int32_t run_data = rle->run_data[run]; int n = (run_data >> 16) & 0xff; int delta = run_data >> 24; uint32_t base = run_data & 0x3ff; uint32_t pos = vals[base] & 0xffff; for (int i = 1 + tr; i < n; i += 32) { vals[base + i] = ((delta * i) << 16) | pos; } } __syncthreads(); } numvals = rle->num_vals; // Decode individual 32-bit varints if (t < numvals) { int32_t pos = vals[t]; int32_t delta = pos >> 16; T v; decode_varint(bs, pos, v); vals[t] = v + delta; } __syncthreads(); return numvals; } /** * @brief Maps the RLEv2 5-bit length code to 6-bit length * **/ static const __device__ __constant__ uint8_t kRLEv2_W[32] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 26, 28, 30, 32, 40, 48, 56, 64}; /** * @brief ORC Integer RLEv2 decoding * * @param[in] bs input byte stream * @param[in] rle RLE state * @param[in] vals buffer for output values (uint32_t, int32_t, uint64_t or int64_t) * @param[in] maxvals maximum number of values to decode * @param[in] t thread id * * @return number of values decoded **/ template <class T> static __device__ uint32_t Integer_RLEv2( orc_bytestream_s *bs, volatile orc_rlev2_state_s *rle, volatile T *vals, uint32_t maxvals, int t) { uint32_t numvals, numruns; int r, tr; if (t == 0) { uint32_t maxpos = min(bs->len, bs->pos + (BYTESTREAM_BFRSZ - 8u)); uint32_t lastpos = bs->pos; numvals = numruns = 0; // Find the length and start location of each run while (numvals < maxvals) { uint32_t pos = lastpos; uint32_t byte0 = bytestream_readbyte(bs, pos++); uint32_t n, l; int mode = byte0 >> 6; rle->runs_loc[numruns] = numvals; vals[numvals] = lastpos; if (mode == 0) { // 00lllnnn: short repeat encoding l = 1 + ((byte0 >> 3) & 7); // 1 to 8 bytes n = 3 + (byte0 & 
7); // 3 to 10 values } else { l = kRLEv2_W[(byte0 >> 1) & 0x1f]; n = 1 + ((byte0 & 1) << 8) + bytestream_readbyte(bs, pos++); if (mode == 1) { // 01wwwwwn.nnnnnnnn: direct encoding l = (l * n + 7) >> 3; } else if (mode == 2) { // 10wwwwwn.nnnnnnnn.xxxxxxxx.yyyyyyyy: patched base encoding uint32_t byte2 = bytestream_readbyte(bs, pos++); uint32_t byte3 = bytestream_readbyte(bs, pos++); uint32_t bw = 1 + (byte2 >> 5); // base value width, 1 to 8 bytes uint32_t pw = kRLEv2_W[byte2 & 0x1f]; // patch width, 1 to 64 bits uint32_t pgw = 1 + (byte3 >> 5); // patch gap width, 1 to 8 bits uint32_t pll = byte3 & 0x1f; // patch list length l = (l * n + 7) >> 3; l += bw; l += (pll * (pgw + pw) + 7) >> 3; } else { // 11wwwwwn.nnnnnnnn.<base>.<delta>: delta encoding uint32_t deltapos = varint_length<T>(bs, pos); deltapos += varint_length<T>(bs, pos + deltapos); l = (l > 1 && n > 2) ? (l * (n - 2) + 7) >> 3 : 0; l += deltapos; } } if (numvals + n > maxvals) break; pos += l; if (pos > maxpos) break; lastpos = pos; numvals += n; numruns++; } rle->num_vals = numvals; rle->num_runs = numruns; bytestream_flush_bytes(bs, lastpos - bs->pos); } __syncthreads(); // Process the runs, 1 warp per run numruns = rle->num_runs; r = t >> 5; tr = t & 0x1f; for (uint32_t run = r; run < numruns; run += NWARPS) { uint32_t base, pos, w, n; int mode; if (tr == 0) { uint32_t byte0; base = rle->runs_loc[run]; pos = vals[base]; byte0 = bytestream_readbyte(bs, pos++); mode = byte0 >> 6; if (mode == 0) { T baseval; // 00lllnnn: short repeat encoding w = 8 + (byte0 & 0x38); // 8 to 64 bits n = 3 + (byte0 & 7); // 3 to 10 values bytestream_readbe(bs, pos * 8, w, baseval); if (sizeof(T) <= 4) { rle->baseval.u32[r] = baseval; } else { rle->baseval.u64[r] = baseval; } } else { w = kRLEv2_W[(byte0 >> 1) & 0x1f]; n = 1 + ((byte0 & 1) << 8) + bytestream_readbyte(bs, pos++); if (mode > 1) { if (mode == 2) { // Patched base uint32_t byte2 = bytestream_readbyte(bs, pos++); uint32_t byte3 = bytestream_readbyte(bs, pos++); uint32_t bw = 1 + (byte2 >> 5); // base value width, 1 to 8 bytes uint32_t pw = kRLEv2_W[byte2 & 0x1f]; // patch width, 1 to 64 bits if (sizeof(T) <= 4) { uint32_t baseval, mask; bytestream_readbe(bs, pos * 8, bw * 8, baseval); mask = (1 << (bw * 8 - 1)) - 1; rle->baseval.u32[r] = (baseval > mask) ? (-(int32_t)(baseval & mask)) : baseval; } else { uint64_t baseval, mask; bytestream_readbe(bs, pos * 8, bw * 8, baseval); mask = 1; mask <<= (bw * 8) - 1; mask -= 1; rle->baseval.u64[r] = (baseval > mask) ? (-(int64_t)(baseval & mask)) : baseval; } rle->m2_pw_byte3[r] = (pw << 8) | byte3; pos += bw; } else { T baseval; int64_t delta; // Delta pos = decode_varint(bs, pos, baseval); if (sizeof(T) <= 4) { rle->baseval.u32[r] = baseval; } else { rle->baseval.u64[r] = baseval; } pos = decode_varint(bs, pos, delta); rle->delta[r] = delta; } } } } base = SHFL0(base); mode = SHFL0(mode); pos = SHFL0(pos); n = SHFL0(n); w = SHFL0(w); for (uint32_t i = tr; i < n; i += 32) { if (sizeof(T) <= 4) { if (mode == 0) { vals[base + i] = rle->baseval.u32[r]; } else if (mode == 1) { T v; bytestream_readbe(bs, pos * 8 + i * w, w, v); vals[base + i] = v; } else if (mode == 2) { uint32_t ofs = bytestream_readbits(bs, pos * 8 + i * w, w); vals[base + i] = rle->baseval.u32[r] + ofs; } else { int64_t delta = rle->delta[r]; if (w > 1 && i > 1) { int32_t delta_s = (delta < 0) ? -1 : 0; vals[base + i] = (bytestream_readbits(bs, pos * 8 + (i - 2) * w, w) ^ delta_s) - delta_s; } else { vals[base + i] = (i == 0) ? 
0 : static_cast<uint32_t>(delta); } } } else { if (mode == 0) { vals[base + i] = rle->baseval.u64[r]; } else if (mode == 1) { T v; bytestream_readbe(bs, pos * 8 + i * w, w, v); vals[base + i] = v; } else if (mode == 2) { uint32_t ofs = bytestream_readbits64(bs, pos * 8 + i * w, w); vals[base + i] = rle->baseval.u64[r] + ofs; } else { int64_t delta = rle->delta[r], ofs; if (w > 1 && i > 1) { int64_t delta_s = (delta < 0) ? -1 : 0; ofs = (bytestream_readbits64(bs, pos * 8 + (i - 2) * w, w) ^ delta_s) - delta_s; } else { ofs = (i == 0) ? 0 : delta; } vals[base + i] = ofs; } } } SYNCWARP(); // Patch values if (mode == 2) { uint32_t pw_byte3 = rle->m2_pw_byte3[r]; uint32_t pw = pw_byte3 >> 8; uint32_t pgw = 1 + ((pw_byte3 >> 5) & 7); // patch gap width, 1 to 8 bits uint32_t pll = pw_byte3 & 0x1f; // patch list length if (pll != 0) { uint64_t patch_pos64 = (tr < pll) ? bytestream_readbits64(bs, pos * 8 + ((n * w + 7) & ~7) + tr * (pgw + pw), pgw + pw) : 0; uint32_t patch_pos; T patch = 1; patch <<= pw; patch = (patch - 1) & (T)patch_pos64; patch <<= w; patch_pos = (uint32_t)(patch_pos64 >> pw); for (uint32_t k = 1; k < pll; k <<= 1) { uint32_t tmp = SHFL(patch_pos, (tr & ~k) | (k - 1)); patch_pos += (tr & k) ? tmp : 0; } if (tr < pll && patch_pos < n) { vals[base + patch_pos] += patch; } } } SYNCWARP(); if (mode == 3) { T baseval; for (uint32_t i = 1; i < n; i <<= 1) { SYNCWARP(); for (uint32_t j = tr; j < n; j += 32) { if (j & i) vals[base + j] += vals[base + ((j & ~i) | (i - 1))]; } } if (sizeof(T) <= 4) baseval = rle->baseval.u32[r]; else baseval = rle->baseval.u64[r]; for (uint32_t j = tr; j < n; j += 32) { vals[base + j] += baseval; } } } __syncthreads(); return rle->num_vals; } /** * @brief Reads 32 booleans as a packed 32-bit value * * @param[in] vals 32-bit array of values (little-endian) * @param[in] bitpos bit position * * @return 32-bit value **/ inline __device__ uint32_t rle8_read_bool32(volatile uint32_t *vals, uint32_t bitpos) { uint32_t a = vals[(bitpos >> 5) + 0]; uint32_t b = vals[(bitpos >> 5) + 1]; a = __byte_perm(a, 0, 0x0123); b = __byte_perm(b, 0, 0x0123); return __brev(__funnelshift_l(b, a, bitpos)); } /** * @brief ORC Byte RLE decoding * * @param[in] bs Input byte stream * @param[in] rle RLE state * @param[in] vals output buffer for decoded 8-bit values * @param[in] maxvals Maximum number of values to decode * @param[in] t thread id * * @return number of values decoded **/ static __device__ uint32_t Byte_RLE(orc_bytestream_s *bs, volatile orc_byterle_state_s *rle, volatile uint8_t *vals, uint32_t maxvals, int t) { uint32_t numvals, numruns; int r, tr; if (t == 0) { uint32_t maxpos = min(bs->len, bs->pos + (BYTESTREAM_BFRSZ - 8u)); uint32_t lastpos = bs->pos; numvals = numruns = 0; // Find the length and start location of each run while (numvals < maxvals && numruns < NWARPS) { uint32_t pos = lastpos, n; rle->runs_pos[numruns] = pos; rle->runs_loc[numruns] = numvals; n = bytestream_readbyte(bs, pos++); if (n <= 0x7f) { // Run n = n + 3; pos++; } else { // Literals n = 0x100 - n; pos += n; } if (pos > maxpos || numvals + n > maxvals) { break; } numruns++; numvals += n; lastpos = pos; } rle->num_runs = numruns; rle->num_vals = numvals; bytestream_flush_bytes(bs, lastpos - bs->pos); } __syncthreads(); numruns = rle->num_runs; r = t >> 5; tr = t & 0x1f; for (int run = r; run < numruns; run += NWARPS) { uint32_t pos = rle->runs_pos[run]; uint32_t loc = rle->runs_loc[run]; uint32_t n = bytestream_readbyte(bs, pos++); uint32_t literal_mask; if (n <= 0x7f) { literal_mask = 0; n 
+= 3; } else { literal_mask = ~0; n = 0x100 - n; } for (uint32_t i = tr; i < n; i += 32) { vals[loc + i] = bytestream_readbyte(bs, pos + (i & literal_mask)); } } __syncthreads(); return rle->num_vals; } /** * @brief Powers of 10 * **/ static const __device__ __constant__ double kPow10[40] = { 1.0, 1.e1, 1.e2, 1.e3, 1.e4, 1.e5, 1.e6, 1.e7, 1.e8, 1.e9, 1.e10, 1.e11, 1.e12, 1.e13, 1.e14, 1.e15, 1.e16, 1.e17, 1.e18, 1.e19, 1.e20, 1.e21, 1.e22, 1.e23, 1.e24, 1.e25, 1.e26, 1.e27, 1.e28, 1.e29, 1.e30, 1.e31, 1.e32, 1.e33, 1.e34, 1.e35, 1.e36, 1.e37, 1.e38, 1.e39, }; static const __device__ __constant__ int64_t kPow5i[28] = {1, 5, 25, 125, 625, 3125, 15625, 78125, 390625, 1953125, 9765625, 48828125, 244140625, 1220703125, 6103515625ll, 30517578125ll, 152587890625ll, 762939453125ll, 3814697265625ll, 19073486328125ll, 95367431640625ll, 476837158203125ll, 2384185791015625ll, 11920928955078125ll, 59604644775390625ll, 298023223876953125ll, 1490116119384765625ll, 7450580596923828125ll}; /** * @brief ORC Decimal decoding (unbounded base-128 varints) * * @param[in] bs Input byte stream * @param[in,out] vals on input: scale from secondary stream, on output: value * @param[in] numvals Number of values to decode * @param[in] t thread id * * @return number of values decoded * **/ static __device__ int Decode_Decimals(orc_bytestream_s *bs, volatile orc_byterle_state_s *scratch, volatile int64_t *vals, int val_scale, int numvals, int col_scale, int t) { if (t == 0) { uint32_t maxpos = min(bs->len, bs->pos + (BYTESTREAM_BFRSZ - 8u)); uint32_t lastpos = bs->pos; uint32_t n; for (n = 0; n < numvals; n++) { uint32_t pos = lastpos; *(volatile int32_t *)&vals[n] = lastpos; pos += varint_length<uint4>(bs, pos); if (pos > maxpos) break; lastpos = pos; } scratch->num_vals = n; bytestream_flush_bytes(bs, lastpos - bs->pos); } __syncthreads(); numvals = scratch->num_vals; if (t < numvals) { int pos = *(volatile int32_t *)&vals[t]; int128_s v = decode_varint128(bs, pos); if (col_scale & ORC_DECIMAL2FLOAT64_SCALE) { double f = Int128ToDouble_rn(v.lo, v.hi); int32_t scale = (t < numvals) ? val_scale : 0; if (scale >= 0) reinterpret_cast<volatile double *>(vals)[t] = f / kPow10[min(scale, 39)]; else reinterpret_cast<volatile double *>(vals)[t] = f * kPow10[min(-scale, 39)]; } else { int32_t scale = (t < numvals) ? (col_scale & ~ORC_DECIMAL2FLOAT64_SCALE) - val_scale : 0; if (scale >= 0) { scale = min(scale, 27); vals[t] = ((int64_t)v.lo * kPow5i[scale]) << scale; } else // if (scale < 0) { bool is_negative = (v.hi < 0); uint64_t hi = v.hi, lo = v.lo; scale = min(-scale, 27); if (is_negative) { hi = (~hi) + (lo == 0); lo = (~lo) + 1; } lo = (lo >> (uint32_t)scale) | ((uint64_t)hi << (64 - scale)); hi >>= (int32_t)scale; if (hi != 0) { // Use intermediate float lo = __double2ull_rn(Int128ToDouble_rn(lo, hi) / __ll2double_rn(kPow5i[scale])); hi = 0; } else { lo /= kPow5i[scale]; } vals[t] = (is_negative) ? 
-(int64_t)lo : (int64_t)lo; } } } return numvals; } /** * @brief Decoding NULLs and builds string dictionary index tables * * @param[in] chunks ColumnDesc device array [stripe][column] * @param[in] global_dictionary Global dictionary device array * @param[in] num_columns Number of columns * @param[in] num_stripes Number of stripes * @param[in] max_num_rows Maximum number of rows to load * @param[in] first_row Crop all rows below first_row * **/ // blockDim {NTHREADS,1,1} extern "C" __global__ void __launch_bounds__(NTHREADS) gpuDecodeNullsAndStringDictionaries(ColumnDesc *chunks, DictionaryEntry *global_dictionary, uint32_t num_columns, uint32_t num_stripes, size_t max_num_rows, size_t first_row) { __shared__ __align__(16) orcdec_state_s state_g; orcdec_state_s *const s = &state_g; bool is_nulldec = (blockIdx.y >= num_stripes); uint32_t column = blockIdx.x; uint32_t stripe = (is_nulldec) ? blockIdx.y - num_stripes : blockIdx.y; uint32_t chunk_id = stripe * num_columns + column; int t = threadIdx.x; if (t < sizeof(ColumnDesc) / sizeof(uint32_t)) { ((volatile uint32_t *)&s->chunk)[t] = ((const uint32_t *)&chunks[chunk_id])[t]; } __syncthreads(); if (is_nulldec) { uint32_t null_count = 0; // Decode NULLs if (t == 0) { s->chunk.skip_count = 0; s->top.nulls.row = 0; bytestream_init(&s->bs, s->chunk.streams[CI_PRESENT], s->chunk.strm_len[CI_PRESENT]); } __syncthreads(); if (s->chunk.strm_len[CI_PRESENT] == 0) { // No present stream: all rows are valid s->vals.u32[t] = ~0; } while (s->top.nulls.row < s->chunk.num_rows) { uint32_t nrows_max = min(s->chunk.num_rows - s->top.nulls.row, NTHREADS * 32); uint32_t nrows; size_t row_in; bytestream_fill(&s->bs, t); __syncthreads(); if (s->chunk.strm_len[CI_PRESENT] > 0) { uint32_t nbytes = Byte_RLE(&s->bs, &s->u.rle8, s->vals.u8, (nrows_max + 7) >> 3, t); nrows = min(nrows_max, nbytes * 8u); if (!nrows) { // Error: mark all remaining rows as null nrows = nrows_max; if (t * 32 < nrows) { s->vals.u32[t] = 0; } } } else { nrows = nrows_max; } __syncthreads(); row_in = s->chunk.start_row + s->top.nulls.row; if (row_in + nrows > first_row && row_in < first_row + max_num_rows && s->chunk.valid_map_base != NULL) { int64_t dst_row = row_in - first_row; int64_t dst_pos = max(dst_row, (int64_t)0); uint32_t startbit = -static_cast<int32_t>(min(dst_row, (int64_t)0)); uint32_t nbits = nrows - min(startbit, nrows); uint32_t *valid = s->chunk.valid_map_base + (dst_pos >> 5); uint32_t bitpos = static_cast<uint32_t>(dst_pos) & 0x1f; if ((size_t)(dst_pos + nbits) > max_num_rows) { nbits = static_cast<uint32_t>(max_num_rows - min((size_t)dst_pos, max_num_rows)); } // Store bits up to the next 32-bit aligned boundary if (bitpos != 0) { uint32_t n = min(32u - bitpos, nbits); if (t == 0) { uint32_t mask = ((1 << n) - 1) << bitpos; uint32_t bits = (rle8_read_bool32(s->vals.u32, startbit) << bitpos) & mask; atomicAnd(valid, ~mask); atomicOr(valid, bits); null_count += __popc((~bits) & mask); } nbits -= n; startbit += n; valid++; } // Store bits aligned if (t * 32 + 32 <= nbits) { uint32_t bits = rle8_read_bool32(s->vals.u32, startbit + t * 32); valid[t] = bits; null_count += __popc(~bits); } else if (t * 32 < nbits) { uint32_t n = nbits - t * 32; uint32_t mask = (1 << n) - 1; uint32_t bits = rle8_read_bool32(s->vals.u32, startbit + t * 32) & mask; atomicAnd(valid + t, ~mask); atomicOr(valid + t, bits); null_count += __popc((~bits) & mask); } __syncthreads(); } // We may have some valid values that are not decoded below first_row -> count these in // skip_count, so that 
subsequent kernel can infer the correct row position if (row_in < first_row && t < 32) { uint32_t skippedrows = min(static_cast<uint32_t>(first_row - row_in), nrows); uint32_t skip_count = 0; for (uint32_t i = t * 32; i < skippedrows; i += 32 * 32) { uint32_t bits = s->vals.u32[i >> 5]; if (i + 32 > skippedrows) { bits &= (1 << (skippedrows - i)) - 1; } skip_count += __popc(bits); } skip_count += SHFL_XOR(skip_count, 1); skip_count += SHFL_XOR(skip_count, 2); skip_count += SHFL_XOR(skip_count, 4); skip_count += SHFL_XOR(skip_count, 8); skip_count += SHFL_XOR(skip_count, 16); if (t == 0) { s->chunk.skip_count += skip_count; } } __syncthreads(); if (t == 0) { s->top.nulls.row += nrows; } __syncthreads(); } __syncthreads(); // Sum up the valid counts and infer null_count null_count += SHFL_XOR(null_count, 1); null_count += SHFL_XOR(null_count, 2); null_count += SHFL_XOR(null_count, 4); null_count += SHFL_XOR(null_count, 8); null_count += SHFL_XOR(null_count, 16); if (!(t & 0x1f)) { s->top.nulls.null_count[t >> 5] = null_count; } __syncthreads(); if (t < 32) { null_count = (t < NWARPS) ? s->top.nulls.null_count[t] : 0; null_count += SHFL_XOR(null_count, 1); null_count += SHFL_XOR(null_count, 2); null_count += SHFL_XOR(null_count, 4); null_count += SHFL_XOR(null_count, 8); null_count += SHFL_XOR(null_count, 16); if (t == 0) { chunks[chunk_id].null_count = null_count; chunks[chunk_id].skip_count = s->chunk.skip_count; } } } else { // Decode string dictionary int encoding_kind = s->chunk.encoding_kind; if ((encoding_kind == DICTIONARY || encoding_kind == DICTIONARY_V2) && (s->chunk.dict_len > 0)) { if (t == 0) { s->top.dict.dict_len = s->chunk.dict_len; s->top.dict.local_dict = (uint2 *)(global_dictionary + s->chunk.dictionary_start); // Local dictionary s->top.dict.dict_pos = 0; // CI_DATA2 contains the LENGTH stream coding the length of individual dictionary entries bytestream_init(&s->bs, s->chunk.streams[CI_DATA2], s->chunk.strm_len[CI_DATA2]); } __syncthreads(); while (s->top.dict.dict_len > 0) { uint32_t numvals = min(s->top.dict.dict_len, NTHREADS), len; volatile uint32_t *vals = s->vals.u32; bytestream_fill(&s->bs, t); __syncthreads(); if (IS_RLEv1(s->chunk.encoding_kind)) { numvals = Integer_RLEv1(&s->bs, &s->u.rlev1, vals, numvals, t); } else // RLEv2 { numvals = Integer_RLEv2(&s->bs, &s->u.rlev2, vals, numvals, t); } __syncthreads(); len = (t < numvals) ? 
vals[t] : 0; lengths_to_positions(vals, numvals, t); __syncthreads(); if (numvals == 0) { // This is an error (ran out of data) numvals = min(s->top.dict.dict_len, NTHREADS); vals[t] = 0; } if (t < numvals) { uint2 dict_entry; dict_entry.x = s->top.dict.dict_pos + vals[t] - len; dict_entry.y = len; s->top.dict.local_dict[t] = dict_entry; } __syncthreads(); if (t == 0) { s->top.dict.dict_pos += vals[numvals - 1]; s->top.dict.dict_len -= numvals; s->top.dict.local_dict += numvals; } __syncthreads(); } } } } /** * @brief Decode row positions from valid bits * * @param[in,out] s Column chunk decoder state * @param[in] first_row crop all rows below first rows * @param[in] t thread id * **/ static __device__ void DecodeRowPositions(orcdec_state_s *s, size_t first_row, int t) { if (t == 0) { if (s->chunk.skip_count != 0) { s->u.rowdec.nz_count = min(min(s->chunk.skip_count, s->top.data.max_vals), NTHREADS); s->chunk.skip_count -= s->u.rowdec.nz_count; s->top.data.nrows = s->u.rowdec.nz_count; } else { s->u.rowdec.nz_count = 0; } } __syncthreads(); if (t < s->u.rowdec.nz_count) { s->u.rowdec.row[t] = 0; // Skipped values (below first_row) } while (s->u.rowdec.nz_count < s->top.data.max_vals && s->top.data.cur_row + s->top.data.nrows < s->top.data.end_row) { uint32_t nrows = min(s->top.data.end_row - s->top.data.cur_row, min((ROWDEC_BFRSZ - s->u.rowdec.nz_count) * 2, NTHREADS)); if (s->chunk.strm_len[CI_PRESENT] > 0) { // We have a present stream uint32_t rmax = s->top.data.end_row - min((uint32_t)first_row, s->top.data.end_row); uint32_t r = (uint32_t)(s->top.data.cur_row + s->top.data.nrows + t - first_row); uint32_t valid = (t < nrows && r < rmax) ? (((const uint8_t *)s->chunk.valid_map_base)[r >> 3] >> (r & 7)) & 1 : 0; volatile uint16_t *row_ofs_plus1 = (volatile uint16_t *)&s->u.rowdec.row[s->u.rowdec.nz_count]; uint32_t nz_pos, row_plus1, nz_count = s->u.rowdec.nz_count, last_row; if (t < nrows) { row_ofs_plus1[t] = valid; } lengths_to_positions<uint16_t>(row_ofs_plus1, nrows, t); if (t < nrows) { nz_count += row_ofs_plus1[t]; row_plus1 = s->top.data.nrows + t + 1; } else { row_plus1 = 0; } if (t == nrows - 1) { s->u.rowdec.nz_count = min(nz_count, s->top.data.max_vals); } __syncthreads(); // TBD: Brute-forcing this, there might be a more efficient way to find the thread with the // last row last_row = (nz_count == s->u.rowdec.nz_count) ? row_plus1 : 0; last_row = max(last_row, SHFL_XOR(last_row, 1)); last_row = max(last_row, SHFL_XOR(last_row, 2)); last_row = max(last_row, SHFL_XOR(last_row, 4)); last_row = max(last_row, SHFL_XOR(last_row, 8)); last_row = max(last_row, SHFL_XOR(last_row, 16)); if (!(t & 0x1f)) { *(volatile uint32_t *)&s->u.rowdec.last_row[t >> 5] = last_row; } nz_pos = (valid) ? nz_count : 0; __syncthreads(); if (t < 32) { last_row = (t < NWARPS) ? 
*(volatile uint32_t *)&s->u.rowdec.last_row[t] : 0; last_row = max(last_row, SHFL_XOR(last_row, 1)); last_row = max(last_row, SHFL_XOR(last_row, 2)); last_row = max(last_row, SHFL_XOR(last_row, 4)); last_row = max(last_row, SHFL_XOR(last_row, 8)); last_row = max(last_row, SHFL_XOR(last_row, 16)); if (t == 0) { s->top.data.nrows = last_row; } } if (valid && nz_pos - 1 < s->u.rowdec.nz_count) { s->u.rowdec.row[nz_pos - 1] = row_plus1; } __syncthreads(); } else { // All values are valid nrows = min(nrows, s->top.data.max_vals - s->u.rowdec.nz_count); if (t < nrows) { s->u.rowdec.row[s->u.rowdec.nz_count + t] = s->top.data.nrows + t + 1; } __syncthreads(); if (t == 0) { s->top.data.nrows += nrows; s->u.rowdec.nz_count += nrows; } __syncthreads(); } } } /** * @brief Convert seconds from writer timezone to UTC * * @param[in] s Orc data decoder state * @param[in] table Timezone translation table * @param[in] ts Local time in seconds * * @return UTC time in seconds * **/ static __device__ int64_t ConvertToUTC(const orc_datadec_state_s *s, const int64_t *table, int64_t ts) { uint32_t num_entries = s->tz_num_entries; uint32_t dst_cycle = s->tz_dst_cycle; int64_t first_transition = s->first_tz_transition; int64_t last_transition = s->last_tz_transition; int64_t tsbase; uint32_t first, last; if (ts <= first_transition) { return ts + table[0 * 2 + 2]; } else if (ts <= last_transition) { first = 0; last = num_entries - 1; tsbase = ts; } else if (!dst_cycle) { return ts + table[(num_entries - 1) * 2 + 2]; } else { // Apply 400-year cycle rule const int64_t k400Years = (365 * 400 + (100 - 3)) * 24 * 60 * 60ll; tsbase = ts; ts %= k400Years; if (ts < 0) { ts += k400Years; } first = num_entries; last = num_entries + dst_cycle - 1; if (ts < table[num_entries * 2 + 1]) { return tsbase + table[last * 2 + 2]; } } // Binary search the table from first to last for ts do { uint32_t mid = first + ((last - first + 1) >> 1); int64_t tmid = table[mid * 2 + 1]; if (tmid <= ts) { first = mid; } else { if (mid == last) { break; } last = mid; } } while (first < last); return tsbase + table[first * 2 + 2]; } /** * @brief Trailing zeroes for decoding timestamp nanoseconds * **/ static const __device__ __constant__ uint32_t kTimestampNanoScale[8] = { 1, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000}; /** * @brief Decodes column data * * @param[in] chunks ColumnDesc device array * @param[in] global_dictionary Global dictionary device array * @param[in] tz_table Timezone translation table * @param[in] row_groups Optional row index data * @param[in] max_num_rows Maximum number of rows to load * @param[in] first_row Crop all rows below first_row * @param[in] num_chunks Number of column chunks (num_columns * num_stripes) * @param[in] tz_len Length of timezone translation table (number of pairs) * @param[in] num_rowgroups Number of row groups in row index data * @param[in] rowidx_stride Row index stride * **/ // blockDim {NTHREADS,1,1} extern "C" __global__ void __launch_bounds__(NTHREADS) gpuDecodeOrcColumnData(ColumnDesc *chunks, DictionaryEntry *global_dictionary, int64_t *tz_table, const RowGroup *row_groups, size_t max_num_rows, size_t first_row, uint32_t num_columns, uint32_t tz_len, uint32_t num_rowgroups, uint32_t rowidx_stride) { __shared__ __align__(16) orcdec_state_s state_g; orcdec_state_s *const s = &state_g; uint32_t chunk_id; int t = threadIdx.x; if (num_rowgroups > 0) { if (t < sizeof(RowGroup) / sizeof(uint32_t)) { ((volatile uint32_t *)&s->top.data.index)[t] = ((const uint32_t *)&row_groups[blockIdx.y * 
num_columns + blockIdx.x])[t]; } __syncthreads(); chunk_id = s->top.data.index.chunk_id; } else { chunk_id = blockIdx.x; } if (t < sizeof(ColumnDesc) / sizeof(uint32_t)) { ((volatile uint32_t *)&s->chunk)[t] = ((const uint32_t *)&chunks[chunk_id])[t]; } __syncthreads(); if (t == 0) { // If we have an index, seek to the initial run and update row positions if (num_rowgroups > 0) { uint32_t ofs0 = min(s->top.data.index.strm_offset[0], s->chunk.strm_len[CI_DATA]); uint32_t ofs1 = min(s->top.data.index.strm_offset[1], s->chunk.strm_len[CI_DATA2]); uint32_t rowgroup_rowofs; s->chunk.streams[CI_DATA] += ofs0; s->chunk.strm_len[CI_DATA] -= ofs0; s->chunk.streams[CI_DATA2] += ofs1; s->chunk.strm_len[CI_DATA2] -= ofs1; rowgroup_rowofs = min((blockIdx.y - min(s->chunk.rowgroup_id, blockIdx.y)) * rowidx_stride, s->chunk.num_rows); s->chunk.start_row += rowgroup_rowofs; s->chunk.num_rows -= rowgroup_rowofs; } s->is_string = (s->chunk.type_kind == STRING || s->chunk.type_kind == BINARY || s->chunk.type_kind == VARCHAR || s->chunk.type_kind == CHAR); s->top.data.cur_row = max(s->chunk.start_row, max((int32_t)(first_row - s->chunk.skip_count), 0)); s->top.data.end_row = s->chunk.start_row + s->chunk.num_rows; s->top.data.buffered_count = 0; if (s->top.data.end_row > first_row + max_num_rows) { s->top.data.end_row = static_cast<uint32_t>(first_row + max_num_rows); } if (num_rowgroups > 0) { s->top.data.end_row = min(s->top.data.end_row, s->chunk.start_row + rowidx_stride); } if (!IS_DICTIONARY(s->chunk.encoding_kind)) { s->chunk.dictionary_start = 0; } if (tz_len > 0) { if (tz_len > 800) // 2 entries/year for 400 years { s->top.data.tz_num_entries = tz_len - 800; s->top.data.tz_dst_cycle = 800; } else { s->top.data.tz_num_entries = tz_len; s->top.data.tz_dst_cycle = 0; } s->top.data.utc_epoch = kORCTimeToUTC - tz_table[0]; if (tz_len > 0) { s->top.data.first_tz_transition = tz_table[1]; s->top.data.last_tz_transition = tz_table[(s->top.data.tz_num_entries - 1) * 2 + 1]; } } else { s->top.data.utc_epoch = kORCTimeToUTC; } bytestream_init(&s->bs, s->chunk.streams[CI_DATA], s->chunk.strm_len[CI_DATA]); bytestream_init(&s->bs2, s->chunk.streams[CI_DATA2], s->chunk.strm_len[CI_DATA2]); } __syncthreads(); while (s->top.data.cur_row < s->top.data.end_row) { bytestream_fill(&s->bs, t); bytestream_fill(&s->bs2, t); __syncthreads(); if (t == 0) { uint32_t max_vals = s->chunk.start_row + s->chunk.num_rows - s->top.data.cur_row; if (num_rowgroups > 0 && (s->is_string || s->chunk.type_kind == TIMESTAMP)) { max_vals += s->top.data.index.run_pos[IS_DICTIONARY(s->chunk.encoding_kind) ? CI_DATA : CI_DATA2]; } s->bs.fill_count = 0; s->bs2.fill_count = 0; s->top.data.nrows = 0; s->top.data.max_vals = min(max_vals, (s->chunk.type_kind == BOOLEAN) ? NTHREADS * 2 : NTHREADS); } __syncthreads(); // Decode data streams { uint32_t numvals = s->top.data.max_vals, secondary_val; uint32_t vals_skipped = 0; if (s->is_string || s->chunk.type_kind == TIMESTAMP) { // For these data types, we have a secondary unsigned 32-bit data stream orc_bytestream_s *bs = (IS_DICTIONARY(s->chunk.encoding_kind)) ? 
&s->bs : &s->bs2; uint32_t ofs = 0; if (s->chunk.type_kind == TIMESTAMP) { // Restore buffered secondary stream values, if any ofs = s->top.data.buffered_count; if (ofs > 0) { __syncthreads(); if (t == 0) { s->top.data.buffered_count = 0; } } } if (numvals > ofs) { if (IS_RLEv1(s->chunk.encoding_kind)) { numvals = ofs + Integer_RLEv1(bs, &s->u.rlev1, &s->vals.u32[ofs], numvals - ofs, t); } else { numvals = ofs + Integer_RLEv2(bs, &s->u.rlev2, &s->vals.u32[ofs], numvals - ofs, t); } __syncthreads(); if (numvals <= ofs && t >= ofs && t < s->top.data.max_vals) { s->vals.u32[t] = 0; } } // If we're using an index, we may have to drop values from the initial run if (num_rowgroups > 0) { int cid = IS_DICTIONARY(s->chunk.encoding_kind) ? CI_DATA : CI_DATA2; uint32_t run_pos = s->top.data.index.run_pos[cid]; if (run_pos) { vals_skipped = min(numvals, run_pos); __syncthreads(); if (t == 0) { s->top.data.index.run_pos[cid] = 0; } numvals -= vals_skipped; if (t < numvals) { secondary_val = s->vals.u32[vals_skipped + t]; } __syncthreads(); if (t < numvals) { s->vals.u32[t] = secondary_val; } } } __syncthreads(); // For strings with direct encoding, we need to convert the lengths into an offset if (!IS_DICTIONARY(s->chunk.encoding_kind)) { secondary_val = (t < numvals) ? s->vals.u32[t] : 0; if (s->chunk.type_kind != TIMESTAMP) { lengths_to_positions(s->vals.u32, numvals, t); __syncthreads(); } } // Adjust the maximum number of values if (numvals == 0 && vals_skipped == 0) { numvals = s->top.data.max_vals; // Just so that we don't hang if the stream is corrupted } if (t == 0 && numvals < s->top.data.max_vals) { s->top.data.max_vals = numvals; } } __syncthreads(); // Account for skipped values if (num_rowgroups > 0 && !s->is_string) { uint32_t run_pos = s->top.data.index.run_pos[CI_DATA] << ((s->chunk.type_kind == BOOLEAN) ? 3 : 0); numvals = min(numvals + run_pos, (s->chunk.type_kind == BOOLEAN) ? NTHREADS * 2 : NTHREADS); } // Decode the primary data stream if (s->chunk.type_kind == INT || s->chunk.type_kind == DATE || s->chunk.type_kind == SHORT) { // Signed int32 primary data stream if (IS_RLEv1(s->chunk.encoding_kind)) { numvals = Integer_RLEv1(&s->bs, &s->u.rlev1, s->vals.i32, numvals, t); } else { numvals = Integer_RLEv2(&s->bs, &s->u.rlev2, s->vals.i32, numvals, t); } __syncthreads(); } else if (s->chunk.type_kind == BYTE) { numvals = Byte_RLE(&s->bs, &s->u.rle8, s->vals.u8, numvals, t); __syncthreads(); } else if (s->chunk.type_kind == BOOLEAN) { int n = ((numvals + 7) >> 3); if (n > s->top.data.buffered_count) { numvals = Byte_RLE(&s->bs, &s->u.rle8, &s->vals.u8[s->top.data.buffered_count], n - s->top.data.buffered_count, t) + s->top.data.buffered_count; } else { numvals = s->top.data.buffered_count; } __syncthreads(); if (t == 0) { s->top.data.buffered_count = 0; s->top.data.max_vals = min(s->top.data.max_vals, NTHREADS); } __syncthreads(); n = numvals - ((s->top.data.max_vals + 7) >> 3); if (t < n) { secondary_val = s->vals.u8[((s->top.data.max_vals + 7) >> 3) + t]; if (t == 0) { s->top.data.buffered_count = n; } } numvals = min(numvals << 3u, s->top.data.max_vals); } else if (s->chunk.type_kind == LONG || s->chunk.type_kind == TIMESTAMP || s->chunk.type_kind == DECIMAL) { orc_bytestream_s *bs = (s->chunk.type_kind == DECIMAL) ? 
&s->bs2 : &s->bs; if (IS_RLEv1(s->chunk.encoding_kind)) { numvals = Integer_RLEv1<int64_t>(bs, &s->u.rlev1, s->vals.i64, numvals, t); } else { numvals = Integer_RLEv2<int64_t>(bs, &s->u.rlev2, s->vals.i64, numvals, t); } if (s->chunk.type_kind == DECIMAL) { // If we're using an index, we may have to drop values from the initial run uint32_t skip = 0; int val_scale; if (num_rowgroups > 0) { uint32_t run_pos = s->top.data.index.run_pos[CI_DATA2]; if (run_pos) { skip = min(numvals, run_pos); __syncthreads(); if (t == 0) { s->top.data.index.run_pos[CI_DATA2] = 0; } numvals -= skip; } } val_scale = (t < numvals) ? (int)s->vals.i64[skip + t] : 0; __syncthreads(); numvals = Decode_Decimals( &s->bs, &s->u.rle8, s->vals.i64, val_scale, numvals, s->chunk.decimal_scale, t); } __syncthreads(); } else if (s->chunk.type_kind == FLOAT) { numvals = min(numvals, (BYTESTREAM_BFRSZ - 8u) >> 2); if (t < numvals) { s->vals.u32[t] = bytestream_readu32(&s->bs, s->bs.pos + t * 4); } __syncthreads(); if (t == 0) { bytestream_flush_bytes(&s->bs, numvals * 4); } __syncthreads(); } else if (s->chunk.type_kind == DOUBLE) { numvals = min(numvals, (BYTESTREAM_BFRSZ - 8u) >> 3); if (t < numvals) { s->vals.u64[t] = bytestream_readu64(&s->bs, s->bs.pos + t * 8); } __syncthreads(); if (t == 0) { bytestream_flush_bytes(&s->bs, numvals * 8); } __syncthreads(); } __syncthreads(); if (numvals == 0 && vals_skipped != 0 && num_rowgroups > 0) { // Special case if the secondary streams produced fewer values than the primary stream's RLE // run, as a result of initial RLE run offset: keep vals_skipped as non-zero to ensure // proper buffered_count/max_vals update below. } else { vals_skipped = 0; if (num_rowgroups > 0) { uint32_t run_pos = s->top.data.index.run_pos[CI_DATA] << ((s->chunk.type_kind == BOOLEAN) ? 
3 : 0); if (run_pos) { vals_skipped = min(numvals, run_pos); numvals -= vals_skipped; __syncthreads(); if (t == 0) { s->top.data.index.run_pos[CI_DATA] = 0; } } } } if (t == 0 && numvals + vals_skipped > 0 && numvals < s->top.data.max_vals) { if (s->chunk.type_kind == TIMESTAMP) { s->top.data.buffered_count = s->top.data.max_vals - numvals; } s->top.data.max_vals = numvals; } __syncthreads(); // Use the valid bits to compute non-null row positions until we get a full batch of values to // decode DecodeRowPositions(s, first_row, t); if (!s->top.data.nrows && !s->u.rowdec.nz_count && !vals_skipped) { // This is a bug (could happen with bitstream errors with a bad run that would produce more // values than the number of remaining rows) return; } // Store decoded values to output if (t < min(min(s->top.data.max_vals, s->u.rowdec.nz_count), s->top.data.nrows) && s->u.rowdec.row[t] != 0 && s->top.data.cur_row + s->u.rowdec.row[t] - 1 < s->top.data.end_row) { size_t row = s->top.data.cur_row + s->u.rowdec.row[t] - 1 - first_row; if (row < max_num_rows) { void *data_out = s->chunk.column_data_base; switch (s->chunk.type_kind) { case FLOAT: case INT: reinterpret_cast<uint32_t *>(data_out)[row] = s->vals.u32[t + vals_skipped]; break; case DOUBLE: case LONG: case DECIMAL: reinterpret_cast<uint64_t *>(data_out)[row] = s->vals.u64[t + vals_skipped]; break; case SHORT: reinterpret_cast<uint16_t *>(data_out)[row] = static_cast<uint16_t>(s->vals.u32[t + vals_skipped]); break; case BYTE: reinterpret_cast<uint8_t *>(data_out)[row] = s->vals.u8[t + vals_skipped]; break; case BOOLEAN: reinterpret_cast<uint8_t *>(data_out)[row] = (s->vals.u8[(t + vals_skipped) >> 3] >> ((~t) & 7)) & 1; break; case DATE: if (s->chunk.dtype_len == 8) { // Convert from days to milliseconds by multiplying by 24*3600*1000 reinterpret_cast<int64_t *>(data_out)[row] = 86400000ll * (int64_t)s->vals.i32[t + vals_skipped]; } else { reinterpret_cast<uint32_t *>(data_out)[row] = s->vals.u32[t + vals_skipped]; } break; case STRING: case BINARY: case VARCHAR: case CHAR: { nvstrdesc_s *strdesc = &reinterpret_cast<nvstrdesc_s *>(data_out)[row]; const uint8_t *ptr; uint32_t count; if (IS_DICTIONARY(s->chunk.encoding_kind)) { uint32_t dict_idx = s->vals.u32[t + vals_skipped]; ptr = s->chunk.streams[CI_DICTIONARY]; if (dict_idx < s->chunk.dict_len) { ptr += global_dictionary[s->chunk.dictionary_start + dict_idx].pos; count = global_dictionary[s->chunk.dictionary_start + dict_idx].len; } else { count = 0; // ptr = (uint8_t *)0xdeadbeef; } } else { uint32_t dict_idx = s->chunk.dictionary_start + s->vals.u32[t + vals_skipped] - secondary_val; count = secondary_val; ptr = s->chunk.streams[CI_DATA] + dict_idx; if (dict_idx + count > s->chunk.strm_len[CI_DATA]) { count = 0; // ptr = (uint8_t *)0xdeadbeef; } } strdesc->ptr = reinterpret_cast<const char *>(ptr); strdesc->count = count; break; } case TIMESTAMP: { int64_t seconds = s->vals.i64[t + vals_skipped] + s->top.data.utc_epoch; uint32_t nanos = secondary_val; nanos = (nanos >> 3) * kTimestampNanoScale[nanos & 7]; if (tz_len > 0) { seconds = ConvertToUTC(&s->top.data, tz_table, seconds); } if (seconds < 0 && nanos != 0) { seconds -= 1; } if (s->chunk.ts_clock_rate) reinterpret_cast<int64_t *>(data_out)[row] = seconds * s->chunk.ts_clock_rate + (nanos + (499999999 / s->chunk.ts_clock_rate)) / (1000000000 / s->chunk.ts_clock_rate); // Output to desired clock rate else reinterpret_cast<int64_t *>(data_out)[row] = seconds * 1000000000 + nanos; break; } } } } __syncthreads(); // Buffer secondary stream 
values if (s->chunk.type_kind == TIMESTAMP) { int buffer_pos = s->top.data.max_vals; if (t >= buffer_pos && t < buffer_pos + s->top.data.buffered_count) { s->vals.u32[t - buffer_pos] = secondary_val; } } else if (s->chunk.type_kind == BOOLEAN && t < s->top.data.buffered_count) { s->vals.u8[t] = secondary_val; } } __syncthreads(); if (t == 0) { s->top.data.cur_row += s->top.data.nrows; if (s->is_string && !IS_DICTIONARY(s->chunk.encoding_kind) && s->top.data.max_vals > 0) { s->chunk.dictionary_start += s->vals.u32[s->top.data.max_vals - 1]; } } __syncthreads(); } } /** * @brief Launches kernel for decoding NULLs and building string dictionary index tables * * @param[in] chunks ColumnDesc device array [stripe][column] * @param[in] global_dictionary Global dictionary device array * @param[in] num_columns Number of columns * @param[in] num_stripes Number of stripes * @param[in] max_rows Maximum number of rows to load * @param[in] first_row Crop all rows below first_row * @param[in] stream CUDA stream to use, default 0 * * @return hipSuccess if successful, a CUDA error code otherwise **/ hipError_t __host__ DecodeNullsAndStringDictionaries(ColumnDesc *chunks, DictionaryEntry *global_dictionary, uint32_t num_columns, uint32_t num_stripes, size_t max_num_rows, size_t first_row, hipStream_t stream) { dim3 dim_block(NTHREADS, 1); dim3 dim_grid(num_columns, num_stripes * 2); // 1024 threads per chunk hipLaunchKernelGGL(( gpuDecodeNullsAndStringDictionaries), dim3(dim_grid), dim3(dim_block), 0, stream, chunks, global_dictionary, num_columns, num_stripes, max_num_rows, first_row); return hipSuccess; } /** * @brief Launches kernel for decoding column data * * @param[in] chunks ColumnDesc device array [stripe][column] * @param[in] global_dictionary Global dictionary device array * @param[in] num_columns Number of columns * @param[in] num_stripes Number of stripes * @param[in] max_rows Maximum number of rows to load * @param[in] first_row Crop all rows below first_row * @param[in] tz_table Timezone translation table * @param[in] tz_len Length of timezone translation table * @param[in] row_groups Optional row index data * @param[in] num_rowgroups Number of row groups in row index data * @param[in] rowidx_stride Row index stride * @param[in] stream CUDA stream to use, default 0 * * @return hipSuccess if successful, a CUDA error code otherwise **/ hipError_t __host__ DecodeOrcColumnData(ColumnDesc *chunks, DictionaryEntry *global_dictionary, uint32_t num_columns, uint32_t num_stripes, size_t max_num_rows, size_t first_row, int64_t *tz_table, size_t tz_len, const RowGroup *row_groups, uint32_t num_rowgroups, uint32_t rowidx_stride, hipStream_t stream) { uint32_t num_chunks = num_columns * num_stripes; dim3 dim_block(NTHREADS, 1); // 1024 threads per chunk dim3 dim_grid((num_rowgroups > 0) ? num_columns : num_chunks, (num_rowgroups > 0) ? num_rowgroups : 1); hipLaunchKernelGGL(( gpuDecodeOrcColumnData), dim3(dim_grid), dim3(dim_block), 0, stream, chunks, global_dictionary, tz_table, row_groups, max_num_rows, first_row, num_columns, (uint32_t)(tz_len >> 1), num_rowgroups, rowidx_stride); return hipSuccess; } } // namespace gpu } // namespace orc } // namespace io } // namespace cudf
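// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original decoder). It shows the
// typical call order of the two host wrappers defined above: NULL decoding and
// string-dictionary construction first, then column data decoding. The name
// decode_orc_stripes_sketch and the assumption that the ColumnDesc /
// DictionaryEntry / RowGroup device arrays and the timezone table were already
// populated by earlier reader stages are inventions of this example; error
// handling is omitted.
// ---------------------------------------------------------------------------
static hipError_t decode_orc_stripes_sketch(cudf::io::orc::gpu::ColumnDesc *chunks_dev,
                                            cudf::io::orc::gpu::DictionaryEntry *global_dict_dev,
                                            const cudf::io::orc::gpu::RowGroup *row_groups_dev,
                                            uint32_t num_columns, uint32_t num_stripes,
                                            size_t num_rows, size_t first_row,
                                            int64_t *tz_table_dev, size_t tz_len,
                                            uint32_t num_rowgroups, uint32_t rowidx_stride,
                                            hipStream_t stream)
{
  // Pass 1: decode PRESENT streams into validity bitmaps and build per-chunk string
  // dictionary indices (one block per chunk; grid.y is doubled to cover both tasks).
  cudf::io::orc::gpu::DecodeNullsAndStringDictionaries(
    chunks_dev, global_dict_dev, num_columns, num_stripes, num_rows, first_row, stream);
  // Pass 2: decode the DATA/DATA2 streams into the output column buffers,
  // optionally driven by the row-group index when num_rowgroups > 0.
  cudf::io::orc::gpu::DecodeOrcColumnData(
    chunks_dev, global_dict_dev, num_columns, num_stripes, num_rows, first_row,
    tz_table_dev, tz_len, row_groups_dev, num_rowgroups, rowidx_stride, stream);
  return hipStreamSynchronize(stream);
}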
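// ---------------------------------------------------------------------------
// Illustrative host-side sketch (names invented for this example): how the first
// bytes of an ORC RLEv2 run map onto the mode/width/count fields that
// Integer_RLEv2 above extracts before expanding the run in parallel.
// ---------------------------------------------------------------------------
#include <cstdint>

struct rlev2_header_sketch {
  int mode;            // 0=short repeat, 1=direct, 2=patched base, 3=delta
  uint32_t value_bits; // bit width per value (modes 1-3) or 8x byte width (mode 0)
  uint32_t count;      // number of values in the run
};

static rlev2_header_sketch parse_rlev2_header_sketch(const uint8_t *p)
{
  // Same 5-bit -> 6-bit width mapping as the kRLEv2_W table above.
  static const uint8_t w_tab[32] = {1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11,
                                    12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22,
                                    23, 24, 26, 28, 30, 32, 40, 48, 56, 64};
  rlev2_header_sketch h;
  uint32_t byte0 = p[0];
  h.mode = byte0 >> 6;
  if (h.mode == 0) {  // 00lllnnn: short repeat
    h.value_bits = 8 * (1 + ((byte0 >> 3) & 7));  // 1 to 8 bytes per value
    h.count      = 3 + (byte0 & 7);               // 3 to 10 values
  } else {            // 01/10/11 wwwwwn.nnnnnnnn ...
    h.value_bits = w_tab[(byte0 >> 1) & 0x1f];
    h.count      = 1 + ((byte0 & 1) << 8) + p[1]; // 1 to 512 values
  }
  return h;
}

// Example: a header byte of 0x0A (binary 00 001 010) is a short repeat of
// 5 two-byte values.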
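// ---------------------------------------------------------------------------
// Illustrative host-side sketch (not part of the original kernels): a serial
// decoder for the ORC byte-RLE layout that Byte_RLE above expands in parallel.
// A control byte c <= 0x7f means "run": the next byte is repeated c + 3 times;
// c > 0x7f means "literals": the next 0x100 - c bytes are copied verbatim. The
// function name and sample inputs are assumptions made for this example only.
// ---------------------------------------------------------------------------
#include <cstdint>
#include <vector>

static std::vector<uint8_t> byte_rle_decode_host(const uint8_t *in, size_t len)
{
  std::vector<uint8_t> out;
  size_t pos = 0;
  while (pos < len) {
    uint32_t c = in[pos++];
    if (c <= 0x7f) {   // run of (c + 3) copies of the next byte
      uint32_t n = c + 3;
      uint8_t v  = in[pos++];
      out.insert(out.end(), n, v);
    } else {           // (0x100 - c) literal bytes follow
      uint32_t n = 0x100 - c;
      out.insert(out.end(), in + pos, in + pos + n);
      pos += n;
    }
  }
  return out;
}

// Example: {0x02, 0xAA} decodes to five 0xAA bytes; {0xFD, 1, 2, 3} decodes to
// the three literal bytes 1, 2, 3.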
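// ---------------------------------------------------------------------------
// Illustrative host-side sketch (function names invented for this example): the
// zigzag mapping used by the signed bytestream_readbe / decode_varint overloads
// above, and the 10^scale = 5^scale * 2^scale identity that the kPow5i table in
// Decode_Decimals relies on. The shift mirrors the kernel; only small positive
// scales are exercised here to stay clear of int64 overflow.
// ---------------------------------------------------------------------------
#include <cstdint>

static int64_t zigzag_decode64_sketch(uint64_t u)
{
  // Same pattern as (u >> 1u) ^ -(int64_t)(u & 1) in the decoder above.
  return (int64_t)((u >> 1) ^ (uint64_t)(-(int64_t)(u & 1)));
}

static int64_t scale_up_by_pow10_sketch(int64_t unscaled, int scale /* 0..18 */)
{
  // 10^scale == 5^scale << scale, the same trick as
  // vals[t] = ((int64_t)v.lo * kPow5i[scale]) << scale in Decode_Decimals.
  static const int64_t pow5[19] = {1, 5, 25, 125, 625, 3125, 15625, 78125, 390625,
                                   1953125, 9765625, 48828125, 244140625, 1220703125,
                                   6103515625ll, 30517578125ll, 152587890625ll,
                                   762939453125ll, 3814697265625ll};
  return (unscaled * pow5[scale]) << scale;
}

// Examples: zigzag_decode64_sketch(0) == 0, (1) == -1, (2) == 1, (3) == -2;
// scale_up_by_pow10_sketch(123, 2) == 123 * 25 * 4 == 12300.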
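// ---------------------------------------------------------------------------
// Illustrative host-side sketch (name invented for this example): the nanosecond
// compaction handled in the TIMESTAMP case of gpuDecodeOrcColumnData above. The
// low 3 bits of the secondary-stream value select a power-of-ten multiplier
// (kTimestampNanoScale) that restores trailing zeros stripped by the writer;
// the remaining bits hold the significant digits.
// ---------------------------------------------------------------------------
#include <cstdint>

static uint32_t decode_orc_timestamp_nanos_sketch(uint32_t v)
{
  // Same table and formula as nanos = (nanos >> 3) * kTimestampNanoScale[nanos & 7].
  static const uint32_t scale[8] = {
      1, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000};
  return (v >> 3) * scale[v & 7];
}

// Example: an encoded value of (5 << 3) | 7 decodes to 5 * 100000000 = 500000000 ns
// (half a second); (123456789 << 3) | 0 decodes to 123456789 ns unchanged.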
ae5bd3dff5cb64cfab52fc5c648b0dd36cc156e4.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <io/utilities/block_utils.cuh> #include "orc_common.h" #include "orc_gpu.h" #define LOG2_BYTESTREAM_BFRSZ 13 // Must be able to handle 512x 8-byte values #define BYTESTREAM_BFRSZ (1 << LOG2_BYTESTREAM_BFRSZ) #define BYTESTREAM_BFRMASK32 ((BYTESTREAM_BFRSZ - 1) >> 2) // TODO: Should be more efficient with 512 threads per block and circular queue for values #define LOG2_NWARPS 5 // Log2 of number of warps per threadblock #define LOG2_NTHREADS (LOG2_NWARPS + 5) #define NWARPS (1 << LOG2_NWARPS) #define NTHREADS (1 << LOG2_NTHREADS) #define ROWDEC_BFRSZ \ (NTHREADS + 128) // Add some margin to look ahead to future rows in case there are many zeroes #define IS_RLEv1(encoding_mode) ((encoding_mode) < DIRECT_V2) #define IS_RLEv2(encoding_mode) ((encoding_mode) >= DIRECT_V2) #define IS_DICTIONARY(encoding_mode) ((encoding_mode)&1) namespace cudf { namespace io { namespace orc { namespace gpu { static __device__ __constant__ int64_t kORCTimeToUTC = 1420070400; // Seconds from January 1st, 1970 to January 1st, 2015 struct int128_s { uint64_t lo; int64_t hi; }; struct orc_bytestream_s { const uint8_t *base; uint32_t pos; uint32_t len; uint32_t fill_pos; uint32_t fill_count; union { uint8_t u8[BYTESTREAM_BFRSZ]; uint32_t u32[BYTESTREAM_BFRSZ >> 2]; uint2 u64[BYTESTREAM_BFRSZ >> 3]; } buf; }; struct orc_rlev1_state_s { uint32_t num_runs; uint32_t num_vals; int32_t run_data[NWARPS * 12]; // (delta << 24) | (count << 16) | (first_val) }; struct orc_rlev2_state_s { uint32_t num_runs; uint32_t num_vals; union { uint32_t u32[NWARPS]; uint64_t u64[NWARPS]; } baseval; uint16_t m2_pw_byte3[NWARPS]; int64_t delta[NWARPS]; uint16_t runs_loc[NTHREADS]; }; struct orc_byterle_state_s { uint32_t num_runs; uint32_t num_vals; uint32_t runs_loc[NWARPS]; uint32_t runs_pos[NWARPS]; }; struct orc_rowdec_state_s { uint32_t nz_count; uint32_t last_row[NWARPS]; uint32_t row[ROWDEC_BFRSZ]; // 0=skip, >0: row position relative to cur_row }; struct orc_strdict_state_s { uint2 *local_dict; uint32_t dict_pos; uint32_t dict_len; }; struct orc_nulldec_state_s { uint32_t row; uint32_t null_count[NWARPS]; }; struct orc_datadec_state_s { uint32_t cur_row; // starting row of current batch uint32_t end_row; // ending row of this chunk (start_row + num_rows) uint32_t max_vals; // max # of non-zero values to decode in this batch uint32_t nrows; // # of rows in current batch (up to NTHREADS) uint32_t buffered_count; // number of buffered values in the secondary data stream uint32_t tz_num_entries; // number of entries in timezone table uint32_t tz_dst_cycle; // number of entries in timezone daylight savings cycle int64_t first_tz_transition; // first transition in timezone table int64_t last_tz_transition; // last transition in timezone table int64_t utc_epoch; // kORCTimeToUTC - gmtOffset RowGroup index; }; struct orcdec_state_s { ColumnDesc chunk; orc_bytestream_s bs; orc_bytestream_s bs2; int is_string; union { orc_strdict_state_s dict; 
orc_nulldec_state_s nulls; orc_datadec_state_s data; } top; union { orc_rlev1_state_s rlev1; orc_rlev2_state_s rlev2; orc_byterle_state_s rle8; orc_rowdec_state_s rowdec; } u; union { uint8_t u8[NTHREADS * 8]; uint32_t u32[NTHREADS * 2]; int32_t i32[NTHREADS * 2]; uint64_t u64[NTHREADS]; int64_t i64[NTHREADS]; } vals; }; /** * @brief Initializes byte stream, modifying length and start position to keep the read pointer *8-byte aligned Assumes that the address range [start_address & ~7, (start_address + len - 1) | 7] *is valid * * @param[in] bs Byte stream input * @param[in] base Pointer to raw byte stream data * @param[in] len Stream length in bytes * **/ static __device__ void bytestream_init(volatile orc_bytestream_s *bs, const uint8_t *base, uint32_t len) { uint32_t pos = static_cast<uint32_t>(7 & reinterpret_cast<size_t>(base)); bs->base = base - pos; bs->pos = (len > 0) ? pos : 0; bs->len = (len + pos + 7) & ~7; bs->fill_pos = 0; bs->fill_count = min(bs->len, BYTESTREAM_BFRSZ) >> 3; } /** * @brief Increment the read position, returns number of 64-bit slots to fill * * @param[in] bs Byte stream input * @param[in] bytes_consumed Number of bytes that were consumed * **/ static __device__ void bytestream_flush_bytes(volatile orc_bytestream_s *bs, uint32_t bytes_consumed) { uint32_t pos = bs->pos; uint32_t len = bs->len; uint32_t pos_new = min(pos + bytes_consumed, len); bs->pos = pos_new; pos = min(pos + BYTESTREAM_BFRSZ, len); pos_new = min(pos_new + BYTESTREAM_BFRSZ, len); bs->fill_pos = pos; bs->fill_count = (pos_new >> 3) - (pos >> 3); } /** * @brief Refill the byte stream buffer * * @param[in] bs Byte stream input * @param[in] t thread id * **/ static __device__ void bytestream_fill(orc_bytestream_s *bs, int t) { int count = bs->fill_count; if (t < count) { int pos8 = (bs->fill_pos >> 3) + t; bs->buf.u64[pos8 & ((BYTESTREAM_BFRSZ >> 3) - 1)] = (reinterpret_cast<const uint2 *>(bs->base))[pos8]; } } /** * @brief Read a byte from the byte stream (byte aligned) * * @param[in] bs Byte stream input * @param[in] pos Position in byte stream * @return byte * **/ inline __device__ uint8_t bytestream_readbyte(volatile orc_bytestream_s *bs, int pos) { return bs->buf.u8[pos & (BYTESTREAM_BFRSZ - 1)]; } /** * @brief Read 32 bits from a byte stream (little endian, byte aligned) * * @param[in] bs Byte stream input * @param[in] pos Position in byte stream * @result bits * **/ inline __device__ uint32_t bytestream_readu32(volatile orc_bytestream_s *bs, int pos) { uint32_t a = bs->buf.u32[(pos & (BYTESTREAM_BFRSZ - 1)) >> 2]; uint32_t b = bs->buf.u32[((pos + 4) & (BYTESTREAM_BFRSZ - 1)) >> 2]; return __funnelshift_r(a, b, (pos & 3) * 8); } /** * @brief Read 64 bits from a byte stream (little endian, byte aligned) * * @param[in] bs Byte stream input * @param[in] pos Position in byte stream * @param[in] numbits number of bits * @return bits * **/ inline __device__ uint64_t bytestream_readu64(volatile orc_bytestream_s *bs, int pos) { uint32_t a = bs->buf.u32[(pos & (BYTESTREAM_BFRSZ - 1)) >> 2]; uint32_t b = bs->buf.u32[((pos + 4) & (BYTESTREAM_BFRSZ - 1)) >> 2]; uint32_t c = bs->buf.u32[((pos + 8) & (BYTESTREAM_BFRSZ - 1)) >> 2]; uint32_t lo32 = __funnelshift_r(a, b, (pos & 3) * 8); uint32_t hi32 = __funnelshift_r(b, c, (pos & 3) * 8); uint64_t v = hi32; v <<= 32; v |= lo32; return v; } /** * @brief Read up to 32-bits from a byte stream (big endian) * * @param[in] bs Byte stream input * @param[in] bitpos Position in byte stream * @param[in] numbits number of bits * @return decoded value * **/ inline 
__device__ uint32_t bytestream_readbits(volatile orc_bytestream_s *bs, int bitpos, uint32_t numbits) { int idx = bitpos >> 5; uint32_t a = __byte_perm(bs->buf.u32[(idx + 0) & BYTESTREAM_BFRMASK32], 0, 0x0123); uint32_t b = __byte_perm(bs->buf.u32[(idx + 1) & BYTESTREAM_BFRMASK32], 0, 0x0123); return __funnelshift_l(b, a, bitpos & 0x1f) >> (32 - numbits); } /** * @brief Read up to 64-bits from a byte stream (big endian) * * @param[in] bs Byte stream input * @param[in] bitpos Position in byte stream * @param[in] numbits number of bits * @return decoded value * **/ inline __device__ uint64_t bytestream_readbits64(volatile orc_bytestream_s *bs, int bitpos, uint32_t numbits) { int idx = bitpos >> 5; uint32_t a = __byte_perm(bs->buf.u32[(idx + 0) & BYTESTREAM_BFRMASK32], 0, 0x0123); uint32_t b = __byte_perm(bs->buf.u32[(idx + 1) & BYTESTREAM_BFRMASK32], 0, 0x0123); uint32_t c = __byte_perm(bs->buf.u32[(idx + 2) & BYTESTREAM_BFRMASK32], 0, 0x0123); uint32_t hi32 = __funnelshift_l(b, a, bitpos & 0x1f); uint32_t lo32 = __funnelshift_l(c, b, bitpos & 0x1f); uint64_t v = hi32; v <<= 32; v |= lo32; v >>= (64 - numbits); return v; } /** * @brief Decode a big-endian unsigned 32-bit value * * @param[in] bs Byte stream input * @param[in] bitpos Position in byte stream * @param[in] numbits number of bits * @param[out] result decoded value * **/ inline __device__ void bytestream_readbe(volatile orc_bytestream_s *bs, int bitpos, uint32_t numbits, uint32_t &result) { result = bytestream_readbits(bs, bitpos, numbits); } /** * @brief Decode a big-endian signed 32-bit value * * @param[in] bs Byte stream input * @param[in] bitpos Position in byte stream * @param[in] numbits number of bits * @param[out] result decoded value * **/ inline __device__ void bytestream_readbe(volatile orc_bytestream_s *bs, int bitpos, uint32_t numbits, int32_t &result) { uint32_t u = bytestream_readbits(bs, bitpos, numbits); result = (int32_t)((u >> 1u) ^ -(int32_t)(u & 1)); } /** * @brief Decode a big-endian unsigned 64-bit value * * @param[in] bs Byte stream input * @param[in] bitpos Position in byte stream * @param[in] numbits number of bits * @param[out] result decoded value * **/ inline __device__ void bytestream_readbe(volatile orc_bytestream_s *bs, int bitpos, uint32_t numbits, uint64_t &result) { result = bytestream_readbits64(bs, bitpos, numbits); } /** * @brief Decode a big-endian signed 64-bit value * * @param[in] bs Byte stream input * @param[in] bitpos Position in byte stream * @param[in] numbits number of bits * @param[out] result decoded value * **/ inline __device__ void bytestream_readbe(volatile orc_bytestream_s *bs, int bitpos, uint32_t numbits, int64_t &result) { uint64_t u = bytestream_readbits64(bs, bitpos, numbits); result = (int64_t)((u >> 1u) ^ -(int64_t)(u & 1)); } /** * @brief Return the length of a base-128 varint * * @param[in] bs Byte stream input * @param[in] pos Position in circular byte stream buffer * @return length of varint in bytes **/ template <class T> inline __device__ uint32_t varint_length(volatile orc_bytestream_s *bs, int pos) { if (bytestream_readbyte(bs, pos) > 0x7f) { uint32_t next32 = bytestream_readu32(bs, pos + 1); uint32_t zbit = __ffs((~next32) & 0x80808080); if (sizeof(T) <= 4 || zbit) { return 1 + (zbit >> 3); // up to 5x7 bits } else { next32 = bytestream_readu32(bs, pos + 5); zbit = __ffs((~next32) & 0x80808080); if (zbit) { return 5 + (zbit >> 3); // up to 9x7 bits } else if ((sizeof(T) <= 8) || (bytestream_readbyte(bs, pos + 9) <= 0x7f)) { return 10; // up to 70 bits } else { 
uint64_t next64 = bytestream_readu64(bs, pos + 10); zbit = __ffsll((~next64) & 0x8080808080808080ull); if (zbit) { return 10 + (zbit >> 3); // Up to 18x7 bits (126) } else { return 19; // Up to 19x7 bits (133) } } } } else { return 1; } } /** * @brief Decodes a base-128 varint * * @param[in] bs Byte stream input * @param[in] pos Position in circular byte stream buffer * @param[in] result Unpacked value * @return new position in byte stream buffer **/ template <class T> inline __device__ int decode_base128_varint(volatile orc_bytestream_s *bs, int pos, T &result) { uint32_t v = bytestream_readbyte(bs, pos++); if (v > 0x7f) { uint32_t b = bytestream_readbyte(bs, pos++); v = (v & 0x7f) | (b << 7); if (b > 0x7f) { b = bytestream_readbyte(bs, pos++); v = (v & 0x3fff) | (b << 14); if (b > 0x7f) { b = bytestream_readbyte(bs, pos++); v = (v & 0x1fffff) | (b << 21); if (b > 0x7f) { b = bytestream_readbyte(bs, pos++); v = (v & 0x0fffffff) | (b << 28); if (sizeof(T) > 4) { uint32_t lo = v; uint64_t hi; v = b >> 4; if (b > 0x7f) { b = bytestream_readbyte(bs, pos++); v = (v & 7) | (b << 3); if (b > 0x7f) { b = bytestream_readbyte(bs, pos++); v = (v & 0x3ff) | (b << 10); if (b > 0x7f) { b = bytestream_readbyte(bs, pos++); v = (v & 0x1ffff) | (b << 17); if (b > 0x7f) { b = bytestream_readbyte(bs, pos++); v = (v & 0xffffff) | (b << 24); if (b > 0x7f) { pos++; // last bit is redundant (extra byte implies bit63 is 1) } } } } } hi = v; hi <<= 32; result = hi | lo; return pos; } } } } } result = v; return pos; } /** * @brief Decodes a signed int128 encoded as base-128 varint (used for decimals) **/ inline __device__ int128_s decode_varint128(volatile orc_bytestream_s *bs, int pos) { uint32_t b = bytestream_readbyte(bs, pos++); int64_t sign_mask = -(int32_t)(b & 1); uint64_t v = (b >> 1) & 0x3f; uint32_t bitpos = 6; uint64_t lo = v; uint64_t hi = 0; while (b > 0x7f && bitpos < 128) { b = bytestream_readbyte(bs, pos++); v |= ((uint64_t)(b & 0x7f)) << (bitpos & 0x3f); if (bitpos == 62) { // 6 + 7 * 8 = 62 lo = v; v = (b & 0x7f) >> 2; // 64 - 62 } bitpos += 7; } if (bitpos >= 64) { hi = v; } else { lo = v; } return {(uint64_t)(lo ^ sign_mask), (int64_t)(hi ^ sign_mask)}; } /** * @brief Decodes an unsigned 32-bit varint **/ inline __device__ int decode_varint(volatile orc_bytestream_s *bs, int pos, uint32_t &result) { uint32_t u; pos = decode_base128_varint<uint32_t>(bs, pos, u); result = u; return pos; } /** * @brief Decodes an unsigned 64-bit varint **/ inline __device__ int decode_varint(volatile orc_bytestream_s *bs, int pos, uint64_t &result) { uint64_t u; pos = decode_base128_varint<uint64_t>(bs, pos, u); result = u; return pos; } /** * @brief Signed version of 32-bit decode_varint **/ inline __device__ int decode_varint(volatile orc_bytestream_s *bs, int pos, int32_t &result) { uint32_t u; pos = decode_base128_varint<uint32_t>(bs, pos, u); result = (int32_t)((u >> 1u) ^ -(int32_t)(u & 1)); return pos; } /** * @brief Signed version of 64-bit decode_varint **/ inline __device__ int decode_varint(volatile orc_bytestream_s *bs, int pos, int64_t &result) { uint64_t u; pos = decode_base128_varint<uint64_t>(bs, pos, u); result = (int64_t)((u >> 1u) ^ -(int64_t)(u & 1)); return pos; } /** * @brief In-place conversion from lengths to positions * * @param[in] vals input values * @param[in] numvals number of values * @param[in] t thread id * * @return number of values decoded **/ template <class T> inline __device__ void lengths_to_positions(volatile T *vals, uint32_t numvals, unsigned int t) { for (uint32_t n = 1; n 
< numvals; n <<= 1) { __syncthreads(); if ((t & n) && (t < numvals)) vals[t] += vals[(t & ~n) | (n - 1)]; } } /** * @brief ORC Integer RLEv1 decoding * * @param[in] bs input byte stream * @param[in] rle RLE state * @param[in] vals buffer for output values (uint32_t, int32_t, uint64_t or int64_t) * @param[in] maxvals maximum number of values to decode * @param[in] t thread id * * @return number of values decoded **/ template <class T> static __device__ uint32_t Integer_RLEv1( orc_bytestream_s *bs, volatile orc_rlev1_state_s *rle, volatile T *vals, uint32_t maxvals, int t) { uint32_t numvals, numruns; if (t == 0) { uint32_t maxpos = min(bs->len, bs->pos + (BYTESTREAM_BFRSZ - 8u)); uint32_t lastpos = bs->pos; numvals = numruns = 0; // Find the length and start location of each run while (numvals < maxvals && numruns < NWARPS * 12) { uint32_t pos = lastpos; uint32_t n = bytestream_readbyte(bs, pos++); if (n <= 0x7f) { // Run int32_t delta; n = n + 3; if (numvals + n > maxvals) break; delta = bytestream_readbyte(bs, pos++); vals[numvals] = pos & 0xffff; pos += varint_length<T>(bs, pos); if (pos > maxpos) break; rle->run_data[numruns++] = (delta << 24) | (n << 16) | numvals; numvals += n; } else { // Literals uint32_t i; n = 0x100 - n; if (numvals + n > maxvals) break; i = 0; do { vals[numvals + i] = pos & 0xffff; pos += varint_length<T>(bs, pos); } while (++i < n); if (pos > maxpos) break; numvals += n; } lastpos = pos; } rle->num_runs = numruns; rle->num_vals = numvals; bytestream_flush_bytes(bs, lastpos - bs->pos); } __syncthreads(); // Expand the runs numruns = rle->num_runs; if (numruns > 0) { int r = t >> 5; int tr = t & 0x1f; for (uint32_t run = r; run < numruns; run += NWARPS) { int32_t run_data = rle->run_data[run]; int n = (run_data >> 16) & 0xff; int delta = run_data >> 24; uint32_t base = run_data & 0x3ff; uint32_t pos = vals[base] & 0xffff; for (int i = 1 + tr; i < n; i += 32) { vals[base + i] = ((delta * i) << 16) | pos; } } __syncthreads(); } numvals = rle->num_vals; // Decode individual 32-bit varints if (t < numvals) { int32_t pos = vals[t]; int32_t delta = pos >> 16; T v; decode_varint(bs, pos, v); vals[t] = v + delta; } __syncthreads(); return numvals; } /** * @brief Maps the RLEv2 5-bit length code to 6-bit length * **/ static const __device__ __constant__ uint8_t kRLEv2_W[32] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 26, 28, 30, 32, 40, 48, 56, 64}; /** * @brief ORC Integer RLEv2 decoding * * @param[in] bs input byte stream * @param[in] rle RLE state * @param[in] vals buffer for output values (uint32_t, int32_t, uint64_t or int64_t) * @param[in] maxvals maximum number of values to decode * @param[in] t thread id * * @return number of values decoded **/ template <class T> static __device__ uint32_t Integer_RLEv2( orc_bytestream_s *bs, volatile orc_rlev2_state_s *rle, volatile T *vals, uint32_t maxvals, int t) { uint32_t numvals, numruns; int r, tr; if (t == 0) { uint32_t maxpos = min(bs->len, bs->pos + (BYTESTREAM_BFRSZ - 8u)); uint32_t lastpos = bs->pos; numvals = numruns = 0; // Find the length and start location of each run while (numvals < maxvals) { uint32_t pos = lastpos; uint32_t byte0 = bytestream_readbyte(bs, pos++); uint32_t n, l; int mode = byte0 >> 6; rle->runs_loc[numruns] = numvals; vals[numvals] = lastpos; if (mode == 0) { // 00lllnnn: short repeat encoding l = 1 + ((byte0 >> 3) & 7); // 1 to 8 bytes n = 3 + (byte0 & 7); // 3 to 10 values } else { l = kRLEv2_W[(byte0 >> 1) & 0x1f]; n = 1 + ((byte0 & 1) << 8) + 
bytestream_readbyte(bs, pos++); if (mode == 1) { // 01wwwwwn.nnnnnnnn: direct encoding l = (l * n + 7) >> 3; } else if (mode == 2) { // 10wwwwwn.nnnnnnnn.xxxxxxxx.yyyyyyyy: patched base encoding uint32_t byte2 = bytestream_readbyte(bs, pos++); uint32_t byte3 = bytestream_readbyte(bs, pos++); uint32_t bw = 1 + (byte2 >> 5); // base value width, 1 to 8 bytes uint32_t pw = kRLEv2_W[byte2 & 0x1f]; // patch width, 1 to 64 bits uint32_t pgw = 1 + (byte3 >> 5); // patch gap width, 1 to 8 bits uint32_t pll = byte3 & 0x1f; // patch list length l = (l * n + 7) >> 3; l += bw; l += (pll * (pgw + pw) + 7) >> 3; } else { // 11wwwwwn.nnnnnnnn.<base>.<delta>: delta encoding uint32_t deltapos = varint_length<T>(bs, pos); deltapos += varint_length<T>(bs, pos + deltapos); l = (l > 1 && n > 2) ? (l * (n - 2) + 7) >> 3 : 0; l += deltapos; } } if (numvals + n > maxvals) break; pos += l; if (pos > maxpos) break; lastpos = pos; numvals += n; numruns++; } rle->num_vals = numvals; rle->num_runs = numruns; bytestream_flush_bytes(bs, lastpos - bs->pos); } __syncthreads(); // Process the runs, 1 warp per run numruns = rle->num_runs; r = t >> 5; tr = t & 0x1f; for (uint32_t run = r; run < numruns; run += NWARPS) { uint32_t base, pos, w, n; int mode; if (tr == 0) { uint32_t byte0; base = rle->runs_loc[run]; pos = vals[base]; byte0 = bytestream_readbyte(bs, pos++); mode = byte0 >> 6; if (mode == 0) { T baseval; // 00lllnnn: short repeat encoding w = 8 + (byte0 & 0x38); // 8 to 64 bits n = 3 + (byte0 & 7); // 3 to 10 values bytestream_readbe(bs, pos * 8, w, baseval); if (sizeof(T) <= 4) { rle->baseval.u32[r] = baseval; } else { rle->baseval.u64[r] = baseval; } } else { w = kRLEv2_W[(byte0 >> 1) & 0x1f]; n = 1 + ((byte0 & 1) << 8) + bytestream_readbyte(bs, pos++); if (mode > 1) { if (mode == 2) { // Patched base uint32_t byte2 = bytestream_readbyte(bs, pos++); uint32_t byte3 = bytestream_readbyte(bs, pos++); uint32_t bw = 1 + (byte2 >> 5); // base value width, 1 to 8 bytes uint32_t pw = kRLEv2_W[byte2 & 0x1f]; // patch width, 1 to 64 bits if (sizeof(T) <= 4) { uint32_t baseval, mask; bytestream_readbe(bs, pos * 8, bw * 8, baseval); mask = (1 << (bw * 8 - 1)) - 1; rle->baseval.u32[r] = (baseval > mask) ? (-(int32_t)(baseval & mask)) : baseval; } else { uint64_t baseval, mask; bytestream_readbe(bs, pos * 8, bw * 8, baseval); mask = 1; mask <<= (bw * 8) - 1; mask -= 1; rle->baseval.u64[r] = (baseval > mask) ? (-(int64_t)(baseval & mask)) : baseval; } rle->m2_pw_byte3[r] = (pw << 8) | byte3; pos += bw; } else { T baseval; int64_t delta; // Delta pos = decode_varint(bs, pos, baseval); if (sizeof(T) <= 4) { rle->baseval.u32[r] = baseval; } else { rle->baseval.u64[r] = baseval; } pos = decode_varint(bs, pos, delta); rle->delta[r] = delta; } } } } base = SHFL0(base); mode = SHFL0(mode); pos = SHFL0(pos); n = SHFL0(n); w = SHFL0(w); for (uint32_t i = tr; i < n; i += 32) { if (sizeof(T) <= 4) { if (mode == 0) { vals[base + i] = rle->baseval.u32[r]; } else if (mode == 1) { T v; bytestream_readbe(bs, pos * 8 + i * w, w, v); vals[base + i] = v; } else if (mode == 2) { uint32_t ofs = bytestream_readbits(bs, pos * 8 + i * w, w); vals[base + i] = rle->baseval.u32[r] + ofs; } else { int64_t delta = rle->delta[r]; if (w > 1 && i > 1) { int32_t delta_s = (delta < 0) ? -1 : 0; vals[base + i] = (bytestream_readbits(bs, pos * 8 + (i - 2) * w, w) ^ delta_s) - delta_s; } else { vals[base + i] = (i == 0) ? 
0 : static_cast<uint32_t>(delta); } } } else { if (mode == 0) { vals[base + i] = rle->baseval.u64[r]; } else if (mode == 1) { T v; bytestream_readbe(bs, pos * 8 + i * w, w, v); vals[base + i] = v; } else if (mode == 2) { uint32_t ofs = bytestream_readbits64(bs, pos * 8 + i * w, w); vals[base + i] = rle->baseval.u64[r] + ofs; } else { int64_t delta = rle->delta[r], ofs; if (w > 1 && i > 1) { int64_t delta_s = (delta < 0) ? -1 : 0; ofs = (bytestream_readbits64(bs, pos * 8 + (i - 2) * w, w) ^ delta_s) - delta_s; } else { ofs = (i == 0) ? 0 : delta; } vals[base + i] = ofs; } } } SYNCWARP(); // Patch values if (mode == 2) { uint32_t pw_byte3 = rle->m2_pw_byte3[r]; uint32_t pw = pw_byte3 >> 8; uint32_t pgw = 1 + ((pw_byte3 >> 5) & 7); // patch gap width, 1 to 8 bits uint32_t pll = pw_byte3 & 0x1f; // patch list length if (pll != 0) { uint64_t patch_pos64 = (tr < pll) ? bytestream_readbits64(bs, pos * 8 + ((n * w + 7) & ~7) + tr * (pgw + pw), pgw + pw) : 0; uint32_t patch_pos; T patch = 1; patch <<= pw; patch = (patch - 1) & (T)patch_pos64; patch <<= w; patch_pos = (uint32_t)(patch_pos64 >> pw); for (uint32_t k = 1; k < pll; k <<= 1) { uint32_t tmp = SHFL(patch_pos, (tr & ~k) | (k - 1)); patch_pos += (tr & k) ? tmp : 0; } if (tr < pll && patch_pos < n) { vals[base + patch_pos] += patch; } } } SYNCWARP(); if (mode == 3) { T baseval; for (uint32_t i = 1; i < n; i <<= 1) { SYNCWARP(); for (uint32_t j = tr; j < n; j += 32) { if (j & i) vals[base + j] += vals[base + ((j & ~i) | (i - 1))]; } } if (sizeof(T) <= 4) baseval = rle->baseval.u32[r]; else baseval = rle->baseval.u64[r]; for (uint32_t j = tr; j < n; j += 32) { vals[base + j] += baseval; } } } __syncthreads(); return rle->num_vals; } /** * @brief Reads 32 booleans as a packed 32-bit value * * @param[in] vals 32-bit array of values (little-endian) * @param[in] bitpos bit position * * @return 32-bit value **/ inline __device__ uint32_t rle8_read_bool32(volatile uint32_t *vals, uint32_t bitpos) { uint32_t a = vals[(bitpos >> 5) + 0]; uint32_t b = vals[(bitpos >> 5) + 1]; a = __byte_perm(a, 0, 0x0123); b = __byte_perm(b, 0, 0x0123); return __brev(__funnelshift_l(b, a, bitpos)); } /** * @brief ORC Byte RLE decoding * * @param[in] bs Input byte stream * @param[in] rle RLE state * @param[in] vals output buffer for decoded 8-bit values * @param[in] maxvals Maximum number of values to decode * @param[in] t thread id * * @return number of values decoded **/ static __device__ uint32_t Byte_RLE(orc_bytestream_s *bs, volatile orc_byterle_state_s *rle, volatile uint8_t *vals, uint32_t maxvals, int t) { uint32_t numvals, numruns; int r, tr; if (t == 0) { uint32_t maxpos = min(bs->len, bs->pos + (BYTESTREAM_BFRSZ - 8u)); uint32_t lastpos = bs->pos; numvals = numruns = 0; // Find the length and start location of each run while (numvals < maxvals && numruns < NWARPS) { uint32_t pos = lastpos, n; rle->runs_pos[numruns] = pos; rle->runs_loc[numruns] = numvals; n = bytestream_readbyte(bs, pos++); if (n <= 0x7f) { // Run n = n + 3; pos++; } else { // Literals n = 0x100 - n; pos += n; } if (pos > maxpos || numvals + n > maxvals) { break; } numruns++; numvals += n; lastpos = pos; } rle->num_runs = numruns; rle->num_vals = numvals; bytestream_flush_bytes(bs, lastpos - bs->pos); } __syncthreads(); numruns = rle->num_runs; r = t >> 5; tr = t & 0x1f; for (int run = r; run < numruns; run += NWARPS) { uint32_t pos = rle->runs_pos[run]; uint32_t loc = rle->runs_loc[run]; uint32_t n = bytestream_readbyte(bs, pos++); uint32_t literal_mask; if (n <= 0x7f) { literal_mask = 0; n 
+= 3; } else { literal_mask = ~0; n = 0x100 - n; } for (uint32_t i = tr; i < n; i += 32) { vals[loc + i] = bytestream_readbyte(bs, pos + (i & literal_mask)); } } __syncthreads(); return rle->num_vals; } /** * @brief Powers of 10 * **/ static const __device__ __constant__ double kPow10[40] = { 1.0, 1.e1, 1.e2, 1.e3, 1.e4, 1.e5, 1.e6, 1.e7, 1.e8, 1.e9, 1.e10, 1.e11, 1.e12, 1.e13, 1.e14, 1.e15, 1.e16, 1.e17, 1.e18, 1.e19, 1.e20, 1.e21, 1.e22, 1.e23, 1.e24, 1.e25, 1.e26, 1.e27, 1.e28, 1.e29, 1.e30, 1.e31, 1.e32, 1.e33, 1.e34, 1.e35, 1.e36, 1.e37, 1.e38, 1.e39, }; static const __device__ __constant__ int64_t kPow5i[28] = {1, 5, 25, 125, 625, 3125, 15625, 78125, 390625, 1953125, 9765625, 48828125, 244140625, 1220703125, 6103515625ll, 30517578125ll, 152587890625ll, 762939453125ll, 3814697265625ll, 19073486328125ll, 95367431640625ll, 476837158203125ll, 2384185791015625ll, 11920928955078125ll, 59604644775390625ll, 298023223876953125ll, 1490116119384765625ll, 7450580596923828125ll}; /** * @brief ORC Decimal decoding (unbounded base-128 varints) * * @param[in] bs Input byte stream * @param[in,out] vals on input: scale from secondary stream, on output: value * @param[in] numvals Number of values to decode * @param[in] t thread id * * @return number of values decoded * **/ static __device__ int Decode_Decimals(orc_bytestream_s *bs, volatile orc_byterle_state_s *scratch, volatile int64_t *vals, int val_scale, int numvals, int col_scale, int t) { if (t == 0) { uint32_t maxpos = min(bs->len, bs->pos + (BYTESTREAM_BFRSZ - 8u)); uint32_t lastpos = bs->pos; uint32_t n; for (n = 0; n < numvals; n++) { uint32_t pos = lastpos; *(volatile int32_t *)&vals[n] = lastpos; pos += varint_length<uint4>(bs, pos); if (pos > maxpos) break; lastpos = pos; } scratch->num_vals = n; bytestream_flush_bytes(bs, lastpos - bs->pos); } __syncthreads(); numvals = scratch->num_vals; if (t < numvals) { int pos = *(volatile int32_t *)&vals[t]; int128_s v = decode_varint128(bs, pos); if (col_scale & ORC_DECIMAL2FLOAT64_SCALE) { double f = Int128ToDouble_rn(v.lo, v.hi); int32_t scale = (t < numvals) ? val_scale : 0; if (scale >= 0) reinterpret_cast<volatile double *>(vals)[t] = f / kPow10[min(scale, 39)]; else reinterpret_cast<volatile double *>(vals)[t] = f * kPow10[min(-scale, 39)]; } else { int32_t scale = (t < numvals) ? (col_scale & ~ORC_DECIMAL2FLOAT64_SCALE) - val_scale : 0; if (scale >= 0) { scale = min(scale, 27); vals[t] = ((int64_t)v.lo * kPow5i[scale]) << scale; } else // if (scale < 0) { bool is_negative = (v.hi < 0); uint64_t hi = v.hi, lo = v.lo; scale = min(-scale, 27); if (is_negative) { hi = (~hi) + (lo == 0); lo = (~lo) + 1; } lo = (lo >> (uint32_t)scale) | ((uint64_t)hi << (64 - scale)); hi >>= (int32_t)scale; if (hi != 0) { // Use intermediate float lo = __double2ull_rn(Int128ToDouble_rn(lo, hi) / __ll2double_rn(kPow5i[scale])); hi = 0; } else { lo /= kPow5i[scale]; } vals[t] = (is_negative) ? 
-(int64_t)lo : (int64_t)lo; } } } return numvals; } /** * @brief Decoding NULLs and builds string dictionary index tables * * @param[in] chunks ColumnDesc device array [stripe][column] * @param[in] global_dictionary Global dictionary device array * @param[in] num_columns Number of columns * @param[in] num_stripes Number of stripes * @param[in] max_num_rows Maximum number of rows to load * @param[in] first_row Crop all rows below first_row * **/ // blockDim {NTHREADS,1,1} extern "C" __global__ void __launch_bounds__(NTHREADS) gpuDecodeNullsAndStringDictionaries(ColumnDesc *chunks, DictionaryEntry *global_dictionary, uint32_t num_columns, uint32_t num_stripes, size_t max_num_rows, size_t first_row) { __shared__ __align__(16) orcdec_state_s state_g; orcdec_state_s *const s = &state_g; bool is_nulldec = (blockIdx.y >= num_stripes); uint32_t column = blockIdx.x; uint32_t stripe = (is_nulldec) ? blockIdx.y - num_stripes : blockIdx.y; uint32_t chunk_id = stripe * num_columns + column; int t = threadIdx.x; if (t < sizeof(ColumnDesc) / sizeof(uint32_t)) { ((volatile uint32_t *)&s->chunk)[t] = ((const uint32_t *)&chunks[chunk_id])[t]; } __syncthreads(); if (is_nulldec) { uint32_t null_count = 0; // Decode NULLs if (t == 0) { s->chunk.skip_count = 0; s->top.nulls.row = 0; bytestream_init(&s->bs, s->chunk.streams[CI_PRESENT], s->chunk.strm_len[CI_PRESENT]); } __syncthreads(); if (s->chunk.strm_len[CI_PRESENT] == 0) { // No present stream: all rows are valid s->vals.u32[t] = ~0; } while (s->top.nulls.row < s->chunk.num_rows) { uint32_t nrows_max = min(s->chunk.num_rows - s->top.nulls.row, NTHREADS * 32); uint32_t nrows; size_t row_in; bytestream_fill(&s->bs, t); __syncthreads(); if (s->chunk.strm_len[CI_PRESENT] > 0) { uint32_t nbytes = Byte_RLE(&s->bs, &s->u.rle8, s->vals.u8, (nrows_max + 7) >> 3, t); nrows = min(nrows_max, nbytes * 8u); if (!nrows) { // Error: mark all remaining rows as null nrows = nrows_max; if (t * 32 < nrows) { s->vals.u32[t] = 0; } } } else { nrows = nrows_max; } __syncthreads(); row_in = s->chunk.start_row + s->top.nulls.row; if (row_in + nrows > first_row && row_in < first_row + max_num_rows && s->chunk.valid_map_base != NULL) { int64_t dst_row = row_in - first_row; int64_t dst_pos = max(dst_row, (int64_t)0); uint32_t startbit = -static_cast<int32_t>(min(dst_row, (int64_t)0)); uint32_t nbits = nrows - min(startbit, nrows); uint32_t *valid = s->chunk.valid_map_base + (dst_pos >> 5); uint32_t bitpos = static_cast<uint32_t>(dst_pos) & 0x1f; if ((size_t)(dst_pos + nbits) > max_num_rows) { nbits = static_cast<uint32_t>(max_num_rows - min((size_t)dst_pos, max_num_rows)); } // Store bits up to the next 32-bit aligned boundary if (bitpos != 0) { uint32_t n = min(32u - bitpos, nbits); if (t == 0) { uint32_t mask = ((1 << n) - 1) << bitpos; uint32_t bits = (rle8_read_bool32(s->vals.u32, startbit) << bitpos) & mask; atomicAnd(valid, ~mask); atomicOr(valid, bits); null_count += __popc((~bits) & mask); } nbits -= n; startbit += n; valid++; } // Store bits aligned if (t * 32 + 32 <= nbits) { uint32_t bits = rle8_read_bool32(s->vals.u32, startbit + t * 32); valid[t] = bits; null_count += __popc(~bits); } else if (t * 32 < nbits) { uint32_t n = nbits - t * 32; uint32_t mask = (1 << n) - 1; uint32_t bits = rle8_read_bool32(s->vals.u32, startbit + t * 32) & mask; atomicAnd(valid + t, ~mask); atomicOr(valid + t, bits); null_count += __popc((~bits) & mask); } __syncthreads(); } // We may have some valid values that are not decoded below first_row -> count these in // skip_count, so that 
subsequent kernel can infer the correct row position if (row_in < first_row && t < 32) { uint32_t skippedrows = min(static_cast<uint32_t>(first_row - row_in), nrows); uint32_t skip_count = 0; for (uint32_t i = t * 32; i < skippedrows; i += 32 * 32) { uint32_t bits = s->vals.u32[i >> 5]; if (i + 32 > skippedrows) { bits &= (1 << (skippedrows - i)) - 1; } skip_count += __popc(bits); } skip_count += SHFL_XOR(skip_count, 1); skip_count += SHFL_XOR(skip_count, 2); skip_count += SHFL_XOR(skip_count, 4); skip_count += SHFL_XOR(skip_count, 8); skip_count += SHFL_XOR(skip_count, 16); if (t == 0) { s->chunk.skip_count += skip_count; } } __syncthreads(); if (t == 0) { s->top.nulls.row += nrows; } __syncthreads(); } __syncthreads(); // Sum up the valid counts and infer null_count null_count += SHFL_XOR(null_count, 1); null_count += SHFL_XOR(null_count, 2); null_count += SHFL_XOR(null_count, 4); null_count += SHFL_XOR(null_count, 8); null_count += SHFL_XOR(null_count, 16); if (!(t & 0x1f)) { s->top.nulls.null_count[t >> 5] = null_count; } __syncthreads(); if (t < 32) { null_count = (t < NWARPS) ? s->top.nulls.null_count[t] : 0; null_count += SHFL_XOR(null_count, 1); null_count += SHFL_XOR(null_count, 2); null_count += SHFL_XOR(null_count, 4); null_count += SHFL_XOR(null_count, 8); null_count += SHFL_XOR(null_count, 16); if (t == 0) { chunks[chunk_id].null_count = null_count; chunks[chunk_id].skip_count = s->chunk.skip_count; } } } else { // Decode string dictionary int encoding_kind = s->chunk.encoding_kind; if ((encoding_kind == DICTIONARY || encoding_kind == DICTIONARY_V2) && (s->chunk.dict_len > 0)) { if (t == 0) { s->top.dict.dict_len = s->chunk.dict_len; s->top.dict.local_dict = (uint2 *)(global_dictionary + s->chunk.dictionary_start); // Local dictionary s->top.dict.dict_pos = 0; // CI_DATA2 contains the LENGTH stream coding the length of individual dictionary entries bytestream_init(&s->bs, s->chunk.streams[CI_DATA2], s->chunk.strm_len[CI_DATA2]); } __syncthreads(); while (s->top.dict.dict_len > 0) { uint32_t numvals = min(s->top.dict.dict_len, NTHREADS), len; volatile uint32_t *vals = s->vals.u32; bytestream_fill(&s->bs, t); __syncthreads(); if (IS_RLEv1(s->chunk.encoding_kind)) { numvals = Integer_RLEv1(&s->bs, &s->u.rlev1, vals, numvals, t); } else // RLEv2 { numvals = Integer_RLEv2(&s->bs, &s->u.rlev2, vals, numvals, t); } __syncthreads(); len = (t < numvals) ? 
vals[t] : 0; lengths_to_positions(vals, numvals, t); __syncthreads(); if (numvals == 0) { // This is an error (ran out of data) numvals = min(s->top.dict.dict_len, NTHREADS); vals[t] = 0; } if (t < numvals) { uint2 dict_entry; dict_entry.x = s->top.dict.dict_pos + vals[t] - len; dict_entry.y = len; s->top.dict.local_dict[t] = dict_entry; } __syncthreads(); if (t == 0) { s->top.dict.dict_pos += vals[numvals - 1]; s->top.dict.dict_len -= numvals; s->top.dict.local_dict += numvals; } __syncthreads(); } } } } /** * @brief Decode row positions from valid bits * * @param[in,out] s Column chunk decoder state * @param[in] first_row crop all rows below first rows * @param[in] t thread id * **/ static __device__ void DecodeRowPositions(orcdec_state_s *s, size_t first_row, int t) { if (t == 0) { if (s->chunk.skip_count != 0) { s->u.rowdec.nz_count = min(min(s->chunk.skip_count, s->top.data.max_vals), NTHREADS); s->chunk.skip_count -= s->u.rowdec.nz_count; s->top.data.nrows = s->u.rowdec.nz_count; } else { s->u.rowdec.nz_count = 0; } } __syncthreads(); if (t < s->u.rowdec.nz_count) { s->u.rowdec.row[t] = 0; // Skipped values (below first_row) } while (s->u.rowdec.nz_count < s->top.data.max_vals && s->top.data.cur_row + s->top.data.nrows < s->top.data.end_row) { uint32_t nrows = min(s->top.data.end_row - s->top.data.cur_row, min((ROWDEC_BFRSZ - s->u.rowdec.nz_count) * 2, NTHREADS)); if (s->chunk.strm_len[CI_PRESENT] > 0) { // We have a present stream uint32_t rmax = s->top.data.end_row - min((uint32_t)first_row, s->top.data.end_row); uint32_t r = (uint32_t)(s->top.data.cur_row + s->top.data.nrows + t - first_row); uint32_t valid = (t < nrows && r < rmax) ? (((const uint8_t *)s->chunk.valid_map_base)[r >> 3] >> (r & 7)) & 1 : 0; volatile uint16_t *row_ofs_plus1 = (volatile uint16_t *)&s->u.rowdec.row[s->u.rowdec.nz_count]; uint32_t nz_pos, row_plus1, nz_count = s->u.rowdec.nz_count, last_row; if (t < nrows) { row_ofs_plus1[t] = valid; } lengths_to_positions<uint16_t>(row_ofs_plus1, nrows, t); if (t < nrows) { nz_count += row_ofs_plus1[t]; row_plus1 = s->top.data.nrows + t + 1; } else { row_plus1 = 0; } if (t == nrows - 1) { s->u.rowdec.nz_count = min(nz_count, s->top.data.max_vals); } __syncthreads(); // TBD: Brute-forcing this, there might be a more efficient way to find the thread with the // last row last_row = (nz_count == s->u.rowdec.nz_count) ? row_plus1 : 0; last_row = max(last_row, SHFL_XOR(last_row, 1)); last_row = max(last_row, SHFL_XOR(last_row, 2)); last_row = max(last_row, SHFL_XOR(last_row, 4)); last_row = max(last_row, SHFL_XOR(last_row, 8)); last_row = max(last_row, SHFL_XOR(last_row, 16)); if (!(t & 0x1f)) { *(volatile uint32_t *)&s->u.rowdec.last_row[t >> 5] = last_row; } nz_pos = (valid) ? nz_count : 0; __syncthreads(); if (t < 32) { last_row = (t < NWARPS) ? 
*(volatile uint32_t *)&s->u.rowdec.last_row[t] : 0; last_row = max(last_row, SHFL_XOR(last_row, 1)); last_row = max(last_row, SHFL_XOR(last_row, 2)); last_row = max(last_row, SHFL_XOR(last_row, 4)); last_row = max(last_row, SHFL_XOR(last_row, 8)); last_row = max(last_row, SHFL_XOR(last_row, 16)); if (t == 0) { s->top.data.nrows = last_row; } } if (valid && nz_pos - 1 < s->u.rowdec.nz_count) { s->u.rowdec.row[nz_pos - 1] = row_plus1; } __syncthreads(); } else { // All values are valid nrows = min(nrows, s->top.data.max_vals - s->u.rowdec.nz_count); if (t < nrows) { s->u.rowdec.row[s->u.rowdec.nz_count + t] = s->top.data.nrows + t + 1; } __syncthreads(); if (t == 0) { s->top.data.nrows += nrows; s->u.rowdec.nz_count += nrows; } __syncthreads(); } } } /** * @brief Convert seconds from writer timezone to UTC * * @param[in] s Orc data decoder state * @param[in] table Timezone translation table * @param[in] ts Local time in seconds * * @return UTC time in seconds * **/ static __device__ int64_t ConvertToUTC(const orc_datadec_state_s *s, const int64_t *table, int64_t ts) { uint32_t num_entries = s->tz_num_entries; uint32_t dst_cycle = s->tz_dst_cycle; int64_t first_transition = s->first_tz_transition; int64_t last_transition = s->last_tz_transition; int64_t tsbase; uint32_t first, last; if (ts <= first_transition) { return ts + table[0 * 2 + 2]; } else if (ts <= last_transition) { first = 0; last = num_entries - 1; tsbase = ts; } else if (!dst_cycle) { return ts + table[(num_entries - 1) * 2 + 2]; } else { // Apply 400-year cycle rule const int64_t k400Years = (365 * 400 + (100 - 3)) * 24 * 60 * 60ll; tsbase = ts; ts %= k400Years; if (ts < 0) { ts += k400Years; } first = num_entries; last = num_entries + dst_cycle - 1; if (ts < table[num_entries * 2 + 1]) { return tsbase + table[last * 2 + 2]; } } // Binary search the table from first to last for ts do { uint32_t mid = first + ((last - first + 1) >> 1); int64_t tmid = table[mid * 2 + 1]; if (tmid <= ts) { first = mid; } else { if (mid == last) { break; } last = mid; } } while (first < last); return tsbase + table[first * 2 + 2]; } /** * @brief Trailing zeroes for decoding timestamp nanoseconds * **/ static const __device__ __constant__ uint32_t kTimestampNanoScale[8] = { 1, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000}; /** * @brief Decodes column data * * @param[in] chunks ColumnDesc device array * @param[in] global_dictionary Global dictionary device array * @param[in] tz_table Timezone translation table * @param[in] row_groups Optional row index data * @param[in] max_num_rows Maximum number of rows to load * @param[in] first_row Crop all rows below first_row * @param[in] num_chunks Number of column chunks (num_columns * num_stripes) * @param[in] tz_len Length of timezone translation table (number of pairs) * @param[in] num_rowgroups Number of row groups in row index data * @param[in] rowidx_stride Row index stride * **/ // blockDim {NTHREADS,1,1} extern "C" __global__ void __launch_bounds__(NTHREADS) gpuDecodeOrcColumnData(ColumnDesc *chunks, DictionaryEntry *global_dictionary, int64_t *tz_table, const RowGroup *row_groups, size_t max_num_rows, size_t first_row, uint32_t num_columns, uint32_t tz_len, uint32_t num_rowgroups, uint32_t rowidx_stride) { __shared__ __align__(16) orcdec_state_s state_g; orcdec_state_s *const s = &state_g; uint32_t chunk_id; int t = threadIdx.x; if (num_rowgroups > 0) { if (t < sizeof(RowGroup) / sizeof(uint32_t)) { ((volatile uint32_t *)&s->top.data.index)[t] = ((const uint32_t *)&row_groups[blockIdx.y * 
num_columns + blockIdx.x])[t]; } __syncthreads(); chunk_id = s->top.data.index.chunk_id; } else { chunk_id = blockIdx.x; } if (t < sizeof(ColumnDesc) / sizeof(uint32_t)) { ((volatile uint32_t *)&s->chunk)[t] = ((const uint32_t *)&chunks[chunk_id])[t]; } __syncthreads(); if (t == 0) { // If we have an index, seek to the initial run and update row positions if (num_rowgroups > 0) { uint32_t ofs0 = min(s->top.data.index.strm_offset[0], s->chunk.strm_len[CI_DATA]); uint32_t ofs1 = min(s->top.data.index.strm_offset[1], s->chunk.strm_len[CI_DATA2]); uint32_t rowgroup_rowofs; s->chunk.streams[CI_DATA] += ofs0; s->chunk.strm_len[CI_DATA] -= ofs0; s->chunk.streams[CI_DATA2] += ofs1; s->chunk.strm_len[CI_DATA2] -= ofs1; rowgroup_rowofs = min((blockIdx.y - min(s->chunk.rowgroup_id, blockIdx.y)) * rowidx_stride, s->chunk.num_rows); s->chunk.start_row += rowgroup_rowofs; s->chunk.num_rows -= rowgroup_rowofs; } s->is_string = (s->chunk.type_kind == STRING || s->chunk.type_kind == BINARY || s->chunk.type_kind == VARCHAR || s->chunk.type_kind == CHAR); s->top.data.cur_row = max(s->chunk.start_row, max((int32_t)(first_row - s->chunk.skip_count), 0)); s->top.data.end_row = s->chunk.start_row + s->chunk.num_rows; s->top.data.buffered_count = 0; if (s->top.data.end_row > first_row + max_num_rows) { s->top.data.end_row = static_cast<uint32_t>(first_row + max_num_rows); } if (num_rowgroups > 0) { s->top.data.end_row = min(s->top.data.end_row, s->chunk.start_row + rowidx_stride); } if (!IS_DICTIONARY(s->chunk.encoding_kind)) { s->chunk.dictionary_start = 0; } if (tz_len > 0) { if (tz_len > 800) // 2 entries/year for 400 years { s->top.data.tz_num_entries = tz_len - 800; s->top.data.tz_dst_cycle = 800; } else { s->top.data.tz_num_entries = tz_len; s->top.data.tz_dst_cycle = 0; } s->top.data.utc_epoch = kORCTimeToUTC - tz_table[0]; if (tz_len > 0) { s->top.data.first_tz_transition = tz_table[1]; s->top.data.last_tz_transition = tz_table[(s->top.data.tz_num_entries - 1) * 2 + 1]; } } else { s->top.data.utc_epoch = kORCTimeToUTC; } bytestream_init(&s->bs, s->chunk.streams[CI_DATA], s->chunk.strm_len[CI_DATA]); bytestream_init(&s->bs2, s->chunk.streams[CI_DATA2], s->chunk.strm_len[CI_DATA2]); } __syncthreads(); while (s->top.data.cur_row < s->top.data.end_row) { bytestream_fill(&s->bs, t); bytestream_fill(&s->bs2, t); __syncthreads(); if (t == 0) { uint32_t max_vals = s->chunk.start_row + s->chunk.num_rows - s->top.data.cur_row; if (num_rowgroups > 0 && (s->is_string || s->chunk.type_kind == TIMESTAMP)) { max_vals += s->top.data.index.run_pos[IS_DICTIONARY(s->chunk.encoding_kind) ? CI_DATA : CI_DATA2]; } s->bs.fill_count = 0; s->bs2.fill_count = 0; s->top.data.nrows = 0; s->top.data.max_vals = min(max_vals, (s->chunk.type_kind == BOOLEAN) ? NTHREADS * 2 : NTHREADS); } __syncthreads(); // Decode data streams { uint32_t numvals = s->top.data.max_vals, secondary_val; uint32_t vals_skipped = 0; if (s->is_string || s->chunk.type_kind == TIMESTAMP) { // For these data types, we have a secondary unsigned 32-bit data stream orc_bytestream_s *bs = (IS_DICTIONARY(s->chunk.encoding_kind)) ? 
&s->bs : &s->bs2; uint32_t ofs = 0; if (s->chunk.type_kind == TIMESTAMP) { // Restore buffered secondary stream values, if any ofs = s->top.data.buffered_count; if (ofs > 0) { __syncthreads(); if (t == 0) { s->top.data.buffered_count = 0; } } } if (numvals > ofs) { if (IS_RLEv1(s->chunk.encoding_kind)) { numvals = ofs + Integer_RLEv1(bs, &s->u.rlev1, &s->vals.u32[ofs], numvals - ofs, t); } else { numvals = ofs + Integer_RLEv2(bs, &s->u.rlev2, &s->vals.u32[ofs], numvals - ofs, t); } __syncthreads(); if (numvals <= ofs && t >= ofs && t < s->top.data.max_vals) { s->vals.u32[t] = 0; } } // If we're using an index, we may have to drop values from the initial run if (num_rowgroups > 0) { int cid = IS_DICTIONARY(s->chunk.encoding_kind) ? CI_DATA : CI_DATA2; uint32_t run_pos = s->top.data.index.run_pos[cid]; if (run_pos) { vals_skipped = min(numvals, run_pos); __syncthreads(); if (t == 0) { s->top.data.index.run_pos[cid] = 0; } numvals -= vals_skipped; if (t < numvals) { secondary_val = s->vals.u32[vals_skipped + t]; } __syncthreads(); if (t < numvals) { s->vals.u32[t] = secondary_val; } } } __syncthreads(); // For strings with direct encoding, we need to convert the lengths into an offset if (!IS_DICTIONARY(s->chunk.encoding_kind)) { secondary_val = (t < numvals) ? s->vals.u32[t] : 0; if (s->chunk.type_kind != TIMESTAMP) { lengths_to_positions(s->vals.u32, numvals, t); __syncthreads(); } } // Adjust the maximum number of values if (numvals == 0 && vals_skipped == 0) { numvals = s->top.data.max_vals; // Just so that we don't hang if the stream is corrupted } if (t == 0 && numvals < s->top.data.max_vals) { s->top.data.max_vals = numvals; } } __syncthreads(); // Account for skipped values if (num_rowgroups > 0 && !s->is_string) { uint32_t run_pos = s->top.data.index.run_pos[CI_DATA] << ((s->chunk.type_kind == BOOLEAN) ? 3 : 0); numvals = min(numvals + run_pos, (s->chunk.type_kind == BOOLEAN) ? NTHREADS * 2 : NTHREADS); } // Decode the primary data stream if (s->chunk.type_kind == INT || s->chunk.type_kind == DATE || s->chunk.type_kind == SHORT) { // Signed int32 primary data stream if (IS_RLEv1(s->chunk.encoding_kind)) { numvals = Integer_RLEv1(&s->bs, &s->u.rlev1, s->vals.i32, numvals, t); } else { numvals = Integer_RLEv2(&s->bs, &s->u.rlev2, s->vals.i32, numvals, t); } __syncthreads(); } else if (s->chunk.type_kind == BYTE) { numvals = Byte_RLE(&s->bs, &s->u.rle8, s->vals.u8, numvals, t); __syncthreads(); } else if (s->chunk.type_kind == BOOLEAN) { int n = ((numvals + 7) >> 3); if (n > s->top.data.buffered_count) { numvals = Byte_RLE(&s->bs, &s->u.rle8, &s->vals.u8[s->top.data.buffered_count], n - s->top.data.buffered_count, t) + s->top.data.buffered_count; } else { numvals = s->top.data.buffered_count; } __syncthreads(); if (t == 0) { s->top.data.buffered_count = 0; s->top.data.max_vals = min(s->top.data.max_vals, NTHREADS); } __syncthreads(); n = numvals - ((s->top.data.max_vals + 7) >> 3); if (t < n) { secondary_val = s->vals.u8[((s->top.data.max_vals + 7) >> 3) + t]; if (t == 0) { s->top.data.buffered_count = n; } } numvals = min(numvals << 3u, s->top.data.max_vals); } else if (s->chunk.type_kind == LONG || s->chunk.type_kind == TIMESTAMP || s->chunk.type_kind == DECIMAL) { orc_bytestream_s *bs = (s->chunk.type_kind == DECIMAL) ? 
&s->bs2 : &s->bs; if (IS_RLEv1(s->chunk.encoding_kind)) { numvals = Integer_RLEv1<int64_t>(bs, &s->u.rlev1, s->vals.i64, numvals, t); } else { numvals = Integer_RLEv2<int64_t>(bs, &s->u.rlev2, s->vals.i64, numvals, t); } if (s->chunk.type_kind == DECIMAL) { // If we're using an index, we may have to drop values from the initial run uint32_t skip = 0; int val_scale; if (num_rowgroups > 0) { uint32_t run_pos = s->top.data.index.run_pos[CI_DATA2]; if (run_pos) { skip = min(numvals, run_pos); __syncthreads(); if (t == 0) { s->top.data.index.run_pos[CI_DATA2] = 0; } numvals -= skip; } } val_scale = (t < numvals) ? (int)s->vals.i64[skip + t] : 0; __syncthreads(); numvals = Decode_Decimals( &s->bs, &s->u.rle8, s->vals.i64, val_scale, numvals, s->chunk.decimal_scale, t); } __syncthreads(); } else if (s->chunk.type_kind == FLOAT) { numvals = min(numvals, (BYTESTREAM_BFRSZ - 8u) >> 2); if (t < numvals) { s->vals.u32[t] = bytestream_readu32(&s->bs, s->bs.pos + t * 4); } __syncthreads(); if (t == 0) { bytestream_flush_bytes(&s->bs, numvals * 4); } __syncthreads(); } else if (s->chunk.type_kind == DOUBLE) { numvals = min(numvals, (BYTESTREAM_BFRSZ - 8u) >> 3); if (t < numvals) { s->vals.u64[t] = bytestream_readu64(&s->bs, s->bs.pos + t * 8); } __syncthreads(); if (t == 0) { bytestream_flush_bytes(&s->bs, numvals * 8); } __syncthreads(); } __syncthreads(); if (numvals == 0 && vals_skipped != 0 && num_rowgroups > 0) { // Special case if the secondary streams produced fewer values than the primary stream's RLE // run, as a result of initial RLE run offset: keep vals_skipped as non-zero to ensure // proper buffered_count/max_vals update below. } else { vals_skipped = 0; if (num_rowgroups > 0) { uint32_t run_pos = s->top.data.index.run_pos[CI_DATA] << ((s->chunk.type_kind == BOOLEAN) ? 
3 : 0); if (run_pos) { vals_skipped = min(numvals, run_pos); numvals -= vals_skipped; __syncthreads(); if (t == 0) { s->top.data.index.run_pos[CI_DATA] = 0; } } } } if (t == 0 && numvals + vals_skipped > 0 && numvals < s->top.data.max_vals) { if (s->chunk.type_kind == TIMESTAMP) { s->top.data.buffered_count = s->top.data.max_vals - numvals; } s->top.data.max_vals = numvals; } __syncthreads(); // Use the valid bits to compute non-null row positions until we get a full batch of values to // decode DecodeRowPositions(s, first_row, t); if (!s->top.data.nrows && !s->u.rowdec.nz_count && !vals_skipped) { // This is a bug (could happen with bitstream errors with a bad run that would produce more // values than the number of remaining rows) return; } // Store decoded values to output if (t < min(min(s->top.data.max_vals, s->u.rowdec.nz_count), s->top.data.nrows) && s->u.rowdec.row[t] != 0 && s->top.data.cur_row + s->u.rowdec.row[t] - 1 < s->top.data.end_row) { size_t row = s->top.data.cur_row + s->u.rowdec.row[t] - 1 - first_row; if (row < max_num_rows) { void *data_out = s->chunk.column_data_base; switch (s->chunk.type_kind) { case FLOAT: case INT: reinterpret_cast<uint32_t *>(data_out)[row] = s->vals.u32[t + vals_skipped]; break; case DOUBLE: case LONG: case DECIMAL: reinterpret_cast<uint64_t *>(data_out)[row] = s->vals.u64[t + vals_skipped]; break; case SHORT: reinterpret_cast<uint16_t *>(data_out)[row] = static_cast<uint16_t>(s->vals.u32[t + vals_skipped]); break; case BYTE: reinterpret_cast<uint8_t *>(data_out)[row] = s->vals.u8[t + vals_skipped]; break; case BOOLEAN: reinterpret_cast<uint8_t *>(data_out)[row] = (s->vals.u8[(t + vals_skipped) >> 3] >> ((~t) & 7)) & 1; break; case DATE: if (s->chunk.dtype_len == 8) { // Convert from days to milliseconds by multiplying by 24*3600*1000 reinterpret_cast<int64_t *>(data_out)[row] = 86400000ll * (int64_t)s->vals.i32[t + vals_skipped]; } else { reinterpret_cast<uint32_t *>(data_out)[row] = s->vals.u32[t + vals_skipped]; } break; case STRING: case BINARY: case VARCHAR: case CHAR: { nvstrdesc_s *strdesc = &reinterpret_cast<nvstrdesc_s *>(data_out)[row]; const uint8_t *ptr; uint32_t count; if (IS_DICTIONARY(s->chunk.encoding_kind)) { uint32_t dict_idx = s->vals.u32[t + vals_skipped]; ptr = s->chunk.streams[CI_DICTIONARY]; if (dict_idx < s->chunk.dict_len) { ptr += global_dictionary[s->chunk.dictionary_start + dict_idx].pos; count = global_dictionary[s->chunk.dictionary_start + dict_idx].len; } else { count = 0; // ptr = (uint8_t *)0xdeadbeef; } } else { uint32_t dict_idx = s->chunk.dictionary_start + s->vals.u32[t + vals_skipped] - secondary_val; count = secondary_val; ptr = s->chunk.streams[CI_DATA] + dict_idx; if (dict_idx + count > s->chunk.strm_len[CI_DATA]) { count = 0; // ptr = (uint8_t *)0xdeadbeef; } } strdesc->ptr = reinterpret_cast<const char *>(ptr); strdesc->count = count; break; } case TIMESTAMP: { int64_t seconds = s->vals.i64[t + vals_skipped] + s->top.data.utc_epoch; uint32_t nanos = secondary_val; nanos = (nanos >> 3) * kTimestampNanoScale[nanos & 7]; if (tz_len > 0) { seconds = ConvertToUTC(&s->top.data, tz_table, seconds); } if (seconds < 0 && nanos != 0) { seconds -= 1; } if (s->chunk.ts_clock_rate) reinterpret_cast<int64_t *>(data_out)[row] = seconds * s->chunk.ts_clock_rate + (nanos + (499999999 / s->chunk.ts_clock_rate)) / (1000000000 / s->chunk.ts_clock_rate); // Output to desired clock rate else reinterpret_cast<int64_t *>(data_out)[row] = seconds * 1000000000 + nanos; break; } } } } __syncthreads(); // Buffer secondary stream 
values if (s->chunk.type_kind == TIMESTAMP) { int buffer_pos = s->top.data.max_vals; if (t >= buffer_pos && t < buffer_pos + s->top.data.buffered_count) { s->vals.u32[t - buffer_pos] = secondary_val; } } else if (s->chunk.type_kind == BOOLEAN && t < s->top.data.buffered_count) { s->vals.u8[t] = secondary_val; } } __syncthreads(); if (t == 0) { s->top.data.cur_row += s->top.data.nrows; if (s->is_string && !IS_DICTIONARY(s->chunk.encoding_kind) && s->top.data.max_vals > 0) { s->chunk.dictionary_start += s->vals.u32[s->top.data.max_vals - 1]; } } __syncthreads(); } } /** * @brief Launches kernel for decoding NULLs and building string dictionary index tables * * @param[in] chunks ColumnDesc device array [stripe][column] * @param[in] global_dictionary Global dictionary device array * @param[in] num_columns Number of columns * @param[in] num_stripes Number of stripes * @param[in] max_rows Maximum number of rows to load * @param[in] first_row Crop all rows below first_row * @param[in] stream CUDA stream to use, default 0 * * @return cudaSuccess if successful, a CUDA error code otherwise **/ cudaError_t __host__ DecodeNullsAndStringDictionaries(ColumnDesc *chunks, DictionaryEntry *global_dictionary, uint32_t num_columns, uint32_t num_stripes, size_t max_num_rows, size_t first_row, cudaStream_t stream) { dim3 dim_block(NTHREADS, 1); dim3 dim_grid(num_columns, num_stripes * 2); // 1024 threads per chunk gpuDecodeNullsAndStringDictionaries<<<dim_grid, dim_block, 0, stream>>>( chunks, global_dictionary, num_columns, num_stripes, max_num_rows, first_row); return cudaSuccess; } /** * @brief Launches kernel for decoding column data * * @param[in] chunks ColumnDesc device array [stripe][column] * @param[in] global_dictionary Global dictionary device array * @param[in] num_columns Number of columns * @param[in] num_stripes Number of stripes * @param[in] max_rows Maximum number of rows to load * @param[in] first_row Crop all rows below first_row * @param[in] tz_table Timezone translation table * @param[in] tz_len Length of timezone translation table * @param[in] row_groups Optional row index data * @param[in] num_rowgroups Number of row groups in row index data * @param[in] rowidx_stride Row index stride * @param[in] stream CUDA stream to use, default 0 * * @return cudaSuccess if successful, a CUDA error code otherwise **/ cudaError_t __host__ DecodeOrcColumnData(ColumnDesc *chunks, DictionaryEntry *global_dictionary, uint32_t num_columns, uint32_t num_stripes, size_t max_num_rows, size_t first_row, int64_t *tz_table, size_t tz_len, const RowGroup *row_groups, uint32_t num_rowgroups, uint32_t rowidx_stride, cudaStream_t stream) { uint32_t num_chunks = num_columns * num_stripes; dim3 dim_block(NTHREADS, 1); // 1024 threads per chunk dim3 dim_grid((num_rowgroups > 0) ? num_columns : num_chunks, (num_rowgroups > 0) ? num_rowgroups : 1); gpuDecodeOrcColumnData<<<dim_grid, dim_block, 0, stream>>>(chunks, global_dictionary, tz_table, row_groups, max_num_rows, first_row, num_columns, (uint32_t)(tz_len >> 1), num_rowgroups, rowidx_stride); return cudaSuccess; } } // namespace gpu } // namespace orc } // namespace io } // namespace cudf
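// A small standalone host-side sketch (not part of the file above) of the nanosecond
// decode used in the TIMESTAMP branch of gpuDecodeOrcColumnData: the low 3 bits of the
// secondary-stream value index kTimestampNanoScale and the remaining bits hold the
// stored digits. The encoded sample values below are made up for illustration.
#include <cstdint>
#include <cstdio>

static const uint32_t kTimestampNanoScaleHost[8] = {
    1, 100, 1000, 10000, 100000, 1000000, 10000000, 100000000};

static uint32_t DecodeOrcNanos(uint32_t encoded) {
  // Mirrors: nanos = (nanos >> 3) * kTimestampNanoScale[nanos & 7];
  return (encoded >> 3) * kTimestampNanoScaleHost[encoded & 7];
}

int main() {
  printf("%u\n", DecodeOrcNanos((1u << 3) | 2u));    // 1 * 1000     -> 1000 ns
  printf("%u\n", DecodeOrcNanos((123u << 3) | 4u));  // 123 * 100000 -> 12300000 ns
  return 0;
}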
b586695245405bc732d821d88e79030bca68b4e1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "engine/graphics/postfx/GrayscaleFX.cuh" #include <math.h> namespace utad { __global__ void kernel_Grayscale(CudaSurface colorBuffer, int width, int height) { const int x = CUDA_X_POS; const int y = CUDA_Y_POS; if (x >= width || y >= height) return; Pixelf pixel; surf2Dread(&pixel, colorBuffer, x * sizeof(pixel), y); const float color = pixel.x * 0.299f + pixel.y * 0.587f + pixel.z * 0.114f; pixel.x = color; pixel.y = color; pixel.z = color; surf2Dwrite(pixel, colorBuffer, x * sizeof(pixel), y); } void GrayscaleFX::execute(const PostFXInfo& info) { dim3 gridSize; dim3 blockSize; Cuda::getKernelDimensions(gridSize, blockSize, info.width, info.height); hipLaunchKernelGGL(( kernel_Grayscale), dim3(gridSize), dim3(blockSize), 0, 0, info.colorBuffer, info.width, info.height); } }
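// A minimal standalone HIP sketch (separate from the file above, assumed to be built
// with hipcc) of the launch-syntax rewrite hipify applied to kernel_Grayscale: the CUDA
// triple-chevron launch becomes hipLaunchKernelGGL with the dynamic shared-memory size
// and stream passed explicitly (the file above passes 0, 0 for the defaults).
// scaleKernel and its arguments are made up for illustration.
#include <hip/hip_runtime.h>

__global__ void scaleKernel(float* data, int n, float s) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= s;
}

void launchScale(float* d_data, int n, hipStream_t stream) {
  dim3 blockSize(256);
  dim3 gridSize((n + blockSize.x - 1) / blockSize.x);
  // CUDA form (what the .cu version of this file below uses for kernel_Grayscale):
  //   scaleKernel<<<gridSize, blockSize, 0, stream>>>(d_data, n, 2.0f);
  // hipify-generated form (what the .hip file above uses):
  hipLaunchKernelGGL(scaleKernel, gridSize, blockSize, 0, stream, d_data, n, 2.0f);
}

int main() {
  const int n = 1024;
  float* d_data = nullptr;
  if (hipMalloc((void**)&d_data, n * sizeof(float)) != hipSuccess) return 1;
  launchScale(d_data, n, 0);
  hipDeviceSynchronize();
  hipFree(d_data);
  return 0;
}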
b586695245405bc732d821d88e79030bca68b4e1.cu
#include "engine/graphics/postfx/GrayscaleFX.cuh" #include <math.h> namespace utad { __global__ void kernel_Grayscale(CudaSurface colorBuffer, int width, int height) { const int x = CUDA_X_POS; const int y = CUDA_Y_POS; if (x >= width || y >= height) return; Pixelf pixel; surf2Dread(&pixel, colorBuffer, x * sizeof(pixel), y); const float color = pixel.x * 0.299f + pixel.y * 0.587f + pixel.z * 0.114f; pixel.x = color; pixel.y = color; pixel.z = color; surf2Dwrite(pixel, colorBuffer, x * sizeof(pixel), y); } void GrayscaleFX::execute(const PostFXInfo& info) { dim3 gridSize; dim3 blockSize; Cuda::getKernelDimensions(gridSize, blockSize, info.width, info.height); kernel_Grayscale<<<gridSize, blockSize>>>(info.colorBuffer, info.width, info.height); } }
0f6eb033056a7d0f768b3b23e93987ceb6d1f166.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdlib> #include <ctime> #include <iostream> #include <fstream> #include "GraphCut.cu" using namespace std; #define HEIGHT 400 #define WIDTH 400 #define SIZE (HEIGHT*WIDTH) int main(int argc, char * argv[]) { int width; int height; /*height = 5; width = 5; int data_positive[] = {0, 0, 0, 0, 0, 0, 6, 4, 3, 0, 0, 5, 3, 2, 0, 0, 4, 2, 1, 0, 0, 0, 0, 0, 0}; int data_negative[] = {0, 0, 0, 0, 0, 0, 3, 4, 3, 0, 0, 3, 4, 3, 0, 0, 3, 4, 4, 0, 0, 0, 0, 0, 0};*/ height = 15; width = 4; int data_positive[SIZE] = { 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0 }; int data_negative[SIZE] = { 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0 }; if(argc == 2) { srand(atoi(argv[1])); height = HEIGHT; width = WIDTH; for(int i = 0 ; i < height ; ++i) for(int j = 0 ; j < width ; ++j) { if(false /*|| i == 0 || j == 0 || i == height-1 || j == width-1*/) { data_positive[i*width+j] = 0; data_negative[i*width+j] = 0; } else { data_positive[i*width+j] = rand()%300; data_negative[i*width+j] = rand()%300; } } } int * d_data_positive, * d_data_negative; CUDA_SAFE_CALL(hipMalloc((void**)&(d_data_positive),sizeof(int)*width*height)); CUDA_SAFE_CALL(hipMalloc((void**)&(d_data_negative),sizeof(int)*width*height)); CUDA_SAFE_CALL(hipMemcpy(d_data_positive,data_positive,sizeof(int)*width*height,hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_data_negative,data_negative,sizeof(int)*width*height,hipMemcpyHostToDevice)); GlobalWrapper gw = GC_Init(width, height, d_data_positive, d_data_negative, 50); int * label = (int *) malloc(sizeof(int) * width * height); GC_Optimize(gw, label); /*for(int i = 0 ; i < height ; ++i) { for(int j = 0 ; j < width ; ++j) cout << label[i*width+j] << " "; cout << endl; }*/ free(label); CUDA_SAFE_CALL(hipFree(d_data_positive)); CUDA_SAFE_CALL(hipFree(d_data_negative)); GC_End(&gw); return 0; }
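// A minimal standalone HIP sketch (separate from the file above) of the runtime-API
// renames hipify performed in main(): cudaMalloc, cudaMemcpy, cudaFree and the
// cudaMemcpyHostToDevice flag map one-to-one onto their hip* counterparts. The HIP_CHECK
// macro here is only an illustration standing in for CUDA_SAFE_CALL, which comes from
// the included GraphCut.cu and is not shown in this collection.
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>
#include <vector>

#define HIP_CHECK(call)                                                      \
  do {                                                                       \
    hipError_t err_ = (call);                                                \
    if (err_ != hipSuccess) {                                                \
      fprintf(stderr, "HIP error: %s (%s:%d)\n", hipGetErrorString(err_),    \
              __FILE__, __LINE__);                                           \
      exit(EXIT_FAILURE);                                                    \
    }                                                                        \
  } while (0)

int main() {
  const int width = 4, height = 15;
  std::vector<int> host(width * height, 0);
  int* dev = nullptr;
  HIP_CHECK(hipMalloc((void**)&dev, sizeof(int) * width * height));
  HIP_CHECK(hipMemcpy(dev, host.data(), sizeof(int) * width * height,
                      hipMemcpyHostToDevice));
  HIP_CHECK(hipFree(dev));
  return 0;
}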
0f6eb033056a7d0f768b3b23e93987ceb6d1f166.cu
#include <cstdlib> #include <ctime> #include <iostream> #include <fstream> #include "GraphCut.cu" using namespace std; #define HEIGHT 400 #define WIDTH 400 #define SIZE (HEIGHT*WIDTH) int main(int argc, char * argv[]) { int width; int height; /*height = 5; width = 5; int data_positive[] = {0, 0, 0, 0, 0, 0, 6, 4, 3, 0, 0, 5, 3, 2, 0, 0, 4, 2, 1, 0, 0, 0, 0, 0, 0}; int data_negative[] = {0, 0, 0, 0, 0, 0, 3, 4, 3, 0, 0, 3, 4, 3, 0, 0, 3, 4, 4, 0, 0, 0, 0, 0, 0};*/ height = 15; width = 4; int data_positive[SIZE] = { 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0 }; int data_negative[SIZE] = { 0, 0, 0, 0, 0, 0, 1, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0, 0 }; if(argc == 2) { srand(atoi(argv[1])); height = HEIGHT; width = WIDTH; for(int i = 0 ; i < height ; ++i) for(int j = 0 ; j < width ; ++j) { if(false /*|| i == 0 || j == 0 || i == height-1 || j == width-1*/) { data_positive[i*width+j] = 0; data_negative[i*width+j] = 0; } else { data_positive[i*width+j] = rand()%300; data_negative[i*width+j] = rand()%300; } } } int * d_data_positive, * d_data_negative; CUDA_SAFE_CALL(cudaMalloc((void**)&(d_data_positive),sizeof(int)*width*height)); CUDA_SAFE_CALL(cudaMalloc((void**)&(d_data_negative),sizeof(int)*width*height)); CUDA_SAFE_CALL(cudaMemcpy(d_data_positive,data_positive,sizeof(int)*width*height,cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_data_negative,data_negative,sizeof(int)*width*height,cudaMemcpyHostToDevice)); GlobalWrapper gw = GC_Init(width, height, d_data_positive, d_data_negative, 50); int * label = (int *) malloc(sizeof(int) * width * height); GC_Optimize(gw, label); /*for(int i = 0 ; i < height ; ++i) { for(int j = 0 ; j < width ; ++j) cout << label[i*width+j] << " "; cout << endl; }*/ free(label); CUDA_SAFE_CALL(cudaFree(d_data_positive)); CUDA_SAFE_CALL(cudaFree(d_data_negative)); GC_End(&gw); return 0; }
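// A tiny host-side sketch (separate from the files above) of why the hand-written test
// pattern above works: data_positive and data_negative are declared with SIZE (400*400)
// elements but given only 60 initializers (the 15 x 4 pattern), and in C++ the remaining
// elements of a partially initialized aggregate are zero-initialized, so the unused tail
// of the buffers is guaranteed to be zero.
#include <cstdio>

#define SIZE (400 * 400)

int main() {
  int data[SIZE] = {0, 0, 0, 0, 0, 4};  // 6 explicit initializers, SIZE elements total
  printf("data[5]=%d data[6]=%d data[SIZE-1]=%d\n", data[5], data[6], data[SIZE - 1]);
  return 0;  // prints: data[5]=4 data[6]=0 data[SIZE-1]=0
}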
fd1e8e73fd5de2d339b246d70a1a7c5e63864a4b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/lamda_softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void LamdaSoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const Dtype lamda_, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); Dtype w = (label_value == 0) ? lamda_ : (1-lamda_); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -w * log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void LamdaSoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( LamdaSoftmaxLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, label, loss_data, lamda_, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void LamdaSoftmaxLossBackwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const Dtype lamda_, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); Dtype w = (label_value == 0) ? 
lamda_ : (1-lamda_); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; for (int k = 0; k < channels; ++k) { bottom_diff[n * dim + k * spatial_dim + s] *= w; } } } } template <typename Dtype> void LamdaSoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( LamdaSoftmaxLossBackwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(nthreads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, nthreads, prob_data, top_data, label, bottom_diff, lamda_, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(LamdaSoftmaxWithLossLayer); } // namespace caffe
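// A host-side sketch (separate from the file above) of the per-element forward loss in
// LamdaSoftmaxLossForwardGPU: the softmax cross-entropy term -log(p[label]) is weighted
// by lamda_ when the label is 0 and by (1 - lamda_) otherwise, with FLT_MIN guarding
// against log(0). The probability, label, and lamda value below are made-up samples.
#include <algorithm>
#include <cfloat>
#include <cmath>
#include <cstdio>

static float WeightedSoftmaxLoss(float prob_at_label, int label_value, float lamda) {
  const float w = (label_value == 0) ? lamda : (1.0f - lamda);
  return -w * std::log(std::max(prob_at_label, FLT_MIN));
}

int main() {
  // w = 1 - 0.3 = 0.7, loss = 0.7 * -log(0.25) ~= 0.9704
  printf("%f\n", WeightedSoftmaxLoss(0.25f, 1, 0.3f));
  return 0;
}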
fd1e8e73fd5de2d339b246d70a1a7c5e63864a4b.cu
#include <algorithm> #include <cfloat> #include <vector> #include "caffe/layers/lamda_softmax_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void LamdaSoftmaxLossForwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* label, Dtype* loss, const Dtype lamda_, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); Dtype w = (label_value == 0) ? lamda_ : (1-lamda_); if (has_ignore_label_ && label_value == ignore_label_) { loss[index] = 0; counts[index] = 0; } else { loss[index] = -w * log(max(prob_data[n * dim + label_value * spatial_dim + s], Dtype(FLT_MIN))); counts[index] = 1; } } } template <typename Dtype> void LamdaSoftmaxWithLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { softmax_layer_->Forward(softmax_bottom_vec_, softmax_top_vec_); const Dtype* prob_data = prob_.gpu_data(); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is not used for anything until it is overwritten // on the backward pass, we use it here to avoid having to allocate new GPU // memory to accumulate intermediate results in the kernel. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); // Similarly, this memory is never used elsewhere, and thus we can use it // to avoid having to allocate additional GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) LamdaSoftmaxLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, label, loss_data, lamda_, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype loss; caffe_gpu_asum(nthreads, loss_data, &loss); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } top[0]->mutable_cpu_data()[0] = loss / get_normalizer(normalization_, valid_count); if (top.size() == 2) { top[1]->ShareData(prob_); } } template <typename Dtype> __global__ void LamdaSoftmaxLossBackwardGPU(const int nthreads, const Dtype* prob_data, const Dtype* top, const Dtype* label, Dtype* bottom_diff, const Dtype lamda_, const int num, const int dim, const int spatial_dim, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { const int channels = dim / spatial_dim; CUDA_KERNEL_LOOP(index, nthreads) { const int n = index / spatial_dim; const int s = index % spatial_dim; const int label_value = static_cast<int>(label[n * spatial_dim + s]); Dtype w = (label_value == 0) ? 
lamda_ : (1-lamda_); if (has_ignore_label_ && label_value == ignore_label_) { for (int c = 0; c < channels; ++c) { bottom_diff[n * dim + c * spatial_dim + s] = 0; } counts[index] = 0; } else { bottom_diff[n * dim + label_value * spatial_dim + s] -= 1; counts[index] = 1; for (int k = 0; k < channels; ++k) { bottom_diff[n * dim + k * spatial_dim + s] *= w; } } } } template <typename Dtype> void LamdaSoftmaxWithLossLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); const Dtype* prob_data = prob_.gpu_data(); const Dtype* top_data = top[0]->gpu_data(); caffe_gpu_memcpy(prob_.count() * sizeof(Dtype), prob_data, bottom_diff); const Dtype* label = bottom[1]->gpu_data(); const int dim = prob_.count() / outer_num_; const int nthreads = outer_num_ * inner_num_; // Since this memory is never used for anything else, // we use to to avoid allocating new GPU memory. Dtype* counts = prob_.mutable_gpu_diff(); // NOLINT_NEXT_LINE(whitespace/operators) LamdaSoftmaxLossBackwardGPU<Dtype><<<CAFFE_GET_BLOCKS(nthreads), CAFFE_CUDA_NUM_THREADS>>>(nthreads, prob_data, top_data, label, bottom_diff, lamda_, outer_num_, dim, inner_num_, has_ignore_label_, ignore_label_, counts); Dtype valid_count = -1; // Only launch another CUDA kernel if we actually need the count of valid // outputs. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(nthreads, counts, &valid_count); } const Dtype loss_weight = top[0]->cpu_diff()[0] / get_normalizer(normalization_, valid_count); caffe_gpu_scal(prob_.count(), loss_weight , bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(LamdaSoftmaxWithLossLayer); } // namespace caffe
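// A host-side sketch (separate from the files above) of the per-sample gradient in
// LamdaSoftmaxLossBackwardGPU: the diff buffer starts as a copy of the softmax
// probabilities, 1 is subtracted at the label channel, and every channel is then scaled
// by the same class weight w; the final caffe_gpu_scal by loss_weight is omitted here.
// The probabilities, label, and lamda value below are made-up samples.
#include <cstdio>
#include <vector>

static std::vector<float> WeightedSoftmaxGrad(std::vector<float> prob, int label_value,
                                              float lamda) {
  const float w = (label_value == 0) ? lamda : (1.0f - lamda);
  std::vector<float> diff = prob;  // caffe_gpu_memcpy(prob_data -> bottom_diff)
  diff[label_value] -= 1.0f;       // bottom_diff[label] -= 1
  for (float& d : diff) d *= w;    // scale all channels of this sample by w
  return diff;
}

int main() {
  const std::vector<float> g = WeightedSoftmaxGrad({0.7f, 0.2f, 0.1f}, 1, 0.3f);
  printf("%f %f %f\n", g[0], g[1], g[2]);  // 0.49 -0.56 0.07
  return 0;
}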
21805a4955060e597097dae91e81301e2bc1a9d7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Lab 5, image filters with CUDA. // Compile with a command-line similar to Lab 4: // nvcc filter.cu -c -arch=sm_30 -o filter.o // g++ filter.o milli.c readppm.c -lGL -lm -lcuda -lcudart -L/usr/local/cuda/lib -lglut -o filter // or (multicore lab) // nvcc filter.cu -c -arch=sm_20 -o filter.o // g++ filter.o milli.c readppm.c -lGL -lm -lcuda -L/usr/local/cuda/lib64 -lcudart -lglut -o filter // 2017-11-27: Early pre-release, dubbed "beta". // 2017-12-03: First official version! Brand new lab 5 based on the old lab 6. // Better variable names, better prepared for some lab tasks. More changes may come // but I call this version 1.0b2. // 2017-12-04: Two fixes: Added command-lines (above), fixed a bug in computeImages // that allocated too much memory. b3 // 2017-12-04: More fixes: Tightened up the kernel with edge clamping. // Less code, nicer result (no borders). Cleaned up some messed up X and Y. b4 #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #ifdef __APPLE__ #include <GLUT/glut.h> #include <OpenGL/gl.h> #else #include <GL/glut.h> #endif #include "readppm.h" #include "milli.h" // Use these for setting shared memory size. #define maxKernelSizeX 32 #define maxKernelSizeY 32 //#define median #define separable //#define gaussian __device__ int find_median(int *data, int numelements) { int sum = 0; int iter = 0; int midvalue = (int)((numelements+1)/2); // numelements will always be odd while(sum < midvalue) { sum += data[iter++]; } return iter; } __global__ void filter(unsigned char *image, unsigned char *out, const unsigned int imagesizex, const unsigned int imagesizey, const int kernelsizex, const int kernelsizey) { // // Load image to shared memory // // map from blockIdx to pixel position int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; // allocate memory requred for maximum kernel size const int maxmemsizex = 2 * maxKernelSizeX + 1; const int maxmemsizey = 2 * maxKernelSizeY + 1; __shared__ unsigned char thisBlock[maxmemsizex * maxmemsizey * 3]; // define the block that should be loaded to shared memory // TODO Shoud memstartx and medendx be multiplied with 3? 
int memstartx = max(0, (int)(blockIdx.x*blockDim.x) - kernelsizex); int memstarty = max(0, (int)(blockIdx.y*blockDim.y) - kernelsizey); int memendx = min(imagesizex-1, memstartx + (int)blockDim.x + 2*kernelsizex - 1); int memendy = min(imagesizey-1, memstarty + (int)blockDim.y + 2*kernelsizey - 1); // how much memory should each tread load int memloadsize = (memendx - memstartx + 1) * (memendy - memstarty + 1); int blocksize = blockDim.x * blockDim.y; int memperthread = (int)(memloadsize/(blocksize)); int memsizex = memendx - memstartx + 1; // load image pixels to shared memory for(int i = 0; i <= memperthread; i++) { // Memory image coordinates (in pixels, without rgb) int mem_idx = (threadIdx.x + threadIdx.y *memsizex + i*blocksize); int mem_x = mem_idx % memsizex; int mem_y = (int)((mem_idx - mem_x) / memsizex); // Change mem_idx to work with rgb mem_idx *= 3; // Corresponding index in image data int img_x = memstartx + mem_x; int img_y = memstarty + mem_y; int img_idx = 3 * (img_x + img_y * imagesizex); if(mem_idx <= 3*memloadsize) { // r, g, b thisBlock[mem_idx] = image[img_idx]; thisBlock[mem_idx+1] = image[img_idx+1]; thisBlock[mem_idx+2] = image[img_idx+2]; } } __syncthreads(); // // Apply filter to image (curently not with shared memory) // int dy, dx; unsigned int sumx, sumy, sumz; int divby = (2*kernelsizex+1)*(2*kernelsizey+1); // Works for box filters only! // x and y in shared memory coordinates int memx = x - memstartx; int memy = y - memstarty; #ifdef gaussian int gaussweights[] = {1, 4, 6, 4, 1}; divby = 16; #endif if (x < imagesizex && y < imagesizey) // If inside image { // Filter kernel (simple box filter) #ifdef median int histogram_x[256]; int histogram_y[256]; int histogram_z[256]; int u; for(u = 0; u < 256; u++) { histogram_x[u] = 0; histogram_y[u] = 0; histogram_z[u] = 0; } #endif sumx=0;sumy=0;sumz=0; for(dy=-kernelsizey;dy<=kernelsizey;dy++) for(dx=-kernelsizex;dx<=kernelsizex;dx++) { // Use max and min to avoid branching! 
int yy = min(max(memy+dy, 0), memendy); int xx = min(max(memx+dx, 0), memendx); int idx = 3* (xx + memsizex*yy); #ifdef gaussian // dx or dy will always be == 0 when using separable filter int weight = gaussweights[dx+dy+2]; sumx += weight * thisBlock[idx]; sumy += weight * thisBlock[idx+1]; sumz += weight * thisBlock[idx+2]; #elif defined(median) histogram_x[(int)(thisBlock[idx])] += 1; histogram_y[(int)(thisBlock[idx+1])] += 1; histogram_z[(int)(thisBlock[idx+2])] += 1; #else sumx += thisBlock[idx]; sumy += thisBlock[idx+1]; sumz += thisBlock[idx+2]; #endif } #ifdef median out[(y*imagesizex+x)*3+0] = find_median(histogram_x, divby); out[(y*imagesizex+x)*3+1] = find_median(histogram_y, divby); out[(y*imagesizex+x)*3+2] = find_median(histogram_z, divby); #else out[(y*imagesizex+x)*3+0] = sumx/divby; out[(y*imagesizex+x)*3+1] = sumy/divby; out[(y*imagesizex+x)*3+2] = sumz/divby; #endif } } // Global variables for image data unsigned char *image, *pixels, *dev_bitmap, *dev_input, *dev_temp; unsigned int imagesizey, imagesizex; // Image size //////////////////////////////////////////////////////////////////////////////// // main computation function //////////////////////////////////////////////////////////////////////////////// void computeImages(int kernelsizex, int kernelsizey) { if (kernelsizex > maxKernelSizeX || kernelsizey > maxKernelSizeY) { printf("Kernel size out of bounds!\n"); return; } hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); int blocksize = 4; pixels = (unsigned char *) malloc(imagesizex*imagesizey*3); hipMalloc( (void**)&dev_input, imagesizex*imagesizey*3); hipMemcpy( dev_input, image, imagesizey*imagesizex*3, hipMemcpyHostToDevice ); hipMalloc( (void**)&dev_bitmap, imagesizex*imagesizey*3); hipMalloc( (void**)&dev_temp, imagesizex*imagesizey*3); hipEventRecord(start); hipEventSynchronize(start); #ifdef separable dim3 grid1(imagesizex/(blocksize), imagesizey); dim3 grid2(imagesizex*3, imagesizey/blocksize); dim3 blockGrid1(blocksize, 1); dim3 blockGrid2(3*1, blocksize); hipLaunchKernelGGL(( filter), dim3(grid1), dim3(blockGrid1), 0, 0, dev_input, dev_temp, imagesizex, imagesizey, kernelsizex, 0); // Awful load balance hipLaunchKernelGGL(( filter), dim3(grid2), dim3(blockGrid2), 0, 0, dev_temp, dev_bitmap, imagesizex, imagesizey, 0, kernelsizey); // Awful load balance #else dim3 grid(imagesizex * 3/(blocksize), imagesizey/blocksize); dim3 blockGrid(3*blocksize, blocksize); hipLaunchKernelGGL(( filter), dim3(grid), dim3(blockGrid), 0, 0, dev_input, dev_bitmap, imagesizex, imagesizey, kernelsizex, kernelsizey); // Awful load balance #endif hipDeviceSynchronize(); hipEventRecord(stop); hipEventSynchronize(stop); // Check for errors! hipError_t err = hipGetLastError(); if (err != hipSuccess) printf("Error: %s\n", hipGetErrorString(err)); hipMemcpy( pixels, dev_bitmap, imagesizey*imagesizex*3, hipMemcpyDeviceToHost ); float time_taken = 0; hipEventElapsedTime(&time_taken, start, stop); printf("Time taken time: %f ms\n", time_taken); hipFree( dev_bitmap ); hipFree( dev_input ); } // Display images void Draw() { // Dump the whole picture onto the screen. glClearColor( 0.0, 0.0, 0.0, 1.0 ); glClear( GL_COLOR_BUFFER_BIT ); if (imagesizey >= imagesizex) { // Not wide - probably square. Original left, result right. glRasterPos2f(-1, -1); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image ); glRasterPos2i(0, -1); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels); } else { // Wide image! Original on top, result below. 
glRasterPos2f(-1, -1); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels ); glRasterPos2i(-1, 0); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image ); } glFlush(); } // Main program, inits int main( int argc, char** argv) { glutInit(&argc, argv); glutInitDisplayMode( GLUT_SINGLE | GLUT_RGBA ); if (argc > 1) image = readppm(argv[1], (int *)&imagesizex, (int *)&imagesizey); else image = readppm((char *)"maskros512.ppm", (int *)&imagesizex, (int *)&imagesizey); if (imagesizey >= imagesizex) glutInitWindowSize( imagesizex*2, imagesizey ); else glutInitWindowSize( imagesizex, imagesizey*2 ); glutCreateWindow("Lab 5"); glutDisplayFunc(Draw); ResetMilli(); computeImages(2, 2); // You can save the result to a file like this: writeppm("out.ppm", imagesizey, imagesizex, pixels); printf("finish\n"); glutMainLoop(); return 0; }
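// A host-side sketch (separate from the file above) of the shared-memory tile bounds
// computed at the top of filter(): each block loads its own pixels plus a halo of
// kernelsizex pixels on both sides, clamped to the image. blockDimX = 4 matches the
// blocksize in computeImages; the 512-pixel image width and the block indices are
// sample values for illustration.
#include <algorithm>
#include <cstdio>

int main() {
  const int imagesizex = 512, blockDimX = 4, kernelsizex = 2;
  const int sample_blocks[] = {0, 1, 127};
  for (int blockIdxX : sample_blocks) {
    const int memstartx = std::max(0, blockIdxX * blockDimX - kernelsizex);
    const int memendx = std::min(imagesizex - 1,
                                 memstartx + blockDimX + 2 * kernelsizex - 1);
    const int memsizex = memendx - memstartx + 1;
    printf("block %3d loads columns [%3d, %3d] (%d values)\n",
           blockIdxX, memstartx, memendx, memsizex);
  }
  return 0;
}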
21805a4955060e597097dae91e81301e2bc1a9d7.cu
// Lab 5, image filters with CUDA. // Compile with a command-line similar to Lab 4: // nvcc filter.cu -c -arch=sm_30 -o filter.o // g++ filter.o milli.c readppm.c -lGL -lm -lcuda -lcudart -L/usr/local/cuda/lib -lglut -o filter // or (multicore lab) // nvcc filter.cu -c -arch=sm_20 -o filter.o // g++ filter.o milli.c readppm.c -lGL -lm -lcuda -L/usr/local/cuda/lib64 -lcudart -lglut -o filter // 2017-11-27: Early pre-release, dubbed "beta". // 2017-12-03: First official version! Brand new lab 5 based on the old lab 6. // Better variable names, better prepared for some lab tasks. More changes may come // but I call this version 1.0b2. // 2017-12-04: Two fixes: Added command-lines (above), fixed a bug in computeImages // that allocated too much memory. b3 // 2017-12-04: More fixes: Tightened up the kernel with edge clamping. // Less code, nicer result (no borders). Cleaned up some messed up X and Y. b4 #include <stdio.h> #include <stdlib.h> #include <string.h> #include <sys/time.h> #ifdef __APPLE__ #include <GLUT/glut.h> #include <OpenGL/gl.h> #else #include <GL/glut.h> #endif #include "readppm.h" #include "milli.h" // Use these for setting shared memory size. #define maxKernelSizeX 32 #define maxKernelSizeY 32 //#define median #define separable //#define gaussian __device__ int find_median(int *data, int numelements) { int sum = 0; int iter = 0; int midvalue = (int)((numelements+1)/2); // numelements will always be odd while(sum < midvalue) { sum += data[iter++]; } return iter; } __global__ void filter(unsigned char *image, unsigned char *out, const unsigned int imagesizex, const unsigned int imagesizey, const int kernelsizex, const int kernelsizey) { // // Load image to shared memory // // map from blockIdx to pixel position int x = blockIdx.x * blockDim.x + threadIdx.x; int y = blockIdx.y * blockDim.y + threadIdx.y; // allocate memory requred for maximum kernel size const int maxmemsizex = 2 * maxKernelSizeX + 1; const int maxmemsizey = 2 * maxKernelSizeY + 1; __shared__ unsigned char thisBlock[maxmemsizex * maxmemsizey * 3]; // define the block that should be loaded to shared memory // TODO Shoud memstartx and medendx be multiplied with 3? 
int memstartx = max(0, (int)(blockIdx.x*blockDim.x) - kernelsizex); int memstarty = max(0, (int)(blockIdx.y*blockDim.y) - kernelsizey); int memendx = min(imagesizex-1, memstartx + (int)blockDim.x + 2*kernelsizex - 1); int memendy = min(imagesizey-1, memstarty + (int)blockDim.y + 2*kernelsizey - 1); // how much memory should each tread load int memloadsize = (memendx - memstartx + 1) * (memendy - memstarty + 1); int blocksize = blockDim.x * blockDim.y; int memperthread = (int)(memloadsize/(blocksize)); int memsizex = memendx - memstartx + 1; // load image pixels to shared memory for(int i = 0; i <= memperthread; i++) { // Memory image coordinates (in pixels, without rgb) int mem_idx = (threadIdx.x + threadIdx.y *memsizex + i*blocksize); int mem_x = mem_idx % memsizex; int mem_y = (int)((mem_idx - mem_x) / memsizex); // Change mem_idx to work with rgb mem_idx *= 3; // Corresponding index in image data int img_x = memstartx + mem_x; int img_y = memstarty + mem_y; int img_idx = 3 * (img_x + img_y * imagesizex); if(mem_idx <= 3*memloadsize) { // r, g, b thisBlock[mem_idx] = image[img_idx]; thisBlock[mem_idx+1] = image[img_idx+1]; thisBlock[mem_idx+2] = image[img_idx+2]; } } __syncthreads(); // // Apply filter to image (curently not with shared memory) // int dy, dx; unsigned int sumx, sumy, sumz; int divby = (2*kernelsizex+1)*(2*kernelsizey+1); // Works for box filters only! // x and y in shared memory coordinates int memx = x - memstartx; int memy = y - memstarty; #ifdef gaussian int gaussweights[] = {1, 4, 6, 4, 1}; divby = 16; #endif if (x < imagesizex && y < imagesizey) // If inside image { // Filter kernel (simple box filter) #ifdef median int histogram_x[256]; int histogram_y[256]; int histogram_z[256]; int u; for(u = 0; u < 256; u++) { histogram_x[u] = 0; histogram_y[u] = 0; histogram_z[u] = 0; } #endif sumx=0;sumy=0;sumz=0; for(dy=-kernelsizey;dy<=kernelsizey;dy++) for(dx=-kernelsizex;dx<=kernelsizex;dx++) { // Use max and min to avoid branching! 
int yy = min(max(memy+dy, 0), memendy); int xx = min(max(memx+dx, 0), memendx); int idx = 3* (xx + memsizex*yy); #ifdef gaussian // dx or dy will always be == 0 when using separable filter int weight = gaussweights[dx+dy+2]; sumx += weight * thisBlock[idx]; sumy += weight * thisBlock[idx+1]; sumz += weight * thisBlock[idx+2]; #elif defined(median) histogram_x[(int)(thisBlock[idx])] += 1; histogram_y[(int)(thisBlock[idx+1])] += 1; histogram_z[(int)(thisBlock[idx+2])] += 1; #else sumx += thisBlock[idx]; sumy += thisBlock[idx+1]; sumz += thisBlock[idx+2]; #endif } #ifdef median out[(y*imagesizex+x)*3+0] = find_median(histogram_x, divby); out[(y*imagesizex+x)*3+1] = find_median(histogram_y, divby); out[(y*imagesizex+x)*3+2] = find_median(histogram_z, divby); #else out[(y*imagesizex+x)*3+0] = sumx/divby; out[(y*imagesizex+x)*3+1] = sumy/divby; out[(y*imagesizex+x)*3+2] = sumz/divby; #endif } } // Global variables for image data unsigned char *image, *pixels, *dev_bitmap, *dev_input, *dev_temp; unsigned int imagesizey, imagesizex; // Image size //////////////////////////////////////////////////////////////////////////////// // main computation function //////////////////////////////////////////////////////////////////////////////// void computeImages(int kernelsizex, int kernelsizey) { if (kernelsizex > maxKernelSizeX || kernelsizey > maxKernelSizeY) { printf("Kernel size out of bounds!\n"); return; } cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); int blocksize = 4; pixels = (unsigned char *) malloc(imagesizex*imagesizey*3); cudaMalloc( (void**)&dev_input, imagesizex*imagesizey*3); cudaMemcpy( dev_input, image, imagesizey*imagesizex*3, cudaMemcpyHostToDevice ); cudaMalloc( (void**)&dev_bitmap, imagesizex*imagesizey*3); cudaMalloc( (void**)&dev_temp, imagesizex*imagesizey*3); cudaEventRecord(start); cudaEventSynchronize(start); #ifdef separable dim3 grid1(imagesizex/(blocksize), imagesizey); dim3 grid2(imagesizex*3, imagesizey/blocksize); dim3 blockGrid1(blocksize, 1); dim3 blockGrid2(3*1, blocksize); filter<<<grid1, blockGrid1>>>(dev_input, dev_temp, imagesizex, imagesizey, kernelsizex, 0); // Awful load balance filter<<<grid2, blockGrid2>>>(dev_temp, dev_bitmap, imagesizex, imagesizey, 0, kernelsizey); // Awful load balance #else dim3 grid(imagesizex * 3/(blocksize), imagesizey/blocksize); dim3 blockGrid(3*blocksize, blocksize); filter<<<grid, blockGrid>>>(dev_input, dev_bitmap, imagesizex, imagesizey, kernelsizex, kernelsizey); // Awful load balance #endif cudaThreadSynchronize(); cudaEventRecord(stop); cudaEventSynchronize(stop); // Check for errors! cudaError_t err = cudaGetLastError(); if (err != cudaSuccess) printf("Error: %s\n", cudaGetErrorString(err)); cudaMemcpy( pixels, dev_bitmap, imagesizey*imagesizex*3, cudaMemcpyDeviceToHost ); float time_taken = 0; cudaEventElapsedTime(&time_taken, start, stop); printf("Time taken time: %f ms\n", time_taken); cudaFree( dev_bitmap ); cudaFree( dev_input ); } // Display images void Draw() { // Dump the whole picture onto the screen. glClearColor( 0.0, 0.0, 0.0, 1.0 ); glClear( GL_COLOR_BUFFER_BIT ); if (imagesizey >= imagesizex) { // Not wide - probably square. Original left, result right. glRasterPos2f(-1, -1); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image ); glRasterPos2i(0, -1); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels); } else { // Wide image! Original on top, result below. 
glRasterPos2f(-1, -1); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, pixels ); glRasterPos2i(-1, 0); glDrawPixels( imagesizex, imagesizey, GL_RGB, GL_UNSIGNED_BYTE, image ); } glFlush(); } // Main program, inits int main( int argc, char** argv) { glutInit(&argc, argv); glutInitDisplayMode( GLUT_SINGLE | GLUT_RGBA ); if (argc > 1) image = readppm(argv[1], (int *)&imagesizex, (int *)&imagesizey); else image = readppm((char *)"maskros512.ppm", (int *)&imagesizex, (int *)&imagesizey); if (imagesizey >= imagesizex) glutInitWindowSize( imagesizex*2, imagesizey ); else glutInitWindowSize( imagesizex, imagesizey*2 ); glutCreateWindow("Lab 5"); glutDisplayFunc(Draw); ResetMilli(); computeImages(2, 2); // You can save the result to a file like this: writeppm("out.ppm", imagesizey, imagesizex, pixels); printf("finish\n"); glutMainLoop(); return 0; }
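// A host-side sketch (separate from the files above) of the histogram-median search used
// by the median path: per channel, the kernel accumulates a 256-bin histogram over the
// filter window and then walks it until the cumulative count reaches (numelements + 1)/2.
// This sketch returns the bin index where that happens; the device find_median above
// returns that index plus one, since iter is post-incremented before the loop exits.
// The sample histogram below (a 5x5 window) is made up.
#include <cstdio>

static int HistogramMedian(const int histogram[256], int numelements) {
  const int midvalue = (numelements + 1) / 2;  // numelements is always odd here
  int sum = 0;
  for (int bin = 0; bin < 256; ++bin) {
    sum += histogram[bin];
    if (sum >= midvalue) return bin;
  }
  return 255;
}

int main() {
  int histogram[256] = {0};
  histogram[10] = 12;   // twelve pixels with value 10
  histogram[30] = 1;    // one pixel with value 30
  histogram[200] = 12;  // twelve pixels with value 200
  printf("median = %d\n", HistogramMedian(histogram, 25));  // 30
  return 0;
}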
ead0023d625cb2dca44f491778e46f59f95243cd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // cudamatrix/cu-kernels.cu // Copyright 2009-2012 Karel Vesely // 2013 Ehsan Variani // 2013 Johns Hopkins University (author: Daniel Povey) // 2013 Hainan Xu // 2013 Xiaohui Zhang // 2013-2015 Guoguo Chen // 2016-2017 Shiyin Kang // 2017 Hossein Hadian, Daniel Galvez // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED // WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, // MERCHANTABLITY OR NON-INFRINGEMENT. // See the Apache 2 License for the specific language governing permissions and // limitations under the License. // In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers #include <cfloat> #include <limits> #include <math_constants.h> #include "cudamatrix/cu-kernels-ansi.h" /*********************************************************************** * Generic __device__ functions */ template<typename Real> __device__ static Real _sum_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (sum) while (nTotalThreads > 1) { int32_cuda halfPoint = ((1 + nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x >= halfPoint) { // was < // Get the shared value stored by another thread Real temp = 0.0; if (threadIdx.x < nTotalThreads) { // was +halfPoint temp = buffer[threadIdx.x]; // was +halfPoint } buffer[threadIdx.x - halfPoint] += temp; } __syncthreads(); nTotalThreads = ((1 + nTotalThreads) >> 1); // divide by two. } // the result return buffer[0]; } /*********************************************************************** * CUDA kernels * the functions are templated to have the float/double operations */ /* * CuMatrix */ template<typename Real> __global__ static void _copy_low_upp(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i <= j || i >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } template<typename Real> __global__ static void _copy_upp_low(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j <= i || j >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } // mat += diag(vec) * mat2. 
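// Elementwise this computes mat(j, i) = alpha * vec(j) * mat2(j, i) + beta * mat(j, i);
// mat2 is addressed through explicit row/col strides, so the caller can pass a
// transposed (or otherwise strided) view of it.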
template<typename Real> __global__ static void _add_diag_vec_mat(Real alpha, Real *mat, MatrixDim mat_dim, const Real *vec, const Real *mat2, int mat2_row_stride, int mat2_col_stride, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // column index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = j * mat_dim.stride + i, index2 = j * mat2_row_stride + i * mat2_col_stride; if (i < mat_dim.cols && j < mat_dim.rows) { mat[index] = alpha * vec[j] * mat2[index2] + beta * mat[index]; } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp(Real* A, const OtherReal* B, MatrixDim dmat) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dmat.cols && j < dmat.rows) { int32_cuda index_B = (j * (j + 1) / 2) + i; int32_cuda index_A = j * dmat.stride + i; if (i <= j) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp_trans(Real* A, const OtherReal* B, MatrixDim dmat) { // we interpret these indexes oppositely from normal, but it doesn't // matter as it's invoked in a symmetric way. int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // transpose the indices used to index the source TpMatrix. if (i < dmat.rows && j < dmat.cols) { int32_cuda index_B = (j * (j + 1) / 2) + i; int32_cuda index_A = i * dmat.stride + j; if (i <= j) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } template<typename Real, typename OtherReal> __global__ static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row-index. int32_cuda index_out = i + j * d_out.stride; int32_cuda index_in = i + j * d_in.stride; if (i < d_out.cols && j < d_out.rows) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } template<int TileDim, typename Real, typename OtherReal> __global__ static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { // Use shared meme to achieve both coalesced memory reading and writing // '+1' to avoid bank conflict when reading sbuf __shared__ Real sbuf[TileDim][TileDim + 1]; const int32_cuda i_in = blockIdx.y * TileDim + threadIdx.y; // row-index const int32_cuda j_in = blockIdx.x * TileDim + threadIdx.x; // col-index const int32_cuda tile_stride_in = CU1DBLOCK / TileDim * d_in.stride; int32_cuda index_in = i_in * d_in.stride + j_in; # pragma unroll for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) { if (i_in + i < d_in.rows && j_in < d_in.cols) { sbuf[threadIdx.y + i][threadIdx.x] = static_cast<Real>(mat_in[index_in]); } index_in += tile_stride_in; } __syncthreads(); // Grid is transposed, but block is not yet. // Warp (blockDim.x) is always along the row-dim. 
const int32_cuda i_out = blockIdx.x * TileDim + threadIdx.y; const int32_cuda j_out = blockIdx.y * TileDim + threadIdx.x; const int32_cuda tile_stride_out = CU1DBLOCK / TileDim * d_out.stride; int32_cuda index_out = i_out * d_out.stride + j_out; # pragma unroll for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) { if (i_out + i < d_out.rows && j_out < d_out.cols) { // block is tranposed when reading sbuf mat_out[index_out] = sbuf[threadIdx.x][threadIdx.y + i]; } index_out += tile_stride_out; } } // Copy from CSR sparse matrix to dense matrix // // We use warpSize threads per row to access only the nnz elements. // Every CU1DBLOCK/warpSize rows share one thread block. // 1D grid to cover all rows. template<typename Real, typename OtherReal> __global__ static void _copy_from_smat(Real* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const OtherReal* smat_val) { const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx if (i < mat_dim.rows) { const int nz_start = smat_row_ptr[i]; const int nz_end = smat_row_ptr[i + 1]; for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id += warpSize) { const int j = smat_col_idx[nz_id]; // col idx mat[i * mat_dim.stride + j] = static_cast<Real>(smat_val[nz_id]); } } } /// Select a subset of the rows of a CSR SparseMatrix. /// Sets 'out' to only the rows of 'in' that are listed /// in 'row_indexes'. 'row_indexes' must be sorted and unique, /// and satisfy 0 <= row_indexes[i] < in.size(). /// /// Note: 'out_row_ptr' is an input parameter that is calculated before /// calling this kernel function /// /// We use warpSize threads per row to access only the nnz elements. /// Every CU1DBLOCK/warpSize rows share one thread block. /// 1D grid to cover all selected rows. template<typename Real> __global__ static void _select_rows(const int* out_row_ptr, int* out_col_idx, Real* out_val, const int* row_indexes, const int num_selected_rows, const int* in_row_ptr, const int* in_col_idx, const Real* in_val) { const int out_i = blockIdx.x * blockDim.y + threadIdx.y; // out row idx if (out_i < num_selected_rows) { const int in_i = row_indexes[out_i]; const int in_row_start = in_row_ptr[in_i]; const int out_row_start = out_row_ptr[out_i]; const int row_length = in_row_ptr[in_i + 1] - in_row_start; for (int k = threadIdx.x; k < row_length; k += warpSize) { const int in_n = in_row_start + k; const int out_n = out_row_start + k; out_col_idx[out_n] = in_col_idx[in_n]; out_val[out_n] = in_val[in_n]; } } } // mat += alpha * smat // // We use warpSize threads per row to access only the nonzero elements. // Every CU1DBLOCK/warpSize rows share one thread block. // 1D grid to cover all rows of smat. template<typename Real> __global__ static void _add_smat(Real* mat, MatrixDim mat_dim, Real alpha, const int* smat_row_ptr, const int* smat_col_idx, const Real* smat_val) { const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx if (i < mat_dim.rows) { const int row_start = smat_row_ptr[i]; const int row_end = smat_row_ptr[i + 1]; for (int n = row_start + threadIdx.x; n < row_end; n += warpSize) { const int j = smat_col_idx[n]; // col idx of smat mat[i * mat_dim.stride + j] += alpha * smat_val[n]; } } } // mat += alpha * smat^T // // We use warpSize threads per row to access only the nonzero elements. // Every CU1DBLOCK/warpSize rows share one thread block. // 1D grid to cover all rows of smat. 
template<typename Real> __global__ static void _add_smat_trans(Real* mat, MatrixDim mat_dim, Real alpha, const int* smat_row_ptr, const int* smat_col_idx, const Real* smat_val) { const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx if (i < mat_dim.cols) { const int row_start = smat_row_ptr[i]; const int row_end = smat_row_ptr[i + 1]; for (int n = row_start + threadIdx.x; n < row_end; n += warpSize) { const int j = smat_col_idx[n]; // col idx of smat mat[j * mat_dim.stride + i] += alpha * smat_val[n]; } } } /// For each element x of the matrix, set it to /// (x < 0 ? exp(x) : x + 1). /// Use block/grid sizes for simple matrix ops template<typename T> __global__ static void _apply_exp_special(T* out, MatrixDim out_dim, const T* in, int in_stride) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < out_dim.rows && j < out_dim.cols) { T x = in[i * in_stride + j]; if (x < T(0)) { out[i * out_dim.stride + j] = exp(x); } else { out[i * out_dim.stride + j] = x + T(1); } } } /// Fill the array 'data' with the sequence [base ... base + length) /// Use 1D block and 1D grid template<typename T> __global__ static void _sequence(T* data, int length, T base) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < length) { data[i] = base + T(i); } } // Copy from CSR sparse matrix to transposed dense matrix // // We use warpSize threads per row to access only the nnz elements. // Every CU1DBLOCK/warpSize rows share one thread block. // 1D grid to cover all rows. template<typename Real, typename OtherReal> __global__ static void _copy_from_smat_trans(Real* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const OtherReal* smat_val) { const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx of smat if (i < mat_dim.cols) { const int nz_start = smat_row_ptr[i]; const int nz_end = smat_row_ptr[i + 1]; for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id += warpSize) { const int j = smat_col_idx[nz_id]; // col idx of smat mat[j * mat_dim.stride + i] = static_cast<Real>(smat_val[nz_id]); } } } // First stage of trace(mat * smat^T) // We use warpSize threads per row to access only the nnz elements. // Every CU1DBLOCK/warpSize rows share one thread block. // 1D grid to cover all rows of smat. template<typename Real> __global__ static void _trace_mat_smat_trans(const Real* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const Real* smat_val, Real* trace_vec) { const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx of smat if (i < mat_dim.rows) { const int nz_start = smat_row_ptr[i]; const int nz_end = smat_row_ptr[i + 1]; for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id += warpSize) { const int j = smat_col_idx[nz_id]; // col idx of smat trace_vec[nz_id] = mat[i * mat_dim.stride + j] * smat_val[nz_id]; } } } // First stage of trace(mat * smat) // We use warpSize threads per row to access only the nnz elements. // Every CU1DBLOCK/warpSize rows share one thread block. // 1D grid to cover all rows of smat. 
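// Since trace(mat * smat) = sum over the nonzero entries (i, j) of smat of
// mat(j, i) * smat(i, j), this kernel only writes the per-nonzero products into
// trace_vec; summing trace_vec (the second stage) gives the trace.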
template<typename Real> __global__ static void _trace_mat_smat(const Real* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const Real* smat_val, Real* trace_vec) { const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx of smat if (i < mat_dim.cols) { const int nz_start = smat_row_ptr[i]; const int nz_end = smat_row_ptr[i + 1]; for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id += warpSize) { const int j = smat_col_idx[nz_id]; // col idx of smat trace_vec[nz_id] = mat[j * mat_dim.stride + i] * smat_val[nz_id]; } } } template<typename Real> __global__ static void _apply_exp(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) { mat[index] = exp(mat[index]); } } template<typename Real> __global__ static void _scale_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i + 1) * (i + 2) / 2) - 1; if (i < dim) { mat[index] = value * mat[index]; } } template<typename Real> __global__ static void _set_diag(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = i + i * d.stride; if (i < d.rows && i < d.cols) { mat[index] = value; } } template<typename Real> __global__ static void _set_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i + 1) * (i + 2) / 2) - 1; if (i < dim) { mat[index] = value; } } template<typename Real> __global__ static void _add_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i + 1) * (i + 2) / 2) - 1; if (i < dim) { mat[index] = mat[index] + value; } } template<typename Real> __global__ static void _set_const(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // column int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = value; } template<typename Real> __global__ static void _set_zero_above_diag(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < i) mat[index] = 0.0; } template<typename Real> __global__ static void _add(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] + value; } template<typename Real> __global__ static void _scale(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] * value; } template<typename Real> __global__ static void _apply_log(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = log(mat[index]); } template<typename Real> __global__ static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + 
threadIdx.y; int32_cuda dst_index = i + j * dst_d.stride, src_index = i + j * src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = mat[dst_index] * A[src_index]; } template<typename Real> __global__ static void _div_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j * dst_d.stride, src_index = i + j * src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = mat[dst_index] / A[src_index]; } template<typename Real> __global__ static void _max(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j * dst_d.stride, src_index = i + j * src_stride; if (i < dst_d.cols && j < dst_d.rows) { Real a = mat[dst_index], b = A[src_index]; mat[dst_index] = fmax(a, b); } } template<typename Real> __global__ static void _min(Real* mat, const Real* other, MatrixDim mat_d, int other_stride) { int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda mat_index = i * mat_d.stride + j; int32_cuda other_index = i * other_stride + j; if (j < mat_d.cols && i < mat_d.rows) { Real a = mat[mat_index], b = other[other_index]; mat[mat_index] = fmin(a, b); } } template<typename Real> __global__ static void _vec_mul_elements(Real* v, const Real* a, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) v[i] = v[i] * a[i]; } template<typename Real> __global__ static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[i]; } template<typename Real> __global__ static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[j]; } template<typename Real> __global__ static void _mul_rows_group_mat(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols) { int dst_index = i + j * d.stride; int src_index = i / group_size + j * src_stride; y[dst_index] *= x[src_index]; } } template<typename Real> __global__ void _diff_group_pnorm(Real *id, const Real *iv, const Real *ov, const Real* od, MatrixDim id_dim, int iv_stride, int ov_stride, int od_stride, int group_size, Real power) { const int j = blockIdx.x * blockDim.x + threadIdx.x; if (j < id_dim.cols) { const int grid_stride = gridDim.y * blockDim.y; const int src_j = j / group_size; int i = blockIdx.y * blockDim.y + threadIdx.y; for (; i < id_dim.rows; i += grid_stride) { const int iv_index = j + i * iv_stride; Real iv_ij = iv[iv_index]; Real ans; if (power == Real(2)) { const int ov_index = src_j + i * ov_stride; Real ov_ij = ov[ov_index]; ans = ov_ij <= 0.0 ? 0.0 : iv_ij / ov_ij; } else if (power == Real(1)) { Real iv_ij_sign = (iv_ij >= 0 ? 1 : -1); ans = (iv_ij == Real(0) ? 0.0 : iv_ij_sign); } else if (power == (sizeof(Real) == sizeof(float) ? 
CUDART_INF_F : CUDART_INF)) { const int ov_index = src_j + i * ov_stride; Real ov_ij = ov[ov_index]; Real iv_ij_sign = (iv_ij >= 0 ? 1 : -1); ans = ov_ij <= 0.0 ? 0.0 : (iv_ij_sign * (abs(iv_ij) == ov_ij ? 1.0 : 0.0)); } else { const int ov_index = src_j + i * ov_stride; Real ov_ij = ov[ov_index]; Real iv_ij_sign = (iv_ij >= 0 ? 1 : -1); if (ov_ij <= 0.0) { ans = 0.0; // The derivative is either 0 or undefined at the origin. } else { ans = iv_ij_sign * pow(std::abs(iv_ij), power - 1) * pow(ov_ij, 1 - power); } } const int od_index = src_j + i * od_stride; const int id_index = j + i * id_dim.stride; id[id_index] = ans * od[od_index]; } } } /// deriv is the derivative we will output; vec is the input we're computing /// the group max on, "maxv" is the previously computed group max. template<typename Real> __global__ static void _calc_group_max_deriv(Real *deriv, const Real *vec, const Real *maxv, MatrixDim deriv_dim, int vec_stride, int maxv_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < deriv_dim.rows && i < deriv_dim.cols) { int deriv_index = i + j * deriv_dim.stride; int vec_index = i + j * vec_stride; int maxv_index = i / group_size + j * maxv_stride; Real vec_element = vec[vec_index], // The element of the original vector. max_element = maxv[maxv_index]; // this is the max value Real ans = (max_element == vec_element ? 1.0 : 0.0); deriv[deriv_index] = ans; } } /// Set each element to y = (x == orig ? changed : x). template<typename Real> __global__ static void _replace_value(Real *vec, int dim, Real orig, Real changed) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) if (vec[i] == orig) vec[i] = changed; } template<typename Real> __global__ static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) { const int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y; if (i < d.rows) { const int32_cuda start = i * d.stride; const Real scale = Real(1) / vec_div[i]; const int32_cuda grid_stride = blockDim.x * gridDim.x; for (int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x; j < d.cols; j += grid_stride) { mat[start + j] *= scale; } } } template<typename Real> __global__ static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // column index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row index int32_cuda index = i + j * d.stride; int32_cuda index_src = i + j * src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha * src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_trans(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; int32_cuda index_src = j + i * src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha * src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_blocks(Real alpha, const Real* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; int32_cuda index_src = i + j * src_stride; if (i < d.cols && j < d.rows) for (int32_cuda p = 0; p < num_row_blocks; p++) { for (int32_cuda q = 0; q < num_col_blocks; q++) { dst[index] = alpha * src[index_src + p * 
src_stride * d.rows + q * d.cols] + dst[index]; } } } template<typename Real> __global__ static void _add_mat_repeated(Real alpha, const Real* src, MatrixDim src_dim, Real* dst, MatrixDim dst_dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda src_i = i % src_dim.cols, src_j = j % src_dim.rows, dst_index = i + j * dst_dim.stride, src_index = src_i + src_j * src_dim.stride; if (i < dst_dim.cols && j < dst_dim.rows) dst[dst_index] += alpha * src[src_index]; } template<typename Real> __global__ static void _add_mat_blocks_trans(Real alpha, const Real* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; int32_cuda index_src = j + i * src_stride; if (i < d.cols && j < d.rows) for (int32_cuda p = 0; p < num_row_blocks; p++) { for (int32_cuda q = 0; q < num_col_blocks; q++) { dst[index] = alpha * src[index_src + p * src_stride * d.cols + q * d.rows] + dst[index]; } } } template<typename Real> __global__ static void _set_mat_mat_div_mat(const Real* A, const Real* B, const Real* C, Real* dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride, a_index = i + j * stride_a, b_index = i + j * stride_b, c_index = i + j * stride_c; if (i < d.cols && j < d.rows) if (C[c_index] == 0) dst[index] = A[a_index]; else dst[index] = A[a_index] * B[b_index] / C[c_index]; } // Given a matrix input S (not packed!) and a lower-triangular matrix L, this // function does S = beta S + alpha * L^T L. This is used in PSD matrix // inversion. The i index is the row of the destination S and the j the column // (although of course the output is symmetric so it doesn't matter in a sense). // The main point of this is to make use of various symmetries and zero-ness. template<typename Real> __global__ static void _sy_add_tr2(Real alpha, Real beta, const Real *T, MatrixDim tdim, Real *S, MatrixDim sdim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= sdim.rows || j > i) return; // this thread computes the dot-product of the i'th column of // L with the j'th column of L. The values we're multiplying // are only nonzero for row-index k greater or equal to // max(i, j), which equals i. 
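  // In formula form: S(i, j) = beta * S(i, j) + alpha * sum_{k >= i} T(k, i) * T(k, j),
  // which is why the loop below can start at k = i.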
Real sum = 0.0; for (int k = i; k < sdim.rows; k++) { int i_index = i + tdim.stride * k, j_index = j + tdim.stride * k; sum += T[i_index] * T[j_index]; } int output_index1 = i * sdim.stride + j, output_index2 = j * sdim.stride + i; S[output_index1] = alpha * sum + beta * S[output_index1]; S[output_index2] = alpha * sum + beta * S[output_index2]; } template<typename Real> __global__ static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha * col[j] + beta * dst[index]; } template<typename Real> __global__ static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha * row[i] + beta * dst[index]; } template<typename Real> __global__ static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * dmat.stride; int32_cuda index2 = i + j * dmask.stride; if (i < dmat.cols && j < dmat.rows) if (mask[index2] == 0) mat[index] = 0; } template<typename Real> __global__ static void _add_mat_diag_vec(Real alpha, Real *mat, MatrixDim mat_dim, const Real *mat2, int mat2_row_stride, int mat2_col_stride, const Real *vec, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // column index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * mat_dim.stride, index2 = i * mat2_col_stride + j * mat2_row_stride; if (j < mat_dim.rows && i < mat_dim.cols) mat[index] = alpha * mat2[index2] * vec[i] + beta * mat[index]; } template<typename Real> __global__ static void _add_mat_mat_elements(Real *data, const Real *srcA_data, const Real *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, Real alpha, Real beta) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda tgt_index = i + j * dim.stride; int32_cuda srcA_index = i + j * srcA_stride; int32_cuda srcB_index = i + j * srcB_stride; if (i < dim.cols && j < dim.rows) { data[tgt_index] = alpha * srcA_data[srcA_index] * srcB_data[srcB_index] + beta * data[tgt_index]; } } /* * CuVector */ // very limited application! template<typename Real> __global__ static void _set_bias_params(Real* v, const Real* a, Real param_1, Real param_2, Real param_3, int* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) { Real ratio = a[i] / param_3; if ((ratio < 0.0) || (ratio >= 1.01)) { *flag = 1; return; } if (ratio < param_1) { Real factor = ((param_1 / ratio) > param_2) ? param_2 : (param_1 / ratio); v[i] = v[i] / factor; } else if (ratio > param_1) { Real factor = ((ratio / param_1) > param_2) ? param_2 : (ratio / param_1); v[i] = v[i] * factor; } } } template<typename Real, typename OtherReal> __global__ static void _cublas_copy_kaldi(int n, const Real* x, int incx, OtherReal* y, int incy) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { y[i * incy] = static_cast<OtherReal>(x[i * incx]); } } // This kernel writes a copy of the vector "v_in" to each row of the matrix // "m_out". 
the dimension of v_in should be equal to the #columns of m_out. template<typename Real> __global__ static void _copy_rows_from_vec(Real* m_out, MatrixDim d, const Real* v_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // column index. int j = blockIdx.y * blockDim.y + threadIdx.y; // row index. if (i < d.cols && j < d.rows) { int index = i + j * d.stride; m_out[index] = v_in[i]; } } // This kernel writes a copy of the vector "v_in" to each col of the matrix // "m_out". the dimension of v_in should be equal to the #row of m_out. template<typename Real> __global__ static void _copy_cols_from_vec(Real* m_out, MatrixDim d, const Real* v_in) { int i = blockIdx.y * blockDim.y + threadIdx.y; // row id int j = blockIdx.x * blockDim.x + threadIdx.x; // col id if (i < d.rows && j < d.cols) { m_out[i * d.stride + j] = v_in[i]; } } // _trace_mat_mat reduce the partial sum to // value[blockIdx.y * gridDim.x + blockIdx.x] // It use shared mem to transpose matrix B to ensure coalesced memory access template<int TileDim, typename Real> __global__ static void _trace_mat_mat(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { // Reuse shared mem and make indexing easier. "+1" to avoid bank conflict __shared__ union { Real trans[TileDim][TileDim + 1]; Real sum[CU1DBLOCK]; } smem; // linear thread id; const int32_cuda tid = threadIdx.y * blockDim.x + threadIdx.x; const int32_cuda grid_height = gridDim.y * TileDim; const int32_cuda ja = blockIdx.x * TileDim + threadIdx.x; const int32_cuda ib = blockIdx.x * TileDim + threadIdx.y; int32_cuda ia = blockIdx.y * TileDim + threadIdx.y; int32_cuda jb = blockIdx.y * TileDim + threadIdx.x; // Grid reduce Real tsum = Real(0); for (int32_cuda i0 = 0; i0 < dA.rows; i0 += grid_height) { // Load from B, transpose the block and store in shared mem if (jb < dA.rows) { # pragma unroll for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) { if (ib + i < dA.cols) { smem.trans[threadIdx.x][threadIdx.y + i] = B[(ib + i) * B_stride + jb]; } } } __syncthreads(); // Load from A, sum up the product. if (ja < dA.cols) { # pragma unroll for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) { if (ia + i < dA.rows) { tsum += A[(ia + i) * dA.stride + ja] * smem.trans[threadIdx.y + i][threadIdx.x]; } } } __syncthreads(); ia += grid_height; jb += grid_height; } smem.sum[tid] = tsum; __syncthreads(); // Block reduce # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) smem.sum[tid] += smem.sum[tid + shift]; __syncthreads(); } // Warp reduce. Implicitly synchronized within a warp. 
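  // Note: the warp-level loop below relies on implicit warp-synchronous execution
  // (no __syncwarp(), and the shared array is not declared volatile); on GPUs with
  // independent thread scheduling this assumption is no longer guaranteed to hold.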
if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift > 0; shift >>= 1) { smem.sum[tid] += smem.sum[tid + shift]; } } // output 1 sum per thread block if (tid == 0) { value[blockIdx.y * gridDim.x + blockIdx.x] = smem.sum[0]; } } // _trace_mat_mat_trans reduce the partial sum to // value[blockIdx.y * gridDim.x + blockIdx.x] template<typename Real> __global__ static void _trace_mat_mat_trans(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { __shared__ Real ssum[CU1DBLOCK]; // linear thread id; const int32_cuda tid = threadIdx.y * blockDim.x + threadIdx.x; const int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x; const int32_cuda grid_height = gridDim.y * blockDim.y; int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y; // Grid reduce Real tsum = Real(0); if (j < dA.cols) { while (i < dA.rows) { tsum += A[i * dA.stride + j] * B[i * B_stride + j]; i += grid_height; } } ssum[tid] = tsum; __syncthreads(); // Block reduce # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) ssum[tid] += ssum[tid + shift]; __syncthreads(); } // Warp reduce. Implicitly synchronized within a warp. if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift > 0; shift >>= 1) { ssum[tid] += ssum[tid + shift]; } } // output 1 sum per thread block if (tid == 0) { value[blockIdx.y * gridDim.x + blockIdx.x] = ssum[0]; } } // v = alpha * diag(M * N^T) + beta * v template<typename Real> __global__ static void _add_diag_mat_mat_MNT(const Real alpha, const Real* M, const MatrixDim dim_M, const Real* N, const int stride_N, const Real beta, Real* v) { __shared__ Real ssum[CU1DBLOCK]; const int tid = threadIdx.x; const int i = blockIdx.x; const int m_start = i * dim_M.stride; const int n_start = i * stride_N; // Loop along the matrix row. Reduce to CU1DBLOCK elements per row. Real tsum = Real(0); for (int j = tid; j < dim_M.cols; j += CU1DBLOCK) { tsum += M[m_start + j] * N[n_start + j]; } ssum[tid] = tsum; __syncthreads(); // Tree reduce to 2x warpSize elements. # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) ssum[tid] += ssum[tid + shift]; __syncthreads(); } // Warp reduce to 1 element. Threads implicitly synchronized within a warp. if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift > 0; shift >>= 1) { ssum[tid] += ssum[tid + shift]; } } // output 1 sum per thread block if (tid == 0) { v[i] = alpha * ssum[0] + beta * v[i]; } } // v = alpha * diag(M^T * N) + beta * v template<int TileDim, typename Real> __global__ static void _add_diag_mat_mat_MTN(const Real alpha, const Real* M, const int stride_M, const Real* N, const MatrixDim dim_N, const Real beta, Real* v) { __shared__ Real ssum[CU1DBLOCK]; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int j = blockIdx.x * blockDim.x + threadIdx.x; if (j >= dim_N.cols) return; // Loop along the matrix column. // Reduce to CU1DBLOCK / TileDim elements per column. Real tsum = Real(0); for (int i = threadIdx.y; i < dim_N.rows; i += blockDim.y) { tsum += M[i * stride_M + j] * N[i * dim_N.stride + j]; } ssum[tid] = tsum; __syncthreads(); // Tree reduce to 2x warpSize / TileDim elements per column. # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize && shift >= TileDim; shift >>= 1) { if (tid < shift) { ssum[tid] += ssum[tid + shift]; } __syncthreads(); } // Warp reduce to 1 element per column. // Threads implicitly synchronized within a warp. 
if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift >= TileDim; shift >>= 1) { ssum[tid] += ssum[tid + shift]; } } // output TileDim sums per thread block if (tid < TileDim) { v[j] = alpha * ssum[tid] + beta * v[j]; } } // v = alpha * diag(M * N) + beta * v template<int TileDim, typename Real> __global__ static void _add_diag_mat_mat_MN(const Real alpha, const Real* M, const int stride_M, const Real* N, const MatrixDim dim_N, const Real beta, Real* v) { // Reuse shared mem and make indexing easier. "+1" to avoid bank conflict __shared__ union { Real trans[TileDim][TileDim + 1]; Real sum[CU1DBLOCK]; } smem; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int i_m = blockIdx.x * TileDim + threadIdx.y; const int j_n = blockIdx.x * TileDim + threadIdx.x; int i_n = threadIdx.y; int j_m = threadIdx.x; // Loop along the matrix column. // Reduce to CU1DBLOCK / TileDim elements per column. Real tsum = Real(0); for (int block_i_n = 0; block_i_n < dim_N.rows; block_i_n += TileDim) { // Load, transpose and store M to shared mem. if (j_m < dim_N.rows) { # pragma unroll for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) { if (i_m + i < dim_N.cols) { smem.trans[threadIdx.x][threadIdx.y + i] = M[(i_m + i) * stride_M + j_m]; } } } __syncthreads(); // Load N, sum up the product. if (j_n < dim_N.cols) { # pragma unroll for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) { if (i_n + i < dim_N.rows) { tsum += N[(i_n + i) * dim_N.stride + j_n] * smem.trans[threadIdx.y + i][threadIdx.x]; } } } __syncthreads(); i_n += TileDim; j_m += TileDim; } smem.sum[tid] = tsum; __syncthreads(); // Tree reduce to 2x warpSize / TileDim elements per column. # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize && shift >= TileDim; shift >>= 1) { if (tid < shift) { smem.sum[tid] += smem.sum[tid + shift]; } __syncthreads(); } // Warp reduce to 1 element per column. // Threads implicitly synchronized within a warp. 
if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift >= TileDim; shift >>= 1) { smem.sum[tid] += smem.sum[tid + shift]; } } // output TileDim sums per thread block if (tid < TileDim && j_n < dim_N.cols) { v[j_n] = alpha * smem.sum[tid] + beta * v[j_n]; } } template<typename Real> __global__ static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y, Real beta, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) v[i] = alpha * x[i] * y[i] + beta * v[i]; } template<typename Real> __global__ static void _copy_col_from_mat_df(double* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (double) mat[index]; } template<typename Real> __global__ static void _copy_col_from_mat_fd(float* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (float) mat[index]; } template<typename Real> __global__ static void _vec_apply_exp(Real* v, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v[i] = exp(v[i]); } } template<typename Real> __global__ static void _vec_apply_log(Real* v, Real* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { if (v[i] < 0) { *flag = 1; return; } v[i] = log(v[i]); } } template<typename Real> __global__ static void _cuda_comp_obj_deriv(MatrixElement<Real> *x, int s, const Real* z, MatrixDim d, Real* z2, MatrixDim d2, Real* t) { int i = threadIdx.x; __shared__ Real tot_objf[CU1DBLOCK]; __shared__ Real tot_weight[CU1DBLOCK]; Real tmp_weight_sum = 0; Real tmp_tot_objf = 0; int size = s / CU1DBLOCK; //the least size in a loop (later part) int threshold = s - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if (i < threshold) { loop_start = i * (size + 1); loop_end = (i + 1) * (size + 1); } else { loop_start = threshold + i * size; loop_end = threshold + (i + 1) * size; } for (int j = loop_start; j < loop_end; j++) { //* ((int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )) ); int m = (x + j)->row; //*(int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )+ sizeof(int)); int label = (x + j)->column; // *(Real*) ((size_t)x + j*(2*sizeof(int) + sizeof(Real)) + 2*sizeof(int)); Real weight = (x + j)->weight; tmp_weight_sum += weight; Real this_prob = *(z + m * d.stride + label); tmp_tot_objf += weight * log(this_prob); // there might be problems here.... 
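      // One such possible problem: if two elements of x handled by different
      // threads share the same (row, column) pair, the unsynchronised "+=" below
      // is a data race; an atomic update would be needed to cover that case.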
*(z2 + m * d2.stride + label) += weight / this_prob; } tot_objf[i] = tmp_tot_objf; tot_weight[i] = tmp_weight_sum; __syncthreads(); *t = _sum_reduce(tot_objf); __syncthreads(); *(t + 1) = _sum_reduce(tot_weight); return; } template<typename Real> __global__ static void _cuda_vector_copy_elements(Real *data, int dim, const Real *src_mat, int mat_stride, bool transpose, const MatrixIndexT_cuda* elements) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= dim) return; int j = elements[i]; int mat_index; if (transpose) mat_index = i + j * mat_stride; else mat_index = j + i * mat_stride; data[i] = src_mat[mat_index]; } template<typename Real> __global__ static void _cuda_matrix_add_elements(Real *data, MatrixDim dim, Real alpha, MatrixElement<Real>* x, int num_elements) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= num_elements) return; data[x[i].row * dim.stride + x[i].column] += alpha * x[i].weight; } template<typename Real> __global__ static void _cuda_matrix_add_indexed_values(MatrixDim dim, Real alpha, const Int32Pair* indices, const Real* x, int s, Real* data) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= s) return; int data_i = indices[i].first * dim.stride + indices[i].second; data[data_i] += alpha * x[i]; } template<typename Real> __global__ static void _cuda_matrix_add_to_elements(Real alpha, Real* mat, MatrixDim dim, const MatrixIndexT_cuda* elements) { int row = blockIdx.x * blockDim.x + threadIdx.x; if (row < dim.rows) { int col = elements[row]; if (col >= 0) { int index = col + row * dim.stride; mat[index] += alpha; } } } template<typename Real> __global__ static void _matrix_lookup(const Real *data, MatrixDim dim, const Int32Pair *indices, int indices_size, Real *output) { int ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= indices_size) return; int data_ind = indices[ind].first * dim.stride + indices[ind].second; output[ind] = data[data_ind]; } template<typename Real> __global__ static void _equal_element_mask(const Real *mat1, const Real *mat2, Real *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row int32_cuda index_mat1 = i + j * mat1_dim.stride; int32_cuda index_mat2 = i + j * mat2_stride; int32_cuda index_mask = i + j * mask_stride; if (i < mat1_dim.cols && j < mat1_dim.rows) mask[index_mask] = (mat1[index_mat1] == mat2[index_mat2] ? 
1.0 : 0.0); } enum EnumTransformReduce { SUMAB, SUM, MAX, MIN, LINFNORM, L2NORM, L1NORM, L0NORM, LPNORM }; template<EnumTransformReduce TransReduceType, typename Real> struct TransReduceOp { __forceinline__ __device__ Real InitValue() const { return Real(0); } __forceinline__ __device__ Real Transform(const Real& x) const { return Real(0); } __forceinline__ __device__ Real Reduce(const Real& a, const Real& b) const { return Real(0); } __forceinline__ __device__ Real PostReduce(const Real& x, const Real& output) const { return Real(0); } }; template<typename Real> struct TransReduceOp<SUMAB, Real> { const Real alpha_; const Real beta_; TransReduceOp(const Real& a, const Real& b) : alpha_(a), beta_(b) { } __forceinline__ __device__ Real InitValue() const { return Real(0); } __forceinline__ __device__ Real Transform(const Real& x) const { return x; } __forceinline__ __device__ Real Reduce(const Real& a, const Real& b) const { return a + b; } __forceinline__ __device__ Real PostReduce(const Real& x, const Real& output) const { if (beta_ == Real(0)) { return alpha_ * x; } else { return alpha_ * x + beta_ * output; } } }; template<typename Real> struct TransReduceOp<SUM, Real> { __forceinline__ __device__ Real InitValue() const { return Real(0); } __forceinline__ __device__ Real Transform(const Real& x) const { return x; } __forceinline__ __device__ Real Reduce(const Real& a, const Real& b) const { return a + b; } __forceinline__ __device__ Real PostReduce(const Real& x, const Real& output) const { return x; } }; template<typename Real> struct TransReduceOp<MAX, Real> { __forceinline__ __device__ Real InitValue() const { return sizeof(Real) == sizeof(float) ? -CUDART_INF_F : -CUDART_INF; } __forceinline__ __device__ Real Transform(const Real& x) const { return x; } __forceinline__ __device__ Real Reduce(const Real& a, const Real& b) const { return fmax(a, b); } __forceinline__ __device__ Real PostReduce(const Real& x, const Real& output) const { return x; } }; template<typename Real> struct TransReduceOp<MIN, Real> { __forceinline__ __device__ Real InitValue() const { return sizeof(Real) == sizeof(float) ? 
CUDART_INF_F : CUDART_INF; } __forceinline__ __device__ Real Transform(const Real& x) const { return x; } __forceinline__ __device__ Real Reduce(const Real& a, const Real& b) const { return min(a, b); } __forceinline__ __device__ Real PostReduce(const Real& x, const Real& output) const { return x; } }; template<typename Real> struct TransReduceOp<LINFNORM, Real> { __forceinline__ __device__ Real InitValue() const { return Real(0); } __forceinline__ __device__ Real Transform(const Real& x) const { return abs(x); } __forceinline__ __device__ Real Reduce(const Real& a, const Real& b) const { return fmax(a, b); } __forceinline__ __device__ Real PostReduce(const Real& x, const Real& output) const { return x; } }; template<typename Real> struct TransReduceOp<L2NORM, Real> { __forceinline__ __device__ Real InitValue() const { return Real(0); } __forceinline__ __device__ Real Transform(const Real& x) const { return x * x; } __forceinline__ __device__ Real Reduce(const Real& a, const Real& b) const { return a + b; } __forceinline__ __device__ Real PostReduce(const Real& x, const Real& output) const { return sqrt(x); } }; template<typename Real> struct TransReduceOp<L1NORM, Real> { __forceinline__ __device__ Real InitValue() const { return Real(0); } __forceinline__ __device__ Real Transform(const Real& x) const { return abs(x); } __forceinline__ __device__ Real Reduce(const Real& a, const Real& b) const { return a + b; } __forceinline__ __device__ Real PostReduce(const Real& x, const Real& output) const { return x; } }; template<typename Real> struct TransReduceOp<L0NORM, Real> { __forceinline__ __device__ Real InitValue() const { return Real(0); } __forceinline__ __device__ Real Transform(const Real& x) const { return Real(x == Real(0) ? 0 : 1); } __forceinline__ __device__ Real Reduce(const Real& a, const Real& b) const { return a + b; } __forceinline__ __device__ Real PostReduce(const Real& x, const Real& output) const { return x; } }; template<typename Real> struct TransReduceOp<LPNORM, Real> { const Real power_; TransReduceOp(const Real& p) : power_(p) { } __forceinline__ __device__ Real InitValue() const { return Real(0); } __forceinline__ __device__ Real Transform(const Real& x) const { return pow(abs(x), power_); } __forceinline__ __device__ Real Reduce(const Real& a, const Real& b) const { return a + b; } __forceinline__ __device__ Real PostReduce(const Real& x, const Real& output) const { return pow(x, Real(1) / power_); } }; // Vector reduce. template<EnumTransformReduce TransReduceType, typename Real> __global__ static void _vec_transform_reduce( const Real* v, Real* result, const int dim, const int inc, const TransReduceOp<TransReduceType, Real> op) { __shared__ Real sdata[CU1DBLOCK]; Real tdata = op.InitValue(); const int tid = threadIdx.x; const int vec_len = dim * inc; const int grid_stride = gridDim.x * blockDim.x * inc; int i = (blockIdx.x * blockDim.x + tid) * inc; // Grid reduce. Loop over the whole vector v. for (; i < vec_len; i += grid_stride) { tdata = op.Reduce(tdata, op.Transform(v[i])); } sdata[tid] = tdata; __syncthreads(); // Tree reduce # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) { sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]); } __syncthreads(); } // Reduce last warp. Threads implicitly synchronized within a warp. if (tid < warpSize) { for (int shift = warpSize; shift > 0; shift >>= 1) { sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]); } } // Output to vector result. 
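  // One value per thread block is written to 'result'; when gridDim.x > 1 these
  // are per-block partial results. Note that PostReduce also sees the previous
  // contents of result[blockIdx.x] (used, for example, by the SUMAB op to apply
  // alpha and beta).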
if (tid == 0) result[blockIdx.x] = op.PostReduce(sdata[0], result[blockIdx.x]); } // Reduce a matrix 'mat' to a column vector 'result' template<EnumTransformReduce TransReduceType, typename Real> __global__ static void _transform_reduce_mat_cols( Real *result, const Real *mat, const MatrixDim d, const TransReduceOp<TransReduceType, Real> op) { __shared__ Real sdata[CU1DBLOCK]; const int tid = threadIdx.x; const int i = blockIdx.x; const int row_start = i * d.stride; Real tdata = op.InitValue(); for (int j = tid; j < d.cols; j += CU1DBLOCK) { tdata = op.Reduce(tdata, op.Transform(mat[row_start + j])); } sdata[tid] = tdata; __syncthreads(); // Tree reduce # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]); __syncthreads(); } // Reduce last warp. Threads implicitly synchronized within a warp. if (tid < warpSize) { for (int shift = warpSize; shift > 0; shift >>= 1) sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]); } // Output to vector result. if (tid == 0) { result[i] = op.PostReduce(sdata[0], result[i]); } } template<EnumTransformReduce TransReduceType, typename Real> __global__ static void _group_transform_reduce( Real *y, const Real *x, const MatrixDim d, const int src_stride, const int group_size, const TransReduceOp<TransReduceType, Real> op) { __shared__ Real sreduction[CU1DBLOCK]; const int i = blockIdx.x; const int x_start = i * src_stride; const int y_start = i * d.stride; const int threads_per_group = blockDim.x; // Reduce n groups per thread block const int n = blockDim.y; const int len = group_size * n; // linear thread id const int tid = threadIdx.y * threads_per_group + threadIdx.x; int j = threadIdx.y * group_size + threadIdx.x; // col-id of *x int group_id = threadIdx.y; // col-id of *y int group_end = x_start + (group_id + 1) * group_size; while (group_id < d.cols) { // reduce to threads_per_group elements per group int x_idx = x_start + j; Real treduction = op.Transform(x[x_idx]); x_idx += threads_per_group; while (x_idx < group_end) { treduction = op.Reduce(treduction, op.Transform(x[x_idx])); x_idx += threads_per_group; } sreduction[tid] = treduction; if (threads_per_group > warpSize) { __syncthreads(); } // tree-reduce to 2x warpSize elements per group # pragma unroll for (int shift = threads_per_group / 2; shift > warpSize; shift >>= 1) { if (threadIdx.x < shift) { sreduction[tid] = op.Reduce(sreduction[tid], sreduction[tid + shift]); } __syncthreads(); } // Warp-reduce to 1 element per group. // Threads implicitly synchronized within the warp. const int warp_reduce_size = threads_per_group / 2 < warpSize ? threads_per_group / 2 : warpSize; if (threadIdx.x < warp_reduce_size) { # pragma unroll for (int shift = warp_reduce_size; shift > 0; shift >>= 1) { sreduction[tid] = op.Reduce(sreduction[tid], sreduction[tid + shift]); } } // Store the result. 
if (threadIdx.x == 0) { y[y_start + group_id] = op.PostReduce(sreduction[tid], y[y_start + group_id]); } j += len; group_end += len; group_id += n; } } template<typename Real> __global__ static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) { if (v[i] < floor_val) { v[i] = floor_val; count[i] = 1; } else { count[i] = 0; } } } template<typename Real> __global__ static void _vec_apply_ceiling(Real *v, Real ceiling_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) { if (v[i] > ceiling_val) { v[i] = ceiling_val; count[i] = 1; } else { count[i] = 0; } } } template<typename Real> __global__ static void _apply_pow(Real* mat, Real power, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * d.stride; if (i < d.cols && j < d.rows) { if (power == 1.0) return; if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { if (!(mat[index] >= 0.0)) return; mat[index] = sqrt(mat[index]); } else { mat[index] = pow(mat[index], power); } } } template<typename Real> __global__ static void _apply_pow_abs(Real* mat, Real power, bool include_sign, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * d.stride; if (i < d.cols && j < d.rows) { if (include_sign == true && mat[index] < 0) { if (power == 1.0) mat[index] = -std::abs(mat[index]); if (power == 2.0) { mat[index] = -mat[index] * mat[index]; } else if (power == 0.5) { mat[index] = -sqrt(std::abs(mat[index])); } else { mat[index] = -pow(std::abs(mat[index]), power); } } else { if (power == 1.0) mat[index] = std::abs(mat[index]); if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { mat[index] = sqrt(std::abs(mat[index])); } else if (power < 0.0 && mat[index] == 0.0) { mat[index] = 0.0; } else { mat[index] = pow(std::abs(mat[index]), power); } } } } template<typename Real> __global__ static void _apply_heaviside(Real* mat, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = (mat[index] > 0.0 ? 
1.0 : 0.0); } template<typename Real> __global__ static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * d.stride; if (i < d.cols && j < d.rows) { mat[index] = max(mat[index], floor_val); } } template<typename Real> __global__ static void _copy_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int index = reorder[i], dst_index = j * dst_dim.stride + i; if (index >= 0) { int src_index = j * src_stride + reorder[i]; Real val = src[src_index]; dst[dst_index] = val; } else { dst[dst_index] = 0.0; } } } template<typename Real> __global__ static void _add_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int index = reorder[i], dst_index = j * dst_dim.stride + i; if (index >= 0) { int src_index = j * src_stride + index; Real val = src[src_index]; dst[dst_index] += val; } } } template<typename Real> __global__ static void _copy_rows(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int index = reorder[j], dst_index = j * dst_dim.stride + i; if (index >= 0) { int src_index = reorder[j] * src_stride + i; Real val = src[src_index]; dst[dst_index] = val; } else { dst[dst_index] = 0; } } } template<typename Real> __global__ static void _copy_rows(Real* dst, const Real * const *src, MatrixDim dst_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int dst_index = j * dst_dim.stride + i; const Real *pointer = src[j]; if (pointer != NULL) { dst[dst_index] = pointer[i]; } else { dst[dst_index] = 0; } } } template<typename Real> __global__ static void _copy_to_rows(Real* const * dst, const Real *src, MatrixDim src_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < src_dim.cols && j < src_dim.rows) { Real *pointer = dst[j]; if (pointer != NULL) { pointer[i] = src[j * src_dim.stride + i]; } } } template<typename Real> __global__ static void _add_rows(Real alpha, Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int dst_index = j * dst_dim.stride + i; if (reorder[j] >= 0) { int src_index = reorder[j] * src_stride + i; dst[dst_index] += alpha * src[src_index]; } } } template<typename Real> __global__ static void _mul_rows(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int dst_index = j * dst_dim.stride + 
i; if (reorder[j] >= 0) { int src_index = reorder[j] * src_stride + i; dst[dst_index] *= src[src_index]; } } } template<typename Real> __global__ static void _add_rows(Real alpha, Real* dst, const Real * const *src, MatrixDim dst_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int dst_index = j * dst_dim.stride + i; if (src[j] != NULL) { dst[dst_index] += alpha * src[j][i]; } } } template<typename Real> __global__ static void _add_to_rows(Real alpha, Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim src_dim, int dst_stride) { int c = blockIdx.x * blockDim.x + threadIdx.x; // col index int r = blockIdx.y * blockDim.y + threadIdx.y; // row index if (c < src_dim.cols && r < src_dim.rows) { int src_index = r * src_dim.stride + c; if (reorder[r] >= 0) { int dst_index = reorder[r] * dst_stride + c; dst[dst_index] += alpha * src[src_index]; } } } template<typename Real> __global__ static void _add_to_rows(Real alpha, Real* const * dst, const Real *src, MatrixDim src_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < src_dim.cols && j < src_dim.rows) { if (dst[j] != NULL) { dst[j][i] += alpha * src[j * src_dim.stride + i]; } } } template<typename Real> __global__ static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows) { mat[index] = min(mat[index], ceiling_val); } } template<typename Real> __global__ static void _invert_elements(Real* data, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows) data[index] = 1.0 / data[index]; } // matrix-wise, do data = alpha * data + beta * A * B^T, // where B is a block matrix. template<typename Real> __global__ static void _add_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &cu_data = B_cu_data[j]; // BT means B transposed. int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset, BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols = cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride; // Cast from void; const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < BT_num_cols; k++) { const Real *this_BT_col = B_data + k * BT_col_stride; const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride; // this_A_row points to the element A[i][BT_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B. 
sum += this_BT_col[l] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + BT_col_start); data[index] = alpha * sum + beta * data[index]; } } template<typename Real> __global__ static void _add_mat_blockmat(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &block_data = B_cu_data[j]; int B_row_start = block_data.row_offset, B_col_start = block_data.col_offset, B_num_rows = block_data.matrix_dim.rows, B_num_cols = block_data.matrix_dim.cols, B_row_stride = block_data.matrix_dim.stride; // Cast from void; const Real *B_data = static_cast<Real*>(block_data.matrix_data); // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < B_num_cols; k++) { const Real *this_B_col = B_data + k; const Real *this_A_row = A_data + i * A_row_stride + B_row_start * A_col_stride; // this_A_row points to the element A[i][B_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < B_num_rows; l++) // l indexes rows of B. sum += this_B_col[l * B_row_stride] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + B_col_start); data[index] = alpha * sum + beta * data[index]; } } // For a block matrix B, does B = alpha * C * D + beta * B. // the (x,y,z) indices are the block index, then the row // and column indices within the block. Note: transposition of C and D // is handled by swapping the (num_rows,num_cols) and (row_stride,col_stride), // so it's invisible to this code. The num-cols and num-rows of C and D // are only provided to the extent that they are not already determined // by other quantities. template<typename Real> __global__ static void _block_add_mat_mat(CuBlockMatrixData *B_cu_data, int num_blocks, const Real *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const Real *D_data, int D_row_stride, int D_col_stride, Real alpha, Real beta) { int b = blockIdx.x * blockDim.x + threadIdx.x; // block-index into B. int i = blockIdx.y * blockDim.y + threadIdx.y; // row-index into b'th block int j = blockIdx.z * blockDim.z + threadIdx.z; // col-index into b'th block if (b >= num_blocks) return; const CuBlockMatrixData &block_data = B_cu_data[b]; if (i >= block_data.matrix_dim.rows || j >= block_data.matrix_dim.cols) return; // we're outside the dimensions of the b'th block. // B_elem is the element of B we're writing to. Real *B_elem = reinterpret_cast<Real*>(block_data.matrix_data) + i * block_data.matrix_dim.stride + j; Real B_val = *B_elem; // B_row and B_col are the (row, col) index into the full matrix B. 
int B_row = block_data.row_offset + i, B_col = block_data.col_offset + j; const Real *C_row_data = C_data + C_row_stride * B_row, *D_col_data = D_data + D_col_stride * B_col; Real sum = 0.0; for (int k = 0; k < C_num_cols; k++) { sum += C_row_data[k * C_col_stride] * D_col_data[k * D_row_stride]; } *B_elem = alpha * sum + beta * B_val; } template<typename Real> __global__ static void _blockadd_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &cu_data = B_cu_data[j]; // BT means B transposed. int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset, BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols = cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride; // Cast from void; const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < BT_num_cols; k++) { const Real *this_BT_col = B_data + k * BT_col_stride; const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride; // this_A_row points to the element A[i][BT_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B. sum += this_BT_col[l] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + BT_col_start); data[index] = alpha * sum + beta * data[index]; } } template<typename Real> __global__ static void _sum_column_ranges(Real *data, MatrixDim dim, const Real *src_data, MatrixDim src_dim, const Int32Pair *indices) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if (row >= dim.rows || col >= dim.cols) return; int dst_index = row * dim.stride + col, src_start_index = row * src_dim.stride + indices[col].first, src_end_index = row * src_dim.stride + indices[col].second; Real sum = 0.0; for (int index = src_start_index; index < src_end_index; index++) sum += src_data[index]; data[dst_index] = sum; } template<typename Real> __global__ static void _add_row_ranges(Real *data, MatrixDim dim, const Real *src_data, MatrixDim src_dim, const Int32Pair *indexes) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if (row >= dim.rows || col >= dim.cols) return; int dst_index = row * dim.stride + col; int src_index_start = indexes[row].first, src_index_end = indexes[row].second; for (int row_index = src_index_start; row_index < src_index_end; row_index++) data[dst_index] += src_data[row_index * src_dim.stride + col]; } template<typename Real> __global__ static void _soft_hinge(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j * d.stride, src_index = i + j * src_stride; // compute the function y[index] = log(1 + exp(x[index])) if (i < d.cols && j < d.rows) { Real val = x[src_index], result; if (val >= 10.0) result = val; // function approaches y=x as x gets large else result = log1p(exp(val)); y[dst_index] = result; } } template<typename Real> __global__ static void _group_pnorm(Real *y, const Real *x, MatrixDim d, int src_stride, int 
group_size, Real power) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols) { int dst_index = i + j * d.stride; Real tmp = 0; int src_begin_index = i * group_size + j * src_stride; int src_end_index = src_begin_index + group_size; for (int src_index = src_begin_index; src_index < src_end_index; src_index++) { tmp += pow(std::abs(x[src_index]), power); } tmp = pow(tmp, Real(1.0 / power)); if (!isnan(tmp)) { y[dst_index] = tmp; } else { Real max_value = x[src_begin_index], min_value = max_value; for (int src_index = src_begin_index + 1; src_index < src_end_index; src_index++) { if (x[src_index] > max_value) max_value = x[src_index]; if (x[src_index] < min_value) min_value = x[src_index]; } tmp = 0.0; // let max_value be the largest abs(value) Real max_abs_value = (max_value > -min_value ? max_value : -min_value); if (max_abs_value == 0) { y[dst_index] = 0.0; } else { for (int src_index = src_begin_index; src_index < src_end_index; src_index++) { Real x_scaled = x[src_index] / max_abs_value; tmp += pow(std::abs(x_scaled), Real(power)); } y[dst_index] = pow(tmp, Real(1.0 / power)) * max_abs_value; } } } } /* * cu:: */ template<typename Real> __global__ static void _sigmoid(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j * d.stride, src_index = i + j * src_stride; if (i < d.cols && j < d.rows) { Real res = 1.0 / (1.0 + exp(-x[src_index])); y[dst_index] = res; } } template<typename Real> __global__ static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j * d.stride; int e_index = i + j * e_stride; int y_index = i + j * y_stride; if (i < d.cols && j < d.rows) eout[dst_index] = y[y_index] * (1.0 - y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _tanh(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j * d.stride, src_index = i + j * src_stride; if (i < d.cols && j < d.rows) { Real exp_2x = exp(2.0 * x[src_index]); Real res; if (isinf(exp_2x)) { res = 1.0; } else { res = (exp_2x - 1.0) / (exp_2x + 1.0); } y[dst_index] = res; } } template<typename Real> __global__ static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j * d.stride; int e_index = i + j * e_stride; int y_index = i + j * y_stride; if (i < d.cols && j < d.rows) eout[dst_index] = (1.0 - y[y_index] * y[y_index]) * e[e_index]; } /* This function copies x to y while bounding the elements away from zero using the scalar function: y = x if x <= -epsilon or x >= +epsilon +epsilon if 0 <= x < epsilon -epsilon if -epsilon < x < 0. where: x is the source matrix, of dimension and stride given by d epsilon > 0 y is the destination matrix, with the num-rows and num-cols given by d, but stride given by y_stride. 
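As a quick worked illustration of the mapping above (the value epsilon = 0.1 is chosen here purely for the example and is not taken from any caller): x = 0.25 -> y = 0.25; x = 0.05 -> y = +0.1; x = -0.02 -> y = -0.1; x = -0.3 -> y = -0.3.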
*/ template<typename Real> __global__ static void _ensure_nonzero(const Real *x, MatrixDim d, Real epsilon, int y_stride, Real *y) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int x_index = i + j * d.stride, y_index = i + j * y_stride; if (i < d.cols && j < d.rows) { Real src = x[x_index], dst; if (src <= -epsilon || src >= epsilon) dst = src; else if (src >= 0) dst = epsilon; else dst = -epsilon; __syncthreads(); // This allows it to do consolidated write below, which // should improve speed. y[y_index] = dst; } } template<typename Real> __global__ static void _parametric_relu(Real* y, const Real* x, MatrixDim d, int src_stride, const Real* a, const Real* b) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j * d.stride, src_index = i + j * src_stride; if (i < d.cols && j < d.rows) { Real res = (x[src_index] > 0.0) ? a[i] * x[src_index] : b[i] * x[src_index]; y[dst_index] = res; } } template<typename Real> __global__ static void _diff_parametric_relu(Real* eout, const Real* e, const Real* y, MatrixDim d, int e_stride, int y_stride, const Real* a, const Real* b) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j * d.stride; int e_index = i + j * e_stride; int y_index = i + j * y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = (y[y_index] > 0.0 ? a[i] * e[e_index] : b[i] * e[e_index]); } template<typename Real> __global__ static void _heaviside(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j * d.stride, src_index = i + j * src_stride; if (i < d.cols && j < d.rows) { Real res = (x[src_index] > 0.0 ? 1.0 : 0.0); y[dst_index] = res; } } template<typename Real> __global__ static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) { __shared__ Real smem[CU1DBLOCK]; const int i = blockIdx.x; const int x_start = i * src_stride; const int y_start = i * d.stride; const int tid = threadIdx.x; // find max element of the row // reduce to CU1DBLOCK elements per row. Real tmax = sizeof(Real) == sizeof(float) ? -CUDART_INF_F : -CUDART_INF; for (int j = tid; j < d.cols; j += CU1DBLOCK) { tmax = fmax(tmax, x[x_start + j]); } smem[tid] = tmax; __syncthreads(); // reduce to 2x warpSize elements per row # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) { smem[tid] = fmax(smem[tid], smem[tid + shift]); } __syncthreads(); } // reduce to 1 element per row if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift > 0; shift >>= 1) { smem[tid] = fmax(smem[tid], smem[tid + shift]); } } // broadcast max to all threads __syncthreads(); Real max = smem[0]; // sum_j(exp(x(i,j)-max)) // reduce to CU1DBLOCK elements per row. 
Real tsum = Real(0); for (int j = tid; j < d.cols; j += CU1DBLOCK) { tsum += exp(x[x_start + j] - max); } smem[tid] = tsum; __syncthreads(); // reduce to 2x warpSize elements per row # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) { smem[tid] += smem[tid + shift]; } __syncthreads(); } // reduce to 1 element per row if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift > 0; shift >>= 1) { smem[tid] += smem[tid + shift]; } } // broadcast sum to all threads __syncthreads(); Real inv_sum = Real(1) / smem[0]; // normalize the row for (int j = tid; j < d.cols; j += CU1DBLOCK) { y[y_start + j] = exp(x[x_start + j] - max) * inv_sum; } } // The output y_i = scale * x_i, // and we want to RMS value of the y_i to equal target_rms, // so y^t y = D * target_rms^2 (if y is one row of the input). // we need to have scale = 1.0 / sqrt(x^t x / (D * target_rms^2)). // there is also flooring involved, to avoid division-by-zero // problems. It's important for the backprop, that the floor's // square root is exactly representable as float. // If add_log_stddev is true, log(max(epsi, sqrt(x^t x / D))) // is an extra dimension of the output. // // 1D grid is used. Each 256-thread block works on 1 row of the data matrix. // The block is also of 1D. Strided memory access is used if the length of the // row is longer than 256. template<typename Real> __global__ static void _normalize_per_row(Real *y, int y_stride, const Real *x, MatrixDim x_d, Real target_rms, bool add_log_stddev) { const int i = blockIdx.x; const int tid = threadIdx.x; const Real* x_row = x + i * x_d.stride; __shared__ Real ssum[CU1DBLOCK]; // Reduce x_j^2 to CU1DBLOCK elements per row Real tsum = Real(0); for (int j = tid; j < x_d.cols; j += CU1DBLOCK) { tsum += x_row[j] * x_row[j]; } ssum[tid] = tsum; __syncthreads(); // Tree reduce to 2x warpSize elements per row # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) ssum[tid] += ssum[tid + shift]; __syncthreads(); } // Reduce last warp to 1 element per row. // Threads implicitly synchronized within a warp. if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift > 0; shift >>= 1) { ssum[tid] += ssum[tid + shift]; } } const Real kSquaredNormFloor = 1.3552527156068805425e-20; // 2^-66 if (tid == 0) { ssum[0] = sqrt( fmax(ssum[0] / (target_rms * target_rms * x_d.cols), kSquaredNormFloor)); } // Broadcast floored stddev to all threads. 
__syncthreads(); const Real stddev_div_target_rms = ssum[0]; const Real scale = Real(1) / stddev_div_target_rms; // Store normalized input to output Real* y_row = y + i * y_stride; for (int j = tid; j < x_d.cols; j += CU1DBLOCK) { y_row[j] = x_row[j] * scale; } if (tid == 0 && add_log_stddev) { y_row[x_d.cols] = log(stddev_div_target_rms * target_rms); } } template<typename Real> __global__ static void _diff_normalize_per_row(Real *id, int id_stride, const Real *iv, MatrixDim iv_dim, const Real* od, int od_stride, Real target_rms, bool add_log_stddev) { const Real kSquaredNormFloor = 1.3552527156068805425e-20; // 2^-66 const Real kInvNormFloor = 8589934592.0; const int tid = threadIdx.x; const int i = blockIdx.x; const Real* iv_row = iv + i * iv_dim.stride; const Real* od_row = od + i * od_stride; // reduce to CU1DBLOCK elements per row Real dot_products = Real(0); Real in_norm = Real(0); for (int j = tid; j < iv_dim.cols; j += CU1DBLOCK) { const Real iv_ij = iv_row[j]; dot_products += iv_ij * od_row[j]; in_norm += iv_ij * iv_ij; } __shared__ Real sprod[CU1DBLOCK]; __shared__ Real snorm[CU1DBLOCK]; sprod[tid] = dot_products; snorm[tid] = in_norm; __syncthreads(); // reduce to 2x warpSize elements per row # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) { sprod[tid] += sprod[tid + shift]; snorm[tid] += snorm[tid + shift]; } __syncthreads(); } // reduce to 1 element per row if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift > 0; shift >>= 1) { sprod[tid] += sprod[tid + shift]; snorm[tid] += snorm[tid + shift]; } } // broadcast the sum results __syncthreads(); dot_products = sprod[0]; in_norm = snorm[0]; Real log_stddev_deriv; if (add_log_stddev) { log_stddev_deriv = Real(1) / max(in_norm, iv_dim.cols * kSquaredNormFloor) * od_row[iv_dim.cols]; } const Real inv_d_scaled = Real(1) / (iv_dim.cols * target_rms * target_rms); in_norm = Real(1) / sqrt(max(in_norm * inv_d_scaled, kSquaredNormFloor)); const Real f = in_norm == kInvNormFloor ? Real(0) : in_norm; dot_products *= f * f * f * inv_d_scaled; for (int j = tid; j < iv_dim.cols; j += CU1DBLOCK) { const Real iv_ij = iv_row[j]; Real id_ij = id[i * id_stride + j]; if (add_log_stddev) { id_ij += log_stddev_deriv * iv_ij; } if (id != od) { id_ij += in_norm * od_row[j]; } else { id_ij *= in_norm; } id_ij -= dot_products * iv_ij; id[i * id_stride + j] = id_ij; } } // Per-row log-softmax operation on 'x', with writing to 'y'. // note, x and y may point to the same memory. This is equivalent to setting // matrix y to matrix x and then, for each row of y, subtracting the offset that // will make exp(y.row[j]) sum to 1 for each row j. // // It expects to be called with CU1DBLOCK threads. // The number of blocks [i.e. the gridDim] equals to y_dim.rows, // so one block of threads processes each row. x and y are // expected to have the same dimension, but possibly different row strides. template<typename Real> __global__ static void _log_softmax_reduce(Real* y, const Real* x, MatrixDim y_dim, int x_stride) { __shared__ Real smem[CU1DBLOCK]; const int i = blockIdx.x; const int x_start = i * x_stride; const int y_start = i * y_dim.stride; const int tid = threadIdx.x; // find max element of the row // reduce to CU1DBLOCK elements per row. 
Real tmax = -1e20; for (int j = tid; j < y_dim.cols; j += CU1DBLOCK) { tmax = fmax(tmax, x[x_start + j]); } smem[tid] = tmax; __syncthreads(); // reduce to 2x warpSize elements per row # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) { smem[tid] = fmax(smem[tid], smem[tid + shift]); } __syncthreads(); } // reduce to 1 element per row if (tid < warpSize) { for (int shift = warpSize; shift > 0; shift >>= 1) { smem[tid] = fmax(smem[tid], smem[tid + shift]); } } // broadcast max to all threads __syncthreads(); Real max = smem[0]; // sum_j(exp(x(i,j)-max)) // reduce to CU1DBLOCK elements per row. Real tsum = Real(0); for (int j = tid; j < y_dim.cols; j += CU1DBLOCK) { tsum += exp(x[x_start + j] - max); } smem[tid] = tsum; __syncthreads(); // reduce to 2x warpSize elements per row # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) { smem[tid] += smem[tid + shift]; } __syncthreads(); } // reduce to 1 element per row if (tid < warpSize) { for (int shift = warpSize; shift > 0; shift >>= 1) { smem[tid] += smem[tid + shift]; } } // broadcast sum to all threads __syncthreads(); Real log_sum = log(smem[0]); // normalize the row for (int j = tid; j < y_dim.cols; j += CU1DBLOCK) { y[y_start + j] = x[x_start + j] - max - log_sum; } } template<typename Real> __global__ static void _splice(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d_out.stride; if (i < d_out.cols && j < d_out.rows) { int32_cuda src_col = i % d_in.cols; int32_cuda src_row = j + off[i / d_in.cols]; if (src_row < 0) src_row = 0; if (src_row >= d_in.rows) src_row = d_in.rows - 1; y[index] = x[src_col + src_row * d_in.stride]; } } template<typename Real> __global__ static void _take_mean(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index1 = i + j * d_in.stride; int32_cuda index2 = j + i * d_in.stride; if (i <= j && j < d_in.rows) { int32_cuda index_sp = (j * (j + 1) / 2) + i; y[index_sp] = 0.5 * (x[index1] + x[index2]); } } template<typename Real> __global__ static void _take_lower(const Real* x, Real* y, MatrixDim d_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j > i || i >= d_in.rows) return; int index = i * d_in.stride + j; Real val = x[index]; int index_sp = (i * (i + 1) / 2) + j; y[index_sp] = val; } template<typename Real> __global__ static void _take_upper(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j < i || j >= d_in.rows) return; int32_cuda index = i * d_in.stride + j; int32_cuda index_sp = (j * (j + 1) / 2) + i; y[index_sp] = x[index]; } template<typename Real> __global__ static void _vec_copy_diag_from_packed(Real* y, const Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i + 1) * (i + 2) / 2) - 1; if (i < dim) { y[i] = x[index]; } } template<typename Real> __global__ static void _copy_from_sp(const Real* x, Real* y, MatrixDim dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // column index int j = blockIdx.y * blockDim.y + threadIdx.y; // if (i < dim.cols && j < dim.rows) { int dst_index 
= i + j * dim.stride, src_index; if (j <= i) { // no transpose src_index = (i * (i + 1) / 2) + j; } else { // transpose. src_index = (j * (j + 1) / 2) + i; } y[dst_index] = x[src_index]; } } template<typename Real> __global__ static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d_out.stride; if (i < d_out.cols && j < d_out.rows) { int32_cuda src_col = copy_from[i]; if (src_col >= 0 && src_col < d_in.cols) { y[index] = x[src_col + j * d_in.stride]; } else { y[index] = 1.0 / 0.0; } } } template<typename Real> __global__ static void _one(Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) { x[i] = 1.0; } } template<typename Real> __global__ static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d_out.stride; if (i < d_out.cols && j < d_out.rows) { int32_cuda src_row = copy_from[j]; y[index] = x[i + src_row * d_in.stride]; } } template<typename Real> __global__ static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d, int stride_grad) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride, grad_index = i + j * stride_grad; if (i < d.cols && j < d.rows) { if (wei[index] == 0.0) return; //skip L1 if zero weight! Real l1_signed = l1; if (wei[index] < 0.0) //flip sign l1_signed = -l1; Real before = wei[index]; //simulate update Real after = wei[index] - lr * grad[grad_index] - l1_signed; if ((after > 0.0) ^ (before > 0.0)) { //sign changed? wei[index] = 0.0; grad[grad_index] = 0.0; } else { wei[index] -= l1_signed; } } } template<typename Real> __global__ static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, MatrixDim d) { const int32_cuda i = blockIdx.x; const int32_cuda base = i * d.stride; const int32_cuda tid = threadIdx.x; __shared__ Real smax[CU1DBLOCK]; __shared__ int32_cuda sidx[CU1DBLOCK]; Real tmax = -1e20; int32_cuda tidx = -1; // Loop over blocks for coalesced memory access. 
for (int32_cuda j = tid; j < d.cols; j += CU1DBLOCK) { const Real val = mat[base + j]; if (val > tmax) { tmax = val; tidx = j; } } smax[tid] = tmax; sidx[tid] = tidx; // Parallel reduce #pragma unroll for (int32_cuda num_working_threads = CU1DBLOCK / 2; num_working_threads >= warpSize; num_working_threads >>= 1) { __syncthreads(); if (tid < num_working_threads) { if (smax[tid + num_working_threads] > smax[tid]) { smax[tid] = smax[tid + num_working_threads]; sidx[tid] = sidx[tid + num_working_threads]; } } } // Warp reduce without __syncthreads() // (note.: synchronizes implicitly within a warp at the multiprocessor) if (tid < warpSize / 2) { #pragma unroll for (int32_cuda num_working_threads = warpSize / 2; num_working_threads > 0; num_working_threads >>= 1) { if (smax[tid + num_working_threads] > smax[tid]) { smax[tid] = smax[tid + num_working_threads]; sidx[tid] = sidx[tid + num_working_threads]; } } } if (tid == 0) { if (vec_val) { vec_val[i] = smax[0]; } vec_id[i] = sidx[0]; } } template<typename Real> __global__ static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out, Real* vec_log_post, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if (i > 0) return; if (j < d.rows) { int32_cuda index = vec_tgt[j] + j * d.stride; Real value = mat_net_out[index]; if (value < 1e-20) value = 1e-20; vec_log_post[j] = log(value); mat_net_out[index] -= 1.0; } } template<typename Real> __global__ static void _diff_softmax(Real* x, const MatrixDim dim, const Real* value, const int value_stride, const Real* diff, const int diff_stride) { __shared__ Real ssum[CU1DBLOCK]; const int tid = threadIdx.x; const int i = blockIdx.x; const int value_start = i * value_stride; const int diff_start = i * diff_stride; const int x_start = i * dim.stride; // Loop along the matrix row. Reduce to CU1DBLOCK elements per row. Real tsum = Real(0); for (int j = tid; j < dim.cols; j += CU1DBLOCK) { tsum += value[value_start + j] * diff[diff_start + j]; } ssum[tid] = tsum; __syncthreads(); // Tree reduce to 2x warpSize elements. # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) { ssum[tid] += ssum[tid + shift]; } __syncthreads(); } // Warp reduce to 1 element. Threads implicitly synchronized within a warp. if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift > 0; shift >>= 1) { ssum[tid] += ssum[tid + shift]; } } // Broadcast result to all threads __syncthreads(); const Real pe = ssum[0]; // Apply element-wise x = value * (diff - pe) for (int j = tid; j < dim.cols; j += CU1DBLOCK) { x[x_start + j] = value[value_start + j] * (diff[diff_start + j] - pe); } } // Differentiate backward through the log softmax function. // "out_value" is the log softmax output. Does, for each row i, // in_deriv(i) = out_deriv(i) - sum(out_deriv(i)) .* exp(out_value(i)) // ???(i) is row-vector. // CUDA thread layout: 1 thread block (CU1DBLOCK == 256 threads) per matrix-row. template<typename Real> __global__ static void _diff_log_softmax(const MatrixDim in_deriv_dim, const Real* out_value, const int out_value_stride, const Real* out_deriv, const int out_deriv_stride, Real* in_deriv) { __shared__ Real ssum[CU1DBLOCK]; const int tid = threadIdx.x; const int i = blockIdx.x; const int out_value_start = i * out_value_stride; const int out_deriv_start = i * out_deriv_stride; const int in_deriv_start = i * in_deriv_dim.stride; // Loop along the matrix row. Reduce to CU1DBLOCK elements per row. 
Real tsum = Real(0); for (int j = tid; j < in_deriv_dim.cols; j += CU1DBLOCK) { tsum += out_deriv[out_deriv_start + j]; } ssum[tid] = tsum; __syncthreads(); // Tree reduce to 2x warpSize elements. # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) { ssum[tid] += ssum[tid + shift]; } __syncthreads(); } // Warp reduce to 1 element. Threads implicitly synchronized within a warp. if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift > 0; shift >>= 1) { ssum[tid] += ssum[tid + shift]; } } // Broadcast result to all threads __syncthreads(); const Real sum_e = ssum[0]; // Apply element-wise x = out_deriv - exp(value) * sum_e for (int j = tid; j < in_deriv_dim.cols; j += CU1DBLOCK) { in_deriv[in_deriv_start + j] = out_deriv[out_deriv_start + j] - exp(out_value[out_value_start + j]) * sum_e; } } /** this function computes the core part of the LSTM nonlinearity. @param [in] in A matrix, of dimension num_rows by 5*cell_dim (i.e. its num-cols must be a multiple of 5). The column-space is interpreted as 5 consecutive blocks, each of dimension cell_dim, which we name: (i_part, f_part, c_part, o_part, c_{t-1}). If 'have_dropout_mask' is nonzero, each row of 'in' will have 3 extra elements, interpreted as dropout masks/scales for i_t, f_t and o_t. @param [in] params A matrix, of dimension 3 by cell_dim, with rows containing the 3 diagonal parameter matrices used in LSTMs, namely w_{ic}, w_{fc} and w_{oc}. @param [out] out A matrix, of dimension num_rows by 2*cell_dim. The quantities c_t and m_t respectively are put there (in two blocks of column-dimension cell_dim), according to the following equations: i_t = Sigmoid(i_part + w_{ic}*c_{t-1}) f_t = Sigmoid(f_part + w_{fc}*c_{t-1}) c_t = f_t*c_{t-1} + i_t * Tanh(c_part) o_t = Sigmoid(o_part + w_{oc}*c_t) m_t = o_t * Tanh(c_t) We use 1D thread block with CU1DBLOCK threads. It works best when cell_dim is a multiple of CU1DBLOCK. We use 1d Grid. Each block is working on one row of the in and out matrices. */ template<typename Real> __global__ static void _lstm_nonlinearity(const Real* in, const int in_stride, const Real* params, const int params_stride, const int out_stride, const int cell_dim, const int have_dropout_mask, const int num_rows, Real* out) { const int tid = threadIdx.x; const int i = blockIdx.x; const Real* i_part = in + i * in_stride; const Real* f_part = in + i * in_stride + cell_dim; const Real* c_part = in + i * in_stride + cell_dim * 2; const Real* o_part = in + i * in_stride + cell_dim * 3; const Real* c_tm1 = in + i * in_stride + cell_dim * 4; const Real* w_ic = params; const Real* w_fc = params + params_stride; const Real* w_oc = params + params_stride * 2; Real* c_t = out + i * out_stride; Real* m_t = out + i * out_stride + cell_dim; Real i_scale = (have_dropout_mask ? in[i * in_stride + cell_dim * 5] : 1), f_scale = (have_dropout_mask ? in[i * in_stride + cell_dim * 5 + 1] : 1), o_scale = (have_dropout_mask ? in[i * in_stride + cell_dim * 5 + 2] : 1); for (int j = tid; j < cell_dim; j += CU1DBLOCK) { Real c_tm1_j = c_tm1[j]; Real i_t_j = Real(1) / (Real(1) + exp(-i_part[j] - w_ic[j] * c_tm1_j)); Real f_t_j = Real(1) / (Real(1) + exp(-f_part[j] - w_fc[j] * c_tm1_j)); Real c_t_j = f_t_j * f_scale * c_tm1_j + i_t_j * i_scale * tanh(c_part[j]); Real o_t_j = Real(1) / (Real(1) + exp(-o_part[j] - w_oc[j] * c_t_j)); c_t[j] = c_t_j; m_t[j] = o_t_j * o_scale * tanh(c_t_j); } } /** This function does the 'backward' pass corresponding to the function ComputeLstmNonlinearity. 
It's a little more complicated than you might expect because of the 'self-repair' mechanism that we use to prevent the sigmoid and tanh nonlinearities from oversaturating, and because of the average-activation and average-derivative stats that we store for these nonlinearities (these stats are used both to control the self-repair mechanism, and for diagnostic purposes). Because the forward pass computes various intermediate values that are not output, this function has to do the same computations as the forward pass before it actually does the backprop. In the following description, `C` is for `cell_dim`, `N` is for `num_rows`. @param [in] input The same as in ComputeLstmNonlinearity(). A matrix, of dimension N by 5C (i.e. its num-cols must be a multiple of 5). The column-space is interpreted as 5 consecutive blocks, each of dimension C, which we name: (i_part, f_part, c_part, o_part, c_{t-1}). If 'have_dropout_mask' is nonzero, each row of 'in' will have 3 extra elements, interpreted as dropout masks/scales for i_t, f_t and o_t. @param [in] params The same as in ComputeLstmNonlinearity(). A matrix, of dimension 3 by C, with rows containing the three diagonal parameter matrices used in LSTMs, namely w_{ic}, w_{fc} and w_{oc}. @param [in] output_deriv A matrix, of dimension N by 2C, containing the derivative of the objective function we're backpropagating, w.r.t. the quantities c_t and m_t (in two blocks of column-dimension C). @param [in] deriv_sum_in This is used in the self-repair code to identify oversaturated nonlinearities. It is a matrix, of dimension 5 by C, corresponding to the totals of the derivatives of the 5 sigmoid and tanh nonlinearities, in the order they appear in the equations in the documentation of ComputeLstmNonlinearity(), i.e. in the equations for (i_t, f_t, c_t, o_t, m_t). This will be divided by 'count_in' to get the average derivative value so far, for each of the nonlinearities. @param [in] self_repair_config A vector of dimension 10, containing the configuration of the self-repair to be used for the 5 nonlinearities. The first 5 elements are the self_repair_lower_threshold values (typically 0.05 for sigmoid and 0.2 for tanh), and the next 5 elements are the corresponding self-repair-scales (typically 10^-5). @param [in] count_in The data-count that corresponds to the stats in 'deriv_sum_in' at entry to the function. This function should tolerate the count being zero (in that case, it is free to do the self-repair or not, as this should only happen on the 1st minibatch of each training job). @param [out] input_deriv May be NULL; if not, this function writes, to this location, the backpropagated derivative of the objective function w.r.t. the 'input' matrix. This matrix should have the same dimension as 'input' i.e. N by 5C. In addition to the regular backpropagated derivative, the output will include small values relating to 'self-repair'. @param [out] params_deriv May be NULL; if not, this is where this function *writes* [not adds] the backpropagated derivative of the objective function w.r.t. 'params'; it should have the same dimension as 'params' (3 by C). (This matrix will then be processed by the natural gradient code and added to the appropriate copy of the parameter matrix, outside this function). @param [out] value_sum_out Must be NULL if params_deriv is NULL; if not, a matrix of dimension 5 by C.
This function *adds* to this location the total value of each of the sigmoid/tanh nonlinearities that it computes (this is for diagnostic purposes). @param [out] deriv_sum_out Must be NULL if params_deriv is NULL; if not, a matrix of dimension 5 by C; this function *adds* to this location the total of the derivative of each of the sigmoid/tanh nonlinearities that it computes (this is for diagnostic purposes and to control the self-repair). This function should tolerate the case when 'deriv_sum_out' points to the same data as 'deriv_sum_in'. @param [out] self_repair_sum_out Must be NULL if params_deriv is NULL; if not, a matrix of dimension 5 by C; this function *writes* to this location the sum of the number of times the self-repair code was activated (integer values 0 <= k <= N). This will be processed outside this function into self-repair stats for diagnostics. // Use 2D block (8x32 threads) as we need to compute column sum. // Use 1D grid to cover the data matrix `cell_dim`. */ template<typename Real> __global__ static void _diff_lstm_nonlinearity(const int cell_dim, const int have_dropout_mask, const int num_rows, const Real* input, const int input_stride, const Real* params, const int params_stride, const Real* output_deriv, const int output_deriv_stride, const double* deriv_sum_in, const int deriv_sum_in_stride, const Real* self_repair_config, double count, Real* input_deriv, const int input_deriv_stride, Real* params_deriv, const int params_deriv_stride, double* value_sum_out, const int value_sum_out_stride, double* deriv_sum_out, const int deriv_sum_out_stride, Real* self_repair_sum_out, const int self_repair_sum_out_stride) { __shared__ Real smem[CU1DBLOCK]; const int j = blockIdx.x * blockDim.x + threadIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int grid_stride = gridDim.y * blockDim.y; const int i0 = blockIdx.y * blockDim.y + threadIdx.y; Real w_ic_deriv_sum = 0; Real w_fc_deriv_sum = 0; Real w_oc_deriv_sum = 0; Real i_t_value_sum = 0, i_t_deriv_sum = 0; Real f_t_value_sum = 0, f_t_deriv_sum = 0; Real c_part_value_sum = 0, c_part_deriv_sum = 0; Real o_t_value_sum = 0, o_t_deriv_sum = 0; Real c_t_value_sum = 0, c_t_deriv_sum = 0; bool update_sr[5]; if (j < cell_dim) { const Real w_ic = params[j]; const Real w_fc = params[params_stride + j]; const Real w_oc = params[2 * params_stride + j]; const Real* sr_config = self_repair_config; # pragma unroll for (int i = 0; i < 5; i++) { update_sr[i] = deriv_sum_in[i * deriv_sum_in_stride + j] < sr_config[i] * count; } const Real i_t_self_repair = (update_sr[0] ? sr_config[5] : 0); const Real f_t_self_repair = (update_sr[1] ? sr_config[6] : 0); const Real c_part_self_repair = (update_sr[2] ? sr_config[7] : 0); const Real o_t_self_repair = (update_sr[3] ? sr_config[8] : 0); const Real c_t_self_repair = (update_sr[4] ? sr_config[9] : 0); for (int i = i0; i < num_rows; i += grid_stride) { const Real i_part = input[i * input_stride + j]; const Real f_part = input[i * input_stride + j + cell_dim]; const Real c_part = input[i * input_stride + j + 2 * cell_dim]; const Real o_part = input[i * input_stride + j + 3 * cell_dim]; const Real c_prev = input[i * input_stride + j + 4 * cell_dim]; const Real i_scale = (have_dropout_mask ? input[i * input_stride + cell_dim * 5] : 1), f_scale = (have_dropout_mask ? input[i * input_stride + cell_dim * 5 + 1] :1), o_scale = (have_dropout_mask ? 
input[i * input_stride + cell_dim * 5 + 2] :1); const Real i_t = Real(1) / (1 + exp(-i_part - w_ic * c_prev)); const Real f_t = Real(1) / (1 + exp(-f_part - w_fc * c_prev)); const Real tanh_c_part = tanh(c_part); const Real c_t = f_t * f_scale * c_prev + i_t * i_scale * tanh_c_part; const Real o_t = 1 / (1 + exp(-o_part - w_oc * c_t)); const Real tanh_c_t = tanh(c_t); const Real i_t_deriv = i_t * (1 - i_t); const Real f_t_deriv = f_t * (1 - f_t); const Real c_part_deriv = 1 - tanh_c_part * tanh_c_part; const Real o_t_deriv = o_t * (1 - o_t); const Real c_t_deriv = 1 - tanh_c_t * tanh_c_t; if (params_deriv) { i_t_value_sum += i_t; f_t_value_sum += f_t; c_part_value_sum += tanh_c_part; o_t_value_sum += o_t; c_t_value_sum += tanh_c_t; i_t_deriv_sum += i_t_deriv; f_t_deriv_sum += f_t_deriv; c_part_deriv_sum += c_part_deriv; o_t_deriv_sum += o_t_deriv; c_t_deriv_sum += c_t_deriv; } const Real dc_t_out = output_deriv[i * output_deriv_stride + j]; const Real dm_t = output_deriv[i * output_deriv_stride + j + cell_dim]; const Real dtanh_c_t = o_t * o_scale * dm_t; const Real do_t = o_scale * tanh_c_t * dm_t; const Real do_t_input = (o_t_deriv * do_t - (2 * o_t - 1) * o_t_self_repair); const Real dc_t = (c_t_deriv * dtanh_c_t + dc_t_out + do_t_input * w_oc) - tanh_c_t * c_t_self_repair; const Real dtanh_c_part = i_t * i_scale * dc_t; const Real df_t = dc_t * f_scale * c_prev; const Real df_t_input = (df_t * f_t_deriv - (2 * f_t - 1) * f_t_self_repair); const Real di_t = dc_t * i_scale * tanh_c_part; const Real di_t_input = (di_t * i_t_deriv - (2 * i_t - 1) * i_t_self_repair); if (params_deriv) { w_ic_deriv_sum += c_prev * di_t_input; w_fc_deriv_sum += c_prev * df_t_input; w_oc_deriv_sum += c_t * do_t_input; } const Real dc_prev = w_ic * di_t_input + w_fc * df_t_input + f_t * f_scale * dc_t; const Real do_part = do_t_input; const Real dc_part = (c_part_deriv * dtanh_c_part - tanh_c_part * c_part_self_repair); const Real df_part = df_t_input; const Real di_part = di_t_input; if (input_deriv) { input_deriv[i * input_deriv_stride + j] = di_part; input_deriv[i * input_deriv_stride + j + cell_dim] = df_part; input_deriv[i * input_deriv_stride + j + cell_dim * 2] = dc_part; input_deriv[i * input_deriv_stride + j + cell_dim * 3] = do_part; input_deriv[i * input_deriv_stride + j + cell_dim * 4] = dc_prev; } } } if (params_deriv) { // compute params_deriv smem[tid] = w_ic_deriv_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { params_deriv[j] = smem[tid]; } __syncthreads(); smem[tid] = w_fc_deriv_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { params_deriv[params_deriv_stride + j] = smem[tid]; } __syncthreads(); smem[tid] = w_oc_deriv_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { params_deriv[2 * params_deriv_stride + j] = smem[tid]; } // compute value_sum_out __syncthreads(); smem[tid] = i_t_value_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { value_sum_out[j] += smem[tid]; } __syncthreads(); smem[tid] = f_t_value_sum; # 
pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { value_sum_out[value_sum_out_stride + j] += smem[tid]; } __syncthreads(); smem[tid] = c_part_value_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { value_sum_out[2 * value_sum_out_stride + j] += smem[tid]; } __syncthreads(); smem[tid] = o_t_value_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { value_sum_out[3 * value_sum_out_stride + j] += smem[tid]; } __syncthreads(); smem[tid] = c_t_value_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { value_sum_out[4 * value_sum_out_stride + j] += smem[tid]; } // need to update self_repair_sum_out before deriv_sum_out, because // deriv_sum_out and deriv_sum_in might point to the same memory. if (i0 < 5 && j < cell_dim) { self_repair_sum_out[i0 * self_repair_sum_out_stride + j] = update_sr[i0] ? num_rows : 0; } // compute derive_sum_out __syncthreads(); smem[tid] = i_t_deriv_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { deriv_sum_out[j] += smem[tid]; } __syncthreads(); smem[tid] = f_t_deriv_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { deriv_sum_out[deriv_sum_out_stride + j] += smem[tid]; } __syncthreads(); smem[tid] = c_part_deriv_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { deriv_sum_out[2 * deriv_sum_out_stride + j] += smem[tid]; } __syncthreads(); smem[tid] = o_t_deriv_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { deriv_sum_out[3 * deriv_sum_out_stride + j] += smem[tid]; } __syncthreads(); smem[tid] = c_t_deriv_sum; __syncthreads(); # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { deriv_sum_out[4 * deriv_sum_out_stride + j] += smem[tid]; } } } __global__ static void _cuda_compress_uint8_sign(const BaseFloat *src, MatrixDim dim, unsigned char *dest, int dest_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dest_index = i + j * dest_stride, src_index = i + j * dim.stride; if (i < dim.cols && j < dim.rows) { BaseFloat f = src[src_index]; dest[dest_index] = (f > 0.0 ? 
(unsigned char)1 : (unsigned char)0); } } // The following inline templated functions are a workaround for the // fact that (I believe) std::numeric_limits is not available in CUDA; // they allow us to access the minimum and maximum elements of certain // types from templated code. template <typename I> __device__ static inline int minimum_integer_value(); template <typename I> __device__ static inline int maximum_integer_value(); template<> __device__ int maximum_integer_value<int8_t>() { return 127; } template<> __device__ int minimum_integer_value<int8_t>() { return -128; } template<> __device__ int maximum_integer_value<uint8_t>() { return 255; } template<> __device__ int minimum_integer_value<uint8_t>() { return 0; } template<> __device__ int maximum_integer_value<int16_t>() { return 32767; } template<> __device__ int minimum_integer_value<int16_t>() { return -32768; } template<> __device__ int maximum_integer_value<uint16_t>() { return 65535; } template<> __device__ int minimum_integer_value<uint16_t>() { return 0; } template <typename I> __global__ static void _cuda_compress_bounds_check(const BaseFloat *src, MatrixDim dim, I *dest, int dest_stride, float inv_scale) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dest_index = i + j * dest_stride, src_index = i + j * dim.stride; const int min_value = minimum_integer_value<I>(), max_value = maximum_integer_value<I>(); int compressed_value; int ok = (i < dim.cols && j < dim.rows); if (ok) { float f = src[src_index]; // note: I'm not sure what __float2int_rn does if input is outside of // integer range, but it doesn't matter much as in the situations where this // type of compression would make sense, the input should be well inside the // range of 'int', and if it fails, we've probably already catastrophically // diverged. 
int i = __float2int_rn(f * inv_scale); if (i < min_value) compressed_value = min_value; else if (i > max_value) compressed_value = max_value; else compressed_value = i; } __syncthreads(); if (ok) { dest[dest_index] = compressed_value; } } template <typename I> __global__ static void _cuda_compress_no_bounds_check(const BaseFloat *src, MatrixDim dim, I *dest, int dest_stride, float inv_scale) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dest_index = i + j * dest_stride, src_index = i + j * dim.stride; if (i < dim.cols && j < dim.rows) { float f = src[src_index]; int i = __float2int_rn(f * inv_scale); I s = i; dest[dest_index] = s; } } template <typename I> __global__ static void _cuda_uncompress(BaseFloat *dest, MatrixDim dim, const I *src, int src_stride, float scale) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int src_index = i + j * src_stride, dest_index = i + j * dim.stride; if (i < dim.cols && j < dim.rows) { I s = src[src_index]; dest[dest_index] = float(s * scale); } } /*********************************************************************** * ANSI-C wrappers of CUDA kernels */ /* * "int32" */ void cuda_int32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) { hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cuda_int32_add(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) { hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cuda_int32_sequence(dim3 Gr, dim3 Bl, int32_cuda* data, int length, int32_cuda base) { hipLaunchKernelGGL(( _sequence), dim3(Gr), dim3(Bl), 0, 0, data, length, base); } /* * "float" */ /* * CuMatrix */ void cudaF_copy_upp_low(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) { hipLaunchKernelGGL(( _copy_upp_low), dim3(Gr),dim3(Bl), 0, 0, A,dimA);} void cudaF_copy_low_upp(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) { hipLaunchKernelGGL(( _copy_low_upp), dim3(Gr),dim3(Bl), 0, 0, A,dimA);} void cudaF_add_diag_vec_mat(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *vec, const float *mat2, int mat2_row_stride, int mat2_col_stride, float beta) { hipLaunchKernelGGL(( _add_diag_vec_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaF_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaFD_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaF_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaFD_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d); } void cudaF_apply_pow_abs(dim3 Gr, dim3 Bl, float* mat, float power, bool include_sign, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow_abs), dim3(Gr),dim3(Bl), 0, 0, mat, power, include_sign, d); } void 
cudaF_apply_heaviside(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_heaviside), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaF_copy_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaF_add_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _add_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaF_copy_rows(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaF_copy_rows_direct(dim3 Gr, dim3 Bl, float* dst, const float* const * src, MatrixDim dst_dim) { hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, dst_dim); } void cudaF_copy_to_rows_direct(dim3 Gr, dim3 Bl, float* const * dst, const float* src, MatrixDim src_dim) { hipLaunchKernelGGL(( _copy_to_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, src_dim); } void cudaF_add_rows(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, reorder, dst_dim, src_stride); } void cudaF_mul_rows(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _mul_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaF_add_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* const * src, MatrixDim dst_dim) { hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, dst_dim); } void cudaF_add_to_rows(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim src_dim, int dst_stride) { hipLaunchKernelGGL(( _add_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, reorder, src_dim, dst_stride); } void cudaF_add_to_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* const * dst, const float* src, MatrixDim src_dim) { hipLaunchKernelGGL(( _add_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, src_dim); } void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d); } void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d); } void cudaF_set_diag(int Gr, int Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _set_diag), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_set_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { hipLaunchKernelGGL(( _set_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaF_add_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { hipLaunchKernelGGL(( _add_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_set_zero_above_diag(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _set_zero_above_diag), dim3(Gr),dim3(Bl), 0, 0, mat, d); } 
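// Illustrative host-side sketch (added for exposition; not part of the original
// kernel wrappers): a typical caller configures a 2D launch so that each thread
// handles one matrix element. CU2DBLOCK and n_blocks() are assumed to be the
// usual Kaldi helpers (cu-matrixdim.h / cu-common.h), and 'data' / 'd' are a
// hypothetical device pointer and its MatrixDim.
//
//   dim3 dimBlock(CU2DBLOCK, CU2DBLOCK);                  // e.g. 16 x 16 threads
//   dim3 dimGrid(n_blocks(d.cols, CU2DBLOCK),             // enough blocks to cover the columns
//                n_blocks(d.rows, CU2DBLOCK));            //   ... and the rows
//   cudaF_apply_floor(dimGrid, dimBlock, data, 0.0f, d);  // clamp negative entries to zero
//   cudaF_scale(dimGrid, dimBlock, data, 2.0f, d);        // then scale every entry by 2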
void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_scale_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { hipLaunchKernelGGL(( _scale_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaF_div_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _div_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaF_max(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _max), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaF_min(dim3 Gr, dim3 Bl, float* mat, const float* other, MatrixDim mat_d, int other_stride) { hipLaunchKernelGGL(( _min), dim3(Gr),dim3(Bl), 0, 0, mat,other,mat_d,other_stride); } void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaF_mul_rows_group_mat(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _mul_rows_group_mat), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size); } void cudaF_diff_group_pnorm(dim3 Gr, dim3 Bl, float *id, const float *iv, const float *ov, const float* od, MatrixDim id_dim, int iv_stride, int ov_stride, int od_stride, int group_size, float power) { hipLaunchKernelGGL(( _diff_group_pnorm), dim3(Gr), dim3(Bl), 0, 0, id, iv, ov, od, id_dim, iv_stride, ov_stride, od_stride, group_size, power); } void cudaF_calc_group_max_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1, const float *x2, MatrixDim y_dim, int x1_stride, int x2_stride, int group_size) { hipLaunchKernelGGL(( _calc_group_max_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, y_dim, x1_stride, x2_stride, group_size); } void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) { hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d); } void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* src, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } else { hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } } void cudaF_add_mat_blocks(dim3 Gr, dim3 Bl, float alpha, const float* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_blocks_trans), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } else { hipLaunchKernelGGL(( _add_mat_blocks), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } } void cudaF_add_mat_repeated(dim3 Gr, dim3 
Bl, float alpha, const float* src, MatrixDim src_dim, float *dst, MatrixDim dst_dim) { hipLaunchKernelGGL(( _add_mat_repeated), dim3(Gr),dim3(Bl), 0, 0, alpha, src, src_dim, dst, dst_dim); } void cudaF_set_mat_mat_div_mat(dim3 Gr, dim3 Bl, const float *A, const float *B, const float *C, float *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { hipLaunchKernelGGL(( _set_mat_mat_div_mat), dim3(Gr),dim3(Bl), 0, 0, A,B,C,dst,d, stride_a, stride_b, stride_c); } void cudaF_sy_add_tr2(dim3 Gr, dim3 Bl, float alpha, float beta, const float* T, MatrixDim tdim, float *S, MatrixDim sdim) { hipLaunchKernelGGL(( _sy_add_tr2), dim3(Gr),dim3(Bl), 0, 0, alpha, beta, T, tdim, S, sdim); } void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col, float beta, float* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d); } void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d); } void cudaF_add_mat_diag_vec(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *mat2, int mat2_row_stride, int mat2_col_stride, const float *vec, float beta) { hipLaunchKernelGGL(( _add_mat_diag_vec), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaF_add_mat_mat_elements(dim3 Gr, dim3 Bl, float *data, const float *srcA_data, const float *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, float alpha, float beta) { hipLaunchKernelGGL(( _add_mat_mat_elements), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... 
void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask); } /* * CuVector */ void cudaF_max_mat_cols(int Gr, int Bl, float* result, const float* mat, const MatrixDim d) { hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr),dim3(Bl), 0, 0, result,mat,d, TransReduceOp<MAX,float>()); } void cudaF_min_mat_cols(int Gr, int Bl, float* result, const float* mat, const MatrixDim d) { hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr),dim3(Bl), 0, 0, result,mat,d, TransReduceOp<MIN,float>()); } void cudaF_sum_mat_cols(int Gr, int Bl, float* result, const float* mat, const MatrixDim d) { hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr),dim3(Bl), 0, 0, result,mat,d, TransReduceOp<SUM,float>()); } void cudaF_add_col_sum_mat(int Gr, int Bl, float* result, const float* mat, const MatrixDim d, const float alpha, const float beta) { hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr), dim3(Bl), 0, 0, result, mat, d, TransReduceOp<SUMAB, float>(alpha, beta)); } void cudaF_replace_value(int Gr, int Bl, float *v, int dim, float orig, float changed) { hipLaunchKernelGGL(( _replace_value), dim3(Gr),dim3(Bl), 0, 0, v, dim, orig, changed); } void cudaF_set_bias_params(int Gr, int Bl, float* v, const float* a, float param_1, float param_2, float param_3, int* flag, int dim) { hipLaunchKernelGGL(( _set_bias_params), dim3(Gr),dim3(Bl), 0, 0, v,a,param_1,param_2,param_3,flag,dim); } void cublas_copy_kaldi_fd(int Gr, int Bl, int n, const float* x, int incx, double* y, int incy) { hipLaunchKernelGGL(( _cublas_copy_kaldi), dim3(Gr),dim3(Bl), 0, 0, n, x, incx, y, incy); } void cublas_copy_kaldi_df(int Gr, int Bl, int n, const double* x, int incx, float* y, int incy) { hipLaunchKernelGGL(( _cublas_copy_kaldi), dim3(Gr),dim3(Bl), 0, 0, n, x, incx, y, incy); } void cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) { hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim); } void cudaF_vec_min(int Gr, int Bl, const float* v, float* value, int dim, int inc) { hipLaunchKernelGGL(( _vec_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc, TransReduceOp<MIN, float>()); } void cudaF_vec_max(int Gr, int Bl, const float* v, float* value, int dim, int inc) { hipLaunchKernelGGL(( _vec_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc, TransReduceOp<MAX, float>()); } void cudaF_trace_mat_mat_trans(dim3 Gr, dim3 Bl, const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { hipLaunchKernelGGL(( _trace_mat_mat_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dA,B_stride,value); } void cudaF_trace_mat_mat(dim3 Gr, dim3 Bl, const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { hipLaunchKernelGGL(( _trace_mat_mat<32>) , dim3(Gr),dim3(Bl), 0, 0, A,B,dA,B_stride,value); } void cudaF_add_diag_mat_mat_MNT(int Gr, int Bl, const float alpha, const float* M, const MatrixDim dim_M, const float* N, const int stride_N, const float beta, float* v) { hipLaunchKernelGGL(( _add_diag_mat_mat_MNT), dim3(Gr),dim3(Bl), 0, 0, alpha,M,dim_M,N,stride_N,beta,v); } void cudaF_add_diag_mat_mat_MTN(dim3 Gr, dim3 Bl, const float alpha, const float* M, const int stride_M, const float* N, const MatrixDim dim_N, const float beta, float* v) { if (Bl.x == 16) { hipLaunchKernelGGL(( _add_diag_mat_mat_MTN<16>) , dim3(Gr),dim3(Bl), 0, 0, alpha,M,stride_M,N,dim_N,beta,v); } else if (Bl.x==32) { hipLaunchKernelGGL(( 
_add_diag_mat_mat_MTN<32>), dim3(Gr),dim3(Bl), 0, 0, alpha,M,stride_M,N,dim_N,beta,v); } } void cudaF_add_diag_mat_mat_MN(dim3 Gr, dim3 Bl, const float alpha, const float* M, const int stride_M, const float* N, const MatrixDim dim_N, const float beta, float* v) { if (Bl.x == 16) { hipLaunchKernelGGL(( _add_diag_mat_mat_MN<16>) , dim3(Gr),dim3(Bl), 0, 0, alpha,M,stride_M,N,dim_N,beta,v); } else if (Bl.x==32) { hipLaunchKernelGGL(( _add_diag_mat_mat_MN<32>), dim3(Gr),dim3(Bl), 0, 0, alpha,M,stride_M,N,dim_N,beta,v); } } void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x, const float* y, float beta, int dim) { hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim); } void cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) { hipLaunchKernelGGL(( _vec_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc, TransReduceOp<SUM, float>()); } void cudaF_matrix_add_elements(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, float alpha, MatrixElement<float>* x, int num_elements) { hipLaunchKernelGGL(( _cuda_matrix_add_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, alpha, x, num_elements); } void cudaF_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim, float alpha, const Int32Pair* indices, const float* x, int s, float* data) { hipLaunchKernelGGL(( _cuda_matrix_add_indexed_values), dim3(Gr), dim3(Bl), 0, 0, dim, alpha, indices, x, s, data); } void cudaF_matrix_add_to_elements(dim3 Gr, dim3 Bl, float alpha, float* mat, MatrixDim dim, const MatrixIndexT_cuda* elements) { hipLaunchKernelGGL(( _cuda_matrix_add_to_elements), dim3(Gr), dim3(Bl), 0, 0, alpha, mat, dim, elements); } void cudaF_vector_copy_elements(dim3 Gr, dim3 Bl, float *data, int dim, const float *src_mat, int mat_stride, bool transpose, const MatrixIndexT_cuda* elements) { hipLaunchKernelGGL(( _cuda_vector_copy_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, src_mat, mat_stride, transpose, elements); } void cudaF_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<float>* x, int s, const float* z, MatrixDim d, float* z2, MatrixDim d2, float* t) { hipLaunchKernelGGL(( _cuda_comp_obj_deriv), dim3(Gr),dim3(Bl), 0, 0, x,s,z,d,z2,d2,t); } void cudaD_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<double>* x, int s, const double* z, MatrixDim d, double* z2, MatrixDim d2, double* t) { hipLaunchKernelGGL(( _cuda_comp_obj_deriv), dim3(Gr),dim3(Bl), 0, 0, x,s,z,d,z2,d2,t); } void cudaF_vec_copy_diag_from_packed(int Gr, int Bl, float *dst, const float *src, int dim) { hipLaunchKernelGGL(( _vec_copy_diag_from_packed), dim3(Gr),dim3(Bl), 0, 0, dst,src,dim); } void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim); } void cudaF_vec_apply_ceiling(int Gr, int Bl, float* v, float ceiling_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, v, ceiling_val,count,dim); } void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) { hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim); } void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) { hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim); } void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) { hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d); } void cudaF_add_mat_blockmat(dim3 Gr, dim3 Bl, float *data, MatrixDim d, const float *Adata, int A_num_rows, int 
A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, float alpha, float beta, int B_trans) { if (B_trans) { hipLaunchKernelGGL(( _add_mat_blockmat_trans), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { hipLaunchKernelGGL(( _add_mat_blockmat), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaF_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const float *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const float *D_data, int D_row_stride, int D_col_stride, float alpha, float beta) { hipLaunchKernelGGL(( _block_add_mat_mat), dim3(Gr),dim3(Bl), 0, 0, B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaF_soft_hinge(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _soft_hinge), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_group_pnorm(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size, float power) { hipLaunchKernelGGL(( _group_pnorm), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size, power); } void cudaF_group_spec_pnorm(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride, int group_size, float power) { if (power == float(0)) { hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size, TransReduceOp<L0NORM, float>()); } else if (power == float(1)) { hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size, TransReduceOp<L1NORM, float>()); } else if (power == float(2)) { hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size, TransReduceOp<L2NORM, float>()); } else if (power == std::numeric_limits<float>::infinity()) { hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size, TransReduceOp<LINFNORM, float>()); } else { hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size, TransReduceOp<LPNORM, float>(power)); } } void cudaF_group_max(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size, TransReduceOp<MAX, float>()); } void cudaF_sigmoid(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_diff_sigmoid(dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaF_tanh(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_diff_tanh(dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaF_ensure_nonzero(dim3 Gr, dim3 Bl, const float *x, MatrixDim d, float epsilon, int y_stride, float *y) { 
hipLaunchKernelGGL(( _ensure_nonzero), dim3(Gr),dim3(Bl), 0, 0, x, d, epsilon, y_stride, y); } void cudaF_parametric_relu(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride, const float* a, const float* b) { hipLaunchKernelGGL(( _parametric_relu), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, a, b); } void cudaF_diff_parametric_relu(dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride, const float* a, const float* b) { hipLaunchKernelGGL(( _diff_parametric_relu), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride, a, b); } void cudaF_heaviside(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _heaviside), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_softmax_reduce(size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaF_log_softmax_reduce(size_t Gr, size_t Bl, float* y, const float* x, MatrixDim y_dim, int x_stride) { hipLaunchKernelGGL(( _log_softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, y_dim, x_stride); } void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in); } void cudaF_normalize_per_row(size_t Gr, size_t Bl, float *y, int y_stride, const float *x, MatrixDim x_d, float target_rms, bool add_log_stddev) { hipLaunchKernelGGL(( _normalize_per_row), dim3(Gr), dim3(Bl), 0, 0, y, y_stride, x, x_d, target_rms, add_log_stddev); } void cudaF_one(int Gr, int Bl, float* x, int dim) { hipLaunchKernelGGL(( _one), dim3(Gr),dim3(Bl), 0, 0, x,dim); } void cudaF_take_mean(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_mean), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaF_take_lower(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_lower), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaF_take_upper(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_upper), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaF_copy_from_sp(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim dim) { hipLaunchKernelGGL(( _copy_from_sp), dim3(Gr),dim3(Bl), 0, 0, x, y, dim); } void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d, int stride_grad) { hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d,stride_grad); } void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, MatrixDim d) { hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, d); } void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, float* mat_net_out, float* vec_log_post, MatrixDim d) { hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d); } void cudaF_diff_softmax(dim3 Gr, dim3 Bl, float* x, const MatrixDim dim, const float* 
value, const int value_stride, const float* diff, const int diff_stride) { hipLaunchKernelGGL(( _diff_softmax), dim3(Gr), dim3(Bl), 0, 0, x, dim, value, value_stride, diff, diff_stride); } void cudaF_copy_rows_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out, const float *v_in) { hipLaunchKernelGGL(( _copy_rows_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat_out, d_out, v_in); } void cudaF_diff_log_softmax(dim3 Gr, dim3 Bl, const MatrixDim in_deriv_dim, const float* out_value, const int out_value_stride, const float* out_deriv, const int out_deriv_stride, float* in_deriv) { hipLaunchKernelGGL(( _diff_log_softmax), dim3(Gr), dim3(Bl), 0, 0, in_deriv_dim, out_value, out_value_stride, out_deriv, out_deriv_stride, in_deriv); } void cudaF_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const float* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_df), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaF_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const float* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_fd), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaF_sum_column_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, const float *src_data, MatrixDim src_dim, const Int32Pair *indices) { hipLaunchKernelGGL(( _sum_column_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indices); } void cudaF_add_row_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, const float *src_data, MatrixDim src_dim, const Int32Pair *indexes) { hipLaunchKernelGGL(( _add_row_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indexes); } void cudaF_matrix_lookup(dim3 Gr, dim3 Bl, const float *data, MatrixDim dim, const Int32Pair *indices, int indices_size, float *output) { hipLaunchKernelGGL(( _matrix_lookup), dim3(Gr),dim3(Bl), 0, 0, data, dim, indices, indices_size, output); } void cudaF_equal_element_mask(dim3 Gr, dim3 Bl, const float *mat1, const float *mat2, float *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { hipLaunchKernelGGL(( _equal_element_mask), dim3(Gr),dim3(Bl), 0, 0, mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); } /* * "double" */ /* * CuMatrix */ void cudaD_copy_upp_low(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) { hipLaunchKernelGGL(( _copy_upp_low), dim3(Gr),dim3(Bl), 0, 0, A,dimA);} void cudaD_copy_low_upp(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) { hipLaunchKernelGGL(( _copy_low_upp), dim3(Gr),dim3(Bl), 0, 0, A,dimA);} void cudaD_add_diag_vec_mat(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *vec, const double *mat2, int mat2_row_stride, int mat2_col_stride, double beta) { hipLaunchKernelGGL(( _add_diag_vec_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaD_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaDF_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaD_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaDF_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat); } void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, 
MatrixDim d) { hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d); } void cudaD_apply_pow_abs(dim3 Gr, dim3 Bl, double* mat, double power, bool include_sign, MatrixDim d) { hipLaunchKernelGGL(( _apply_pow_abs), dim3(Gr),dim3(Bl), 0, 0, mat, power, include_sign, d); } void cudaD_apply_heaviside(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_heaviside), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaD_copy_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaD_add_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _add_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaD_copy_rows(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaD_copy_rows_direct(dim3 Gr, dim3 Bl, double* dst, const double* const * src, MatrixDim dst_dim) { hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, dst_dim); } void cudaD_copy_to_rows_direct(dim3 Gr, dim3 Bl, double* const * dst, const double* src, MatrixDim src_dim) { hipLaunchKernelGGL(( _copy_to_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, src_dim); } void cudaD_add_rows(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, reorder, dst_dim, src_stride); } void cudaD_mul_rows(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { hipLaunchKernelGGL(( _mul_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride); } void cudaD_add_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* const * src, MatrixDim dst_dim) { hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, dst_dim); } void cudaD_add_to_rows(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim src_dim, int dst_stride) { hipLaunchKernelGGL(( _add_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, reorder, src_dim, dst_stride); } void cudaD_add_to_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* const * dst, const double* src, MatrixDim src_dim) { hipLaunchKernelGGL(( _add_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, src_dim); } void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d); } void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val, MatrixDim d) { hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d); } void cudaD_set_diag(int Gr, int Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _set_diag), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_set_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { hipLaunchKernelGGL(( _set_diag_packed), dim3(Gr),dim3(Bl), 0, 0, 
mat,value,dim); } void cudaD_add_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { hipLaunchKernelGGL(( _add_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_set_zero_above_diag(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _set_zero_above_diag), dim3(Gr),dim3(Bl), 0, 0, mat, d); } void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_scale_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { hipLaunchKernelGGL(( _scale_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim); } void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d); } void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d); } void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaD_div_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _div_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaD_max(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { hipLaunchKernelGGL(( _max), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride); } void cudaD_min(dim3 Gr, dim3 Bl, double* mat, const double* other, MatrixDim mat_d, int other_stride) { hipLaunchKernelGGL(( _min), dim3(Gr),dim3(Bl), 0, 0, mat,other,mat_d,other_stride); } void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d); } void cudaD_mul_rows_group_mat(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _mul_rows_group_mat), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size); } void cudaD_diff_group_pnorm(dim3 Gr, dim3 Bl, double *id, const double *iv, const double *ov, const double* od, MatrixDim id_dim, int iv_stride, int ov_stride, int od_stride, int group_size, double power) { hipLaunchKernelGGL(( _diff_group_pnorm), dim3(Gr), dim3(Bl), 0, 0, id, iv, ov, od, id_dim, iv_stride, ov_stride, od_stride, group_size, power); } void cudaD_calc_group_max_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1, const double* x2, MatrixDim y_dim, int x1_stride, int x2_stride, int group_size) { hipLaunchKernelGGL(( _calc_group_max_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, y_dim, x1_stride, x2_stride, group_size); } void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) { hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d); } void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* src, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride); } else { hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, 
alpha,src,dst,d,src_stride); } } void cudaD_add_mat_blocks(dim3 Gr, dim3 Bl, double alpha, const double* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { hipLaunchKernelGGL(( _add_mat_blocks_trans), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } else { hipLaunchKernelGGL(( _add_mat_blocks), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } } void cudaD_add_mat_repeated(dim3 Gr, dim3 Bl, double alpha, const double* src, MatrixDim src_dim, double *dst, MatrixDim dst_dim) { hipLaunchKernelGGL(( _add_mat_repeated), dim3(Gr),dim3(Bl), 0, 0, alpha, src, src_dim, dst, dst_dim); } void cudaD_set_mat_mat_div_mat(dim3 Gr, dim3 Bl, const double *A, const double *B, const double *C, double *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { hipLaunchKernelGGL(( _set_mat_mat_div_mat), dim3(Gr),dim3(Bl), 0, 0, A,B,C,dst,d,stride_a,stride_b,stride_c); } void cudaD_sy_add_tr2(dim3 Gr, dim3 Bl, double alpha, double beta, const double* T, MatrixDim tdim, double *S, MatrixDim sdim) { hipLaunchKernelGGL(( _sy_add_tr2), dim3(Gr),dim3(Bl), 0, 0, alpha, beta, T, tdim, S, sdim); } void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col, double beta, double* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d); } void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) { hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d); } void cudaD_add_mat_diag_vec(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *mat2, int mat2_row_stride, int mat2_col_stride, const double *vec, double beta) { hipLaunchKernelGGL(( _add_mat_diag_vec), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaD_add_mat_mat_elements(dim3 Gr, dim3 Bl, double *data, const double *srcA_data, const double *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, double alpha, double beta) { hipLaunchKernelGGL(( _add_mat_mat_elements), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... 
void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask); } /* * CuVector */ void cudaD_max_mat_cols(int Gr, int Bl, double* result, const double* mat, const MatrixDim d) { hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr),dim3(Bl), 0, 0, result,mat,d, TransReduceOp<MAX,double>()); } void cudaD_min_mat_cols(int Gr, int Bl, double* result, const double* mat, const MatrixDim d) { hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr),dim3(Bl), 0, 0, result,mat,d, TransReduceOp<MIN,double>()); } void cudaD_sum_mat_cols(int Gr, int Bl, double* result, const double* mat, const MatrixDim d) { hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr),dim3(Bl), 0, 0, result,mat,d, TransReduceOp<SUM,double>()); } void cudaD_add_col_sum_mat(int Gr, int Bl, double* result, const double* mat, const MatrixDim d, const double alpha, const double beta) { hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr), dim3(Bl), 0, 0, result, mat, d, TransReduceOp<SUMAB, double>(alpha, beta)); } void cudaD_replace_value(int Gr, int Bl, double *v, int dim, double orig, double changed) { hipLaunchKernelGGL(( _replace_value), dim3(Gr),dim3(Bl), 0, 0, v, dim, orig, changed); } void cudaD_set_bias_params(int Gr, int Bl, double* v, const double* a, double param_1, double param_2, double param_3, int* flag, int dim) { hipLaunchKernelGGL(( _set_bias_params), dim3(Gr),dim3(Bl), 0, 0, v,a,param_1,param_2,param_3,flag,dim); } void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a, int dim) { hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim); } void cudaD_vec_min(int Gr, int Bl, const double* v, double* value, int dim, int inc) { hipLaunchKernelGGL(( _vec_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc, TransReduceOp<MIN, double>()); } void cudaD_vec_max(int Gr, int Bl, const double* v, double* value, int dim, int inc) { hipLaunchKernelGGL(( _vec_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc, TransReduceOp<MAX, double>()); } void cudaD_trace_mat_mat_trans(dim3 Gr, dim3 Bl, const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { hipLaunchKernelGGL(( _trace_mat_mat_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dA,B_stride,value); } void cudaD_trace_mat_mat(dim3 Gr, dim3 Bl, const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { hipLaunchKernelGGL(( _trace_mat_mat<32>) , dim3(Gr),dim3(Bl), 0, 0, A,B,dA,B_stride,value); } void cudaD_add_diag_mat_mat_MNT(int Gr, int Bl, const double alpha, const double* M, const MatrixDim dim_M, const double* N, const int stride_N, const double beta, double* v) { hipLaunchKernelGGL(( _add_diag_mat_mat_MNT), dim3(Gr),dim3(Bl), 0, 0, alpha,M,dim_M,N,stride_N,beta,v); } void cudaD_add_diag_mat_mat_MTN(dim3 Gr, dim3 Bl, const double alpha, const double* M, const int stride_M, const double* N, const MatrixDim dim_N, const double beta, double* v) { if (Bl.x == 16) { hipLaunchKernelGGL(( _add_diag_mat_mat_MTN<16>) , dim3(Gr),dim3(Bl), 0, 0, alpha,M,stride_M,N,dim_N,beta,v); } else if (Bl.x==32) { hipLaunchKernelGGL(( _add_diag_mat_mat_MTN<32>), dim3(Gr),dim3(Bl), 0, 0, alpha,M,stride_M,N,dim_N,beta,v); } } void cudaD_add_diag_mat_mat_MN(dim3 Gr, dim3 Bl, const double alpha, const double* M, const int stride_M, const double* N, const MatrixDim dim_N, const double beta, double* v) { if (Bl.x == 16) { hipLaunchKernelGGL(( _add_diag_mat_mat_MN<16>) , 
dim3(Gr),dim3(Bl), 0, 0, alpha,M,stride_M,N,dim_N,beta,v); } else if (Bl.x==32) { hipLaunchKernelGGL(( _add_diag_mat_mat_MN<32>), dim3(Gr),dim3(Bl), 0, 0, alpha,M,stride_M,N,dim_N,beta,v); } } void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x, const double* y, double beta, int dim) { hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim); } void cudaD_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const double* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_df), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaD_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const double* mat, MatrixDim dmat, int dim) { hipLaunchKernelGGL(( _copy_col_from_mat_fd), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim); } void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) { hipLaunchKernelGGL(( _vec_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, v,value,dim,inc, TransReduceOp<SUM, double>()); } void cudaD_matrix_add_elements(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, double alpha, MatrixElement<double>* x, int num_elements) { hipLaunchKernelGGL(( _cuda_matrix_add_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, alpha, x, num_elements); } void cudaD_vector_copy_elements(dim3 Gr, dim3 Bl, double *data, int dim, const double *src_mat, int mat_stride, bool transpose, const MatrixIndexT_cuda* elements) { hipLaunchKernelGGL(( _cuda_vector_copy_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, src_mat, mat_stride, transpose, elements); } void cudaD_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim, double alpha, const Int32Pair* indices, const double* x, int s, double* data) { hipLaunchKernelGGL(( _cuda_matrix_add_indexed_values), dim3(Gr), dim3(Bl), 0, 0, dim, alpha, indices, x, s, data); } void cudaD_matrix_add_to_elements(dim3 Gr, dim3 Bl, double alpha, double* mat, MatrixDim dim, const MatrixIndexT_cuda* elements) { hipLaunchKernelGGL(( _cuda_matrix_add_to_elements), dim3(Gr), dim3(Bl), 0, 0, alpha, mat, dim, elements); } void cudaD_vec_copy_diag_from_packed(int Gr, int Bl, double *dst, const double *src, int dim) { hipLaunchKernelGGL(( _vec_copy_diag_from_packed), dim3(Gr),dim3(Bl), 0, 0, dst,src,dim); } void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim); } void cudaD_vec_apply_ceiling(int Gr, int Bl, double* v, double ceiling_val, float *count, int dim) { hipLaunchKernelGGL(( _vec_apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, v,ceiling_val,count,dim); } void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) { hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim); } void cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) { hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim); } void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) { hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d); } void cudaD_add_mat_blockmat(dim3 Gr, dim3 Bl, double *data, MatrixDim d, const double *Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, double alpha, double beta, int B_trans) { if (B_trans) { hipLaunchKernelGGL(( _add_mat_blockmat_trans), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { 
hipLaunchKernelGGL(( _add_mat_blockmat), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaD_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const double *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const double *D_data, int D_row_stride, int D_col_stride, double alpha, double beta) { hipLaunchKernelGGL(( _block_add_mat_mat), dim3(Gr),dim3(Bl), 0, 0, B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaD_soft_hinge(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _soft_hinge), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_group_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size, double power) { hipLaunchKernelGGL(( _group_pnorm), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size, power); } void cudaD_group_spec_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size, double power) { if (power == double(0)) { hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size, TransReduceOp<L0NORM, double>()); } else if (power == double(1)) { hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size, TransReduceOp<L1NORM, double>()); } else if (power == double(2)) { hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size, TransReduceOp<L2NORM, double>()); } else if (power == std::numeric_limits<double>::infinity()) { hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size, TransReduceOp<LINFNORM, double>()); } else { hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size, TransReduceOp<LPNORM, double>(power)); } } void cudaD_group_max(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) { hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size, TransReduceOp<MAX, double>()); } void cudaD_sigmoid(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_diff_sigmoid(dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaD_tanh(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_diff_tanh(dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride); } void cudaD_ensure_nonzero(dim3 Gr, dim3 Bl, const double *x, MatrixDim d, double epsilon, int y_stride, double *y) { hipLaunchKernelGGL(( _ensure_nonzero), dim3(Gr),dim3(Bl), 0, 0, x, d, epsilon, y_stride, y); } void cudaD_parametric_relu(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, const double* a, const double* b) { hipLaunchKernelGGL(( _parametric_relu), dim3(Gr),dim3(Bl), 0, 0, y, x, d, 
src_stride, a, b); } void cudaD_diff_parametric_relu(dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride, const double* a, const double* b) { hipLaunchKernelGGL(( _diff_parametric_relu), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride, a, b); } void cudaD_heaviside(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _heaviside), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_softmax_reduce(size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride); } void cudaD_log_softmax_reduce(size_t Gr, size_t Bl, double* y, const double* x, MatrixDim y_dim, int x_stride) { hipLaunchKernelGGL(( _log_softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, y_dim, x_stride); } void cudaD_normalize_per_row(size_t Gr, size_t Bl, double *y, int y_stride, const double *x, MatrixDim x_d, double target_rms, bool add_log_stddev) { hipLaunchKernelGGL(( _normalize_per_row), dim3(Gr), dim3(Bl), 0, 0, y, y_stride, x, x_d, target_rms, add_log_stddev); } void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in); } void cudaD_one(int Gr, int Bl, double* x, int dim) { hipLaunchKernelGGL(( _one), dim3(Gr),dim3(Bl), 0, 0, x,dim); } void cudaD_take_mean(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_mean), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaD_take_lower(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_lower), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaD_take_upper(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { hipLaunchKernelGGL(( _take_upper), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in); } void cudaD_copy_from_sp(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_out) { hipLaunchKernelGGL(( _copy_from_sp), dim3(Gr),dim3(Bl), 0, 0, x,y,d_out); } void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in); } void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d, int stride_grad) { hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d,stride_grad); } void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, MatrixDim d) { hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, d); } void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, double* mat_net_out, double* vec_log_post, MatrixDim d) { hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d); } void cudaD_diff_softmax(dim3 Gr, dim3 Bl, double* x, const MatrixDim dim, const double* value, const int value_stride, const double* diff, const int diff_stride) { hipLaunchKernelGGL(( _diff_softmax), dim3(Gr), dim3(Bl), 0, 0, x, dim, value, value_stride, diff, diff_stride); } void cudaD_diff_log_softmax(dim3 Gr, dim3 Bl, const MatrixDim in_deriv_dim, 
const double* out_value, const int out_value_stride, const double* out_deriv, const int out_deriv_stride, double* in_deriv) {
  hipLaunchKernelGGL(( _diff_log_softmax), dim3(Gr), dim3(Bl), 0, 0, in_deriv_dim, out_value, out_value_stride, out_deriv, out_deriv_stride, in_deriv);
}
void cudaD_copy_rows_from_vec(dim3 Gr, dim3 Bl, double *mat_out, MatrixDim d_out, const double *v_in) { hipLaunchKernelGGL(( _copy_rows_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat_out, d_out, v_in); }
void cudaD_sum_column_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indices) { hipLaunchKernelGGL(( _sum_column_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indices); }
void cudaD_add_row_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indexes) { hipLaunchKernelGGL(( _add_row_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indexes); }
void cudaD_matrix_lookup(dim3 Gr, dim3 Bl, const double *data, MatrixDim dim, const Int32Pair *indices, int indices_size, double *output) { hipLaunchKernelGGL(( _matrix_lookup), dim3(Gr),dim3(Bl), 0, 0, data, dim, indices, indices_size, output); }
void cudaD_equal_element_mask(dim3 Gr, dim3 Bl, const double *mat1, const double *mat2, double *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { hipLaunchKernelGGL(( _equal_element_mask), dim3(Gr),dim3(Bl), 0, 0, mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); }
// Some conversion kernels for which it's more convenient
// to not name them F or D.
void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); }
void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); }
void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); }
void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); }
void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans<32>) , dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); }
void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans<32>) , dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); }
void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans<32>) , dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); }
void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { hipLaunchKernelGGL(( _copy_from_mat_trans<32>) , dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in); }
void cuda_copy_from_smat_ff(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const float* smat_val) { hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr),
dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val); } void cuda_copy_from_smat_fd(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const double* smat_val) { hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val); } void cuda_copy_from_smat_df(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const float* smat_val) { hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val); } void cuda_copy_from_smat_dd(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const double* smat_val) { hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val); } void cuda_copy_from_smat_ff_trans(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const float* smat_val) { hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val); } void cuda_copy_from_smat_fd_trans(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const double* smat_val) { hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val); } void cuda_copy_from_smat_df_trans(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const float* smat_val) { hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val); } void cuda_copy_from_smat_dd_trans(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const double* smat_val) { hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val); } void cudaF_trace_mat_smat(dim3 Gr, dim3 Bl, const float* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const float* smat_val, float* trace_vec) { hipLaunchKernelGGL(( _trace_mat_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val, trace_vec); } void cudaF_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const float* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const float* smat_val, float* trace_vec) { hipLaunchKernelGGL(( _trace_mat_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val, trace_vec); } void cudaD_trace_mat_smat(dim3 Gr, dim3 Bl, const double* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const double* smat_val, double* trace_vec) { hipLaunchKernelGGL(( _trace_mat_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val, trace_vec); } void cudaD_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const double* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const double* smat_val, double* trace_vec) { hipLaunchKernelGGL(( _trace_mat_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val, trace_vec); } void cudaD_lstm_nonlinearity(dim3 Gr, dim3 Bl, const double* in, const int in_stride, const double* params, const int params_stride, const int out_stride, const int cell_dim, const int have_dropout_mask, const int num_rows, double* out) { hipLaunchKernelGGL(( 
_lstm_nonlinearity), dim3(Gr), dim3(Bl), 0, 0, in, in_stride, params, params_stride, out_stride, cell_dim, have_dropout_mask, num_rows, out); } void cudaF_lstm_nonlinearity(dim3 Gr, dim3 Bl, const float* in, const int in_stride, const float* params, const int params_stride, const int out_stride, const int cell_dim, const int have_dropout_mask, const int num_rows, float* out) { hipLaunchKernelGGL(( _lstm_nonlinearity), dim3(Gr), dim3(Bl), 0, 0, in, in_stride, params, params_stride, out_stride, cell_dim, have_dropout_mask, num_rows, out); } void cudaD_diff_lstm_nonlinearity(dim3 Gr, dim3 Bl, const int cell_dim, const int have_dropout_mask, const int num_rows, const double* input, const int input_stride, const double* params, const int params_stride, const double* output_deriv, const int output_deriv_stride, const double* deriv_sum_in, const int deriv_sum_in_stride, const double* self_repair_config, double count, double* input_deriv, const int input_deriv_stride, double* params_deriv, const int params_deriv_stride, double* value_sum_out, const int value_sum_out_stride, double* deriv_sum_out, const int deriv_sum_out_stride, double* self_repair_sum_out, const int self_repair_sum_out_stride) { hipLaunchKernelGGL(( _diff_lstm_nonlinearity), dim3(Gr), dim3(Bl), 0, 0, cell_dim, have_dropout_mask, num_rows, input, input_stride, params, params_stride, output_deriv, output_deriv_stride, deriv_sum_in, deriv_sum_in_stride, self_repair_config, count, input_deriv, input_deriv_stride, params_deriv, params_deriv_stride, value_sum_out, value_sum_out_stride, deriv_sum_out, deriv_sum_out_stride, self_repair_sum_out, self_repair_sum_out_stride); } void cudaF_diff_lstm_nonlinearity(dim3 Gr, dim3 Bl, const int cell_dim, const int have_dropout_mask, const int num_rows, const float* input, const int input_stride, const float* params, const int params_stride, const float* output_deriv, const int output_deriv_stride, const double* deriv_sum_in, const int deriv_sum_in_stride, const float* self_repair_config, double count, float* input_deriv, const int input_deriv_stride, float* params_deriv, const int params_deriv_stride, double* value_sum_out, const int value_sum_out_stride, double* deriv_sum_out, const int deriv_sum_out_stride, float* self_repair_sum_out, const int self_repair_sum_out_stride) { hipLaunchKernelGGL(( _diff_lstm_nonlinearity), dim3(Gr), dim3(Bl), 0, 0, cell_dim, have_dropout_mask, num_rows, input, input_stride, params, params_stride, output_deriv, output_deriv_stride, deriv_sum_in, deriv_sum_in_stride, self_repair_config, count, input_deriv, input_deriv_stride, params_deriv, params_deriv_stride, value_sum_out, value_sum_out_stride, deriv_sum_out, deriv_sum_out_stride, self_repair_sum_out, self_repair_sum_out_stride); } void cudaD_copy_cols_from_vec(dim3 Gr, dim3 Bl, double *mat_out, MatrixDim d_out, const double *v_in) { hipLaunchKernelGGL(( _copy_cols_from_vec), dim3(Gr), dim3(Bl), 0, 0, mat_out, d_out, v_in); } void cudaF_copy_cols_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out, const float *v_in) { hipLaunchKernelGGL(( _copy_cols_from_vec), dim3(Gr), dim3(Bl), 0, 0, mat_out, d_out, v_in); } void cudaF_diff_normalize_per_row(size_t Gr, size_t Bl, float *id, int id_stride, const float *iv, MatrixDim iv_dim, const float* od, int od_stride, float target_rms, bool add_log_stddev) { hipLaunchKernelGGL(( _diff_normalize_per_row), dim3(Gr), dim3(Bl), 0, 0, id, id_stride, iv, iv_dim, od, od_stride, target_rms, add_log_stddev); } void cudaD_diff_normalize_per_row(size_t Gr, size_t Bl, double *id, 
int id_stride, const double *iv, MatrixDim iv_dim, const double* od, int od_stride, double target_rms, bool add_log_stddev) { hipLaunchKernelGGL(( _diff_normalize_per_row), dim3(Gr), dim3(Bl), 0, 0, id, id_stride, iv, iv_dim, od, od_stride, target_rms, add_log_stddev); } void cudaD_select_rows(dim3 Gr, dim3 Bl, const int* out_row_ptr, int* out_col_idx, double* out_val, const int* row_indexes, const int num_selected_rows, const int* in_row_ptr, const int* in_col_idx, const double* in_val) { hipLaunchKernelGGL(( _select_rows), dim3(Gr), dim3(Bl), 0, 0, out_row_ptr, out_col_idx, out_val, row_indexes, num_selected_rows, in_row_ptr, in_col_idx, in_val); } void cudaF_select_rows(dim3 Gr, dim3 Bl, const int* out_row_ptr, int* out_col_idx, float* out_val, const int* row_indexes, const int num_selected_rows, const int* in_row_ptr, const int* in_col_idx, const float* in_val) { hipLaunchKernelGGL(( _select_rows), dim3(Gr), dim3(Bl), 0, 0, out_row_ptr, out_col_idx, out_val, row_indexes, num_selected_rows, in_row_ptr, in_col_idx, in_val); } void cudaD_add_smat(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim, double alpha, const int* smat_row_ptr, const int* smat_col_idx, const double* smat_val) { hipLaunchKernelGGL(( _add_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, alpha, smat_row_ptr, smat_col_idx, smat_val); } void cudaF_add_smat(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim, float alpha, const int* smat_row_ptr, const int* smat_col_idx, const float* smat_val) { hipLaunchKernelGGL(( _add_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, alpha, smat_row_ptr, smat_col_idx, smat_val); } void cudaD_add_smat_trans(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim, double alpha, const int* smat_row_ptr, const int* smat_col_idx, const double* smat_val) { hipLaunchKernelGGL(( _add_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, alpha, smat_row_ptr, smat_col_idx, smat_val); } void cudaF_add_smat_trans(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim, float alpha, const int* smat_row_ptr, const int* smat_col_idx, const float* smat_val) { hipLaunchKernelGGL(( _add_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, alpha, smat_row_ptr, smat_col_idx, smat_val); } void cudaD_apply_exp_special(dim3 Gr, dim3 Bl, double* out, MatrixDim out_dim, const double* in, int in_stride) { hipLaunchKernelGGL(( _apply_exp_special), dim3(Gr), dim3(Bl), 0, 0, out, out_dim, in, in_stride); } void cudaF_apply_exp_special(dim3 Gr, dim3 Bl, float* out, MatrixDim out_dim, const float* in, int in_stride) { hipLaunchKernelGGL(( _apply_exp_special), dim3(Gr), dim3(Bl), 0, 0, out, out_dim, in, in_stride); } void cuda_compress_uint8_sign(dim3 Gr, dim3 Bl, const BaseFloat *src, MatrixDim dim, unsigned char *dest, int dest_stride) { hipLaunchKernelGGL(( _cuda_compress_uint8_sign), dim3(Gr), dim3(Bl), 0, 0, src, dim, dest, dest_stride); } void cuda_compress_int16(dim3 Gr, dim3 Bl, const BaseFloat *src, MatrixDim dim, int16_t *dest, int dest_stride, float inv_scale, bool bounds_check) { if (bounds_check) { hipLaunchKernelGGL(( _cuda_compress_bounds_check), dim3(Gr), dim3(Bl), 0, 0, src, dim, dest, dest_stride, inv_scale); } else { hipLaunchKernelGGL(( _cuda_compress_no_bounds_check), dim3(Gr), dim3(Bl), 0, 0, src, dim, dest, dest_stride, inv_scale); } } void cuda_compress_uint16(dim3 Gr, dim3 Bl, const BaseFloat *src, MatrixDim dim, uint16_t *dest, int dest_stride, float inv_scale, bool bounds_check) { if (bounds_check) { hipLaunchKernelGGL(( _cuda_compress_bounds_check), dim3(Gr), dim3(Bl), 0, 0, src, dim, dest, dest_stride, 
inv_scale); } else { hipLaunchKernelGGL(( _cuda_compress_no_bounds_check), dim3(Gr), dim3(Bl), 0, 0, src, dim, dest, dest_stride, inv_scale); } } void cuda_compress_int8(dim3 Gr, dim3 Bl, const BaseFloat *src, MatrixDim dim, int8_t *dest, int dest_stride, float inv_scale, bool bounds_check) { if (bounds_check) { hipLaunchKernelGGL(( _cuda_compress_bounds_check), dim3(Gr), dim3(Bl), 0, 0, src, dim, dest, dest_stride, inv_scale); } else { hipLaunchKernelGGL(( _cuda_compress_no_bounds_check), dim3(Gr), dim3(Bl), 0, 0, src, dim, dest, dest_stride, inv_scale); } } void cuda_compress_uint8(dim3 Gr, dim3 Bl, const BaseFloat *src, MatrixDim dim, uint8_t *dest, int dest_stride, float inv_scale, bool bounds_check) { if (bounds_check) { hipLaunchKernelGGL(( _cuda_compress_bounds_check), dim3(Gr), dim3(Bl), 0, 0, src, dim, dest, dest_stride, inv_scale); } else { hipLaunchKernelGGL(( _cuda_compress_no_bounds_check), dim3(Gr), dim3(Bl), 0, 0, src, dim, dest, dest_stride, inv_scale); } } void cuda_uncompress_uint8(dim3 Gr, dim3 Bl, BaseFloat *dest, MatrixDim dim, const uint8_t *src, int src_stride, float scale) { hipLaunchKernelGGL(( _cuda_uncompress), dim3(Gr), dim3(Bl), 0, 0, dest, dim, src, src_stride, scale); } void cuda_uncompress_int8(dim3 Gr, dim3 Bl, BaseFloat *dest, MatrixDim dim, const int8_t *src, int src_stride, float scale) { hipLaunchKernelGGL(( _cuda_uncompress), dim3(Gr), dim3(Bl), 0, 0, dest, dim, src, src_stride, scale); } void cuda_uncompress_uint16(dim3 Gr, dim3 Bl, BaseFloat *dest, MatrixDim dim, const uint16_t *src, int src_stride, float scale) { hipLaunchKernelGGL(( _cuda_uncompress), dim3(Gr), dim3(Bl), 0, 0, dest, dim, src, src_stride, scale); } void cuda_uncompress_int16(dim3 Gr, dim3 Bl, BaseFloat *dest, MatrixDim dim, const int16_t *src, int src_stride, float scale) { hipLaunchKernelGGL(( _cuda_uncompress), dim3(Gr), dim3(Bl), 0, 0, dest, dim, src, src_stride, scale); }
ead0023d625cb2dca44f491778e46f59f95243cd.cu
// cudamatrix/cu-kernels.cu // Copyright 2009-2012 Karel Vesely // 2013 Ehsan Variani // 2013 Johns Hopkins University (author: Daniel Povey) // 2013 Hainan Xu // 2013 Xiaohui Zhang // 2013-2015 Guoguo Chen // 2016-2017 Shiyin Kang // 2017 Hossein Hadian, Daniel Galvez // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY // KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED // WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE, // MERCHANTABLITY OR NON-INFRINGEMENT. // See the Apache 2 License for the specific language governing permissions and // limitations under the License. // In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers #include <cfloat> #include <limits> #include <math_constants.h> #include "cudamatrix/cu-kernels-ansi.h" /*********************************************************************** * Generic __device__ functions */ template<typename Real> __device__ static Real _sum_reduce(Real buffer[]) { // Total number of active threads int32_cuda nTotalThreads = blockDim.x; __syncthreads(); // perform tree-based reduction (sum) while (nTotalThreads > 1) { int32_cuda halfPoint = ((1 + nTotalThreads) >> 1); // divide by two // only the first half of the threads will be active. if (threadIdx.x >= halfPoint) { // was < // Get the shared value stored by another thread Real temp = 0.0; if (threadIdx.x < nTotalThreads) { // was +halfPoint temp = buffer[threadIdx.x]; // was +halfPoint } buffer[threadIdx.x - halfPoint] += temp; } __syncthreads(); nTotalThreads = ((1 + nTotalThreads) >> 1); // divide by two. } // the result return buffer[0]; } /*********************************************************************** * CUDA kernels * the functions are templated to have the float/double operations */ /* * CuMatrix */ template<typename Real> __global__ static void _copy_low_upp(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i <= j || i >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } template<typename Real> __global__ static void _copy_upp_low(Real* A, MatrixDim dimA) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j <= i || j >= dimA.rows) return; int index_1 = i * dimA.stride + j; int index_2 = j * dimA.stride + i; A[index_2] = A[index_1]; } // mat += diag(vec) * mat2. 
template<typename Real> __global__ static void _add_diag_vec_mat(Real alpha, Real *mat, MatrixDim mat_dim, const Real *vec, const Real *mat2, int mat2_row_stride, int mat2_col_stride, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // column index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = j * mat_dim.stride + i, index2 = j * mat2_row_stride + i * mat2_col_stride; if (i < mat_dim.cols && j < mat_dim.rows) { mat[index] = alpha * vec[j] * mat2[index2] + beta * mat[index]; } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp(Real* A, const OtherReal* B, MatrixDim dmat) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dmat.cols && j < dmat.rows) { int32_cuda index_B = (j * (j + 1) / 2) + i; int32_cuda index_A = j * dmat.stride + i; if (i <= j) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } template<typename Real, typename OtherReal> __global__ static void _copy_from_tp_trans(Real* A, const OtherReal* B, MatrixDim dmat) { // we interpret these indexes oppositely from normal, but it doesn't // matter as it's invoked in a symmetric way. int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // transpose the indices used to index the source TpMatrix. if (i < dmat.rows && j < dmat.cols) { int32_cuda index_B = (j * (j + 1) / 2) + i; int32_cuda index_A = i * dmat.stride + j; if (i <= j) { A[index_A] = B[index_B]; } else { A[index_A] = 0.0; } } } template<typename Real, typename OtherReal> __global__ static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row-index. int32_cuda index_out = i + j * d_out.stride; int32_cuda index_in = i + j * d_in.stride; if (i < d_out.cols && j < d_out.rows) mat_out[index_out] = static_cast<Real>(mat_in[index_in]); } template<int TileDim, typename Real, typename OtherReal> __global__ static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in, MatrixDim d_out, MatrixDim d_in) { // Use shared meme to achieve both coalesced memory reading and writing // '+1' to avoid bank conflict when reading sbuf __shared__ Real sbuf[TileDim][TileDim + 1]; const int32_cuda i_in = blockIdx.y * TileDim + threadIdx.y; // row-index const int32_cuda j_in = blockIdx.x * TileDim + threadIdx.x; // col-index const int32_cuda tile_stride_in = CU1DBLOCK / TileDim * d_in.stride; int32_cuda index_in = i_in * d_in.stride + j_in; # pragma unroll for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) { if (i_in + i < d_in.rows && j_in < d_in.cols) { sbuf[threadIdx.y + i][threadIdx.x] = static_cast<Real>(mat_in[index_in]); } index_in += tile_stride_in; } __syncthreads(); // Grid is transposed, but block is not yet. // Warp (blockDim.x) is always along the row-dim. 
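// Worked example of the index mapping (assuming the usual instantiation TileDim = 32
// with CU1DBLOCK = 256, i.e. a 32x8 thread block): for blockIdx = (x=1, y=2),
// threadIdx = (x=3, y=5) and loop step i = 0, the read above stored mat_in[69][35]
// into sbuf[5][3]; the write below puts sbuf[3][5] = mat_in[67][37] into
// mat_out[37][67], so overall mat_out[r][c] = mat_in[c][r].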
const int32_cuda i_out = blockIdx.x * TileDim + threadIdx.y; const int32_cuda j_out = blockIdx.y * TileDim + threadIdx.x; const int32_cuda tile_stride_out = CU1DBLOCK / TileDim * d_out.stride; int32_cuda index_out = i_out * d_out.stride + j_out; # pragma unroll for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) { if (i_out + i < d_out.rows && j_out < d_out.cols) { // block is tranposed when reading sbuf mat_out[index_out] = sbuf[threadIdx.x][threadIdx.y + i]; } index_out += tile_stride_out; } } // Copy from CSR sparse matrix to dense matrix // // We use warpSize threads per row to access only the nnz elements. // Every CU1DBLOCK/warpSize rows share one thread block. // 1D grid to cover all rows. template<typename Real, typename OtherReal> __global__ static void _copy_from_smat(Real* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const OtherReal* smat_val) { const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx if (i < mat_dim.rows) { const int nz_start = smat_row_ptr[i]; const int nz_end = smat_row_ptr[i + 1]; for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id += warpSize) { const int j = smat_col_idx[nz_id]; // col idx mat[i * mat_dim.stride + j] = static_cast<Real>(smat_val[nz_id]); } } } /// Select a subset of the rows of a CSR SparseMatrix. /// Sets 'out' to only the rows of 'in' that are listed /// in 'row_indexes'. 'row_indexes' must be sorted and unique, /// and satisfy 0 <= row_indexes[i] < in.size(). /// /// Note: 'out_row_ptr' is an input parameter that is calculated before /// calling this kernel function /// /// We use warpSize threads per row to access only the nnz elements. /// Every CU1DBLOCK/warpSize rows share one thread block. /// 1D grid to cover all selected rows. template<typename Real> __global__ static void _select_rows(const int* out_row_ptr, int* out_col_idx, Real* out_val, const int* row_indexes, const int num_selected_rows, const int* in_row_ptr, const int* in_col_idx, const Real* in_val) { const int out_i = blockIdx.x * blockDim.y + threadIdx.y; // out row idx if (out_i < num_selected_rows) { const int in_i = row_indexes[out_i]; const int in_row_start = in_row_ptr[in_i]; const int out_row_start = out_row_ptr[out_i]; const int row_length = in_row_ptr[in_i + 1] - in_row_start; for (int k = threadIdx.x; k < row_length; k += warpSize) { const int in_n = in_row_start + k; const int out_n = out_row_start + k; out_col_idx[out_n] = in_col_idx[in_n]; out_val[out_n] = in_val[in_n]; } } } // mat += alpha * smat // // We use warpSize threads per row to access only the nonzero elements. // Every CU1DBLOCK/warpSize rows share one thread block. // 1D grid to cover all rows of smat. template<typename Real> __global__ static void _add_smat(Real* mat, MatrixDim mat_dim, Real alpha, const int* smat_row_ptr, const int* smat_col_idx, const Real* smat_val) { const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx if (i < mat_dim.rows) { const int row_start = smat_row_ptr[i]; const int row_end = smat_row_ptr[i + 1]; for (int n = row_start + threadIdx.x; n < row_end; n += warpSize) { const int j = smat_col_idx[n]; // col idx of smat mat[i * mat_dim.stride + j] += alpha * smat_val[n]; } } } // mat += alpha * smat^T // // We use warpSize threads per row to access only the nonzero elements. // Every CU1DBLOCK/warpSize rows share one thread block. // 1D grid to cover all rows of smat. 
template<typename Real> __global__ static void _add_smat_trans(Real* mat, MatrixDim mat_dim, Real alpha, const int* smat_row_ptr, const int* smat_col_idx, const Real* smat_val) { const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx if (i < mat_dim.cols) { const int row_start = smat_row_ptr[i]; const int row_end = smat_row_ptr[i + 1]; for (int n = row_start + threadIdx.x; n < row_end; n += warpSize) { const int j = smat_col_idx[n]; // col idx of smat mat[j * mat_dim.stride + i] += alpha * smat_val[n]; } } } /// For each element x of the matrix, set it to /// (x < 0 ? exp(x) : x + 1). /// Use block/grid sizes for simple matrix ops template<typename T> __global__ static void _apply_exp_special(T* out, MatrixDim out_dim, const T* in, int in_stride) { const int i = blockIdx.x * blockDim.x + threadIdx.x; const int j = blockIdx.y * blockDim.y + threadIdx.y; if (i < out_dim.rows && j < out_dim.cols) { T x = in[i * in_stride + j]; if (x < T(0)) { out[i * out_dim.stride + j] = exp(x); } else { out[i * out_dim.stride + j] = x + T(1); } } } /// Fill the array 'data' with the sequence [base ... base + length) /// Use 1D block and 1D grid template<typename T> __global__ static void _sequence(T* data, int length, T base) { const int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < length) { data[i] = base + T(i); } } // Copy from CSR sparse matrix to transposed dense matrix // // We use warpSize threads per row to access only the nnz elements. // Every CU1DBLOCK/warpSize rows share one thread block. // 1D grid to cover all rows. template<typename Real, typename OtherReal> __global__ static void _copy_from_smat_trans(Real* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const OtherReal* smat_val) { const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx of smat if (i < mat_dim.cols) { const int nz_start = smat_row_ptr[i]; const int nz_end = smat_row_ptr[i + 1]; for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id += warpSize) { const int j = smat_col_idx[nz_id]; // col idx of smat mat[j * mat_dim.stride + i] = static_cast<Real>(smat_val[nz_id]); } } } // First stage of trace(mat * smat^T) // We use warpSize threads per row to access only the nnz elements. // Every CU1DBLOCK/warpSize rows share one thread block. // 1D grid to cover all rows of smat. template<typename Real> __global__ static void _trace_mat_smat_trans(const Real* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const Real* smat_val, Real* trace_vec) { const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx of smat if (i < mat_dim.rows) { const int nz_start = smat_row_ptr[i]; const int nz_end = smat_row_ptr[i + 1]; for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id += warpSize) { const int j = smat_col_idx[nz_id]; // col idx of smat trace_vec[nz_id] = mat[i * mat_dim.stride + j] * smat_val[nz_id]; } } } // First stage of trace(mat * smat) // We use warpSize threads per row to access only the nnz elements. // Every CU1DBLOCK/warpSize rows share one thread block. // 1D grid to cover all rows of smat. 
template<typename Real> __global__ static void _trace_mat_smat(const Real* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const Real* smat_val, Real* trace_vec) { const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx of smat if (i < mat_dim.cols) { const int nz_start = smat_row_ptr[i]; const int nz_end = smat_row_ptr[i + 1]; for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id += warpSize) { const int j = smat_col_idx[nz_id]; // col idx of smat trace_vec[nz_id] = mat[j * mat_dim.stride + i] * smat_val[nz_id]; } } } template<typename Real> __global__ static void _apply_exp(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) { mat[index] = exp(mat[index]); } } template<typename Real> __global__ static void _scale_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i + 1) * (i + 2) / 2) - 1; if (i < dim) { mat[index] = value * mat[index]; } } template<typename Real> __global__ static void _set_diag(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = i + i * d.stride; if (i < d.rows && i < d.cols) { mat[index] = value; } } template<typename Real> __global__ static void _set_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i + 1) * (i + 2) / 2) - 1; if (i < dim) { mat[index] = value; } } template<typename Real> __global__ static void _add_diag_packed(Real* mat, Real value, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i + 1) * (i + 2) / 2) - 1; if (i < dim) { mat[index] = mat[index] + value; } } template<typename Real> __global__ static void _set_const(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // column int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = value; } template<typename Real> __global__ static void _set_zero_above_diag(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < i) mat[index] = 0.0; } template<typename Real> __global__ static void _add(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] + value; } template<typename Real> __global__ static void _scale(Real* mat, Real value, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = mat[index] * value; } template<typename Real> __global__ static void _apply_log(Real* mat, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = log(mat[index]); } template<typename Real> __global__ static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + 
threadIdx.y; int32_cuda dst_index = i + j * dst_d.stride, src_index = i + j * src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = mat[dst_index] * A[src_index]; } template<typename Real> __global__ static void _div_elements(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j * dst_d.stride, src_index = i + j * src_stride; if (i < dst_d.cols && j < dst_d.rows) mat[dst_index] = mat[dst_index] / A[src_index]; } template<typename Real> __global__ static void _max(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda dst_index = i + j * dst_d.stride, src_index = i + j * src_stride; if (i < dst_d.cols && j < dst_d.rows) { Real a = mat[dst_index], b = A[src_index]; mat[dst_index] = fmax(a, b); } } template<typename Real> __global__ static void _min(Real* mat, const Real* other, MatrixDim mat_d, int other_stride) { int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda mat_index = i * mat_d.stride + j; int32_cuda other_index = i * other_stride + j; if (j < mat_d.cols && i < mat_d.rows) { Real a = mat[mat_index], b = other[other_index]; mat[mat_index] = fmin(a, b); } } template<typename Real> __global__ static void _vec_mul_elements(Real* v, const Real* a, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) v[i] = v[i] * a[i]; } template<typename Real> __global__ static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[i]; } template<typename Real> __global__ static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] *= scale[j]; } template<typename Real> __global__ static void _mul_rows_group_mat(Real *y, const Real *x, MatrixDim d, int src_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols) { int dst_index = i + j * d.stride; int src_index = i / group_size + j * src_stride; y[dst_index] *= x[src_index]; } } template<typename Real> __global__ void _diff_group_pnorm(Real *id, const Real *iv, const Real *ov, const Real* od, MatrixDim id_dim, int iv_stride, int ov_stride, int od_stride, int group_size, Real power) { const int j = blockIdx.x * blockDim.x + threadIdx.x; if (j < id_dim.cols) { const int grid_stride = gridDim.y * blockDim.y; const int src_j = j / group_size; int i = blockIdx.y * blockDim.y + threadIdx.y; for (; i < id_dim.rows; i += grid_stride) { const int iv_index = j + i * iv_stride; Real iv_ij = iv[iv_index]; Real ans; if (power == Real(2)) { const int ov_index = src_j + i * ov_stride; Real ov_ij = ov[ov_index]; ans = ov_ij <= 0.0 ? 0.0 : iv_ij / ov_ij; } else if (power == Real(1)) { Real iv_ij_sign = (iv_ij >= 0 ? 1 : -1); ans = (iv_ij == Real(0) ? 0.0 : iv_ij_sign); } else if (power == (sizeof(Real) == sizeof(float) ? 
CUDART_INF_F : CUDART_INF)) { const int ov_index = src_j + i * ov_stride; Real ov_ij = ov[ov_index]; Real iv_ij_sign = (iv_ij >= 0 ? 1 : -1); ans = ov_ij <= 0.0 ? 0.0 : (iv_ij_sign * (abs(iv_ij) == ov_ij ? 1.0 : 0.0)); } else { const int ov_index = src_j + i * ov_stride; Real ov_ij = ov[ov_index]; Real iv_ij_sign = (iv_ij >= 0 ? 1 : -1); if (ov_ij <= 0.0) { ans = 0.0; // The derivative is either 0 or undefined at the origin. } else { ans = iv_ij_sign * pow(std::abs(iv_ij), power - 1) * pow(ov_ij, 1 - power); } } const int od_index = src_j + i * od_stride; const int id_index = j + i * id_dim.stride; id[id_index] = ans * od[od_index]; } } } /// deriv is the derivative we will output; vec is the input we're computing /// the group max on, "maxv" is the previously computed group max. template<typename Real> __global__ static void _calc_group_max_deriv(Real *deriv, const Real *vec, const Real *maxv, MatrixDim deriv_dim, int vec_stride, int maxv_stride, int group_size) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < deriv_dim.rows && i < deriv_dim.cols) { int deriv_index = i + j * deriv_dim.stride; int vec_index = i + j * vec_stride; int maxv_index = i / group_size + j * maxv_stride; Real vec_element = vec[vec_index], // The element of the original vector. max_element = maxv[maxv_index]; // this is the max value Real ans = (max_element == vec_element ? 1.0 : 0.0); deriv[deriv_index] = ans; } } /// Set each element to y = (x == orig ? changed : x). template<typename Real> __global__ static void _replace_value(Real *vec, int dim, Real orig, Real changed) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) if (vec[i] == orig) vec[i] = changed; } template<typename Real> __global__ static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) { const int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y; if (i < d.rows) { const int32_cuda start = i * d.stride; const Real scale = Real(1) / vec_div[i]; const int32_cuda grid_stride = blockDim.x * gridDim.x; for (int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x; j < d.cols; j += grid_stride) { mat[start + j] *= scale; } } } template<typename Real> __global__ static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // column index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row index int32_cuda index = i + j * d.stride; int32_cuda index_src = i + j * src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha * src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_trans(Real alpha, const Real* src, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; int32_cuda index_src = j + i * src_stride; if (i < d.cols && j < d.rows) dst[index] = alpha * src[index_src] + dst[index]; } template<typename Real> __global__ static void _add_mat_blocks(Real alpha, const Real* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; int32_cuda index_src = i + j * src_stride; if (i < d.cols && j < d.rows) for (int32_cuda p = 0; p < num_row_blocks; p++) { for (int32_cuda q = 0; q < num_col_blocks; q++) { dst[index] = alpha * src[index_src + p * 
src_stride * d.rows + q * d.cols] + dst[index]; } } } template<typename Real> __global__ static void _add_mat_repeated(Real alpha, const Real* src, MatrixDim src_dim, Real* dst, MatrixDim dst_dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda src_i = i % src_dim.cols, src_j = j % src_dim.rows, dst_index = i + j * dst_dim.stride, src_index = src_i + src_j * src_dim.stride; if (i < dst_dim.cols && j < dst_dim.rows) dst[dst_index] += alpha * src[src_index]; } template<typename Real> __global__ static void _add_mat_blocks_trans(Real alpha, const Real* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, Real* dst, MatrixDim d, int src_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; int32_cuda index_src = j + i * src_stride; if (i < d.cols && j < d.rows) for (int32_cuda p = 0; p < num_row_blocks; p++) { for (int32_cuda q = 0; q < num_col_blocks; q++) { dst[index] = alpha * src[index_src + p * src_stride * d.cols + q * d.rows] + dst[index]; } } } template<typename Real> __global__ static void _set_mat_mat_div_mat(const Real* A, const Real* B, const Real* C, Real* dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride, a_index = i + j * stride_a, b_index = i + j * stride_b, c_index = i + j * stride_c; if (i < d.cols && j < d.rows) if (C[c_index] == 0) dst[index] = A[a_index]; else dst[index] = A[a_index] * B[b_index] / C[c_index]; } // Given a matrix input S (not packed!) and a lower-triangular matrix L, this // function does S = beta S + alpha * L^T L. This is used in PSD matrix // inversion. The i index is the row of the destination S and the j the column // (although of course the output is symmetric so it doesn't matter in a sense). // The main point of this is to make use of various symmetries and zero-ness. template<typename Real> __global__ static void _sy_add_tr2(Real alpha, Real beta, const Real *T, MatrixDim tdim, Real *S, MatrixDim sdim) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (i >= sdim.rows || j > i) return; // this thread computes the dot-product of the i'th column of // L with the j'th column of L. The values we're multiplying // are only nonzero for row-index k greater or equal to // max(i, j), which equals i. 
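// In matrix terms, the sum accumulated below is (T^T T)(i, j) = sum_k T(k, i) * T(k, j);
// the loop starts at k = i because the lower-triangular T is zero above the diagonal,
// and both S(i, j) and S(j, i) are then updated to alpha * sum + beta * S(., .).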
Real sum = 0.0; for (int k = i; k < sdim.rows; k++) { int i_index = i + tdim.stride * k, j_index = j + tdim.stride * k; sum += T[i_index] * T[j_index]; } int output_index1 = i * sdim.stride + j, output_index2 = j * sdim.stride + i; S[output_index1] = alpha * sum + beta * S[output_index1]; S[output_index2] = alpha * sum + beta * S[output_index2]; } template<typename Real> __global__ static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha * col[j] + beta * dst[index]; } template<typename Real> __global__ static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride; if (i < d.cols && j < d.rows) dst[index] = alpha * row[i] + beta * dst[index]; } template<typename Real> __global__ static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * dmat.stride; int32_cuda index2 = i + j * dmask.stride; if (i < dmat.cols && j < dmat.rows) if (mask[index2] == 0) mat[index] = 0; } template<typename Real> __global__ static void _add_mat_diag_vec(Real alpha, Real *mat, MatrixDim mat_dim, const Real *mat2, int mat2_row_stride, int mat2_col_stride, const Real *vec, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // column index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * mat_dim.stride, index2 = i * mat2_col_stride + j * mat2_row_stride; if (j < mat_dim.rows && i < mat_dim.cols) mat[index] = alpha * mat2[index2] * vec[i] + beta * mat[index]; } template<typename Real> __global__ static void _add_mat_mat_elements(Real *data, const Real *srcA_data, const Real *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, Real alpha, Real beta) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda tgt_index = i + j * dim.stride; int32_cuda srcA_index = i + j * srcA_stride; int32_cuda srcB_index = i + j * srcB_stride; if (i < dim.cols && j < dim.rows) { data[tgt_index] = alpha * srcA_data[srcA_index] * srcB_data[srcB_index] + beta * data[tgt_index]; } } /* * CuVector */ // very limited application! template<typename Real> __global__ static void _set_bias_params(Real* v, const Real* a, Real param_1, Real param_2, Real param_3, int* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) { Real ratio = a[i] / param_3; if ((ratio < 0.0) || (ratio >= 1.01)) { *flag = 1; return; } if (ratio < param_1) { Real factor = ((param_1 / ratio) > param_2) ? param_2 : (param_1 / ratio); v[i] = v[i] / factor; } else if (ratio > param_1) { Real factor = ((ratio / param_1) > param_2) ? param_2 : (ratio / param_1); v[i] = v[i] * factor; } } } template<typename Real, typename OtherReal> __global__ static void _cublas_copy_kaldi(int n, const Real* x, int incx, OtherReal* y, int incy) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if (i < n) { y[i * incy] = static_cast<OtherReal>(x[i * incx]); } } // This kernel writes a copy of the vector "v_in" to each row of the matrix // "m_out". 
the dimension of v_in should be equal to the #columns of m_out. template<typename Real> __global__ static void _copy_rows_from_vec(Real* m_out, MatrixDim d, const Real* v_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // column index. int j = blockIdx.y * blockDim.y + threadIdx.y; // row index. if (i < d.cols && j < d.rows) { int index = i + j * d.stride; m_out[index] = v_in[i]; } } // This kernel writes a copy of the vector "v_in" to each col of the matrix // "m_out". the dimension of v_in should be equal to the #row of m_out. template<typename Real> __global__ static void _copy_cols_from_vec(Real* m_out, MatrixDim d, const Real* v_in) { int i = blockIdx.y * blockDim.y + threadIdx.y; // row id int j = blockIdx.x * blockDim.x + threadIdx.x; // col id if (i < d.rows && j < d.cols) { m_out[i * d.stride + j] = v_in[i]; } } // _trace_mat_mat reduce the partial sum to // value[blockIdx.y * gridDim.x + blockIdx.x] // It use shared mem to transpose matrix B to ensure coalesced memory access template<int TileDim, typename Real> __global__ static void _trace_mat_mat(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { // Reuse shared mem and make indexing easier. "+1" to avoid bank conflict __shared__ union { Real trans[TileDim][TileDim + 1]; Real sum[CU1DBLOCK]; } smem; // linear thread id; const int32_cuda tid = threadIdx.y * blockDim.x + threadIdx.x; const int32_cuda grid_height = gridDim.y * TileDim; const int32_cuda ja = blockIdx.x * TileDim + threadIdx.x; const int32_cuda ib = blockIdx.x * TileDim + threadIdx.y; int32_cuda ia = blockIdx.y * TileDim + threadIdx.y; int32_cuda jb = blockIdx.y * TileDim + threadIdx.x; // Grid reduce Real tsum = Real(0); for (int32_cuda i0 = 0; i0 < dA.rows; i0 += grid_height) { // Load from B, transpose the block and store in shared mem if (jb < dA.rows) { # pragma unroll for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) { if (ib + i < dA.cols) { smem.trans[threadIdx.x][threadIdx.y + i] = B[(ib + i) * B_stride + jb]; } } } __syncthreads(); // Load from A, sum up the product. if (ja < dA.cols) { # pragma unroll for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) { if (ia + i < dA.rows) { tsum += A[(ia + i) * dA.stride + ja] * smem.trans[threadIdx.y + i][threadIdx.x]; } } } __syncthreads(); ia += grid_height; jb += grid_height; } smem.sum[tid] = tsum; __syncthreads(); // Block reduce # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) smem.sum[tid] += smem.sum[tid + shift]; __syncthreads(); } // Warp reduce. Implicitly synchronized within a warp. 
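// At this point the block reduce has folded the partial sums into
// smem.sum[0 .. 2 * warpSize); the first warp finishes the reduction below, and
// thread 0 writes one partial trace per thread block to
// value[blockIdx.y * gridDim.x + blockIdx.x], which the caller is presumably left
// to sum over blocks.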
if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift > 0; shift >>= 1) { smem.sum[tid] += smem.sum[tid + shift]; } } // output 1 sum per thread block if (tid == 0) { value[blockIdx.y * gridDim.x + blockIdx.x] = smem.sum[0]; } } // _trace_mat_mat_trans reduce the partial sum to // value[blockIdx.y * gridDim.x + blockIdx.x] template<typename Real> __global__ static void _trace_mat_mat_trans(const Real* A, const Real* B, MatrixDim dA, int B_stride, Real* value) { __shared__ Real ssum[CU1DBLOCK]; // linear thread id; const int32_cuda tid = threadIdx.y * blockDim.x + threadIdx.x; const int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x; const int32_cuda grid_height = gridDim.y * blockDim.y; int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y; // Grid reduce Real tsum = Real(0); if (j < dA.cols) { while (i < dA.rows) { tsum += A[i * dA.stride + j] * B[i * B_stride + j]; i += grid_height; } } ssum[tid] = tsum; __syncthreads(); // Block reduce # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) ssum[tid] += ssum[tid + shift]; __syncthreads(); } // Warp reduce. Implicitly synchronized within a warp. if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift > 0; shift >>= 1) { ssum[tid] += ssum[tid + shift]; } } // output 1 sum per thread block if (tid == 0) { value[blockIdx.y * gridDim.x + blockIdx.x] = ssum[0]; } } // v = alpha * diag(M * N^T) + beta * v template<typename Real> __global__ static void _add_diag_mat_mat_MNT(const Real alpha, const Real* M, const MatrixDim dim_M, const Real* N, const int stride_N, const Real beta, Real* v) { __shared__ Real ssum[CU1DBLOCK]; const int tid = threadIdx.x; const int i = blockIdx.x; const int m_start = i * dim_M.stride; const int n_start = i * stride_N; // Loop along the matrix row. Reduce to CU1DBLOCK elements per row. Real tsum = Real(0); for (int j = tid; j < dim_M.cols; j += CU1DBLOCK) { tsum += M[m_start + j] * N[n_start + j]; } ssum[tid] = tsum; __syncthreads(); // Tree reduce to 2x warpSize elements. # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) ssum[tid] += ssum[tid + shift]; __syncthreads(); } // Warp reduce to 1 element. Threads implicitly synchronized within a warp. if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift > 0; shift >>= 1) { ssum[tid] += ssum[tid + shift]; } } // output 1 sum per thread block if (tid == 0) { v[i] = alpha * ssum[0] + beta * v[i]; } } // v = alpha * diag(M^T * N) + beta * v template<int TileDim, typename Real> __global__ static void _add_diag_mat_mat_MTN(const Real alpha, const Real* M, const int stride_M, const Real* N, const MatrixDim dim_N, const Real beta, Real* v) { __shared__ Real ssum[CU1DBLOCK]; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int j = blockIdx.x * blockDim.x + threadIdx.x; if (j >= dim_N.cols) return; // Loop along the matrix column. // Reduce to CU1DBLOCK / TileDim elements per column. Real tsum = Real(0); for (int i = threadIdx.y; i < dim_N.rows; i += blockDim.y) { tsum += M[i * stride_M + j] * N[i * dim_N.stride + j]; } ssum[tid] = tsum; __syncthreads(); // Tree reduce to 2x warpSize / TileDim elements per column. # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize && shift >= TileDim; shift >>= 1) { if (tid < shift) { ssum[tid] += ssum[tid + shift]; } __syncthreads(); } // Warp reduce to 1 element per column. // Threads implicitly synchronized within a warp. 
if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift >= TileDim; shift >>= 1) { ssum[tid] += ssum[tid + shift]; } } // output TileDim sums per thread block if (tid < TileDim) { v[j] = alpha * ssum[tid] + beta * v[j]; } } // v = alpha * diag(M * N) + beta * v template<int TileDim, typename Real> __global__ static void _add_diag_mat_mat_MN(const Real alpha, const Real* M, const int stride_M, const Real* N, const MatrixDim dim_N, const Real beta, Real* v) { // Reuse shared mem and make indexing easier. "+1" to avoid bank conflict __shared__ union { Real trans[TileDim][TileDim + 1]; Real sum[CU1DBLOCK]; } smem; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int i_m = blockIdx.x * TileDim + threadIdx.y; const int j_n = blockIdx.x * TileDim + threadIdx.x; int i_n = threadIdx.y; int j_m = threadIdx.x; // Loop along the matrix column. // Reduce to CU1DBLOCK / TileDim elements per column. Real tsum = Real(0); for (int block_i_n = 0; block_i_n < dim_N.rows; block_i_n += TileDim) { // Load, transpose and store M to shared mem. if (j_m < dim_N.rows) { # pragma unroll for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) { if (i_m + i < dim_N.cols) { smem.trans[threadIdx.x][threadIdx.y + i] = M[(i_m + i) * stride_M + j_m]; } } } __syncthreads(); // Load N, sum up the product. if (j_n < dim_N.cols) { # pragma unroll for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) { if (i_n + i < dim_N.rows) { tsum += N[(i_n + i) * dim_N.stride + j_n] * smem.trans[threadIdx.y + i][threadIdx.x]; } } } __syncthreads(); i_n += TileDim; j_m += TileDim; } smem.sum[tid] = tsum; __syncthreads(); // Tree reduce to 2x warpSize / TileDim elements per column. # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize && shift >= TileDim; shift >>= 1) { if (tid < shift) { smem.sum[tid] += smem.sum[tid + shift]; } __syncthreads(); } // Warp reduce to 1 element per column. // Threads implicitly synchronized within a warp. 
if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift >= TileDim; shift >>= 1) { smem.sum[tid] += smem.sum[tid + shift]; } } // output TileDim sums per thread block if (tid < TileDim && j_n < dim_N.cols) { v[j_n] = alpha * smem.sum[tid] + beta * v[j_n]; } } template<typename Real> __global__ static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y, Real beta, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) v[i] = alpha * x[i] * y[i] + beta * v[i]; } template<typename Real> __global__ static void _copy_col_from_mat_df(double* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (double) mat[index]; } template<typename Real> __global__ static void _copy_col_from_mat_fd(float* v, int col, const Real* mat, MatrixDim dmat, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = col + i * dmat.stride; // if (blockIdx.y > 0) return; if (i < dim) v[i] = (float) mat[index]; } template<typename Real> __global__ static void _vec_apply_exp(Real* v, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { v[i] = exp(v[i]); } } template<typename Real> __global__ static void _vec_apply_log(Real* v, Real* flag, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // if (blockIdx.y > 0) return; if (i < dim) { if (v[i] < 0) { *flag = 1; return; } v[i] = log(v[i]); } } template<typename Real> __global__ static void _cuda_comp_obj_deriv(MatrixElement<Real> *x, int s, const Real* z, MatrixDim d, Real* z2, MatrixDim d2, Real* t) { int i = threadIdx.x; __shared__ Real tot_objf[CU1DBLOCK]; __shared__ Real tot_weight[CU1DBLOCK]; Real tmp_weight_sum = 0; Real tmp_tot_objf = 0; int size = s / CU1DBLOCK; //the least size in a loop (later part) int threshold = s - size * CU1DBLOCK; //any loop below this number would + 1 int loop_start; int loop_end; if (i < threshold) { loop_start = i * (size + 1); loop_end = (i + 1) * (size + 1); } else { loop_start = threshold + i * size; loop_end = threshold + (i + 1) * size; } for (int j = loop_start; j < loop_end; j++) { //* ((int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )) ); int m = (x + j)->row; //*(int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )+ sizeof(int)); int label = (x + j)->column; // *(Real*) ((size_t)x + j*(2*sizeof(int) + sizeof(Real)) + 2*sizeof(int)); Real weight = (x + j)->weight; tmp_weight_sum += weight; Real this_prob = *(z + m * d.stride + label); tmp_tot_objf += weight * log(this_prob); // there might be problems here.... 
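// The accumulation below is the derivative of the weighted log-prob objective with
// respect to the posterior z(m, label): d/dz [weight * log(z)] = weight / z, added
// into the derivative matrix z2 at the same (row, column) position.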
*(z2 + m * d2.stride + label) += weight / this_prob; } tot_objf[i] = tmp_tot_objf; tot_weight[i] = tmp_weight_sum; __syncthreads(); *t = _sum_reduce(tot_objf); __syncthreads(); *(t + 1) = _sum_reduce(tot_weight); return; } template<typename Real> __global__ static void _cuda_vector_copy_elements(Real *data, int dim, const Real *src_mat, int mat_stride, bool transpose, const MatrixIndexT_cuda* elements) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= dim) return; int j = elements[i]; int mat_index; if (transpose) mat_index = i + j * mat_stride; else mat_index = j + i * mat_stride; data[i] = src_mat[mat_index]; } template<typename Real> __global__ static void _cuda_matrix_add_elements(Real *data, MatrixDim dim, Real alpha, MatrixElement<Real>* x, int num_elements) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= num_elements) return; data[x[i].row * dim.stride + x[i].column] += alpha * x[i].weight; } template<typename Real> __global__ static void _cuda_matrix_add_indexed_values(MatrixDim dim, Real alpha, const Int32Pair* indices, const Real* x, int s, Real* data) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i >= s) return; int data_i = indices[i].first * dim.stride + indices[i].second; data[data_i] += alpha * x[i]; } template<typename Real> __global__ static void _cuda_matrix_add_to_elements(Real alpha, Real* mat, MatrixDim dim, const MatrixIndexT_cuda* elements) { int row = blockIdx.x * blockDim.x + threadIdx.x; if (row < dim.rows) { int col = elements[row]; if (col >= 0) { int index = col + row * dim.stride; mat[index] += alpha; } } } template<typename Real> __global__ static void _matrix_lookup(const Real *data, MatrixDim dim, const Int32Pair *indices, int indices_size, Real *output) { int ind = blockIdx.x * blockDim.x + threadIdx.x; if (ind >= indices_size) return; int data_ind = indices[ind].first * dim.stride + indices[ind].second; output[ind] = data[data_ind]; } template<typename Real> __global__ static void _equal_element_mask(const Real *mat1, const Real *mat2, Real *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row int32_cuda index_mat1 = i + j * mat1_dim.stride; int32_cuda index_mat2 = i + j * mat2_stride; int32_cuda index_mask = i + j * mask_stride; if (i < mat1_dim.cols && j < mat1_dim.rows) mask[index_mask] = (mat1[index_mat1] == mat2[index_mat2] ? 
1.0 : 0.0); } enum EnumTransformReduce { SUMAB, SUM, MAX, MIN, LINFNORM, L2NORM, L1NORM, L0NORM, LPNORM }; template<EnumTransformReduce TransReduceType, typename Real> struct TransReduceOp { __forceinline__ __device__ Real InitValue() const { return Real(0); } __forceinline__ __device__ Real Transform(const Real& x) const { return Real(0); } __forceinline__ __device__ Real Reduce(const Real& a, const Real& b) const { return Real(0); } __forceinline__ __device__ Real PostReduce(const Real& x, const Real& output) const { return Real(0); } }; template<typename Real> struct TransReduceOp<SUMAB, Real> { const Real alpha_; const Real beta_; TransReduceOp(const Real& a, const Real& b) : alpha_(a), beta_(b) { } __forceinline__ __device__ Real InitValue() const { return Real(0); } __forceinline__ __device__ Real Transform(const Real& x) const { return x; } __forceinline__ __device__ Real Reduce(const Real& a, const Real& b) const { return a + b; } __forceinline__ __device__ Real PostReduce(const Real& x, const Real& output) const { if (beta_ == Real(0)) { return alpha_ * x; } else { return alpha_ * x + beta_ * output; } } }; template<typename Real> struct TransReduceOp<SUM, Real> { __forceinline__ __device__ Real InitValue() const { return Real(0); } __forceinline__ __device__ Real Transform(const Real& x) const { return x; } __forceinline__ __device__ Real Reduce(const Real& a, const Real& b) const { return a + b; } __forceinline__ __device__ Real PostReduce(const Real& x, const Real& output) const { return x; } }; template<typename Real> struct TransReduceOp<MAX, Real> { __forceinline__ __device__ Real InitValue() const { return sizeof(Real) == sizeof(float) ? -CUDART_INF_F : -CUDART_INF; } __forceinline__ __device__ Real Transform(const Real& x) const { return x; } __forceinline__ __device__ Real Reduce(const Real& a, const Real& b) const { return fmax(a, b); } __forceinline__ __device__ Real PostReduce(const Real& x, const Real& output) const { return x; } }; template<typename Real> struct TransReduceOp<MIN, Real> { __forceinline__ __device__ Real InitValue() const { return sizeof(Real) == sizeof(float) ? 
CUDART_INF_F : CUDART_INF; } __forceinline__ __device__ Real Transform(const Real& x) const { return x; } __forceinline__ __device__ Real Reduce(const Real& a, const Real& b) const { return min(a, b); } __forceinline__ __device__ Real PostReduce(const Real& x, const Real& output) const { return x; } }; template<typename Real> struct TransReduceOp<LINFNORM, Real> { __forceinline__ __device__ Real InitValue() const { return Real(0); } __forceinline__ __device__ Real Transform(const Real& x) const { return abs(x); } __forceinline__ __device__ Real Reduce(const Real& a, const Real& b) const { return fmax(a, b); } __forceinline__ __device__ Real PostReduce(const Real& x, const Real& output) const { return x; } }; template<typename Real> struct TransReduceOp<L2NORM, Real> { __forceinline__ __device__ Real InitValue() const { return Real(0); } __forceinline__ __device__ Real Transform(const Real& x) const { return x * x; } __forceinline__ __device__ Real Reduce(const Real& a, const Real& b) const { return a + b; } __forceinline__ __device__ Real PostReduce(const Real& x, const Real& output) const { return sqrt(x); } }; template<typename Real> struct TransReduceOp<L1NORM, Real> { __forceinline__ __device__ Real InitValue() const { return Real(0); } __forceinline__ __device__ Real Transform(const Real& x) const { return abs(x); } __forceinline__ __device__ Real Reduce(const Real& a, const Real& b) const { return a + b; } __forceinline__ __device__ Real PostReduce(const Real& x, const Real& output) const { return x; } }; template<typename Real> struct TransReduceOp<L0NORM, Real> { __forceinline__ __device__ Real InitValue() const { return Real(0); } __forceinline__ __device__ Real Transform(const Real& x) const { return Real(x == Real(0) ? 0 : 1); } __forceinline__ __device__ Real Reduce(const Real& a, const Real& b) const { return a + b; } __forceinline__ __device__ Real PostReduce(const Real& x, const Real& output) const { return x; } }; template<typename Real> struct TransReduceOp<LPNORM, Real> { const Real power_; TransReduceOp(const Real& p) : power_(p) { } __forceinline__ __device__ Real InitValue() const { return Real(0); } __forceinline__ __device__ Real Transform(const Real& x) const { return pow(abs(x), power_); } __forceinline__ __device__ Real Reduce(const Real& a, const Real& b) const { return a + b; } __forceinline__ __device__ Real PostReduce(const Real& x, const Real& output) const { return pow(x, Real(1) / power_); } }; // Vector reduce. template<EnumTransformReduce TransReduceType, typename Real> __global__ static void _vec_transform_reduce( const Real* v, Real* result, const int dim, const int inc, const TransReduceOp<TransReduceType, Real> op) { __shared__ Real sdata[CU1DBLOCK]; Real tdata = op.InitValue(); const int tid = threadIdx.x; const int vec_len = dim * inc; const int grid_stride = gridDim.x * blockDim.x * inc; int i = (blockIdx.x * blockDim.x + tid) * inc; // Grid reduce. Loop over the whole vector v. for (; i < vec_len; i += grid_stride) { tdata = op.Reduce(tdata, op.Transform(v[i])); } sdata[tid] = tdata; __syncthreads(); // Tree reduce # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) { sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]); } __syncthreads(); } // Reduce last warp. Threads implicitly synchronized within a warp. if (tid < warpSize) { for (int shift = warpSize; shift > 0; shift >>= 1) { sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]); } } // Output to vector result. 
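// One reduced value per thread block: op.PostReduce() combines the block's result
// with the previous contents of result[blockIdx.x] (this is how SUMAB applies its
// alpha/beta), so with gridDim.x > 1 the caller still holds one partial per block.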
if (tid == 0) result[blockIdx.x] = op.PostReduce(sdata[0], result[blockIdx.x]); } // Reduce a matrix 'mat' to a column vector 'result' template<EnumTransformReduce TransReduceType, typename Real> __global__ static void _transform_reduce_mat_cols( Real *result, const Real *mat, const MatrixDim d, const TransReduceOp<TransReduceType, Real> op) { __shared__ Real sdata[CU1DBLOCK]; const int tid = threadIdx.x; const int i = blockIdx.x; const int row_start = i * d.stride; Real tdata = op.InitValue(); for (int j = tid; j < d.cols; j += CU1DBLOCK) { tdata = op.Reduce(tdata, op.Transform(mat[row_start + j])); } sdata[tid] = tdata; __syncthreads(); // Tree reduce # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]); __syncthreads(); } // Reduce last warp. Threads implicitly synchronized within a warp. if (tid < warpSize) { for (int shift = warpSize; shift > 0; shift >>= 1) sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]); } // Output to vector result. if (tid == 0) { result[i] = op.PostReduce(sdata[0], result[i]); } } template<EnumTransformReduce TransReduceType, typename Real> __global__ static void _group_transform_reduce( Real *y, const Real *x, const MatrixDim d, const int src_stride, const int group_size, const TransReduceOp<TransReduceType, Real> op) { __shared__ Real sreduction[CU1DBLOCK]; const int i = blockIdx.x; const int x_start = i * src_stride; const int y_start = i * d.stride; const int threads_per_group = blockDim.x; // Reduce n groups per thread block const int n = blockDim.y; const int len = group_size * n; // linear thread id const int tid = threadIdx.y * threads_per_group + threadIdx.x; int j = threadIdx.y * group_size + threadIdx.x; // col-id of *x int group_id = threadIdx.y; // col-id of *y int group_end = x_start + (group_id + 1) * group_size; while (group_id < d.cols) { // reduce to threads_per_group elements per group int x_idx = x_start + j; Real treduction = op.Transform(x[x_idx]); x_idx += threads_per_group; while (x_idx < group_end) { treduction = op.Reduce(treduction, op.Transform(x[x_idx])); x_idx += threads_per_group; } sreduction[tid] = treduction; if (threads_per_group > warpSize) { __syncthreads(); } // tree-reduce to 2x warpSize elements per group # pragma unroll for (int shift = threads_per_group / 2; shift > warpSize; shift >>= 1) { if (threadIdx.x < shift) { sreduction[tid] = op.Reduce(sreduction[tid], sreduction[tid + shift]); } __syncthreads(); } // Warp-reduce to 1 element per group. // Threads implicitly synchronized within the warp. const int warp_reduce_size = threads_per_group / 2 < warpSize ? threads_per_group / 2 : warpSize; if (threadIdx.x < warp_reduce_size) { # pragma unroll for (int shift = warp_reduce_size; shift > 0; shift >>= 1) { sreduction[tid] = op.Reduce(sreduction[tid], sreduction[tid + shift]); } } // Store the result. 
if (threadIdx.x == 0) { y[y_start + group_id] = op.PostReduce(sreduction[tid], y[y_start + group_id]); } j += len; group_end += len; group_id += n; } } template<typename Real> __global__ static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) { if (v[i] < floor_val) { v[i] = floor_val; count[i] = 1; } else { count[i] = 0; } } } template<typename Real> __global__ static void _vec_apply_ceiling(Real *v, Real ceiling_val, float *count, int dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) { if (v[i] > ceiling_val) { v[i] = ceiling_val; count[i] = 1; } else { count[i] = 0; } } } template<typename Real> __global__ static void _apply_pow(Real* mat, Real power, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * d.stride; if (i < d.cols && j < d.rows) { if (power == 1.0) return; if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { if (!(mat[index] >= 0.0)) return; mat[index] = sqrt(mat[index]); } else { mat[index] = pow(mat[index], power); } } } template<typename Real> __global__ static void _apply_pow_abs(Real* mat, Real power, bool include_sign, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * d.stride; if (i < d.cols && j < d.rows) { if (include_sign == true && mat[index] < 0) { if (power == 1.0) mat[index] = -std::abs(mat[index]); if (power == 2.0) { mat[index] = -mat[index] * mat[index]; } else if (power == 0.5) { mat[index] = -sqrt(std::abs(mat[index])); } else { mat[index] = -pow(std::abs(mat[index]), power); } } else { if (power == 1.0) mat[index] = std::abs(mat[index]); if (power == 2.0) { mat[index] = mat[index] * mat[index]; } else if (power == 0.5) { mat[index] = sqrt(std::abs(mat[index])); } else if (power < 0.0 && mat[index] == 0.0) { mat[index] = 0.0; } else { mat[index] = pow(std::abs(mat[index]), power); } } } } template<typename Real> __global__ static void _apply_heaviside(Real* mat, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * d.stride; if (i < d.cols && j < d.rows) mat[index] = (mat[index] > 0.0 ? 
1.0 : 0.0); } template<typename Real> __global__ static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index int index = i + j * d.stride; if (i < d.cols && j < d.rows) { mat[index] = max(mat[index], floor_val); } } template<typename Real> __global__ static void _copy_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int index = reorder[i], dst_index = j * dst_dim.stride + i; if (index >= 0) { int src_index = j * src_stride + reorder[i]; Real val = src[src_index]; dst[dst_index] = val; } else { dst[dst_index] = 0.0; } } } template<typename Real> __global__ static void _add_cols(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int index = reorder[i], dst_index = j * dst_dim.stride + i; if (index >= 0) { int src_index = j * src_stride + index; Real val = src[src_index]; dst[dst_index] += val; } } } template<typename Real> __global__ static void _copy_rows(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int index = reorder[j], dst_index = j * dst_dim.stride + i; if (index >= 0) { int src_index = reorder[j] * src_stride + i; Real val = src[src_index]; dst[dst_index] = val; } else { dst[dst_index] = 0; } } } template<typename Real> __global__ static void _copy_rows(Real* dst, const Real * const *src, MatrixDim dst_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int dst_index = j * dst_dim.stride + i; const Real *pointer = src[j]; if (pointer != NULL) { dst[dst_index] = pointer[i]; } else { dst[dst_index] = 0; } } } template<typename Real> __global__ static void _copy_to_rows(Real* const * dst, const Real *src, MatrixDim src_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < src_dim.cols && j < src_dim.rows) { Real *pointer = dst[j]; if (pointer != NULL) { pointer[i] = src[j * src_dim.stride + i]; } } } template<typename Real> __global__ static void _add_rows(Real alpha, Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int dst_index = j * dst_dim.stride + i; if (reorder[j] >= 0) { int src_index = reorder[j] * src_stride + i; dst[dst_index] += alpha * src[src_index]; } } } template<typename Real> __global__ static void _mul_rows(Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int dst_index = j * dst_dim.stride + 
i; if (reorder[j] >= 0) { int src_index = reorder[j] * src_stride + i; dst[dst_index] *= src[src_index]; } } } template<typename Real> __global__ static void _add_rows(Real alpha, Real* dst, const Real * const *src, MatrixDim dst_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < dst_dim.cols && j < dst_dim.rows) { int dst_index = j * dst_dim.stride + i; if (src[j] != NULL) { dst[dst_index] += alpha * src[j][i]; } } } template<typename Real> __global__ static void _add_to_rows(Real alpha, Real* dst, const Real *src, const MatrixIndexT_cuda* reorder, MatrixDim src_dim, int dst_stride) { int c = blockIdx.x * blockDim.x + threadIdx.x; // col index int r = blockIdx.y * blockDim.y + threadIdx.y; // row index if (c < src_dim.cols && r < src_dim.rows) { int src_index = r * src_dim.stride + c; if (reorder[r] >= 0) { int dst_index = reorder[r] * dst_stride + c; dst[dst_index] += alpha * src[src_index]; } } } template<typename Real> __global__ static void _add_to_rows(Real alpha, Real* const * dst, const Real *src, MatrixDim src_dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // col index int j = blockIdx.y * blockDim.y + threadIdx.y; // row index if (i < src_dim.cols && j < src_dim.rows) { if (dst[j] != NULL) { dst[j][i] += alpha * src[j * src_dim.stride + i]; } } } template<typename Real> __global__ static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows) { mat[index] = min(mat[index], ceiling_val); } } template<typename Real> __global__ static void _invert_elements(Real* data, MatrixDim d) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int index = i + j * d.stride; if (i < d.cols && j < d.rows) data[index] = 1.0 / data[index]; } // matrix-wise, do data = alpha * data + beta * A * B^T, // where B is a block matrix. template<typename Real> __global__ static void _add_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &cu_data = B_cu_data[j]; // BT means B transposed. int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset, BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols = cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride; // Cast from void; const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < BT_num_cols; k++) { const Real *this_BT_col = B_data + k * BT_col_stride; const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride; // this_A_row points to the element A[i][BT_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B. 
sum += this_BT_col[l] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + BT_col_start); data[index] = alpha * sum + beta * data[index]; } } template<typename Real> __global__ static void _add_mat_blockmat(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &block_data = B_cu_data[j]; int B_row_start = block_data.row_offset, B_col_start = block_data.col_offset, B_num_rows = block_data.matrix_dim.rows, B_num_cols = block_data.matrix_dim.cols, B_row_stride = block_data.matrix_dim.stride; // Cast from void; const Real *B_data = static_cast<Real*>(block_data.matrix_data); // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < B_num_cols; k++) { const Real *this_B_col = B_data + k; const Real *this_A_row = A_data + i * A_row_stride + B_row_start * A_col_stride; // this_A_row points to the element A[i][B_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < B_num_rows; l++) // l indexes rows of B. sum += this_B_col[l * B_row_stride] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + B_col_start); data[index] = alpha * sum + beta * data[index]; } } // For a block matrix B, does B = alpha * C * D + beta * B. // the (x,y,z) indices are the block index, then the row // and column indices within the block. Note: transposition of C and D // is handled by swapping the (num_rows,num_cols) and (row_stride,col_stride), // so it's invisible to this code. The num-cols and num-rows of C and D // are only provided to the extent that they are not already determined // by other quantities. template<typename Real> __global__ static void _block_add_mat_mat(CuBlockMatrixData *B_cu_data, int num_blocks, const Real *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const Real *D_data, int D_row_stride, int D_col_stride, Real alpha, Real beta) { int b = blockIdx.x * blockDim.x + threadIdx.x; // block-index into B. int i = blockIdx.y * blockDim.y + threadIdx.y; // row-index into b'th block int j = blockIdx.z * blockDim.z + threadIdx.z; // col-index into b'th block if (b >= num_blocks) return; const CuBlockMatrixData &block_data = B_cu_data[b]; if (i >= block_data.matrix_dim.rows || j >= block_data.matrix_dim.cols) return; // we're outside the dimensions of the b'th block. // B_elem is the element of B we're writing to. Real *B_elem = reinterpret_cast<Real*>(block_data.matrix_data) + i * block_data.matrix_dim.stride + j; Real B_val = *B_elem; // B_row and B_col are the (row, col) index into the full matrix B. 
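// To spell out what the loop below computes for this thread (a restatement of
// the surrounding code, nothing new): using those full-matrix indices,
//   B(B_row, B_col) := alpha * sum_k C(B_row, k) * D(k, B_col) + beta * B(B_row, B_col),
// i.e. one element of alpha * C * D + beta * B, evaluated only inside the
// footprint of block b. Transposition of C or D never appears explicitly here;
// as the header comment says, it is expressed by the caller swapping the
// corresponding (row_stride, col_stride) pair, so the indexing below is the
// same either way.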
int B_row = block_data.row_offset + i, B_col = block_data.col_offset + j; const Real *C_row_data = C_data + C_row_stride * B_row, *D_col_data = D_data + D_col_stride * B_col; Real sum = 0.0; for (int k = 0; k < C_num_cols; k++) { sum += C_row_data[k * C_col_stride] * D_col_data[k * D_row_stride]; } *B_elem = alpha * sum + beta * B_val; } template<typename Real> __global__ static void _blockadd_mat_blockmat_trans(Real *data, MatrixDim dim, const Real *A_data, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, Real alpha, Real beta) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data" int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B. if (i >= A_num_rows || j >= B_num_blocks) return; const CuBlockMatrixData &cu_data = B_cu_data[j]; // BT means B transposed. int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset, BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols = cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride; // Cast from void; const Real *B_data = static_cast<Real*>(cu_data.matrix_data); // we avoided a bunch of hassle by doing this (relates to Ansi-C requirement). for (int k = 0; k < BT_num_cols; k++) { const Real *this_BT_col = B_data + k * BT_col_stride; const Real *this_A_row = A_data + i * A_row_stride + BT_row_start * A_col_stride; // this_A_row points to the element A[i][BT_row_start], it's really just // part of this row of A. Real sum = 0.0; for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B. sum += this_BT_col[l] * this_A_row[l * A_col_stride]; int index = i * dim.stride + (k + BT_col_start); data[index] = alpha * sum + beta * data[index]; } } template<typename Real> __global__ static void _sum_column_ranges(Real *data, MatrixDim dim, const Real *src_data, MatrixDim src_dim, const Int32Pair *indices) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if (row >= dim.rows || col >= dim.cols) return; int dst_index = row * dim.stride + col, src_start_index = row * src_dim.stride + indices[col].first, src_end_index = row * src_dim.stride + indices[col].second; Real sum = 0.0; for (int index = src_start_index; index < src_end_index; index++) sum += src_data[index]; data[dst_index] = sum; } template<typename Real> __global__ static void _add_row_ranges(Real *data, MatrixDim dim, const Real *src_data, MatrixDim src_dim, const Int32Pair *indexes) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; if (row >= dim.rows || col >= dim.cols) return; int dst_index = row * dim.stride + col; int src_index_start = indexes[row].first, src_index_end = indexes[row].second; for (int row_index = src_index_start; row_index < src_index_end; row_index++) data[dst_index] += src_data[row_index * src_dim.stride + col]; } template<typename Real> __global__ static void _soft_hinge(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j * d.stride, src_index = i + j * src_stride; // compute the function y[index] = log(1 + exp(x[index])) if (i < d.cols && j < d.rows) { Real val = x[src_index], result; if (val >= 10.0) result = val; // function approaches y=x as x gets large else result = log1p(exp(val)); y[dst_index] = result; } } template<typename Real> __global__ static void _group_pnorm(Real *y, const Real *x, MatrixDim d, int src_stride, int 
group_size, Real power) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; if (j < d.rows && i < d.cols) { int dst_index = i + j * d.stride; Real tmp = 0; int src_begin_index = i * group_size + j * src_stride; int src_end_index = src_begin_index + group_size; for (int src_index = src_begin_index; src_index < src_end_index; src_index++) { tmp += pow(std::abs(x[src_index]), power); } tmp = pow(tmp, Real(1.0 / power)); if (!isnan(tmp)) { y[dst_index] = tmp; } else { Real max_value = x[src_begin_index], min_value = max_value; for (int src_index = src_begin_index + 1; src_index < src_end_index; src_index++) { if (x[src_index] > max_value) max_value = x[src_index]; if (x[src_index] < min_value) min_value = x[src_index]; } tmp = 0.0; // let max_value be the largest abs(value) Real max_abs_value = (max_value > -min_value ? max_value : -min_value); if (max_abs_value == 0) { y[dst_index] = 0.0; } else { for (int src_index = src_begin_index; src_index < src_end_index; src_index++) { Real x_scaled = x[src_index] / max_abs_value; tmp += pow(std::abs(x_scaled), Real(power)); } y[dst_index] = pow(tmp, Real(1.0 / power)) * max_abs_value; } } } } /* * cu:: */ template<typename Real> __global__ static void _sigmoid(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j * d.stride, src_index = i + j * src_stride; if (i < d.cols && j < d.rows) { Real res = 1.0 / (1.0 + exp(-x[src_index])); y[dst_index] = res; } } template<typename Real> __global__ static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j * d.stride; int e_index = i + j * e_stride; int y_index = i + j * y_stride; if (i < d.cols && j < d.rows) eout[dst_index] = y[y_index] * (1.0 - y[y_index]) * e[e_index]; } template<typename Real> __global__ static void _tanh(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j * d.stride, src_index = i + j * src_stride; if (i < d.cols && j < d.rows) { Real exp_2x = exp(2.0 * x[src_index]); Real res; if (isinf(exp_2x)) { res = 1.0; } else { res = (exp_2x - 1.0) / (exp_2x + 1.0); } y[dst_index] = res; } } template<typename Real> __global__ static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d, int e_stride, int y_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j * d.stride; int e_index = i + j * e_stride; int y_index = i + j * y_stride; if (i < d.cols && j < d.rows) eout[dst_index] = (1.0 - y[y_index] * y[y_index]) * e[e_index]; } /* This function copies x to y while bounding the elements away from zero using the scalar function: y = x if x <= -epsilon or x >= +epsilon +epsilon if 0 <= x < epsilon -epsilon if -epsilon < x < 0. where: x is the source matrix, of dimension and stride given by d epsilon > 0 y is the destination matrix, with the num-rows and num-cols given by d, but stride given by y_stride. 
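   A worked example (added here for illustration; the numbers are not from the
   original comment): with epsilon = 1e-3,
       x = -0.5   ->  y = -0.5     (|x| >= epsilon, copied unchanged)
       x =  2e-4  ->  y =  1e-3    (0 <= x < epsilon, pushed up to +epsilon)
       x = -2e-4  ->  y = -1e-3    (-epsilon < x < 0, pushed down to -epsilon)
       x =  0     ->  y =  1e-3    (zero falls in the non-negative case).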
*/ template<typename Real> __global__ static void _ensure_nonzero(const Real *x, MatrixDim d, Real epsilon, int y_stride, Real *y) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int x_index = i + j * d.stride, y_index = i + j * y_stride; if (i < d.cols && j < d.rows) { Real src = x[x_index], dst; if (src <= -epsilon || src >= epsilon) dst = src; else if (src >= 0) dst = epsilon; else dst = -epsilon; __syncthreads(); // This allows it to do consolidated write below, which // should improve speed. y[y_index] = dst; } } template<typename Real> __global__ static void _parametric_relu(Real* y, const Real* x, MatrixDim d, int src_stride, const Real* a, const Real* b) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j * d.stride, src_index = i + j * src_stride; if (i < d.cols && j < d.rows) { Real res = (x[src_index] > 0.0) ? a[i] * x[src_index] : b[i] * x[src_index]; y[dst_index] = res; } } template<typename Real> __global__ static void _diff_parametric_relu(Real* eout, const Real* e, const Real* y, MatrixDim d, int e_stride, int y_stride, const Real* a, const Real* b) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j * d.stride; int e_index = i + j * e_stride; int y_index = i + j * y_stride; if (i < d.cols && j < d.rows ) eout[dst_index] = (y[y_index] > 0.0 ? a[i] * e[e_index] : b[i] * e[e_index]); } template<typename Real> __global__ static void _heaviside(Real*y, const Real*x, MatrixDim d, int src_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dst_index = i + j * d.stride, src_index = i + j * src_stride; if (i < d.cols && j < d.rows) { Real res = (x[src_index] > 0.0 ? 1.0 : 0.0); y[dst_index] = res; } } template<typename Real> __global__ static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) { __shared__ Real smem[CU1DBLOCK]; const int i = blockIdx.x; const int x_start = i * src_stride; const int y_start = i * d.stride; const int tid = threadIdx.x; // find max element of the row // reduce to CU1DBLOCK elements per row. Real tmax = sizeof(Real) == sizeof(float) ? -CUDART_INF_F : -CUDART_INF; for (int j = tid; j < d.cols; j += CU1DBLOCK) { tmax = fmax(tmax, x[x_start + j]); } smem[tid] = tmax; __syncthreads(); // reduce to 2x warpSize elements per row # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) { smem[tid] = fmax(smem[tid], smem[tid + shift]); } __syncthreads(); } // reduce to 1 element per row if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift > 0; shift >>= 1) { smem[tid] = fmax(smem[tid], smem[tid + shift]); } } // broadcast max to all threads __syncthreads(); Real max = smem[0]; // sum_j(exp(x(i,j)-max)) // reduce to CU1DBLOCK elements per row. 
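  // Restating the formula this kernel evaluates (nothing beyond the code
  // around it): for row i, with m_i = max_k x(i,k) already held in 'max',
  //   y(i,j) = exp(x(i,j) - m_i) / sum_k exp(x(i,k) - m_i),
  // which is exactly softmax(x(i,:)); subtracting m_i only prevents exp()
  // from overflowing, since the factor exp(-m_i) cancels between the
  // numerator and the denominator.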
Real tsum = Real(0); for (int j = tid; j < d.cols; j += CU1DBLOCK) { tsum += exp(x[x_start + j] - max); } smem[tid] = tsum; __syncthreads(); // reduce to 2x warpSize elements per row # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) { smem[tid] += smem[tid + shift]; } __syncthreads(); } // reduce to 1 element per row if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift > 0; shift >>= 1) { smem[tid] += smem[tid + shift]; } } // broadcast sum to all threads __syncthreads(); Real inv_sum = Real(1) / smem[0]; // normalize the row for (int j = tid; j < d.cols; j += CU1DBLOCK) { y[y_start + j] = exp(x[x_start + j] - max) * inv_sum; } } // The output y_i = scale * x_i, // and we want to RMS value of the y_i to equal target_rms, // so y^t y = D * target_rms^2 (if y is one row of the input). // we need to have scale = 1.0 / sqrt(x^t x / (D * target_rms^2)). // there is also flooring involved, to avoid division-by-zero // problems. It's important for the backprop, that the floor's // square root is exactly representable as float. // If add_log_stddev is true, log(max(epsi, sqrt(x^t x / D))) // is an extra dimension of the output. // // 1D grid is used. Each 256-thread block works on 1 row of the data matrix. // The block is also of 1D. Strided memory access is used if the length of the // row is longer than 256. template<typename Real> __global__ static void _normalize_per_row(Real *y, int y_stride, const Real *x, MatrixDim x_d, Real target_rms, bool add_log_stddev) { const int i = blockIdx.x; const int tid = threadIdx.x; const Real* x_row = x + i * x_d.stride; __shared__ Real ssum[CU1DBLOCK]; // Reduce x_j^2 to CU1DBLOCK elements per row Real tsum = Real(0); for (int j = tid; j < x_d.cols; j += CU1DBLOCK) { tsum += x_row[j] * x_row[j]; } ssum[tid] = tsum; __syncthreads(); // Tree reduce to 2x warpSize elements per row # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) ssum[tid] += ssum[tid + shift]; __syncthreads(); } // Reduce last warp to 1 element per row. // Threads implicitly synchronized within a warp. if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift > 0; shift >>= 1) { ssum[tid] += ssum[tid + shift]; } } const Real kSquaredNormFloor = 1.3552527156068805425e-20; // 2^-66 if (tid == 0) { ssum[0] = sqrt( fmax(ssum[0] / (target_rms * target_rms * x_d.cols), kSquaredNormFloor)); } // Broadcast floored stddev to all threads. 
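  // A small worked example of the scaling being set up here (illustrative
  // numbers, not taken from the source): with target_rms = 1.0 and a row
  // x = [1, 2, 2] of dimension D = 3, x^T x = 9, so ssum[0] becomes
  // sqrt(9 / 3) = sqrt(3); below, every element is multiplied by
  // scale = 1 / sqrt(3), giving y = x / sqrt(3), whose root-mean-square value
  // is exactly 1.0 = target_rms. The kSquaredNormFloor term only matters for
  // rows that are (almost) entirely zero, where it prevents division by zero.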
__syncthreads(); const Real stddev_div_target_rms = ssum[0]; const Real scale = Real(1) / stddev_div_target_rms; // Store normalized input to output Real* y_row = y + i * y_stride; for (int j = tid; j < x_d.cols; j += CU1DBLOCK) { y_row[j] = x_row[j] * scale; } if (tid == 0 && add_log_stddev) { y_row[x_d.cols] = log(stddev_div_target_rms * target_rms); } } template<typename Real> __global__ static void _diff_normalize_per_row(Real *id, int id_stride, const Real *iv, MatrixDim iv_dim, const Real* od, int od_stride, Real target_rms, bool add_log_stddev) { const Real kSquaredNormFloor = 1.3552527156068805425e-20; // 2^-66 const Real kInvNormFloor = 8589934592.0; const int tid = threadIdx.x; const int i = blockIdx.x; const Real* iv_row = iv + i * iv_dim.stride; const Real* od_row = od + i * od_stride; // reduce to CU1DBLOCK elements per row Real dot_products = Real(0); Real in_norm = Real(0); for (int j = tid; j < iv_dim.cols; j += CU1DBLOCK) { const Real iv_ij = iv_row[j]; dot_products += iv_ij * od_row[j]; in_norm += iv_ij * iv_ij; } __shared__ Real sprod[CU1DBLOCK]; __shared__ Real snorm[CU1DBLOCK]; sprod[tid] = dot_products; snorm[tid] = in_norm; __syncthreads(); // reduce to 2x warpSize elements per row # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) { sprod[tid] += sprod[tid + shift]; snorm[tid] += snorm[tid + shift]; } __syncthreads(); } // reduce to 1 element per row if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift > 0; shift >>= 1) { sprod[tid] += sprod[tid + shift]; snorm[tid] += snorm[tid + shift]; } } // broadcast the sum results __syncthreads(); dot_products = sprod[0]; in_norm = snorm[0]; Real log_stddev_deriv; if (add_log_stddev) { log_stddev_deriv = Real(1) / max(in_norm, iv_dim.cols * kSquaredNormFloor) * od_row[iv_dim.cols]; } const Real inv_d_scaled = Real(1) / (iv_dim.cols * target_rms * target_rms); in_norm = Real(1) / sqrt(max(in_norm * inv_d_scaled, kSquaredNormFloor)); const Real f = in_norm == kInvNormFloor ? Real(0) : in_norm; dot_products *= f * f * f * inv_d_scaled; for (int j = tid; j < iv_dim.cols; j += CU1DBLOCK) { const Real iv_ij = iv_row[j]; Real id_ij = id[i * id_stride + j]; if (add_log_stddev) { id_ij += log_stddev_deriv * iv_ij; } if (id != od) { id_ij += in_norm * od_row[j]; } else { id_ij *= in_norm; } id_ij -= dot_products * iv_ij; id[i * id_stride + j] = id_ij; } } // Per-row log-softmax operation on 'x', with writing to 'y'. // note, x and y may point to the same memory. This is equivalent to setting // matrix y to matrix x and then, for each row of y, subtracting the offset that // will make exp(y.row[j]) sum to 1 for each row j. // // It expects to be called with CU1DBLOCK threads. // The number of blocks [i.e. the gridDim] equals to y_dim.rows, // so one block of threads processes each row. x and y are // expected to have the same dimension, but possibly different row strides. template<typename Real> __global__ static void _log_softmax_reduce(Real* y, const Real* x, MatrixDim y_dim, int x_stride) { __shared__ Real smem[CU1DBLOCK]; const int i = blockIdx.x; const int x_start = i * x_stride; const int y_start = i * y_dim.stride; const int tid = threadIdx.x; // find max element of the row // reduce to CU1DBLOCK elements per row. 
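  // Written out (a restatement of the reduction below, nothing new): for row
  // i, with m_i = max_k x(i,k),
  //   y(i,j) = x(i,j) - m_i - log( sum_k exp(x(i,k) - m_i) ),
  // which is algebraically identical to x(i,j) - log(sum_k exp(x(i,k))), i.e.
  // log-softmax; the max is subtracted first only so that exp() cannot
  // overflow.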
Real tmax = -1e20; for (int j = tid; j < y_dim.cols; j += CU1DBLOCK) { tmax = fmax(tmax, x[x_start + j]); } smem[tid] = tmax; __syncthreads(); // reduce to 2x warpSize elements per row # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) { smem[tid] = fmax(smem[tid], smem[tid + shift]); } __syncthreads(); } // reduce to 1 element per row if (tid < warpSize) { for (int shift = warpSize; shift > 0; shift >>= 1) { smem[tid] = fmax(smem[tid], smem[tid + shift]); } } // broadcast max to all threads __syncthreads(); Real max = smem[0]; // sum_j(exp(x(i,j)-max)) // reduce to CU1DBLOCK elements per row. Real tsum = Real(0); for (int j = tid; j < y_dim.cols; j += CU1DBLOCK) { tsum += exp(x[x_start + j] - max); } smem[tid] = tsum; __syncthreads(); // reduce to 2x warpSize elements per row # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) { smem[tid] += smem[tid + shift]; } __syncthreads(); } // reduce to 1 element per row if (tid < warpSize) { for (int shift = warpSize; shift > 0; shift >>= 1) { smem[tid] += smem[tid + shift]; } } // broadcast sum to all threads __syncthreads(); Real log_sum = log(smem[0]); // normalize the row for (int j = tid; j < y_dim.cols; j += CU1DBLOCK) { y[y_start + j] = x[x_start + j] - max - log_sum; } } template<typename Real> __global__ static void _splice(Real* y, const Real* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d_out.stride; if (i < d_out.cols && j < d_out.rows) { int32_cuda src_col = i % d_in.cols; int32_cuda src_row = j + off[i / d_in.cols]; if (src_row < 0) src_row = 0; if (src_row >= d_in.rows) src_row = d_in.rows - 1; y[index] = x[src_col + src_row * d_in.stride]; } } template<typename Real> __global__ static void _take_mean(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index1 = i + j * d_in.stride; int32_cuda index2 = j + i * d_in.stride; if (i <= j && j < d_in.rows) { int32_cuda index_sp = (j * (j + 1) / 2) + i; y[index_sp] = 0.5 * (x[index1] + x[index2]); } } template<typename Real> __global__ static void _take_lower(const Real* x, Real* y, MatrixDim d_in) { int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j > i || i >= d_in.rows) return; int index = i * d_in.stride + j; Real val = x[index]; int index_sp = (i * (i + 1) / 2) + j; y[index_sp] = val; } template<typename Real> __global__ static void _take_upper(const Real* x, Real* y, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index if (j < i || j >= d_in.rows) return; int32_cuda index = i * d_in.stride + j; int32_cuda index_sp = (j * (j + 1) / 2) + i; y[index_sp] = x[index]; } template<typename Real> __global__ static void _vec_copy_diag_from_packed(Real* y, const Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda index = ((i + 1) * (i + 2) / 2) - 1; if (i < dim) { y[i] = x[index]; } } template<typename Real> __global__ static void _copy_from_sp(const Real* x, Real* y, MatrixDim dim) { int i = blockIdx.x * blockDim.x + threadIdx.x; // column index int j = blockIdx.y * blockDim.y + threadIdx.y; // if (i < dim.cols && j < dim.rows) { int dst_index 
= i + j * dim.stride, src_index; if (j <= i) { // no transpose src_index = (i * (i + 1) / 2) + j; } else { // transpose. src_index = (j * (j + 1) / 2) + i; } y[dst_index] = x[src_index]; } } template<typename Real> __global__ static void _copy(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d_out.stride; if (i < d_out.cols && j < d_out.rows) { int32_cuda src_col = copy_from[i]; if (src_col >= 0 && src_col < d_in.cols) { y[index] = x[src_col + j * d_in.stride]; } else { y[index] = 1.0 / 0.0; } } } template<typename Real> __global__ static void _one(Real* x, int dim) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; if (i < dim) { x[i] = 1.0; } } template<typename Real> __global__ static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d_out.stride; if (i < d_out.cols && j < d_out.rows) { int32_cuda src_row = copy_from[j]; y[index] = x[i + src_row * d_in.stride]; } } template<typename Real> __global__ static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d, int stride_grad) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; int32_cuda index = i + j * d.stride, grad_index = i + j * stride_grad; if (i < d.cols && j < d.rows) { if (wei[index] == 0.0) return; //skip L1 if zero weight! Real l1_signed = l1; if (wei[index] < 0.0) //flip sign l1_signed = -l1; Real before = wei[index]; //simulate update Real after = wei[index] - lr * grad[grad_index] - l1_signed; if ((after > 0.0) ^ (before > 0.0)) { //sign changed? wei[index] = 0.0; grad[grad_index] = 0.0; } else { wei[index] -= l1_signed; } } } template<typename Real> __global__ static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id, MatrixDim d) { const int32_cuda i = blockIdx.x; const int32_cuda base = i * d.stride; const int32_cuda tid = threadIdx.x; __shared__ Real smax[CU1DBLOCK]; __shared__ int32_cuda sidx[CU1DBLOCK]; Real tmax = -1e20; int32_cuda tidx = -1; // Loop over blocks for coalesced memory access. 
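  // Concretely (using CU1DBLOCK == 256, the block size stated elsewhere in
  // this file): thread tid visits columns tid, tid + 256, tid + 512, ... of
  // its row, so on iteration k the 256 threads of the block read the 256
  // consecutive elements mat[base + 256*k] .. mat[base + 256*k + 255], which
  // is what makes these loads coalesced.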
for (int32_cuda j = tid; j < d.cols; j += CU1DBLOCK) { const Real val = mat[base + j]; if (val > tmax) { tmax = val; tidx = j; } } smax[tid] = tmax; sidx[tid] = tidx; // Parallel reduce #pragma unroll for (int32_cuda num_working_threads = CU1DBLOCK / 2; num_working_threads >= warpSize; num_working_threads >>= 1) { __syncthreads(); if (tid < num_working_threads) { if (smax[tid + num_working_threads] > smax[tid]) { smax[tid] = smax[tid + num_working_threads]; sidx[tid] = sidx[tid + num_working_threads]; } } } // Warp reduce without __syncthreads() // (note.: synchronizes implicitly within a warp at the multiprocessor) if (tid < warpSize / 2) { #pragma unroll for (int32_cuda num_working_threads = warpSize / 2; num_working_threads > 0; num_working_threads >>= 1) { if (smax[tid + num_working_threads] > smax[tid]) { smax[tid] = smax[tid + num_working_threads]; sidx[tid] = sidx[tid + num_working_threads]; } } } if (tid == 0) { if (vec_val) { vec_val[i] = smax[0]; } vec_id[i] = sidx[0]; } } template<typename Real> __global__ static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out, Real* vec_log_post, MatrixDim d) { int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; if (i > 0) return; if (j < d.rows) { int32_cuda index = vec_tgt[j] + j * d.stride; Real value = mat_net_out[index]; if (value < 1e-20) value = 1e-20; vec_log_post[j] = log(value); mat_net_out[index] -= 1.0; } } template<typename Real> __global__ static void _diff_softmax(Real* x, const MatrixDim dim, const Real* value, const int value_stride, const Real* diff, const int diff_stride) { __shared__ Real ssum[CU1DBLOCK]; const int tid = threadIdx.x; const int i = blockIdx.x; const int value_start = i * value_stride; const int diff_start = i * diff_stride; const int x_start = i * dim.stride; // Loop along the matrix row. Reduce to CU1DBLOCK elements per row. Real tsum = Real(0); for (int j = tid; j < dim.cols; j += CU1DBLOCK) { tsum += value[value_start + j] * diff[diff_start + j]; } ssum[tid] = tsum; __syncthreads(); // Tree reduce to 2x warpSize elements. # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) { ssum[tid] += ssum[tid + shift]; } __syncthreads(); } // Warp reduce to 1 element. Threads implicitly synchronized within a warp. if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift > 0; shift >>= 1) { ssum[tid] += ssum[tid + shift]; } } // Broadcast result to all threads __syncthreads(); const Real pe = ssum[0]; // Apply element-wise x = value * (diff - pe) for (int j = tid; j < dim.cols; j += CU1DBLOCK) { x[x_start + j] = value[value_start + j] * (diff[diff_start + j] - pe); } } // Differentiate backward through the log softmax function. // "out_value" is the log softmax output. Does, for each row i, // in_deriv(i) = out_deriv(i) - sum(out_deriv(i)) .* exp(out_value(i)) // ???(i) is row-vector. // CUDA thread layout: 1 thread block (CU1DBLOCK == 256 threads) per matrix-row. template<typename Real> __global__ static void _diff_log_softmax(const MatrixDim in_deriv_dim, const Real* out_value, const int out_value_stride, const Real* out_deriv, const int out_deriv_stride, Real* in_deriv) { __shared__ Real ssum[CU1DBLOCK]; const int tid = threadIdx.x; const int i = blockIdx.x; const int out_value_start = i * out_value_stride; const int out_deriv_start = i * out_deriv_stride; const int in_deriv_start = i * in_deriv_dim.stride; // Loop along the matrix row. Reduce to CU1DBLOCK elements per row. 
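  // Spelling out the formula from the header comment (the "???(i)" there
  // refers to these per-row quantities, which are all row vectors): for row i,
  //   in_deriv(i,j) = out_deriv(i,j) - exp(out_value(i,j)) * sum_k out_deriv(i,k),
  // and the reduction below computes sum_k out_deriv(i,k) once per row before
  // the final element-wise pass applies it.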
Real tsum = Real(0); for (int j = tid; j < in_deriv_dim.cols; j += CU1DBLOCK) { tsum += out_deriv[out_deriv_start + j]; } ssum[tid] = tsum; __syncthreads(); // Tree reduce to 2x warpSize elements. # pragma unroll for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) { if (tid < shift) { ssum[tid] += ssum[tid + shift]; } __syncthreads(); } // Warp reduce to 1 element. Threads implicitly synchronized within a warp. if (tid < warpSize) { # pragma unroll for (int shift = warpSize; shift > 0; shift >>= 1) { ssum[tid] += ssum[tid + shift]; } } // Broadcast result to all threads __syncthreads(); const Real sum_e = ssum[0]; // Apply element-wise x = out_deriv - exp(value) * sum_e for (int j = tid; j < in_deriv_dim.cols; j += CU1DBLOCK) { in_deriv[in_deriv_start + j] = out_deriv[out_deriv_start + j] - exp(out_value[out_value_start + j]) * sum_e; } } /** this function computes the core part of the LSTM nonlinearity. @param [in] in A matrix, of dimension num_rows by 5*cell_dim (i.e. its num-cols must be a multiple of 5). The column-space is interpreted as 5 consecutive blocks, each of dimension cell_dim, which we name: (i_part, f_part, c_part, o_part, c_{t-1}). If 'have_dropout_mask' is nonzero, each row of 'in' will have 3 extra elements, interpreted as dropout masks/scales for i_t, f_t and o_t. @param [in] params A matrix, of dimension 3 by cell_dim, with rows containing the 3 diagonal parameter matrices used in LSTMs, namely w_{ic}, w_{fc} and w_{oc}. @param [out] out A matrix, of dimension num_rows by 2*cell_dim. The quantities c_t and m_t respectively are put there (in two blocks of column-dimension cell_dim), according to the following equations: i_t = Sigmoid(i_part + w_{ic}*c_{t-1}) f_t = Sigmoid(f_part + w_{fc}*c_{t-1}) c_t = f_t*c_{t-1} + i_t * Tanh(c_part) o_t = Sigmoid(o_part + w_{oc}*c_t) m_t = o_t * Tanh(c_t) We use 1D thread block with CU1DBLOCK threads. It works best when cell_dim is a multiple of CU1DBLOCK. We use 1d Grid. Each block is working on one row of the in and out matrices. */ template<typename Real> __global__ static void _lstm_nonlinearity(const Real* in, const int in_stride, const Real* params, const int params_stride, const int out_stride, const int cell_dim, const int have_dropout_mask, const int num_rows, Real* out) { const int tid = threadIdx.x; const int i = blockIdx.x; const Real* i_part = in + i * in_stride; const Real* f_part = in + i * in_stride + cell_dim; const Real* c_part = in + i * in_stride + cell_dim * 2; const Real* o_part = in + i * in_stride + cell_dim * 3; const Real* c_tm1 = in + i * in_stride + cell_dim * 4; const Real* w_ic = params; const Real* w_fc = params + params_stride; const Real* w_oc = params + params_stride * 2; Real* c_t = out + i * out_stride; Real* m_t = out + i * out_stride + cell_dim; Real i_scale = (have_dropout_mask ? in[i * in_stride + cell_dim * 5] : 1), f_scale = (have_dropout_mask ? in[i * in_stride + cell_dim * 5 + 1] : 1), o_scale = (have_dropout_mask ? in[i * in_stride + cell_dim * 5 + 2] : 1); for (int j = tid; j < cell_dim; j += CU1DBLOCK) { Real c_tm1_j = c_tm1[j]; Real i_t_j = Real(1) / (Real(1) + exp(-i_part[j] - w_ic[j] * c_tm1_j)); Real f_t_j = Real(1) / (Real(1) + exp(-f_part[j] - w_fc[j] * c_tm1_j)); Real c_t_j = f_t_j * f_scale * c_tm1_j + i_t_j * i_scale * tanh(c_part[j]); Real o_t_j = Real(1) / (Real(1) + exp(-o_part[j] - w_oc[j] * c_t_j)); c_t[j] = c_t_j; m_t[j] = o_t_j * o_scale * tanh(c_t_j); } } /** This function does the 'backward' pass corresponding to the function ComputeLstmNonlinearity. 
It's a little more complicated than you might expect because of the 'self-repair' mechanism that we use to prevent the sigmoid and tanh nonlinearities oversaturating, and because of the average-activation and average-derivative stats that we store for these nonlinearities (these stats are used both to control the self-repair mechanism, and for diagnostic purposes). Because the forward pass computes various intermediate values that are not output, this function actually has to do the same computations as the forward pass before it does the backprop. In the following description, `C` is for `cell_dim`, `N` is for `num_rows`.
@param [in] input The same as in ComputeLstmNonlinearity(). A matrix, of dimension N by 5C (i.e. its num-cols must be a multiple of 5). The column-space is interpreted as 5 consecutive blocks, each of dimension C, which we name: (i_part, f_part, c_part, o_part, c_{t-1}). If 'have_dropout_mask' is nonzero, each row of 'in' will have 3 extra elements, interpreted as dropout masks/scales for i_t, f_t and o_t.
@param [in] params The same as in ComputeLstmNonlinearity(). A matrix, of dimension 3 by C, with rows containing the three diagonal parameter matrices used in LSTMs, namely w_{ic}, w_{fc} and w_{oc}.
@param [in] output_deriv A matrix, of dimension N by 2C, containing the derivative of the objective function we're backpropagating, w.r.t. the quantities c_t and m_t (in two blocks of column-dimension C).
@param [in] deriv_sum_in This is used in the self-repair code to identify oversaturated nonlinearities. It is a matrix, of dimension 5 by C, corresponding to the totals of the derivatives of the 5 sigmoid and tanh nonlinearities, in the order they appear in the equations in the documentation of ComputeLstmNonlinearity(); respectively, they appear in the equations for (i_t, f_t, c_t, o_t, m_t). This will be divided by 'count_in' to get the average derivative value so far, for each of the nonlinearities.
@param [in] self_repair_config A vector of dimension 10, containing the configuration of the self-repair to be used for the 5 nonlinearities. The first 5 elements are the self_repair_lower_threshold values (typically 0.05 for sigmoid and 0.2 for tanh), and the next 5 elements are the corresponding self-repair-scales (typically 10^-5).
@param [in] count_in The data-count that corresponds to the stats in 'deriv_sum_in' at entry to the function. This function should tolerate the count being zero (in that case, it is free to do the self-repair or not, as this should only happen on the 1st minibatch of each training job).
@param [out] input_deriv May be NULL; if not, this function writes, to this location, the backpropagated derivative of the objective function w.r.t. the 'input' matrix. This matrix should have the same dimension as 'input', i.e. N by 5C. In addition to the regular backpropagated derivative, the output will include small values relating to 'self-repair'.
@param [out] params_deriv May be NULL; if not, this is where this function *writes* [not adds] the backpropagated derivative of the objective function w.r.t. 'params'; it should have the same dimension as 'params' (3 by C). (This matrix will then be processed by the natural gradient code and added to the appropriate copy of the parameter matrix, outside this function).
@param [out] value_sum_out Must be NULL if params_deriv is NULL; if not, a matrix of dimension 5 by C.
This function *adds* to this location the total value of each of the sigmoid/tanh nonlinearities that it computes (this is for diagnostic purposes). @param [out] deriv_sum_out Must be NULL if params_deriv is NULL; if not, a matrix of dimension 5 by C; this function *adds* to this location the total of the derivative of each of the sigmoid/tanh nonlinearities that it computes (this is for diagnostic purposes and to control the self-repair). This function should tolerate the case when 'deriv_sum_out' points to the same data as 'deriv_sum_in'. @param [out] self_repair_sum_out Must be NULL if params_deriv is NULL; if not, a matrix of dimension 5 by C; this function *writes* to this location the sum of the number of times the self-repair code was activated (integer values 0 <= k <= N). This will be processed outside this function into self-repair stats for diagnostics. // Use 2D block (8x32 threads) as we need to compute column sum. // Use 1D grid to cover the data matrix `cell_dim`. */ template<typename Real> __global__ static void _diff_lstm_nonlinearity(const int cell_dim, const int have_dropout_mask, const int num_rows, const Real* input, const int input_stride, const Real* params, const int params_stride, const Real* output_deriv, const int output_deriv_stride, const double* deriv_sum_in, const int deriv_sum_in_stride, const Real* self_repair_config, double count, Real* input_deriv, const int input_deriv_stride, Real* params_deriv, const int params_deriv_stride, double* value_sum_out, const int value_sum_out_stride, double* deriv_sum_out, const int deriv_sum_out_stride, Real* self_repair_sum_out, const int self_repair_sum_out_stride) { __shared__ Real smem[CU1DBLOCK]; const int j = blockIdx.x * blockDim.x + threadIdx.x; const int tid = threadIdx.y * blockDim.x + threadIdx.x; const int grid_stride = gridDim.y * blockDim.y; const int i0 = blockIdx.y * blockDim.y + threadIdx.y; Real w_ic_deriv_sum = 0; Real w_fc_deriv_sum = 0; Real w_oc_deriv_sum = 0; Real i_t_value_sum = 0, i_t_deriv_sum = 0; Real f_t_value_sum = 0, f_t_deriv_sum = 0; Real c_part_value_sum = 0, c_part_deriv_sum = 0; Real o_t_value_sum = 0, o_t_deriv_sum = 0; Real c_t_value_sum = 0, c_t_deriv_sum = 0; bool update_sr[5]; if (j < cell_dim) { const Real w_ic = params[j]; const Real w_fc = params[params_stride + j]; const Real w_oc = params[2 * params_stride + j]; const Real* sr_config = self_repair_config; # pragma unroll for (int i = 0; i < 5; i++) { update_sr[i] = deriv_sum_in[i * deriv_sum_in_stride + j] < sr_config[i] * count; } const Real i_t_self_repair = (update_sr[0] ? sr_config[5] : 0); const Real f_t_self_repair = (update_sr[1] ? sr_config[6] : 0); const Real c_part_self_repair = (update_sr[2] ? sr_config[7] : 0); const Real o_t_self_repair = (update_sr[3] ? sr_config[8] : 0); const Real c_t_self_repair = (update_sr[4] ? sr_config[9] : 0); for (int i = i0; i < num_rows; i += grid_stride) { const Real i_part = input[i * input_stride + j]; const Real f_part = input[i * input_stride + j + cell_dim]; const Real c_part = input[i * input_stride + j + 2 * cell_dim]; const Real o_part = input[i * input_stride + j + 3 * cell_dim]; const Real c_prev = input[i * input_stride + j + 4 * cell_dim]; const Real i_scale = (have_dropout_mask ? input[i * input_stride + cell_dim * 5] : 1), f_scale = (have_dropout_mask ? input[i * input_stride + cell_dim * 5 + 1] :1), o_scale = (have_dropout_mask ? 
input[i * input_stride + cell_dim * 5 + 2] :1); const Real i_t = Real(1) / (1 + exp(-i_part - w_ic * c_prev)); const Real f_t = Real(1) / (1 + exp(-f_part - w_fc * c_prev)); const Real tanh_c_part = tanh(c_part); const Real c_t = f_t * f_scale * c_prev + i_t * i_scale * tanh_c_part; const Real o_t = 1 / (1 + exp(-o_part - w_oc * c_t)); const Real tanh_c_t = tanh(c_t); const Real i_t_deriv = i_t * (1 - i_t); const Real f_t_deriv = f_t * (1 - f_t); const Real c_part_deriv = 1 - tanh_c_part * tanh_c_part; const Real o_t_deriv = o_t * (1 - o_t); const Real c_t_deriv = 1 - tanh_c_t * tanh_c_t; if (params_deriv) { i_t_value_sum += i_t; f_t_value_sum += f_t; c_part_value_sum += tanh_c_part; o_t_value_sum += o_t; c_t_value_sum += tanh_c_t; i_t_deriv_sum += i_t_deriv; f_t_deriv_sum += f_t_deriv; c_part_deriv_sum += c_part_deriv; o_t_deriv_sum += o_t_deriv; c_t_deriv_sum += c_t_deriv; } const Real dc_t_out = output_deriv[i * output_deriv_stride + j]; const Real dm_t = output_deriv[i * output_deriv_stride + j + cell_dim]; const Real dtanh_c_t = o_t * o_scale * dm_t; const Real do_t = o_scale * tanh_c_t * dm_t; const Real do_t_input = (o_t_deriv * do_t - (2 * o_t - 1) * o_t_self_repair); const Real dc_t = (c_t_deriv * dtanh_c_t + dc_t_out + do_t_input * w_oc) - tanh_c_t * c_t_self_repair; const Real dtanh_c_part = i_t * i_scale * dc_t; const Real df_t = dc_t * f_scale * c_prev; const Real df_t_input = (df_t * f_t_deriv - (2 * f_t - 1) * f_t_self_repair); const Real di_t = dc_t * i_scale * tanh_c_part; const Real di_t_input = (di_t * i_t_deriv - (2 * i_t - 1) * i_t_self_repair); if (params_deriv) { w_ic_deriv_sum += c_prev * di_t_input; w_fc_deriv_sum += c_prev * df_t_input; w_oc_deriv_sum += c_t * do_t_input; } const Real dc_prev = w_ic * di_t_input + w_fc * df_t_input + f_t * f_scale * dc_t; const Real do_part = do_t_input; const Real dc_part = (c_part_deriv * dtanh_c_part - tanh_c_part * c_part_self_repair); const Real df_part = df_t_input; const Real di_part = di_t_input; if (input_deriv) { input_deriv[i * input_deriv_stride + j] = di_part; input_deriv[i * input_deriv_stride + j + cell_dim] = df_part; input_deriv[i * input_deriv_stride + j + cell_dim * 2] = dc_part; input_deriv[i * input_deriv_stride + j + cell_dim * 3] = do_part; input_deriv[i * input_deriv_stride + j + cell_dim * 4] = dc_prev; } } } if (params_deriv) { // compute params_deriv smem[tid] = w_ic_deriv_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { params_deriv[j] = smem[tid]; } __syncthreads(); smem[tid] = w_fc_deriv_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { params_deriv[params_deriv_stride + j] = smem[tid]; } __syncthreads(); smem[tid] = w_oc_deriv_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { params_deriv[2 * params_deriv_stride + j] = smem[tid]; } // compute value_sum_out __syncthreads(); smem[tid] = i_t_value_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { value_sum_out[j] += smem[tid]; } __syncthreads(); smem[tid] = f_t_value_sum; # 
pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { value_sum_out[value_sum_out_stride + j] += smem[tid]; } __syncthreads(); smem[tid] = c_part_value_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { value_sum_out[2 * value_sum_out_stride + j] += smem[tid]; } __syncthreads(); smem[tid] = o_t_value_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { value_sum_out[3 * value_sum_out_stride + j] += smem[tid]; } __syncthreads(); smem[tid] = c_t_value_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { value_sum_out[4 * value_sum_out_stride + j] += smem[tid]; } // need to update self_repair_sum_out before deriv_sum_out, because // deriv_sum_out and deriv_sum_in might point to the same memory. if (i0 < 5 && j < cell_dim) { self_repair_sum_out[i0 * self_repair_sum_out_stride + j] = update_sr[i0] ? num_rows : 0; } // compute derive_sum_out __syncthreads(); smem[tid] = i_t_deriv_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { deriv_sum_out[j] += smem[tid]; } __syncthreads(); smem[tid] = f_t_deriv_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { deriv_sum_out[deriv_sum_out_stride + j] += smem[tid]; } __syncthreads(); smem[tid] = c_part_deriv_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { deriv_sum_out[2 * deriv_sum_out_stride + j] += smem[tid]; } __syncthreads(); smem[tid] = o_t_deriv_sum; # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { deriv_sum_out[3 * deriv_sum_out_stride + j] += smem[tid]; } __syncthreads(); smem[tid] = c_t_deriv_sum; __syncthreads(); # pragma unroll for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) { __syncthreads(); if (tid < shift) { smem[tid] += smem[tid + shift]; } } if (tid < warpSize && j < cell_dim) { deriv_sum_out[4 * deriv_sum_out_stride + j] += smem[tid]; } } } __global__ static void _cuda_compress_uint8_sign(const BaseFloat *src, MatrixDim dim, unsigned char *dest, int dest_stride) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dest_index = i + j * dest_stride, src_index = i + j * dim.stride; if (i < dim.cols && j < dim.rows) { BaseFloat f = src[src_index]; dest[dest_index] = (f > 0.0 ? 
(unsigned char)1 : (unsigned char)0); } } // The following inline templated functions are a workaround for the // fact that (I believe) std::numeric_limits is not available in CUDA; // they allow us to access the minimum and maximum elements of certain // types from templated code. template <typename I> __device__ static inline int minimum_integer_value(); template <typename I> __device__ static inline int maximum_integer_value(); template<> __device__ int maximum_integer_value<int8_t>() { return 127; } template<> __device__ int minimum_integer_value<int8_t>() { return -128; } template<> __device__ int maximum_integer_value<uint8_t>() { return 255; } template<> __device__ int minimum_integer_value<uint8_t>() { return 0; } template<> __device__ int maximum_integer_value<int16_t>() { return 32767; } template<> __device__ int minimum_integer_value<int16_t>() { return -32768; } template<> __device__ int maximum_integer_value<uint16_t>() { return 65535; } template<> __device__ int minimum_integer_value<uint16_t>() { return 0; } template <typename I> __global__ static void _cuda_compress_bounds_check(const BaseFloat *src, MatrixDim dim, I *dest, int dest_stride, float inv_scale) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dest_index = i + j * dest_stride, src_index = i + j * dim.stride; const int min_value = minimum_integer_value<I>(), max_value = maximum_integer_value<I>(); int compressed_value; int ok = (i < dim.cols && j < dim.rows); if (ok) { float f = src[src_index]; // note: I'm not sure what __float2int_rn does if input is outside of // integer range, but it doesn't matter much as in the situations where this // type of compression would make sense, the input should be well inside the // range of 'int', and if it fails, we've probably already catastrophically // diverged. 
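  // A worked example of the step below (illustrative values; it assumes, as
  // the parameter names suggest, that 'inv_scale' here is the reciprocal of
  // the 'scale' later passed to _cuda_uncompress): for an int8_t destination,
  // min_value = -128 and max_value = 127; with inv_scale = 100 (scale = 0.01),
  //   f =  0.5   -> __float2int_rn( 50.0) =  50   (stored as  50)
  //   f = -0.334 -> __float2int_rn(-33.4) = -33   (stored as -33)
  //   f =  2.0   -> __float2int_rn(200.0) =  200  -> clamped to 127.
  // _cuda_uncompress then multiplies the stored integer by 'scale', recovering
  // approximately 0.50, -0.33 and 1.27 in this example.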
int i = __float2int_rn(f * inv_scale); if (i < min_value) compressed_value = min_value; else if (i > max_value) compressed_value = max_value; else compressed_value = i; } __syncthreads(); if (ok) { dest[dest_index] = compressed_value; } } template <typename I> __global__ static void _cuda_compress_no_bounds_check(const BaseFloat *src, MatrixDim dim, I *dest, int dest_stride, float inv_scale) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int dest_index = i + j * dest_stride, src_index = i + j * dim.stride; if (i < dim.cols && j < dim.rows) { float f = src[src_index]; int i = __float2int_rn(f * inv_scale); I s = i; dest[dest_index] = s; } } template <typename I> __global__ static void _cuda_uncompress(BaseFloat *dest, MatrixDim dim, const I *src, int src_stride, float scale) { int i = blockIdx.x * blockDim.x + threadIdx.x; int j = blockIdx.y * blockDim.y + threadIdx.y; int src_index = i + j * src_stride, dest_index = i + j * dim.stride; if (i < dim.cols && j < dim.rows) { I s = src[src_index]; dest[dest_index] = float(s * scale); } } /*********************************************************************** * ANSI-C wrappers of CUDA kernels */ /* * "int32" */ void cuda_int32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) { _set_const<<<Gr,Bl>>>(mat,value,d); } void cuda_int32_add(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value, MatrixDim d) { _add<<<Gr,Bl>>>(mat,value,d); } void cuda_int32_sequence(dim3 Gr, dim3 Bl, int32_cuda* data, int length, int32_cuda base) { _sequence<<<Gr, Bl>>>(data, length, base); } /* * "float" */ /* * CuMatrix */ void cudaF_copy_upp_low(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) { _copy_upp_low<<<Gr,Bl>>>(A,dimA);} void cudaF_copy_low_upp(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) { _copy_low_upp<<<Gr,Bl>>>(A,dimA);} void cudaF_add_diag_vec_mat(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *vec, const float *mat2, int mat2_row_stride, int mat2_col_stride, float beta) { _add_diag_vec_mat<<<Gr,Bl>>>(alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaF_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } void cudaFD_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } void cudaF_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const float* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaFD_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const double* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _apply_exp<<<Gr,Bl>>>(mat,d); } void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) { _apply_pow<<<Gr,Bl>>>(mat, power, d); } void cudaF_apply_pow_abs(dim3 Gr, dim3 Bl, float* mat, float power, bool include_sign, MatrixDim d) { _apply_pow_abs<<<Gr,Bl>>>(mat, power, include_sign, d); } void cudaF_apply_heaviside(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _apply_heaviside<<<Gr,Bl>>>(mat, d); } void cudaF_copy_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaF_add_cols(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _add_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, 
src_stride); } void cudaF_copy_rows(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaF_copy_rows_direct(dim3 Gr, dim3 Bl, float* dst, const float* const * src, MatrixDim dst_dim) { _copy_rows<<<Gr,Bl>>>(dst, src, dst_dim); } void cudaF_copy_to_rows_direct(dim3 Gr, dim3 Bl, float* const * dst, const float* src, MatrixDim src_dim) { _copy_to_rows<<<Gr,Bl>>>(dst, src, src_dim); } void cudaF_add_rows(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _add_rows<<<Gr,Bl>>>(alpha, dst, src, reorder, dst_dim, src_stride); } void cudaF_mul_rows(dim3 Gr, dim3 Bl, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _mul_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaF_add_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* const * src, MatrixDim dst_dim) { _add_rows<<<Gr,Bl>>>(alpha, dst, src, dst_dim); } void cudaF_add_to_rows(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* src, const MatrixIndexT_cuda* reorder, MatrixDim src_dim, int dst_stride) { _add_to_rows<<<Gr,Bl>>>(alpha, dst, src, reorder, src_dim, dst_stride); } void cudaF_add_to_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* const * dst, const float* src, MatrixDim src_dim) { _add_to_rows<<<Gr,Bl>>>(alpha, dst, src, src_dim); } void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val, MatrixDim d) { _apply_floor<<<Gr,Bl>>>(mat, floor_val, d); } void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val, MatrixDim d) { _apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d); } void cudaF_set_diag(int Gr, int Bl, float* mat, float value, MatrixDim d) { _set_diag<<<Gr,Bl>>>(mat,value,d); } void cudaF_set_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { _set_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaF_add_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { _add_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { _set_const<<<Gr,Bl>>>(mat,value,d); } void cudaF_set_zero_above_diag(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _set_zero_above_diag<<<Gr,Bl>>>(mat, d); } void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { _add<<<Gr,Bl>>>(mat,value,d); } void cudaF_scale_diag_packed(int Gr, int Bl, float* mat, float value, int dim) { _scale_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) { _scale<<<Gr,Bl>>>(mat,value,d); } void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) { _apply_log<<<Gr,Bl>>>(mat,d); } void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { _mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaF_div_elements(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { _div_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaF_max(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d, int src_stride) { _max<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaF_min(dim3 Gr, dim3 Bl, float* mat, const float* other, MatrixDim mat_d, int other_stride) { _min<<<Gr,Bl>>>(mat,other,mat_d,other_stride); } void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { 
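  // For orientation, a hypothetical host-side call of one of these wrappers
  // could look like the sketch below (the 16x16 tile and the variable names
  // are assumptions for illustration only, not taken from this file):
  //
  //   dim3 block(16, 16);                                 // threads per block
  //   dim3 grid((d.cols + 15) / 16, (d.rows + 15) / 16);  // cover the matrix
  //   cudaF_mul_cols_vec(grid, block, mat_data, scale_data, d);
  //
  // The wrapper itself only forwards the caller-chosen launch configuration to
  // the templated kernel; it does no dimension checking of its own.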
_mul_cols_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale, MatrixDim d) { _mul_rows_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaF_mul_rows_group_mat(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) { _mul_rows_group_mat<<<Gr,Bl>>>(y, x, d, src_stride, group_size); } void cudaF_diff_group_pnorm(dim3 Gr, dim3 Bl, float *id, const float *iv, const float *ov, const float* od, MatrixDim id_dim, int iv_stride, int ov_stride, int od_stride, int group_size, float power) { _diff_group_pnorm<<<Gr, Bl>>>(id, iv, ov, od, id_dim, iv_stride, ov_stride, od_stride, group_size, power); } void cudaF_calc_group_max_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1, const float *x2, MatrixDim y_dim, int x1_stride, int x2_stride, int group_size) { _calc_group_max_deriv<<<Gr,Bl>>>(y, x1, x2, y_dim, x1_stride, x2_stride, group_size); } void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div, MatrixDim d) { _div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d); } void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* src, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } else { _add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } } void cudaF_add_mat_blocks(dim3 Gr, dim3 Bl, float alpha, const float* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, float* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_blocks_trans<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } else { _add_mat_blocks<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } } void cudaF_add_mat_repeated(dim3 Gr, dim3 Bl, float alpha, const float* src, MatrixDim src_dim, float *dst, MatrixDim dst_dim) { _add_mat_repeated<<<Gr,Bl>>>(alpha, src, src_dim, dst, dst_dim); } void cudaF_set_mat_mat_div_mat(dim3 Gr, dim3 Bl, const float *A, const float *B, const float *C, float *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { _set_mat_mat_div_mat<<<Gr,Bl>>>(A,B,C,dst,d, stride_a, stride_b, stride_c); } void cudaF_sy_add_tr2(dim3 Gr, dim3 Bl, float alpha, float beta, const float* T, MatrixDim tdim, float *S, MatrixDim sdim) { _sy_add_tr2<<<Gr,Bl>>>(alpha, beta, T, tdim, S, sdim); } void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col, float beta, float* dst, MatrixDim d) { _add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d); } void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row, float beta, float* dst, MatrixDim d) { _add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d); } void cudaF_add_mat_diag_vec(dim3 Gr, dim3 Bl, float alpha, float *mat, MatrixDim mat_dim, const float *mat2, int mat2_row_stride, int mat2_col_stride, const float *vec, float beta) { _add_mat_diag_vec<<<Gr,Bl>>>(alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaF_add_mat_mat_elements(dim3 Gr, dim3 Bl, float *data, const float *srcA_data, const float *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, float alpha, float beta) { _add_mat_mat_elements<<<Gr, Bl>>>(data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... 
void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { _apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask); } /* * CuVector */ void cudaF_max_mat_cols(int Gr, int Bl, float* result, const float* mat, const MatrixDim d) { _transform_reduce_mat_cols<<<Gr,Bl>>>(result,mat,d, TransReduceOp<MAX,float>()); } void cudaF_min_mat_cols(int Gr, int Bl, float* result, const float* mat, const MatrixDim d) { _transform_reduce_mat_cols<<<Gr,Bl>>>(result,mat,d, TransReduceOp<MIN,float>()); } void cudaF_sum_mat_cols(int Gr, int Bl, float* result, const float* mat, const MatrixDim d) { _transform_reduce_mat_cols<<<Gr,Bl>>>(result,mat,d, TransReduceOp<SUM,float>()); } void cudaF_add_col_sum_mat(int Gr, int Bl, float* result, const float* mat, const MatrixDim d, const float alpha, const float beta) { _transform_reduce_mat_cols<<<Gr, Bl>>>(result, mat, d, TransReduceOp<SUMAB, float>(alpha, beta)); } void cudaF_replace_value(int Gr, int Bl, float *v, int dim, float orig, float changed) { _replace_value<<<Gr,Bl>>>(v, dim, orig, changed); } void cudaF_set_bias_params(int Gr, int Bl, float* v, const float* a, float param_1, float param_2, float param_3, int* flag, int dim) { _set_bias_params<<<Gr,Bl>>>(v,a,param_1,param_2,param_3,flag,dim); } void cublas_copy_kaldi_fd(int Gr, int Bl, int n, const float* x, int incx, double* y, int incy) { _cublas_copy_kaldi<<<Gr,Bl>>>(n, x, incx, y, incy); } void cublas_copy_kaldi_df(int Gr, int Bl, int n, const double* x, int incx, float* y, int incy) { _cublas_copy_kaldi<<<Gr,Bl>>>(n, x, incx, y, incy); } void cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) { _vec_mul_elements<<<Gr,Bl>>>(v, a, dim); } void cudaF_vec_min(int Gr, int Bl, const float* v, float* value, int dim, int inc) { _vec_transform_reduce<<<Gr,Bl>>>(v, value, dim, inc, TransReduceOp<MIN, float>()); } void cudaF_vec_max(int Gr, int Bl, const float* v, float* value, int dim, int inc) { _vec_transform_reduce<<<Gr,Bl>>>(v, value, dim, inc, TransReduceOp<MAX, float>()); } void cudaF_trace_mat_mat_trans(dim3 Gr, dim3 Bl, const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { _trace_mat_mat_trans<<<Gr,Bl>>>(A,B,dA,B_stride,value); } void cudaF_trace_mat_mat(dim3 Gr, dim3 Bl, const float* A, const float* B, MatrixDim dA, int B_stride, float* value) { _trace_mat_mat<32> <<<Gr,Bl>>>(A,B,dA,B_stride,value); } void cudaF_add_diag_mat_mat_MNT(int Gr, int Bl, const float alpha, const float* M, const MatrixDim dim_M, const float* N, const int stride_N, const float beta, float* v) { _add_diag_mat_mat_MNT<<<Gr,Bl>>>(alpha,M,dim_M,N,stride_N,beta,v); } void cudaF_add_diag_mat_mat_MTN(dim3 Gr, dim3 Bl, const float alpha, const float* M, const int stride_M, const float* N, const MatrixDim dim_N, const float beta, float* v) { if (Bl.x == 16) { _add_diag_mat_mat_MTN<16> <<<Gr,Bl>>>(alpha,M,stride_M,N,dim_N,beta,v); } else if (Bl.x==32) { _add_diag_mat_mat_MTN<32><<<Gr,Bl>>>(alpha,M,stride_M,N,dim_N,beta,v); } } void cudaF_add_diag_mat_mat_MN(dim3 Gr, dim3 Bl, const float alpha, const float* M, const int stride_M, const float* N, const MatrixDim dim_N, const float beta, float* v) { if (Bl.x == 16) { _add_diag_mat_mat_MN<16> <<<Gr,Bl>>>(alpha,M,stride_M,N,dim_N,beta,v); } else if (Bl.x==32) { _add_diag_mat_mat_MN<32><<<Gr,Bl>>>(alpha,M,stride_M,N,dim_N,beta,v); } } void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x, const float* y, float beta, int dim) { _add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim); } void 
cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) { _vec_transform_reduce<<<Gr,Bl>>>(v, value, dim, inc, TransReduceOp<SUM, float>()); } void cudaF_matrix_add_elements(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, float alpha, MatrixElement<float>* x, int num_elements) { _cuda_matrix_add_elements<<<Gr, Bl>>>(data, dim, alpha, x, num_elements); } void cudaF_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim, float alpha, const Int32Pair* indices, const float* x, int s, float* data) { _cuda_matrix_add_indexed_values<<<Gr, Bl>>>(dim, alpha, indices, x, s, data); } void cudaF_matrix_add_to_elements(dim3 Gr, dim3 Bl, float alpha, float* mat, MatrixDim dim, const MatrixIndexT_cuda* elements) { _cuda_matrix_add_to_elements<<<Gr, Bl>>>(alpha, mat, dim, elements); } void cudaF_vector_copy_elements(dim3 Gr, dim3 Bl, float *data, int dim, const float *src_mat, int mat_stride, bool transpose, const MatrixIndexT_cuda* elements) { _cuda_vector_copy_elements<<<Gr, Bl>>>(data, dim, src_mat, mat_stride, transpose, elements); } void cudaF_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<float>* x, int s, const float* z, MatrixDim d, float* z2, MatrixDim d2, float* t) { _cuda_comp_obj_deriv<<<Gr,Bl>>>(x,s,z,d,z2,d2,t); } void cudaD_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<double>* x, int s, const double* z, MatrixDim d, double* z2, MatrixDim d2, double* t) { _cuda_comp_obj_deriv<<<Gr,Bl>>>(x,s,z,d,z2,d2,t); } void cudaF_vec_copy_diag_from_packed(int Gr, int Bl, float *dst, const float *src, int dim) { _vec_copy_diag_from_packed<<<Gr,Bl>>>(dst,src,dim); } void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val, float *count, int dim) { _vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim); } void cudaF_vec_apply_ceiling(int Gr, int Bl, float* v, float ceiling_val, float *count, int dim) { _vec_apply_ceiling<<<Gr,Bl>>>(v, ceiling_val,count,dim); } void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) { _vec_apply_exp<<<Gr,Bl>>>(v,dim); } void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) { _vec_apply_log<<<Gr,Bl>>>(v,flag,dim); } void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) { _invert_elements<<<Gr,Bl>>>(data, d); } void cudaF_add_mat_blockmat(dim3 Gr, dim3 Bl, float *data, MatrixDim d, const float *Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, float alpha, float beta, int B_trans) { if (B_trans) { _add_mat_blockmat_trans<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } else { _add_mat_blockmat<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaF_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const float *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const float *D_data, int D_row_stride, int D_col_stride, float alpha, float beta) { _block_add_mat_mat<<<Gr,Bl>>>(B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaF_soft_hinge(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _soft_hinge<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_group_pnorm(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size, float power) { _group_pnorm<<<Gr,Bl>>>(y, x, d, src_stride, group_size, power); } void 
cudaF_group_spec_pnorm(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride, int group_size, float power) { if (power == float(0)) { _group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size, TransReduceOp<L0NORM, float>()); } else if (power == float(1)) { _group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size, TransReduceOp<L1NORM, float>()); } else if (power == float(2)) { _group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size, TransReduceOp<L2NORM, float>()); } else if (power == std::numeric_limits<float>::infinity()) { _group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size, TransReduceOp<LINFNORM, float>()); } else { _group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size, TransReduceOp<LPNORM, float>(power)); } } void cudaF_group_max(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d, int src_stride, int group_size) { _group_transform_reduce<<<Gr,Bl>>>(y, x, d, src_stride, group_size, TransReduceOp<MAX, float>()); } void cudaF_sigmoid(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _sigmoid<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_diff_sigmoid(dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { _diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaF_tanh(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _tanh<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_diff_tanh(dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride) { _diff_tanh<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaF_ensure_nonzero(dim3 Gr, dim3 Bl, const float *x, MatrixDim d, float epsilon, int y_stride, float *y) { _ensure_nonzero<<<Gr,Bl>>>(x, d, epsilon, y_stride, y); } void cudaF_parametric_relu(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride, const float* a, const float* b) { _parametric_relu<<<Gr,Bl>>>(y, x, d, src_stride, a, b); } void cudaF_diff_parametric_relu(dim3 Gr, dim3 Bl, float* eout, const float* e, const float* y, MatrixDim d, int e_stride, int y_stride, const float* a, const float* b) { _diff_parametric_relu<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride, a, b); } void cudaF_heaviside(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d, int src_stride) { _heaviside<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_softmax_reduce(size_t Gr, size_t Bl, float* y, const float* x, MatrixDim d, int src_stride) { _softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaF_log_softmax_reduce(size_t Gr, size_t Bl, float* y, const float* x, MatrixDim y_dim, int x_stride) { _log_softmax_reduce<<<Gr,Bl>>>(y, x, y_dim, x_stride); } void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { _splice<<<Gr,Bl>>>(y,x,off,d_out,d_in); } void cudaF_normalize_per_row(size_t Gr, size_t Bl, float *y, int y_stride, const float *x, MatrixDim x_d, float target_rms, bool add_log_stddev) { _normalize_per_row<<<Gr, Bl>>>(y, y_stride, x, x_d, target_rms, add_log_stddev); } void cudaF_one(int Gr, int Bl, float* x, int dim) { _one<<<Gr,Bl>>>(x,dim); } void cudaF_take_mean(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { _take_mean<<<Gr,Bl>>>(x,y,d_in); } void cudaF_take_lower(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { _take_lower<<<Gr,Bl>>>(x,y,d_in); } void cudaF_take_upper(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim d_in) { 
_take_upper<<<Gr,Bl>>>(x,y,d_in); } void cudaF_copy_from_sp(dim3 Gr, dim3 Bl, const float* x, float* y, MatrixDim dim) { _copy_from_sp<<<Gr,Bl>>>(x, y, dim); } void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1, float lr, MatrixDim d, int stride_grad) { _regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d,stride_grad); } void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val, int32_cuda* vec_id, MatrixDim d) { _find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, d); } void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, float* mat_net_out, float* vec_log_post, MatrixDim d) { _diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d); } void cudaF_diff_softmax(dim3 Gr, dim3 Bl, float* x, const MatrixDim dim, const float* value, const int value_stride, const float* diff, const int diff_stride) { _diff_softmax<<<Gr, Bl>>>(x, dim, value, value_stride, diff, diff_stride); } void cudaF_copy_rows_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out, const float *v_in) { _copy_rows_from_vec<<<Gr,Bl>>>(mat_out, d_out, v_in); } void cudaF_diff_log_softmax(dim3 Gr, dim3 Bl, const MatrixDim in_deriv_dim, const float* out_value, const int out_value_stride, const float* out_deriv, const int out_deriv_stride, float* in_deriv) { _diff_log_softmax<<<Gr, Bl>>>(in_deriv_dim, out_value, out_value_stride, out_deriv, out_deriv_stride, in_deriv); } void cudaF_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const float* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_df<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaF_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const float* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_fd<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaF_sum_column_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, const float *src_data, MatrixDim src_dim, const Int32Pair *indices) { _sum_column_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indices); } void cudaF_add_row_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim, const float *src_data, MatrixDim src_dim, const Int32Pair *indexes) { _add_row_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indexes); } void cudaF_matrix_lookup(dim3 Gr, dim3 Bl, const float *data, MatrixDim dim, const Int32Pair *indices, int indices_size, float *output) { _matrix_lookup<<<Gr,Bl>>>(data, dim, indices, indices_size, output); } void cudaF_equal_element_mask(dim3 Gr, dim3 Bl, const float *mat1, const float *mat2, float *mask, MatrixDim mat1_dim, int mat2_stride, int mask_stride) { _equal_element_mask<<<Gr,Bl>>>(mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); } /* * "double" */ /* * CuMatrix */ void cudaD_copy_upp_low(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) { _copy_upp_low<<<Gr,Bl>>>(A,dimA);} void cudaD_copy_low_upp(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) { _copy_low_upp<<<Gr,Bl>>>(A,dimA);} void cudaD_add_diag_vec_mat(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *vec, const double *mat2, int mat2_row_stride, int mat2_col_stride, double beta) { _add_diag_vec_mat<<<Gr,Bl>>>(alpha, mat, mat_dim, vec, mat2, mat2_row_stride, mat2_col_stride, beta); } void cudaD_copy_from_tp_trans(dim3 Gr, dim3 Bl, 
double* A, const double* B, MatrixDim dmat) { _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } void cudaDF_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { _copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat); } void cudaD_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const double* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaDF_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const float* B, MatrixDim dmat) { _copy_from_tp<<<Gr,Bl>>>(A,B,dmat); } void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _apply_exp<<<Gr,Bl>>>(mat,d); } void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) { _apply_pow<<<Gr,Bl>>>(mat, power, d); } void cudaD_apply_pow_abs(dim3 Gr, dim3 Bl, double* mat, double power, bool include_sign, MatrixDim d) { _apply_pow_abs<<<Gr,Bl>>>(mat, power, include_sign, d); } void cudaD_apply_heaviside(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _apply_heaviside<<<Gr,Bl>>>(mat, d); } void cudaD_copy_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaD_add_cols(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _add_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaD_copy_rows(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _copy_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaD_copy_rows_direct(dim3 Gr, dim3 Bl, double* dst, const double* const * src, MatrixDim dst_dim) { _copy_rows<<<Gr,Bl>>>(dst, src, dst_dim); } void cudaD_copy_to_rows_direct(dim3 Gr, dim3 Bl, double* const * dst, const double* src, MatrixDim src_dim) { _copy_to_rows<<<Gr,Bl>>>(dst, src, src_dim); } void cudaD_add_rows(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _add_rows<<<Gr,Bl>>>(alpha, dst, src, reorder, dst_dim, src_stride); } void cudaD_mul_rows(dim3 Gr, dim3 Bl, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim dst_dim, int src_stride) { _mul_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride); } void cudaD_add_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* const * src, MatrixDim dst_dim) { _add_rows<<<Gr,Bl>>>(alpha, dst, src, dst_dim); } void cudaD_add_to_rows(dim3 Gr, dim3 Bl, double alpha, double* dst, const double* src, const MatrixIndexT_cuda* reorder, MatrixDim src_dim, int dst_stride) { _add_to_rows<<<Gr,Bl>>>(alpha, dst, src, reorder, src_dim, dst_stride); } void cudaD_add_to_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* const * dst, const double* src, MatrixDim src_dim) { _add_to_rows<<<Gr,Bl>>>(alpha, dst, src, src_dim); } void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val, MatrixDim d) { _apply_floor<<<Gr,Bl>>>(mat, floor_val, d); } void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val, MatrixDim d) { _apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d); } void cudaD_set_diag(int Gr, int Bl, double* mat, double value, MatrixDim d) { _set_diag<<<Gr,Bl>>>(mat,value,d); } void cudaD_set_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { _set_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaD_add_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { _add_diag_packed<<<Gr,Bl>>>(mat,value,dim); } 
void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { _set_const<<<Gr,Bl>>>(mat,value,d); } void cudaD_set_zero_above_diag(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _set_zero_above_diag<<<Gr,Bl>>>(mat, d); } void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { _add<<<Gr,Bl>>>(mat,value,d); } void cudaD_scale_diag_packed(int Gr, int Bl, double* mat, double value, int dim) { _scale_diag_packed<<<Gr,Bl>>>(mat,value,dim); } void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) { _scale<<<Gr,Bl>>>(mat,value,d); } void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) { _apply_log<<<Gr,Bl>>>(mat,d); } void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { _mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaD_div_elements(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { _div_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaD_max(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d, int src_stride) { _max<<<Gr,Bl>>>(mat,A,dst_d,src_stride); } void cudaD_min(dim3 Gr, dim3 Bl, double* mat, const double* other, MatrixDim mat_d, int other_stride) { _min<<<Gr,Bl>>>(mat,other,mat_d,other_stride); } void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { _mul_cols_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale, MatrixDim d) { _mul_rows_vec<<<Gr,Bl>>>(mat,scale,d); } void cudaD_mul_rows_group_mat(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) { _mul_rows_group_mat<<<Gr,Bl>>>(y, x, d, src_stride, group_size); } void cudaD_diff_group_pnorm(dim3 Gr, dim3 Bl, double *id, const double *iv, const double *ov, const double* od, MatrixDim id_dim, int iv_stride, int ov_stride, int od_stride, int group_size, double power) { _diff_group_pnorm<<<Gr, Bl>>>(id, iv, ov, od, id_dim, iv_stride, ov_stride, od_stride, group_size, power); } void cudaD_calc_group_max_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1, const double* x2, MatrixDim y_dim, int x1_stride, int x2_stride, int group_size) { _calc_group_max_deriv<<<Gr,Bl>>>(y, x1, x2, y_dim, x1_stride, x2_stride, group_size); } void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div, MatrixDim d) { _div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d); } void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* src, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } else { _add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride); } } void cudaD_add_mat_blocks(dim3 Gr, dim3 Bl, double alpha, const double* src, int32_cuda num_row_blocks, int32_cuda num_col_blocks, double* dst, MatrixDim d, int src_stride, int A_trans) { if (A_trans) { _add_mat_blocks_trans<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } else { _add_mat_blocks<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst, d, src_stride); } } void cudaD_add_mat_repeated(dim3 Gr, dim3 Bl, double alpha, const double* src, MatrixDim src_dim, double *dst, MatrixDim dst_dim) { _add_mat_repeated<<<Gr,Bl>>>(alpha, src, src_dim, dst, dst_dim); } void cudaD_set_mat_mat_div_mat(dim3 Gr, dim3 Bl, const double *A, const double *B, const double *C, double *dst, MatrixDim d, int stride_a, int stride_b, int stride_c) { 
_set_mat_mat_div_mat<<<Gr,Bl>>>(A,B,C,dst,d,stride_a,stride_b,stride_c); } void cudaD_sy_add_tr2(dim3 Gr, dim3 Bl, double alpha, double beta, const double* T, MatrixDim tdim, double *S, MatrixDim sdim) { _sy_add_tr2<<<Gr,Bl>>>(alpha, beta, T, tdim, S, sdim); } void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col, double beta, double* dst, MatrixDim d) { _add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d); } void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row, double beta, double* dst, MatrixDim d) { _add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d); } void cudaD_add_mat_diag_vec(dim3 Gr, dim3 Bl, double alpha, double *mat, MatrixDim mat_dim, const double *mat2, int mat2_row_stride, int mat2_col_stride, const double *vec, double beta) { _add_mat_diag_vec<<<Gr,Bl>>>(alpha, mat, mat_dim, mat2, mat2_row_stride, mat2_col_stride, vec, beta); } void cudaD_add_mat_mat_elements(dim3 Gr, dim3 Bl, double *data, const double *srcA_data, const double *srcB_data, MatrixDim dim, int srcA_stride, int srcB_stride, double alpha, double beta) { _add_mat_mat_elements<<<Gr, Bl>>>(data, srcA_data, srcB_data, dim, srcA_stride, srcB_stride, alpha, beta); } // CURRENTLY UNUSED... void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask, MatrixDim dmat, MatrixDim dmask) { _apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask); } /* * CuVector */ void cudaD_max_mat_cols(int Gr, int Bl, double* result, const double* mat, const MatrixDim d) { _transform_reduce_mat_cols<<<Gr,Bl>>>(result,mat,d, TransReduceOp<MAX,double>()); } void cudaD_min_mat_cols(int Gr, int Bl, double* result, const double* mat, const MatrixDim d) { _transform_reduce_mat_cols<<<Gr,Bl>>>(result,mat,d, TransReduceOp<MIN,double>()); } void cudaD_sum_mat_cols(int Gr, int Bl, double* result, const double* mat, const MatrixDim d) { _transform_reduce_mat_cols<<<Gr,Bl>>>(result,mat,d, TransReduceOp<SUM,double>()); } void cudaD_add_col_sum_mat(int Gr, int Bl, double* result, const double* mat, const MatrixDim d, const double alpha, const double beta) { _transform_reduce_mat_cols<<<Gr, Bl>>>(result, mat, d, TransReduceOp<SUMAB, double>(alpha, beta)); } void cudaD_replace_value(int Gr, int Bl, double *v, int dim, double orig, double changed) { _replace_value<<<Gr,Bl>>>(v, dim, orig, changed); } void cudaD_set_bias_params(int Gr, int Bl, double* v, const double* a, double param_1, double param_2, double param_3, int* flag, int dim) { _set_bias_params<<<Gr,Bl>>>(v,a,param_1,param_2,param_3,flag,dim); } void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a, int dim) { _vec_mul_elements<<<Gr,Bl>>>(v, a, dim); } void cudaD_vec_min(int Gr, int Bl, const double* v, double* value, int dim, int inc) { _vec_transform_reduce<<<Gr,Bl>>>(v, value, dim, inc, TransReduceOp<MIN, double>()); } void cudaD_vec_max(int Gr, int Bl, const double* v, double* value, int dim, int inc) { _vec_transform_reduce<<<Gr,Bl>>>(v, value, dim, inc, TransReduceOp<MAX, double>()); } void cudaD_trace_mat_mat_trans(dim3 Gr, dim3 Bl, const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { _trace_mat_mat_trans<<<Gr,Bl>>>(A,B,dA,B_stride,value); } void cudaD_trace_mat_mat(dim3 Gr, dim3 Bl, const double* A, const double* B, MatrixDim dA, int B_stride, double* value) { _trace_mat_mat<32> <<<Gr,Bl>>>(A,B,dA,B_stride,value); } void cudaD_add_diag_mat_mat_MNT(int Gr, int Bl, const double alpha, const double* M, const MatrixDim dim_M, const double* N, const int stride_N, const double beta, double* v) { 
_add_diag_mat_mat_MNT<<<Gr,Bl>>>(alpha,M,dim_M,N,stride_N,beta,v); } void cudaD_add_diag_mat_mat_MTN(dim3 Gr, dim3 Bl, const double alpha, const double* M, const int stride_M, const double* N, const MatrixDim dim_N, const double beta, double* v) { if (Bl.x == 16) { _add_diag_mat_mat_MTN<16> <<<Gr,Bl>>>(alpha,M,stride_M,N,dim_N,beta,v); } else if (Bl.x==32) { _add_diag_mat_mat_MTN<32><<<Gr,Bl>>>(alpha,M,stride_M,N,dim_N,beta,v); } } void cudaD_add_diag_mat_mat_MN(dim3 Gr, dim3 Bl, const double alpha, const double* M, const int stride_M, const double* N, const MatrixDim dim_N, const double beta, double* v) { if (Bl.x == 16) { _add_diag_mat_mat_MN<16> <<<Gr,Bl>>>(alpha,M,stride_M,N,dim_N,beta,v); } else if (Bl.x==32) { _add_diag_mat_mat_MN<32><<<Gr,Bl>>>(alpha,M,stride_M,N,dim_N,beta,v); } } void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x, const double* y, double beta, int dim) { _add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim); } void cudaD_copy_col_from_mat_df(int Gr, int Bl, double* v, int col, const double* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_df<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaD_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col, const double* mat, MatrixDim dmat, int dim) { _copy_col_from_mat_fd<<<Gr,Bl>>>(v,col,mat,dmat,dim); } void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) { _vec_transform_reduce<<<Gr,Bl>>>(v,value,dim,inc, TransReduceOp<SUM, double>()); } void cudaD_matrix_add_elements(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, double alpha, MatrixElement<double>* x, int num_elements) { _cuda_matrix_add_elements<<<Gr, Bl>>>(data, dim, alpha, x, num_elements); } void cudaD_vector_copy_elements(dim3 Gr, dim3 Bl, double *data, int dim, const double *src_mat, int mat_stride, bool transpose, const MatrixIndexT_cuda* elements) { _cuda_vector_copy_elements<<<Gr, Bl>>>(data, dim, src_mat, mat_stride, transpose, elements); } void cudaD_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim, double alpha, const Int32Pair* indices, const double* x, int s, double* data) { _cuda_matrix_add_indexed_values<<<Gr, Bl>>>(dim, alpha, indices, x, s, data); } void cudaD_matrix_add_to_elements(dim3 Gr, dim3 Bl, double alpha, double* mat, MatrixDim dim, const MatrixIndexT_cuda* elements) { _cuda_matrix_add_to_elements<<<Gr, Bl>>>(alpha, mat, dim, elements); } void cudaD_vec_copy_diag_from_packed(int Gr, int Bl, double *dst, const double *src, int dim) { _vec_copy_diag_from_packed<<<Gr,Bl>>>(dst,src,dim); } void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val, float *count, int dim) { _vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim); } void cudaD_vec_apply_ceiling(int Gr, int Bl, double* v, double ceiling_val, float *count, int dim) { _vec_apply_ceiling<<<Gr,Bl>>>(v,ceiling_val,count,dim); } void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) { _vec_apply_exp<<<Gr,Bl>>>(v,dim); } void cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) { _vec_apply_log<<<Gr,Bl>>>(v,flag,dim); } void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) { _invert_elements<<<Gr,Bl>>>(data, d); } void cudaD_add_mat_blockmat(dim3 Gr, dim3 Bl, double *data, MatrixDim d, const double *Adata, int A_num_rows, int A_num_cols, int A_row_stride, int A_col_stride, const CuBlockMatrixData *B_cu_data, int B_num_blocks, double alpha, double beta, int B_trans) { if (B_trans) { _add_mat_blockmat_trans<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, 
B_cu_data, B_num_blocks, alpha, beta); } else { _add_mat_blockmat<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols, A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta); } } void cudaD_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data, int num_blocks, const double *C_data, int C_num_cols, int C_row_stride, int C_col_stride, const double *D_data, int D_row_stride, int D_col_stride, double alpha, double beta) { _block_add_mat_mat<<<Gr,Bl>>>(B_cu_data, num_blocks, C_data, C_num_cols, C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha, beta); } /* * cu:: */ void cudaD_soft_hinge(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _soft_hinge<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_group_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size, double power) { _group_pnorm<<<Gr,Bl>>>(y, x, d, src_stride, group_size, power); } void cudaD_group_spec_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size, double power) { if (power == double(0)) { _group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size, TransReduceOp<L0NORM, double>()); } else if (power == double(1)) { _group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size, TransReduceOp<L1NORM, double>()); } else if (power == double(2)) { _group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size, TransReduceOp<L2NORM, double>()); } else if (power == std::numeric_limits<double>::infinity()) { _group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size, TransReduceOp<LINFNORM, double>()); } else { _group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size, TransReduceOp<LPNORM, double>(power)); } } void cudaD_group_max(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, int group_size) { _group_transform_reduce<<<Gr,Bl>>>(y, x, d, src_stride, group_size, TransReduceOp<MAX, double>()); } void cudaD_sigmoid(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _sigmoid<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_diff_sigmoid(dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { _diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaD_tanh(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _tanh<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_diff_tanh(dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride) { _diff_tanh<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride); } void cudaD_ensure_nonzero(dim3 Gr, dim3 Bl, const double *x, MatrixDim d, double epsilon, int y_stride, double *y) { _ensure_nonzero<<<Gr,Bl>>>(x, d, epsilon, y_stride, y); } void cudaD_parametric_relu(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride, const double* a, const double* b) { _parametric_relu<<<Gr,Bl>>>(y, x, d, src_stride, a, b); } void cudaD_diff_parametric_relu(dim3 Gr, dim3 Bl, double* eout, const double* e, const double* y, MatrixDim d, int e_stride, int y_stride, const double* a, const double* b) { _diff_parametric_relu<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride, a, b); } void cudaD_heaviside(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d, int src_stride) { _heaviside<<<Gr,Bl>>>(y, x, d, src_stride); } void cudaD_softmax_reduce(size_t Gr, size_t Bl, double* y, const double* x, MatrixDim d, int src_stride) { _softmax_reduce<<<Gr,Bl>>>(y, 
x, d, src_stride); } void cudaD_log_softmax_reduce(size_t Gr, size_t Bl, double* y, const double* x, MatrixDim y_dim, int x_stride) { _log_softmax_reduce<<<Gr,Bl>>>(y, x, y_dim, x_stride); } void cudaD_normalize_per_row(size_t Gr, size_t Bl, double *y, int y_stride, const double *x, MatrixDim x_d, double target_rms, bool add_log_stddev) { _normalize_per_row<<<Gr, Bl>>>(y, y_stride, x, x_d, target_rms, add_log_stddev); } void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) { _splice<<<Gr,Bl>>>(y,x,off,d_out,d_in); } void cudaD_one(int Gr, int Bl, double* x, int dim) { _one<<<Gr,Bl>>>(x,dim); } void cudaD_take_mean(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { _take_mean<<<Gr,Bl>>>(x,y,d_in); } void cudaD_take_lower(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { _take_lower<<<Gr,Bl>>>(x,y,d_in); } void cudaD_take_upper(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_in) { _take_upper<<<Gr,Bl>>>(x,y,d_in); } void cudaD_copy_from_sp(dim3 Gr, dim3 Bl, const double* x, double* y, MatrixDim d_out) { _copy_from_sp<<<Gr,Bl>>>(x,y,d_out); } void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x, const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) { _randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in); } void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1, double lr, MatrixDim d, int stride_grad) { _regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d,stride_grad); } void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val, int32_cuda* vec_id, MatrixDim d) { _find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, d); } void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt, double* mat_net_out, double* vec_log_post, MatrixDim d) { _diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d); } void cudaD_diff_softmax(dim3 Gr, dim3 Bl, double* x, const MatrixDim dim, const double* value, const int value_stride, const double* diff, const int diff_stride) { _diff_softmax<<<Gr, Bl>>>(x, dim, value, value_stride, diff, diff_stride); } void cudaD_diff_log_softmax(dim3 Gr, dim3 Bl, const MatrixDim in_deriv_dim, const double* out_value, const int out_value_stride, const double* out_deriv, const int out_deriv_stride, double* in_deriv) { _diff_log_softmax<<<Gr, Bl>>>(in_deriv_dim, out_value, out_value_stride, out_deriv, out_deriv_stride, in_deriv); } void cudaD_copy_rows_from_vec(dim3 Gr, dim3 Bl, double *mat_out, MatrixDim d_out, const double *v_in) { _copy_rows_from_vec<<<Gr,Bl>>>(mat_out, d_out, v_in); } void cudaD_sum_column_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indices) { _sum_column_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indices); } void cudaD_add_row_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim, const double *src_data, MatrixDim src_dim, const Int32Pair *indexes) { _add_row_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indexes); } void cudaD_matrix_lookup(dim3 Gr, dim3 Bl, const double *data, MatrixDim dim, const Int32Pair *indices, int indices_size, double *output) { _matrix_lookup<<<Gr,Bl>>>(data, dim, indices, indices_size, output); } void cudaD_equal_element_mask(dim3 Gr, dim3 Bl, const double *mat1, const double *mat2, double *mask, MatrixDim mat1_dim, int mat2_stride, 
int mask_stride) { _equal_element_mask<<<Gr,Bl>>>(mat1, mat2, mask, mat1_dim, mat2_stride, mask_stride); } // Some conversion kernels for which it's more convenient // to not name them F or D. void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<32> <<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out, const float* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<32> <<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<32> <<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out, const double* mat_in, MatrixDim d_out, MatrixDim d_in) { _copy_from_mat_trans<32> <<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in); } void cuda_copy_from_smat_ff(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const float* smat_val) { _copy_from_smat<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val); } void cuda_copy_from_smat_fd(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const double* smat_val) { _copy_from_smat<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val); } void cuda_copy_from_smat_df(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const float* smat_val) { _copy_from_smat<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val); } void cuda_copy_from_smat_dd(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const double* smat_val) { _copy_from_smat<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val); } void cuda_copy_from_smat_ff_trans(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const float* smat_val) { _copy_from_smat_trans<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val); } void cuda_copy_from_smat_fd_trans(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const double* smat_val) { _copy_from_smat_trans<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val); } void cuda_copy_from_smat_df_trans(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const float* smat_val) { _copy_from_smat_trans<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val); } void cuda_copy_from_smat_dd_trans(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const double* smat_val) { _copy_from_smat_trans<<<Gr, Bl>>>(mat, mat_dim, 
smat_row_ptr, smat_col_idx, smat_val); } void cudaF_trace_mat_smat(dim3 Gr, dim3 Bl, const float* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const float* smat_val, float* trace_vec) { _trace_mat_smat<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val, trace_vec); } void cudaF_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const float* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const float* smat_val, float* trace_vec) { _trace_mat_smat_trans<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val, trace_vec); } void cudaD_trace_mat_smat(dim3 Gr, dim3 Bl, const double* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const double* smat_val, double* trace_vec) { _trace_mat_smat<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val, trace_vec); } void cudaD_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const double* mat, MatrixDim mat_dim, const int* smat_row_ptr, const int* smat_col_idx, const double* smat_val, double* trace_vec) { _trace_mat_smat_trans<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx, smat_val, trace_vec); } void cudaD_lstm_nonlinearity(dim3 Gr, dim3 Bl, const double* in, const int in_stride, const double* params, const int params_stride, const int out_stride, const int cell_dim, const int have_dropout_mask, const int num_rows, double* out) { _lstm_nonlinearity<<<Gr, Bl>>>( in, in_stride, params, params_stride, out_stride, cell_dim, have_dropout_mask, num_rows, out); } void cudaF_lstm_nonlinearity(dim3 Gr, dim3 Bl, const float* in, const int in_stride, const float* params, const int params_stride, const int out_stride, const int cell_dim, const int have_dropout_mask, const int num_rows, float* out) { _lstm_nonlinearity<<<Gr, Bl>>>( in, in_stride, params, params_stride, out_stride, cell_dim, have_dropout_mask, num_rows, out); } void cudaD_diff_lstm_nonlinearity(dim3 Gr, dim3 Bl, const int cell_dim, const int have_dropout_mask, const int num_rows, const double* input, const int input_stride, const double* params, const int params_stride, const double* output_deriv, const int output_deriv_stride, const double* deriv_sum_in, const int deriv_sum_in_stride, const double* self_repair_config, double count, double* input_deriv, const int input_deriv_stride, double* params_deriv, const int params_deriv_stride, double* value_sum_out, const int value_sum_out_stride, double* deriv_sum_out, const int deriv_sum_out_stride, double* self_repair_sum_out, const int self_repair_sum_out_stride) { _diff_lstm_nonlinearity<<<Gr, Bl>>>( cell_dim, have_dropout_mask, num_rows, input, input_stride, params, params_stride, output_deriv, output_deriv_stride, deriv_sum_in, deriv_sum_in_stride, self_repair_config, count, input_deriv, input_deriv_stride, params_deriv, params_deriv_stride, value_sum_out, value_sum_out_stride, deriv_sum_out, deriv_sum_out_stride, self_repair_sum_out, self_repair_sum_out_stride); } void cudaF_diff_lstm_nonlinearity(dim3 Gr, dim3 Bl, const int cell_dim, const int have_dropout_mask, const int num_rows, const float* input, const int input_stride, const float* params, const int params_stride, const float* output_deriv, const int output_deriv_stride, const double* deriv_sum_in, const int deriv_sum_in_stride, const float* self_repair_config, double count, float* input_deriv, const int input_deriv_stride, float* params_deriv, const int params_deriv_stride, double* value_sum_out, const int value_sum_out_stride, double* deriv_sum_out, const int deriv_sum_out_stride, float* 
self_repair_sum_out, const int self_repair_sum_out_stride) { _diff_lstm_nonlinearity<<<Gr, Bl>>>( cell_dim, have_dropout_mask, num_rows, input, input_stride, params, params_stride, output_deriv, output_deriv_stride, deriv_sum_in, deriv_sum_in_stride, self_repair_config, count, input_deriv, input_deriv_stride, params_deriv, params_deriv_stride, value_sum_out, value_sum_out_stride, deriv_sum_out, deriv_sum_out_stride, self_repair_sum_out, self_repair_sum_out_stride); } void cudaD_copy_cols_from_vec(dim3 Gr, dim3 Bl, double *mat_out, MatrixDim d_out, const double *v_in) { _copy_cols_from_vec<<<Gr, Bl>>>(mat_out, d_out, v_in); } void cudaF_copy_cols_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out, const float *v_in) { _copy_cols_from_vec<<<Gr, Bl>>>(mat_out, d_out, v_in); } void cudaF_diff_normalize_per_row(size_t Gr, size_t Bl, float *id, int id_stride, const float *iv, MatrixDim iv_dim, const float* od, int od_stride, float target_rms, bool add_log_stddev) { _diff_normalize_per_row<<<Gr, Bl>>>(id, id_stride, iv, iv_dim, od, od_stride, target_rms, add_log_stddev); } void cudaD_diff_normalize_per_row(size_t Gr, size_t Bl, double *id, int id_stride, const double *iv, MatrixDim iv_dim, const double* od, int od_stride, double target_rms, bool add_log_stddev) { _diff_normalize_per_row<<<Gr, Bl>>>(id, id_stride, iv, iv_dim, od, od_stride, target_rms, add_log_stddev); } void cudaD_select_rows(dim3 Gr, dim3 Bl, const int* out_row_ptr, int* out_col_idx, double* out_val, const int* row_indexes, const int num_selected_rows, const int* in_row_ptr, const int* in_col_idx, const double* in_val) { _select_rows<<<Gr, Bl>>>(out_row_ptr, out_col_idx, out_val, row_indexes, num_selected_rows, in_row_ptr, in_col_idx, in_val); } void cudaF_select_rows(dim3 Gr, dim3 Bl, const int* out_row_ptr, int* out_col_idx, float* out_val, const int* row_indexes, const int num_selected_rows, const int* in_row_ptr, const int* in_col_idx, const float* in_val) { _select_rows<<<Gr, Bl>>>(out_row_ptr, out_col_idx, out_val, row_indexes, num_selected_rows, in_row_ptr, in_col_idx, in_val); } void cudaD_add_smat(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim, double alpha, const int* smat_row_ptr, const int* smat_col_idx, const double* smat_val) { _add_smat<<<Gr, Bl>>>(mat, mat_dim, alpha, smat_row_ptr, smat_col_idx, smat_val); } void cudaF_add_smat(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim, float alpha, const int* smat_row_ptr, const int* smat_col_idx, const float* smat_val) { _add_smat<<<Gr, Bl>>>(mat, mat_dim, alpha, smat_row_ptr, smat_col_idx, smat_val); } void cudaD_add_smat_trans(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim, double alpha, const int* smat_row_ptr, const int* smat_col_idx, const double* smat_val) { _add_smat_trans<<<Gr, Bl>>>(mat, mat_dim, alpha, smat_row_ptr, smat_col_idx, smat_val); } void cudaF_add_smat_trans(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim, float alpha, const int* smat_row_ptr, const int* smat_col_idx, const float* smat_val) { _add_smat_trans<<<Gr, Bl>>>(mat, mat_dim, alpha, smat_row_ptr, smat_col_idx, smat_val); } void cudaD_apply_exp_special(dim3 Gr, dim3 Bl, double* out, MatrixDim out_dim, const double* in, int in_stride) { _apply_exp_special<<<Gr, Bl>>>(out, out_dim, in, in_stride); } void cudaF_apply_exp_special(dim3 Gr, dim3 Bl, float* out, MatrixDim out_dim, const float* in, int in_stride) { _apply_exp_special<<<Gr, Bl>>>(out, out_dim, in, in_stride); } void cuda_compress_uint8_sign(dim3 Gr, dim3 Bl, const BaseFloat *src, MatrixDim dim, unsigned char *dest, int 
dest_stride) { _cuda_compress_uint8_sign<<<Gr, Bl>>>(src, dim, dest, dest_stride); } void cuda_compress_int16(dim3 Gr, dim3 Bl, const BaseFloat *src, MatrixDim dim, int16_t *dest, int dest_stride, float inv_scale, bool bounds_check) { if (bounds_check) { _cuda_compress_bounds_check<<<Gr, Bl>>>(src, dim, dest, dest_stride, inv_scale); } else { _cuda_compress_no_bounds_check<<<Gr, Bl>>>(src, dim, dest, dest_stride, inv_scale); } } void cuda_compress_uint16(dim3 Gr, dim3 Bl, const BaseFloat *src, MatrixDim dim, uint16_t *dest, int dest_stride, float inv_scale, bool bounds_check) { if (bounds_check) { _cuda_compress_bounds_check<<<Gr, Bl>>>(src, dim, dest, dest_stride, inv_scale); } else { _cuda_compress_no_bounds_check<<<Gr, Bl>>>(src, dim, dest, dest_stride, inv_scale); } } void cuda_compress_int8(dim3 Gr, dim3 Bl, const BaseFloat *src, MatrixDim dim, int8_t *dest, int dest_stride, float inv_scale, bool bounds_check) { if (bounds_check) { _cuda_compress_bounds_check<<<Gr, Bl>>>(src, dim, dest, dest_stride, inv_scale); } else { _cuda_compress_no_bounds_check<<<Gr, Bl>>>(src, dim, dest, dest_stride, inv_scale); } } void cuda_compress_uint8(dim3 Gr, dim3 Bl, const BaseFloat *src, MatrixDim dim, uint8_t *dest, int dest_stride, float inv_scale, bool bounds_check) { if (bounds_check) { _cuda_compress_bounds_check<<<Gr, Bl>>>(src, dim, dest, dest_stride, inv_scale); } else { _cuda_compress_no_bounds_check<<<Gr, Bl>>>(src, dim, dest, dest_stride, inv_scale); } } void cuda_uncompress_uint8(dim3 Gr, dim3 Bl, BaseFloat *dest, MatrixDim dim, const uint8_t *src, int src_stride, float scale) { _cuda_uncompress<<<Gr, Bl>>>(dest, dim, src, src_stride, scale); } void cuda_uncompress_int8(dim3 Gr, dim3 Bl, BaseFloat *dest, MatrixDim dim, const int8_t *src, int src_stride, float scale) { _cuda_uncompress<<<Gr, Bl>>>(dest, dim, src, src_stride, scale); } void cuda_uncompress_uint16(dim3 Gr, dim3 Bl, BaseFloat *dest, MatrixDim dim, const uint16_t *src, int src_stride, float scale) { _cuda_uncompress<<<Gr, Bl>>>(dest, dim, src, src_stride, scale); } void cuda_uncompress_int16(dim3 Gr, dim3 Bl, BaseFloat *dest, MatrixDim dim, const int16_t *src, int src_stride, float scale) { _cuda_uncompress<<<Gr, Bl>>>(dest, dim, src, src_stride, scale); }
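The compress/uncompress wrappers that close this file work as a pair: the compress side multiplies each element by inv_scale and rounds to the target integer type (clamping when bounds_check is set), and the uncompress side multiplies the stored integers back by scale. The host-side sketch below shows one way such a pair might be driven; the block shape, the reuse of the source stride for the packed matrix, and the choice of float for BaseFloat are assumptions made purely for illustration, and the helper is not part of the original file (it relies on the wrapper prototypes and MatrixDim declared with the code above).

#include <cstdint>

// Illustrative sketch only: round-trip a matrix through int16 compression.
// Assumes BaseFloat is float and that the packed matrix shares the source stride.
void example_int16_roundtrip(const BaseFloat *d_src, BaseFloat *d_restored,
                             int16_t *d_packed, MatrixDim dim, float max_abs) {
  // Map the largest magnitude onto the int16 range: compression multiplies by
  // inv_scale, decompression multiplies back by scale.
  float scale = max_abs / 32767.0f;
  float inv_scale = 1.0f / scale;
  dim3 Bl(32, 8);                                   // assumed block shape
  dim3 Gr((dim.cols + Bl.x - 1) / Bl.x,
          (dim.rows + Bl.y - 1) / Bl.y);
  // bounds_check = true selects the clamping kernel, so values outside
  // [-32768, 32767] saturate instead of wrapping.
  cuda_compress_int16(Gr, Bl, d_src, dim, d_packed, dim.stride, inv_scale, true);
  cuda_uncompress_int16(Gr, Bl, d_restored, dim, d_packed, dim.stride, scale);
}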
05fc88d764b5897ec3acd5031702dcf5442d49c2.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ __global__ void reduce(T1 *g_idata, T1 *g_odata, unsigned int n) { __shared__ volatile T1 sdata[2 * 256]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; T1 mySum = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { mySum += g_idata[i]; // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (nIsPow2 || i + blockSize < n) mySum += g_idata[i+blockSize]; i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockSize >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile T1* smem = sdata; if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; }
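The kernel above refers to T1, blockSize and nIsPow2 without declaring them, so it can only compile once those names are supplied from outside (for example as template parameters or compile-time constants; neither appears in this fragment). Under that assumption, a HIP-side launch could look like the sketch below; the element type float, the 256-thread block, and all variable names are placeholders, not values taken from the file.

#include <hip/hip_runtime.h>

// Sketch: assumes T1 = float, blockSize = 256 and nIsPow2 = true have been
// provided for the reduce() kernel above; num_blocks and the device pointers
// are hypothetical.
void launch_reduce(float *d_in, float *d_out, unsigned int n, int num_blocks) {
  // hipLaunchKernelGGL(kernel, grid, block, dynamic_shared_bytes, stream, args...)
  hipLaunchKernelGGL(reduce, dim3(num_blocks), dim3(256), 0, 0, d_in, d_out, n);
  // hipcc also accepts the CUDA-style spelling:
  //   reduce<<<num_blocks, 256>>>(d_in, d_out, n);
}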
05fc88d764b5897ec3acd5031702dcf5442d49c2.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * NVIDIA Corporation and its licensors retain all intellectual property and * proprietary rights in and to this software and related documentation. * Any use, reproduction, disclosure, or distribution of this software * and related documentation without an express license agreement from * NVIDIA Corporation is strictly prohibited. * * Please refer to the applicable NVIDIA end user license agreement (EULA) * associated with this source code for terms and conditions that govern * your use of this NVIDIA software. * */ __global__ void reduce(T1 *g_idata, T1 *g_odata, unsigned int n) { __shared__ volatile T1 sdata[2 * 256]; // perform first level of reduction, // reading from global memory, writing to shared memory unsigned int tid = threadIdx.x; unsigned int i = blockIdx.x*blockSize*2 + threadIdx.x; unsigned int gridSize = blockSize*2*gridDim.x; T1 mySum = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { mySum += g_idata[i]; // ensure we don't read out of bounds -- this is optimized away for powerOf2 sized arrays if (nIsPow2 || i + blockSize < n) mySum += g_idata[i+blockSize]; i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockSize >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockSize >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockSize >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. volatile T1* smem = sdata; if (blockSize >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } if (blockSize >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (blockSize >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (blockSize >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (blockSize >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (blockSize >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; }
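Neither the .hip nor the .cu version of this file declares T1, blockSize or nIsPow2, so the fragment is evidently meant to be compiled in a context where those are defined (the NVIDIA reduction sample it closely follows passes them as template parameters). Note also that the kernel leaves one partial sum per block in g_odata, so a final combination step is still required. A minimal host-side finish is sketched below, with float and all names assumed purely for illustration; it is not part of the original pair.

#include <vector>
#include <cuda_runtime.h>

// Sketch: copy the per-block partial sums back and add them up on the host.
float finish_reduction(const float *d_block_sums, int num_blocks) {
  std::vector<float> partial(num_blocks);
  cudaMemcpy(partial.data(), d_block_sums, num_blocks * sizeof(float),
             cudaMemcpyDeviceToHost);
  float total = 0.0f;
  for (int b = 0; b < num_blocks; ++b)
    total += partial[b];  // combine the one-value-per-block results from g_odata
  return total;
}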
d95e1840918337d00d2e4822e0fb1915e5edb386.hip
// !!! This is a file automatically generated by hipify!!! /** * @brief Breadth-first Search Top-Down test program * @file */ #include "Static/BreadthFirstSearch/TopDown2.cuh" #include <Graph/GraphStd.hpp> #include <Util/CommandLineParam.hpp> #include <hip/hip_runtime_api.h> //--profile-from-start off int main(int argc, char* argv[]) { using namespace timer; using namespace hornets_nest; graph::GraphStd<vid_t, eoff_t> graph; CommandLineParam cmd(graph, argc, argv,false); HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(), graph.csr_out_edges()); HornetGraph hornet_graph(hornet_init); BfsTopDown2 bfs_top_down(hornet_graph); vid_t root = graph.max_out_degree_id(); if (argc==3) root = atoi(argv[2]); bfs_top_down.set_parameters(root); Timer<DEVICE> TM; hipProfilerStart(); TM.start(); bfs_top_down.run(); TM.stop(); hipProfilerStop(); TM.print("TopDown2"); auto is_correct = bfs_top_down.validate(); std::cout << (is_correct ? "\nCorrect <>\n\n" : "\n! Not Correct\n\n"); return !is_correct; }
d95e1840918337d00d2e4822e0fb1915e5edb386.cu
/** * @brief Breadth-first Search Top-Down test program * @file */ #include "Static/BreadthFirstSearch/TopDown2.cuh" #include <Graph/GraphStd.hpp> #include <Util/CommandLineParam.hpp> #include <cuda_profiler_api.h> //--profile-from-start off int main(int argc, char* argv[]) { using namespace timer; using namespace hornets_nest; graph::GraphStd<vid_t, eoff_t> graph; CommandLineParam cmd(graph, argc, argv,false); HornetInit hornet_init(graph.nV(), graph.nE(), graph.csr_out_offsets(), graph.csr_out_edges()); HornetGraph hornet_graph(hornet_init); BfsTopDown2 bfs_top_down(hornet_graph); vid_t root = graph.max_out_degree_id(); if (argc==3) root = atoi(argv[2]); bfs_top_down.set_parameters(root); Timer<DEVICE> TM; cudaProfilerStart(); TM.start(); bfs_top_down.run(); TM.stop(); cudaProfilerStop(); TM.print("TopDown2"); auto is_correct = bfs_top_down.validate(); std::cout << (is_correct ? "\nCorrect <>\n\n" : "\n! Not Correct\n\n"); return !is_correct; }
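In both versions the profiler-control calls bracket only the traversal itself, which is meant to be combined with starting the profiler with collection disabled (the --profile-from-start off note at the top of the file). A minimal standalone sketch of that pattern, with a placeholder kernel standing in for bfs_top_down.run():

// Sketch: scope profiling to one region; run under e.g. `nvprof --profile-from-start off ./a.out`.
#include <cuda_profiler_api.h>
#include <cuda_runtime.h>

__global__ void work() { }       // placeholder for the algorithm being measured

int main() {
    work<<<1, 1>>>();            // warm-up launch, outside the profiled region
    cudaDeviceSynchronize();

    cudaProfilerStart();         // collection begins here ...
    work<<<64, 256>>>();
    cudaDeviceSynchronize();
    cudaProfilerStop();          // ... and ends here
    return 0;
}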
e6b9116d142a5d6740149f9f9fbbe6fc2e505062.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * TP 2 - Convolution d'images * -------------------------- * Mmoire constante et textures * * File: student.cu * Author: Maxime MARIA */ #include "student.hpp" #include "chronoGPU.hpp" namespace IMAC { // ================================================== For image comparison std::ostream &operator <<(std::ostream &os, const uchar4 &c) { os << "[" << uint(c.x) << "," << uint(c.y) << "," << uint(c.z) << "," << uint(c.w) << "]"; return os; } void compareImages(const std::vector<uchar4> &a, const std::vector<uchar4> &b) { bool error = false; if (a.size() != b.size()) { std::cout << "Size is different !" << std::endl; error = true; } else { for (uint i = 0; i < a.size(); ++i) { // Floating precision can cause small difference between host and device if ( std::abs(a[i].x - b[i].x) > 2 || std::abs(a[i].y - b[i].y) > 2 || std::abs(a[i].z - b[i].z) > 2 || std::abs(a[i].w - b[i].w) > 2) { std::cout << "Error at index " << i << ": a = " << a[i] << " - b = " << b[i] << " - " << std::abs(a[i].x - b[i].x) << std::endl; error = true; break; } } } if (error) { std::cout << " -> You failed, retry!" << std::endl; } else { std::cout << " -> Well done!" << std::endl; } } // ================================================== __constant__ float dev_matConv[225]; texture<uchar4, 2, hipReadModeElementType> texRef; __device__ float clampfGPU(const float val, const float min , const float max) { return fminf(max, fmaxf(min, val)); } __global__ void convGPU(const uchar4 *input, const uint imgWidth, const uint imgHeight, const uint matSize, uchar4 *output){ // id global en x const int idThreadGX = threadIdx.x // id du thread dans le block + blockIdx.x // id du block dans la grid * blockDim.x; // taille d'un block, nb threads dans blocks // nb threads global en x const int nbThreadsGX = blockDim.x * gridDim.x; // nb blocks dans grid // id global en y const int idThreadGY = threadIdx.y // id du thread dans le block + blockIdx.y // id du block dans la grid * blockDim.y; // taille d'un block, nb threads dans blocks // nb threads global en y const int nbThreadsGY = blockDim.y * gridDim.y; // nb blocks dans grid for (int idY = idThreadGY; idY < imgHeight; idY += nbThreadsGY) { for(int idX = idThreadGX; idX < imgWidth; idX += nbThreadsGX){ float3 sum = make_float3(0.f,0.f,0.f); // Apply convolution for ( uint j = 0; j < matSize; ++j ) { for ( uint i = 0; i < matSize; ++i ) { int dX = idX + i - matSize / 2; int dY = idY + j - matSize / 2; // Handle borders if ( dX < 0 ) dX = 0; if ( dX >= imgWidth ) dX = imgWidth - 1; if ( dY < 0 ) dY = 0; if ( dY >= imgHeight ) dY = imgHeight - 1; const int idMat = j * matSize + i; uchar4 value = tex2D(texRef, dX, dY); sum.x += (float)value.x * dev_matConv[idMat]; sum.y += (float)value.y * dev_matConv[idMat]; sum.z += (float)value.z * dev_matConv[idMat]; /*sum.x += (float)input[idPixel].x * matConv[idMat]; sum.y += (float)input[idPixel].y * matConv[idMat]; sum.z += (float)input[idPixel].z * matConv[idMat];*/ } } const int idOut = idY * imgWidth + idX; output[idOut].x = (uchar)clampfGPU( sum.x, 0.f, 255.f ); output[idOut].y = (uchar)clampfGPU( sum.y, 0.f, 255.f ); output[idOut].z = (uchar)clampfGPU( sum.z, 0.f, 255.f ); output[idOut].w = 255; } } } void studentJob(const std::vector<uchar4> &inputImg, // Input image const uint imgWidth, const uint imgHeight, // Image size const std::vector<float> &matConv, // Convolution matrix (square) const uint matSize, // Matrix size (width or height) const 
std::vector<uchar4> &resultCPU, // Just for comparison std::vector<uchar4> &output // Output image ) { ChronoGPU chrGPU; // 2 arrays for GPU uchar4 *dev_inputImg = NULL; uchar4 *dev_output = NULL; std::cout << "Allocating 3 arrays: "; chrGPU.start(); const size_t bytesImg = inputImg.size() * sizeof(uchar4); const size_t bytesMat = matConv.size() * sizeof(float); size_t pitchImg = 0; HANDLE_ERROR(hipMallocPitch((void**) &dev_inputImg, &pitchImg, imgWidth * sizeof(uchar4), imgHeight)); HANDLE_ERROR(hipMalloc((void **) &dev_output, bytesImg)); chrGPU.stop(); std::cout << "Allocation -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl; std::cout << "Copying data to GPU : "; chrGPU.start(); // Copy data from host to device (input arrays) HANDLE_ERROR(hipMemcpy2D(dev_inputImg, pitchImg, inputImg.data(), imgWidth * sizeof(uchar4), imgWidth * sizeof(uchar4), imgHeight, hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpyToSymbol(dev_matConv, matConv.data(), bytesMat)); chrGPU.stop(); std::cout << "Copying -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl; // Texture hipChannelFormatDesc channelDesc = hipCreateChannelDesc<uchar4>(); HANDLE_ERROR(hipBindTexture2D(NULL, texRef, dev_inputImg, hipCreateChannelDesc<uchar4>(), imgWidth, imgHeight, pitchImg)); // Launch kernel chrGPU.start();//dim3 std::cout << "Lauching the kernel"; hipLaunchKernelGGL(( convGPU), dim3(dim3(16, 16)), dim3(dim3(32, 32)), 0, 0, dev_inputImg, imgWidth, imgHeight, matSize, dev_output); chrGPU.stop(); std::cout << "Calculations -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl << std::endl; std::cout << "Copying data to CPU : "; chrGPU.start(); // Copy data from device to host (output array) HANDLE_ERROR (hipMemcpy(output.data(), dev_output, bytesImg, hipMemcpyDeviceToHost)); chrGPU.stop(); std::cout << "Copying -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl; compareImages(resultCPU, output); // Free arrays on device HANDLE_ERROR(hipUnbindTexture(texRef)); HANDLE_ERROR(hipFree(dev_inputImg)); HANDLE_ERROR(hipFree(dev_output)); } }
e6b9116d142a5d6740149f9f9fbbe6fc2e505062.cu
/* * TP 2 - Convolution d'images * -------------------------- * Mémoire constante et textures * * File: student.cu * Author: Maxime MARIA */ #include "student.hpp" #include "chronoGPU.hpp" namespace IMAC { // ================================================== For image comparison std::ostream &operator <<(std::ostream &os, const uchar4 &c) { os << "[" << uint(c.x) << "," << uint(c.y) << "," << uint(c.z) << "," << uint(c.w) << "]"; return os; } void compareImages(const std::vector<uchar4> &a, const std::vector<uchar4> &b) { bool error = false; if (a.size() != b.size()) { std::cout << "Size is different !" << std::endl; error = true; } else { for (uint i = 0; i < a.size(); ++i) { // Floating precision can cause small difference between host and device if ( std::abs(a[i].x - b[i].x) > 2 || std::abs(a[i].y - b[i].y) > 2 || std::abs(a[i].z - b[i].z) > 2 || std::abs(a[i].w - b[i].w) > 2) { std::cout << "Error at index " << i << ": a = " << a[i] << " - b = " << b[i] << " - " << std::abs(a[i].x - b[i].x) << std::endl; error = true; break; } } } if (error) { std::cout << " -> You failed, retry!" << std::endl; } else { std::cout << " -> Well done!" << std::endl; } } // ================================================== __constant__ float dev_matConv[225]; texture<uchar4, 2, cudaReadModeElementType> texRef; __device__ float clampfGPU(const float val, const float min , const float max) { return fminf(max, fmaxf(min, val)); } __global__ void convGPU(const uchar4 *input, const uint imgWidth, const uint imgHeight, const uint matSize, uchar4 *output){ // id global en x const int idThreadGX = threadIdx.x // id du thread dans le block + blockIdx.x // id du block dans la grid * blockDim.x; // taille d'un block, nb threads dans blocks // nb threads global en x const int nbThreadsGX = blockDim.x * gridDim.x; // nb blocks dans grid // id global en y const int idThreadGY = threadIdx.y // id du thread dans le block + blockIdx.y // id du block dans la grid * blockDim.y; // taille d'un block, nb threads dans blocks // nb threads global en y const int nbThreadsGY = blockDim.y * gridDim.y; // nb blocks dans grid for (int idY = idThreadGY; idY < imgHeight; idY += nbThreadsGY) { for(int idX = idThreadGX; idX < imgWidth; idX += nbThreadsGX){ float3 sum = make_float3(0.f,0.f,0.f); // Apply convolution for ( uint j = 0; j < matSize; ++j ) { for ( uint i = 0; i < matSize; ++i ) { int dX = idX + i - matSize / 2; int dY = idY + j - matSize / 2; // Handle borders if ( dX < 0 ) dX = 0; if ( dX >= imgWidth ) dX = imgWidth - 1; if ( dY < 0 ) dY = 0; if ( dY >= imgHeight ) dY = imgHeight - 1; const int idMat = j * matSize + i; uchar4 value = tex2D(texRef, dX, dY); sum.x += (float)value.x * dev_matConv[idMat]; sum.y += (float)value.y * dev_matConv[idMat]; sum.z += (float)value.z * dev_matConv[idMat]; /*sum.x += (float)input[idPixel].x * matConv[idMat]; sum.y += (float)input[idPixel].y * matConv[idMat]; sum.z += (float)input[idPixel].z * matConv[idMat];*/ } } const int idOut = idY * imgWidth + idX; output[idOut].x = (uchar)clampfGPU( sum.x, 0.f, 255.f ); output[idOut].y = (uchar)clampfGPU( sum.y, 0.f, 255.f ); output[idOut].z = (uchar)clampfGPU( sum.z, 0.f, 255.f ); output[idOut].w = 255; } } } void studentJob(const std::vector<uchar4> &inputImg, // Input image const uint imgWidth, const uint imgHeight, // Image size const std::vector<float> &matConv, // Convolution matrix (square) const uint matSize, // Matrix size (width or height) const std::vector<uchar4> &resultCPU, // Just for comparison std::vector<uchar4> &output // Output 
image ) { ChronoGPU chrGPU; // 2 arrays for GPU uchar4 *dev_inputImg = NULL; uchar4 *dev_output = NULL; std::cout << "Allocating 3 arrays: "; chrGPU.start(); const size_t bytesImg = inputImg.size() * sizeof(uchar4); const size_t bytesMat = matConv.size() * sizeof(float); size_t pitchImg = 0; HANDLE_ERROR(cudaMallocPitch((void**) &dev_inputImg, &pitchImg, imgWidth * sizeof(uchar4), imgHeight)); HANDLE_ERROR(cudaMalloc((void **) &dev_output, bytesImg)); chrGPU.stop(); std::cout << "Allocation -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl; std::cout << "Copying data to GPU : "; chrGPU.start(); // Copy data from host to device (input arrays) HANDLE_ERROR(cudaMemcpy2D(dev_inputImg, pitchImg, inputImg.data(), imgWidth * sizeof(uchar4), imgWidth * sizeof(uchar4), imgHeight, cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpyToSymbol(dev_matConv, matConv.data(), bytesMat)); chrGPU.stop(); std::cout << "Copying -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl; // Texture cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<uchar4>(); HANDLE_ERROR(cudaBindTexture2D(NULL, texRef, dev_inputImg, cudaCreateChannelDesc<uchar4>(), imgWidth, imgHeight, pitchImg)); // Launch kernel chrGPU.start();//dim3 std::cout << "Lauching the kernel"; convGPU<<<dim3(16, 16), dim3(32, 32)>>>(dev_inputImg, imgWidth, imgHeight, matSize, dev_output); chrGPU.stop(); std::cout << "Calculations -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl << std::endl; std::cout << "Copying data to CPU : "; chrGPU.start(); // Copy data from device to host (output array) HANDLE_ERROR (cudaMemcpy(output.data(), dev_output, bytesImg, cudaMemcpyDeviceToHost)); chrGPU.stop(); std::cout << "Copying -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl; compareImages(resultCPU, output); // Free arrays on device HANDLE_ERROR(cudaUnbindTexture(texRef)); HANDLE_ERROR(cudaFree(dev_inputImg)); HANDLE_ERROR(cudaFree(dev_output)); } }
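The solution above allocates the input image with cudaMallocPitch and uploads it with cudaMemcpy2D, but then reads it through the 2D texture reference, so the pitch never appears in the kernel. If the texture is removed, the pitch has to be applied by hand, and that byte-based indexing is the step that is easy to get wrong. A short sketch of direct pitched indexing follows (illustrative names, not part of the exercise):

// Sketch: indexing a pitched allocation inside a kernel (assumed standalone example).
#include <cuda_runtime.h>

__global__ void brighten(uchar4 *img, size_t pitch, unsigned int width, unsigned int height) {
    unsigned int x = blockIdx.x * blockDim.x + threadIdx.x;
    unsigned int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x >= width || y >= height) return;
    // The pitch is in bytes, so the row start is computed on a char* and cast back to uchar4*.
    uchar4 *row = reinterpret_cast<uchar4 *>(reinterpret_cast<char *>(img) + y * pitch);
    row[x].x = static_cast<unsigned char>(min(255, row[x].x + 10));
}

void run(const uchar4 *host, unsigned int width, unsigned int height) {
    uchar4 *dev = nullptr;
    size_t pitch = 0;
    cudaMallocPitch(&dev, &pitch, width * sizeof(uchar4), height);            // pitch >= width * sizeof(uchar4)
    cudaMemcpy2D(dev, pitch, host, width * sizeof(uchar4),
                 width * sizeof(uchar4), height, cudaMemcpyHostToDevice);     // host source is tightly packed
    dim3 block(32, 32);
    dim3 grid((width + block.x - 1) / block.x, (height + block.y - 1) / block.y);
    brighten<<<grid, block>>>(dev, pitch, width, height);
    cudaFree(dev);
}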
798aacb235cb44bf24295531ebd2e6b5b3bda367.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "caffe/util/math_functions.hpp" namespace caffe { int param_len = 0; int **a; int **a_host; bool **b; bool **b_host; template <typename Dtype> __global__ void SGDUpdate(int N, Dtype* g, Dtype* h, Dtype momentum, Dtype local_rate, float dvf_threshold, int* per_block_results, bool* whether_update) { __shared__ int sdata[CAFFE_CUDA_NUM_THREADS]; sdata[threadIdx.x] = 0; __syncthreads(); CUDA_KERNEL_LOOP(i, N) { // threshold works here // g[i] denotes the updates computed in this iteration // we first aggregated g[i] with h[i] // h[i] is the update computed in last iteration g[i] = h[i] = momentum*h[i] + local_rate*g[i]; // *********************************************************** // How to offload following function to SmartNIC // MPI cannot be used anymore if we offload following function to SmartNIC // *********************************************************** if (g[i] >= 0 && g[i] <= dvf_threshold) { // here we drop g[i], store the dropped value in h[i] // this is different with the paper, since we use momentum SGD g[i] = 0; h[i] = 1.0/momentum * h[i]; // this is a trick for h[i], must use it // sdata is used to trace which update is useful sdata[threadIdx.x] += 1; whether_update[i] = false; } else if (g[i] < 0 && g[i] >= -dvf_threshold) { // here we dlso rop g[i], store the dropped value in h[i] g[i] = 0; h[i] = 1.0/momentum * h[i]; sdata[threadIdx.x] += 1; whether_update[i] = false; } else { // here we do not drop g[i], store the dropped value in h[i] whether_update[i] = true; } } __syncthreads(); for(int offset = blockDim.x/2; offset > 0; offset >>= 1) { if(threadIdx.x < offset) { // sdata is used to trace which update is useful sdata[threadIdx.x] += sdata[threadIdx.x + offset]; } __syncthreads(); } if(threadIdx.x == 0) { per_block_results[blockIdx.x] = sdata[0]; } } template <typename Dtype> long sgd_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum, Dtype local_rate, float dvf_threshold, int* update_vector, int param_id) { long dn = 0; size_t block_num = CAFFE_GET_BLOCKS(N); int *d_partial_sums = 0; int *d_partial_sums_host = 0; bool *whether_update = 0; bool *whether_update_host = 0; if (param_id == param_len) { param_len += 1; hipMalloc((void**)&d_partial_sums, sizeof(int) * block_num); d_partial_sums_host = (int*) malloc(sizeof(int) * block_num); hipMalloc((void**)&whether_update, sizeof(bool) * N); whether_update_host = (bool*) malloc(sizeof(bool) * N); int **a_tmp = (int**)malloc(sizeof(int*) * param_len); int **a_host_tmp = (int**)malloc(sizeof(int*) * param_len); bool **b_tmp = (bool**)malloc(sizeof(bool*) * param_len); bool **b_host_tmp = (bool**)malloc(sizeof(bool*) * param_len); for (int i=0; i<(param_len-1); i++) { a_tmp[i] = a[i]; a_host_tmp[i] = a_host[i]; b_tmp[i] = b[i]; b_host_tmp[i] = b_host[i]; } a_tmp[param_len-1] = d_partial_sums; a_host_tmp[param_len-1] = d_partial_sums_host; b_tmp[param_len-1] = whether_update; b_host_tmp[param_len-1] = whether_update_host; free(a); free(a_host); free(b); free(b_host); a = a_tmp; a_host = a_host_tmp; b = b_tmp; b_host = b_host_tmp; } else { d_partial_sums = a[param_id]; d_partial_sums_host = a_host[param_id]; whether_update = b[param_id]; whether_update_host = b_host[param_id]; } SGDUpdate<Dtype> // NOLINT_NEXT_LINE(whitespacehipLaunchKernelGGL((/operators)) , dim3(block_num), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, N, g, h, momentum, local_rate, dvf_threshold, d_partial_sums, whether_update); CUDA_POST_KERNEL_CHECK; 
//************************************************************** // Communication happends here, delete the MPI related communication function // need to implemente the communication function using smart NIC APIs // following is what MPI do in previous implementation: // MPI_Send g to rank 0, in Rank 0, g[i] += receviced_g[i] // Then, in Rank0, average the update: g[i] = g[i] / RankNumber // MPI_Bcast g to other ranks // g would be used to update parameteres in another function, and we would not care it. //************************************************************* hipMemcpy(d_partial_sums_host, d_partial_sums, sizeof(int)*block_num, hipMemcpyDeviceToHost); hipMemcpy(whether_update_host, whether_update, sizeof(bool)*N, hipMemcpyDeviceToHost); for (int i=0; i < block_num; i++) { dn += d_partial_sums_host[i]; } for (int i=0; i < N; i++) { update_vector[i] += whether_update_host[i]; } // hipFree(d_partial_sums); // hipFree(whether_update); // free(d_partial_sums_host); // free(whether_update_host); return dn; } template long sgd_update_gpu<float>(int, float*, float*, float, float, float, int*, int); template long sgd_update_gpu<double>(int, double*, double*, double, double, float, int*, int); } // namespace caffe
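One detail worth flagging in the generated file above: hipify-perl mangles this particular kernel launch, because the NOLINT comment sits between the kernel name and the <<<...>>> configuration and the inserted hipLaunchKernelGGL text ends up inside the comment, leaving SGDUpdate<Dtype> dangling before the argument list. A hand-corrected form of that one launch would look like the sketch below; this is an assumed fix mirroring the CUDA launch in the paired .cu file, not part of the generated output.

// Sketch of the intended HIP launch (comment moved onto its own line so the rewrite stays intact):
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SGDUpdate<Dtype>), dim3(block_num), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
                   N, g, h, momentum, local_rate, dvf_threshold, d_partial_sums, whether_update);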
798aacb235cb44bf24295531ebd2e6b5b3bda367.cu
#include "caffe/util/math_functions.hpp" namespace caffe { int param_len = 0; int **a; int **a_host; bool **b; bool **b_host; template <typename Dtype> __global__ void SGDUpdate(int N, Dtype* g, Dtype* h, Dtype momentum, Dtype local_rate, float dvf_threshold, int* per_block_results, bool* whether_update) { __shared__ int sdata[CAFFE_CUDA_NUM_THREADS]; sdata[threadIdx.x] = 0; __syncthreads(); CUDA_KERNEL_LOOP(i, N) { // threshold works here // g[i] denotes the updates computed in this iteration // we first aggregated g[i] with h[i] // h[i] is the update computed in last iteration g[i] = h[i] = momentum*h[i] + local_rate*g[i]; // *********************************************************** // How to offload following function to SmartNIC // MPI cannot be used anymore if we offload following function to SmartNIC // *********************************************************** if (g[i] >= 0 && g[i] <= dvf_threshold) { // here we drop g[i], store the dropped value in h[i] // this is different with the paper, since we use momentum SGD g[i] = 0; h[i] = 1.0/momentum * h[i]; // this is a trick for h[i], must use it // sdata is used to trace which update is useful sdata[threadIdx.x] += 1; whether_update[i] = false; } else if (g[i] < 0 && g[i] >= -dvf_threshold) { // here we dlso rop g[i], store the dropped value in h[i] g[i] = 0; h[i] = 1.0/momentum * h[i]; sdata[threadIdx.x] += 1; whether_update[i] = false; } else { // here we do not drop g[i], store the dropped value in h[i] whether_update[i] = true; } } __syncthreads(); for(int offset = blockDim.x/2; offset > 0; offset >>= 1) { if(threadIdx.x < offset) { // sdata is used to trace which update is useful sdata[threadIdx.x] += sdata[threadIdx.x + offset]; } __syncthreads(); } if(threadIdx.x == 0) { per_block_results[blockIdx.x] = sdata[0]; } } template <typename Dtype> long sgd_update_gpu(int N, Dtype* g, Dtype* h, Dtype momentum, Dtype local_rate, float dvf_threshold, int* update_vector, int param_id) { long dn = 0; size_t block_num = CAFFE_GET_BLOCKS(N); int *d_partial_sums = 0; int *d_partial_sums_host = 0; bool *whether_update = 0; bool *whether_update_host = 0; if (param_id == param_len) { param_len += 1; cudaMalloc((void**)&d_partial_sums, sizeof(int) * block_num); d_partial_sums_host = (int*) malloc(sizeof(int) * block_num); cudaMalloc((void**)&whether_update, sizeof(bool) * N); whether_update_host = (bool*) malloc(sizeof(bool) * N); int **a_tmp = (int**)malloc(sizeof(int*) * param_len); int **a_host_tmp = (int**)malloc(sizeof(int*) * param_len); bool **b_tmp = (bool**)malloc(sizeof(bool*) * param_len); bool **b_host_tmp = (bool**)malloc(sizeof(bool*) * param_len); for (int i=0; i<(param_len-1); i++) { a_tmp[i] = a[i]; a_host_tmp[i] = a_host[i]; b_tmp[i] = b[i]; b_host_tmp[i] = b_host[i]; } a_tmp[param_len-1] = d_partial_sums; a_host_tmp[param_len-1] = d_partial_sums_host; b_tmp[param_len-1] = whether_update; b_host_tmp[param_len-1] = whether_update_host; free(a); free(a_host); free(b); free(b_host); a = a_tmp; a_host = a_host_tmp; b = b_tmp; b_host = b_host_tmp; } else { d_partial_sums = a[param_id]; d_partial_sums_host = a_host[param_id]; whether_update = b[param_id]; whether_update_host = b_host[param_id]; } SGDUpdate<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) <<<block_num, CAFFE_CUDA_NUM_THREADS>>>( N, g, h, momentum, local_rate, dvf_threshold, d_partial_sums, whether_update); CUDA_POST_KERNEL_CHECK; //************************************************************** // Communication happends here, delete the MPI related communication 
function // need to implemente the communication function using smart NIC APIs // following is what MPI do in previous implementation: // MPI_Send g to rank 0, in Rank 0, g[i] += receviced_g[i] // Then, in Rank0, average the update: g[i] = g[i] / RankNumber // MPI_Bcast g to other ranks // g would be used to update parameteres in another function, and we would not care it. //************************************************************* cudaMemcpy(d_partial_sums_host, d_partial_sums, sizeof(int)*block_num, cudaMemcpyDeviceToHost); cudaMemcpy(whether_update_host, whether_update, sizeof(bool)*N, cudaMemcpyDeviceToHost); for (int i=0; i < block_num; i++) { dn += d_partial_sums_host[i]; } for (int i=0; i < N; i++) { update_vector[i] += whether_update_host[i]; } // cudaFree(d_partial_sums); // cudaFree(whether_update); // free(d_partial_sums_host); // free(whether_update_host); return dn; } template long sgd_update_gpu<float>(int, float*, float*, float, float, float, int*, int); template long sgd_update_gpu<double>(int, double*, double*, double, double, float, int*, int); } // namespace caffe
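Besides thresholding the gradients, SGDUpdate counts per block how many entries were dropped: each thread writes 0 or 1 into shared memory, a tree reduction collapses the block, thread 0 stores the block total in per_block_results, and the host sums those totals into dn. A stripped-down sketch of just that counting pattern, with hypothetical names and fixed 256-thread blocks:

// Sketch: per-block counting with a shared-memory tree reduction (assumed standalone example).
#include <cstdio>
#include <cuda_runtime.h>

__global__ void count_small(const float *g, float threshold, int *per_block, int n) {
    __shared__ int sdata[256];
    int tid = threadIdx.x;
    int i = blockIdx.x * blockDim.x + tid;
    sdata[tid] = (i < n && fabsf(g[i]) <= threshold) ? 1 : 0;   // 1 if this entry would be dropped
    __syncthreads();
    for (int offset = blockDim.x / 2; offset > 0; offset >>= 1) {
        if (tid < offset) sdata[tid] += sdata[tid + offset];    // tree reduction inside the block
        __syncthreads();
    }
    if (tid == 0) per_block[blockIdx.x] = sdata[0];             // one partial count per block
}

int main() {
    const int n = 1 << 16, threads = 256, blocks = (n + threads - 1) / threads;
    float *d_g = nullptr; int *d_counts = nullptr;
    cudaMalloc(&d_g, n * sizeof(float));
    cudaMemset(d_g, 0, n * sizeof(float));                      // all zeros, so every entry counts as "small"
    cudaMalloc(&d_counts, blocks * sizeof(int));

    count_small<<<blocks, threads>>>(d_g, 1e-4f, d_counts, n);

    int *h_counts = new int[blocks];
    cudaMemcpy(h_counts, d_counts, blocks * sizeof(int), cudaMemcpyDeviceToHost);
    long dropped = 0;
    for (int b = 0; b < blocks; ++b) dropped += h_counts[b];    // the host finishes the sum, as sgd_update_gpu does
    std::printf("dropped %ld of %d\n", dropped, n);

    delete[] h_counts;
    cudaFree(d_g);
    cudaFree(d_counts);
    return 0;
}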
07e5e5f07aeb0be770cfa954ad01ba07fb3fc09e.hip
// !!! This is a file automatically generated by hipify!!! #include <iomanip> #include <iostream> #include <hip/hip_complex.h> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <cufinufft/cudeconvolve.h> namespace cufinufft { namespace deconvolve { /* Kernel for copying fw to fk with amplication by prefac/ker */ // Note: assume modeord=0: CMCL-compatible mode ordering in fk (from -N/2 up // to N/2-1) template <typename T> __global__ void deconvolve_1d(int ms, int nf1, cuda_complex<T> *fw, cuda_complex<T> *fk, T *fwkerhalf1) { for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < ms; i += blockDim.x * gridDim.x) { int w1 = i - ms / 2 >= 0 ? i - ms / 2 : nf1 + i - ms / 2; T kervalue = fwkerhalf1[abs(i - ms / 2)]; fk[i].x = fw[w1].x / kervalue; fk[i].y = fw[w1].y / kervalue; } } template <typename T> __global__ void deconvolve_2d(int ms, int mt, int nf1, int nf2, cuda_complex<T> *fw, cuda_complex<T> *fk, T *fwkerhalf1, T *fwkerhalf2) { for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < ms * mt; i += blockDim.x * gridDim.x) { int k1 = i % ms; int k2 = i / ms; int outidx = k1 + k2 * ms; int w1 = k1 - ms / 2 >= 0 ? k1 - ms / 2 : nf1 + k1 - ms / 2; int w2 = k2 - mt / 2 >= 0 ? k2 - mt / 2 : nf2 + k2 - mt / 2; int inidx = w1 + w2 * nf1; T kervalue = fwkerhalf1[abs(k1 - ms / 2)] * fwkerhalf2[abs(k2 - mt / 2)]; fk[outidx].x = fw[inidx].x / kervalue; fk[outidx].y = fw[inidx].y / kervalue; } } template <typename T> __global__ void deconvolve_3d(int ms, int mt, int mu, int nf1, int nf2, int nf3, cuda_complex<T> *fw, cuda_complex<T> *fk, T *fwkerhalf1, T *fwkerhalf2, T *fwkerhalf3) { for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < ms * mt * mu; i += blockDim.x * gridDim.x) { int k1 = i % ms; int k2 = (i / ms) % mt; int k3 = (i / ms / mt); int outidx = k1 + k2 * ms + k3 * ms * mt; int w1 = k1 - ms / 2 >= 0 ? k1 - ms / 2 : nf1 + k1 - ms / 2; int w2 = k2 - mt / 2 >= 0 ? k2 - mt / 2 : nf2 + k2 - mt / 2; int w3 = k3 - mu / 2 >= 0 ? k3 - mu / 2 : nf3 + k3 - mu / 2; int inidx = w1 + w2 * nf1 + w3 * nf1 * nf2; T kervalue = fwkerhalf1[abs(k1 - ms / 2)] * fwkerhalf2[abs(k2 - mt / 2)] * fwkerhalf3[abs(k3 - mu / 2)]; fk[outidx].x = fw[inidx].x / kervalue; fk[outidx].y = fw[inidx].y / kervalue; } } /* Kernel for copying fk to fw with same amplication */ template <typename T> __global__ void amplify_1d(int ms, int nf1, cuda_complex<T> *fw, cuda_complex<T> *fk, T *fwkerhalf1) { for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < ms; i += blockDim.x * gridDim.x) { int w1 = i - ms / 2 >= 0 ? i - ms / 2 : nf1 + i - ms / 2; T kervalue = fwkerhalf1[abs(i - ms / 2)]; fw[w1].x = fk[i].x / kervalue; fw[w1].y = fk[i].y / kervalue; } } template <typename T> __global__ void amplify_2d(int ms, int mt, int nf1, int nf2, cuda_complex<T> *fw, cuda_complex<T> *fk, T *fwkerhalf1, T *fwkerhalf2) { for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < ms * mt; i += blockDim.x * gridDim.x) { int k1 = i % ms; int k2 = i / ms; int inidx = k1 + k2 * ms; int w1 = k1 - ms / 2 >= 0 ? k1 - ms / 2 : nf1 + k1 - ms / 2; int w2 = k2 - mt / 2 >= 0 ? 
k2 - mt / 2 : nf2 + k2 - mt / 2; int outidx = w1 + w2 * nf1; T kervalue = fwkerhalf1[abs(k1 - ms / 2)] * fwkerhalf2[abs(k2 - mt / 2)]; fw[outidx].x = fk[inidx].x / kervalue; fw[outidx].y = fk[inidx].y / kervalue; } } template <typename T> __global__ void amplify_3d(int ms, int mt, int mu, int nf1, int nf2, int nf3, cuda_complex<T> *fw, cuda_complex<T> *fk, T *fwkerhalf1, T *fwkerhalf2, T *fwkerhalf3) { for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < ms * mt * mu; i += blockDim.x * gridDim.x) { int k1 = i % ms; int k2 = (i / ms) % mt; int k3 = (i / ms / mt); int inidx = k1 + k2 * ms + k3 * ms * mt; int w1 = k1 - ms / 2 >= 0 ? k1 - ms / 2 : nf1 + k1 - ms / 2; int w2 = k2 - mt / 2 >= 0 ? k2 - mt / 2 : nf2 + k2 - mt / 2; int w3 = k3 - mu / 2 >= 0 ? k3 - mu / 2 : nf3 + k3 - mu / 2; int outidx = w1 + w2 * nf1 + w3 * nf1 * nf2; T kervalue = fwkerhalf1[abs(k1 - ms / 2)] * fwkerhalf2[abs(k2 - mt / 2)] * fwkerhalf3[abs(k3 - mu / 2)]; fw[outidx].x = fk[inidx].x / kervalue; fw[outidx].y = fk[inidx].y / kervalue; } } template <typename T> int cudeconvolve1d(cufinufft_plan_t<T> *d_plan, int blksize) /* wrapper for deconvolution & amplication in 1D. Melody Shih 11/21/21 */ { int ms = d_plan->ms; int nf1 = d_plan->nf1; int nmodes = ms; int maxbatchsize = d_plan->maxbatchsize; if (d_plan->spopts.spread_direction == 1) { for (int t = 0; t < blksize; t++) { hipLaunchKernelGGL(( deconvolve_1d), dim3((nmodes + 256 - 1) / 256), dim3(256), 0, 0, ms, nf1, d_plan->fw + t * nf1, d_plan->fk + t * nmodes, d_plan->fwkerhalf1); } } else { checkCudaErrors(hipMemset(d_plan->fw, 0, maxbatchsize * nf1 * sizeof(cuda_complex<T>))); for (int t = 0; t < blksize; t++) { hipLaunchKernelGGL(( amplify_1d), dim3((nmodes + 256 - 1) / 256), dim3(256), 0, 0, ms, nf1, d_plan->fw + t * nf1, d_plan->fk + t * nmodes, d_plan->fwkerhalf1); } } return 0; } template <typename T> int cudeconvolve2d(cufinufft_plan_t<T> *d_plan, int blksize) /* wrapper for deconvolution & amplication in 2D. Melody Shih 07/25/19 */ { int ms = d_plan->ms; int mt = d_plan->mt; int nf1 = d_plan->nf1; int nf2 = d_plan->nf2; int nmodes = ms * mt; int maxbatchsize = d_plan->maxbatchsize; if (d_plan->spopts.spread_direction == 1) { for (int t = 0; t < blksize; t++) { hipLaunchKernelGGL(( deconvolve_2d), dim3((nmodes + 256 - 1) / 256), dim3(256), 0, 0, ms, mt, nf1, nf2, d_plan->fw + t * nf1 * nf2, d_plan->fk + t * nmodes, d_plan->fwkerhalf1, d_plan->fwkerhalf2); } } else { checkCudaErrors(hipMemset(d_plan->fw, 0, maxbatchsize * nf1 * nf2 * sizeof(cuda_complex<T>))); for (int t = 0; t < blksize; t++) { hipLaunchKernelGGL(( amplify_2d), dim3((nmodes + 256 - 1) / 256), dim3(256), 0, 0, ms, mt, nf1, nf2, d_plan->fw + t * nf1 * nf2, d_plan->fk + t * nmodes, d_plan->fwkerhalf1, d_plan->fwkerhalf2); } } return 0; } template <typename T> int cudeconvolve3d(cufinufft_plan_t<T> *d_plan, int blksize) /* wrapper for deconvolution & amplication in 3D. 
Melody Shih 07/25/19 */ { int ms = d_plan->ms; int mt = d_plan->mt; int mu = d_plan->mu; int nf1 = d_plan->nf1; int nf2 = d_plan->nf2; int nf3 = d_plan->nf3; int nmodes = ms * mt * mu; int maxbatchsize = d_plan->maxbatchsize; if (d_plan->spopts.spread_direction == 1) { for (int t = 0; t < blksize; t++) { hipLaunchKernelGGL(( deconvolve_3d), dim3((nmodes + 256 - 1) / 256), dim3(256), 0, 0, ms, mt, mu, nf1, nf2, nf3, d_plan->fw + t * nf1 * nf2 * nf3, d_plan->fk + t * nmodes, d_plan->fwkerhalf1, d_plan->fwkerhalf2, d_plan->fwkerhalf3); } } else { checkCudaErrors(hipMemset(d_plan->fw, 0, maxbatchsize * nf1 * nf2 * nf3 * sizeof(cuda_complex<T>))); for (int t = 0; t < blksize; t++) { hipLaunchKernelGGL(( amplify_3d), dim3((nmodes + 256 - 1) / 256), dim3(256), 0, 0, ms, mt, mu, nf1, nf2, nf3, d_plan->fw + t * nf1 * nf2 * nf3, d_plan->fk + t * nmodes, d_plan->fwkerhalf1, d_plan->fwkerhalf2, d_plan->fwkerhalf3); } } return 0; } template int cudeconvolve1d<float>(cufinufft_plan_t<float> *d_plan, int blksize); template int cudeconvolve1d<double>(cufinufft_plan_t<double> *d_plan, int blksize); template int cudeconvolve2d<float>(cufinufft_plan_t<float> *d_plan, int blksize); template int cudeconvolve2d<double>(cufinufft_plan_t<double> *d_plan, int blksize); template int cudeconvolve3d<float>(cufinufft_plan_t<float> *d_plan, int blksize); template int cudeconvolve3d<double>(cufinufft_plan_t<double> *d_plan, int blksize); } // namespace deconvolve } // namespace cufinufft
07e5e5f07aeb0be770cfa954ad01ba07fb3fc09e.cu
#include <iomanip> #include <iostream> #include <cuComplex.h> #include <cuda.h> #include <helper_cuda.h> #include <cufinufft/cudeconvolve.h> namespace cufinufft { namespace deconvolve { /* Kernel for copying fw to fk with amplication by prefac/ker */ // Note: assume modeord=0: CMCL-compatible mode ordering in fk (from -N/2 up // to N/2-1) template <typename T> __global__ void deconvolve_1d(int ms, int nf1, cuda_complex<T> *fw, cuda_complex<T> *fk, T *fwkerhalf1) { for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < ms; i += blockDim.x * gridDim.x) { int w1 = i - ms / 2 >= 0 ? i - ms / 2 : nf1 + i - ms / 2; T kervalue = fwkerhalf1[abs(i - ms / 2)]; fk[i].x = fw[w1].x / kervalue; fk[i].y = fw[w1].y / kervalue; } } template <typename T> __global__ void deconvolve_2d(int ms, int mt, int nf1, int nf2, cuda_complex<T> *fw, cuda_complex<T> *fk, T *fwkerhalf1, T *fwkerhalf2) { for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < ms * mt; i += blockDim.x * gridDim.x) { int k1 = i % ms; int k2 = i / ms; int outidx = k1 + k2 * ms; int w1 = k1 - ms / 2 >= 0 ? k1 - ms / 2 : nf1 + k1 - ms / 2; int w2 = k2 - mt / 2 >= 0 ? k2 - mt / 2 : nf2 + k2 - mt / 2; int inidx = w1 + w2 * nf1; T kervalue = fwkerhalf1[abs(k1 - ms / 2)] * fwkerhalf2[abs(k2 - mt / 2)]; fk[outidx].x = fw[inidx].x / kervalue; fk[outidx].y = fw[inidx].y / kervalue; } } template <typename T> __global__ void deconvolve_3d(int ms, int mt, int mu, int nf1, int nf2, int nf3, cuda_complex<T> *fw, cuda_complex<T> *fk, T *fwkerhalf1, T *fwkerhalf2, T *fwkerhalf3) { for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < ms * mt * mu; i += blockDim.x * gridDim.x) { int k1 = i % ms; int k2 = (i / ms) % mt; int k3 = (i / ms / mt); int outidx = k1 + k2 * ms + k3 * ms * mt; int w1 = k1 - ms / 2 >= 0 ? k1 - ms / 2 : nf1 + k1 - ms / 2; int w2 = k2 - mt / 2 >= 0 ? k2 - mt / 2 : nf2 + k2 - mt / 2; int w3 = k3 - mu / 2 >= 0 ? k3 - mu / 2 : nf3 + k3 - mu / 2; int inidx = w1 + w2 * nf1 + w3 * nf1 * nf2; T kervalue = fwkerhalf1[abs(k1 - ms / 2)] * fwkerhalf2[abs(k2 - mt / 2)] * fwkerhalf3[abs(k3 - mu / 2)]; fk[outidx].x = fw[inidx].x / kervalue; fk[outidx].y = fw[inidx].y / kervalue; } } /* Kernel for copying fk to fw with same amplication */ template <typename T> __global__ void amplify_1d(int ms, int nf1, cuda_complex<T> *fw, cuda_complex<T> *fk, T *fwkerhalf1) { for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < ms; i += blockDim.x * gridDim.x) { int w1 = i - ms / 2 >= 0 ? i - ms / 2 : nf1 + i - ms / 2; T kervalue = fwkerhalf1[abs(i - ms / 2)]; fw[w1].x = fk[i].x / kervalue; fw[w1].y = fk[i].y / kervalue; } } template <typename T> __global__ void amplify_2d(int ms, int mt, int nf1, int nf2, cuda_complex<T> *fw, cuda_complex<T> *fk, T *fwkerhalf1, T *fwkerhalf2) { for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < ms * mt; i += blockDim.x * gridDim.x) { int k1 = i % ms; int k2 = i / ms; int inidx = k1 + k2 * ms; int w1 = k1 - ms / 2 >= 0 ? k1 - ms / 2 : nf1 + k1 - ms / 2; int w2 = k2 - mt / 2 >= 0 ? 
k2 - mt / 2 : nf2 + k2 - mt / 2; int outidx = w1 + w2 * nf1; T kervalue = fwkerhalf1[abs(k1 - ms / 2)] * fwkerhalf2[abs(k2 - mt / 2)]; fw[outidx].x = fk[inidx].x / kervalue; fw[outidx].y = fk[inidx].y / kervalue; } } template <typename T> __global__ void amplify_3d(int ms, int mt, int mu, int nf1, int nf2, int nf3, cuda_complex<T> *fw, cuda_complex<T> *fk, T *fwkerhalf1, T *fwkerhalf2, T *fwkerhalf3) { for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < ms * mt * mu; i += blockDim.x * gridDim.x) { int k1 = i % ms; int k2 = (i / ms) % mt; int k3 = (i / ms / mt); int inidx = k1 + k2 * ms + k3 * ms * mt; int w1 = k1 - ms / 2 >= 0 ? k1 - ms / 2 : nf1 + k1 - ms / 2; int w2 = k2 - mt / 2 >= 0 ? k2 - mt / 2 : nf2 + k2 - mt / 2; int w3 = k3 - mu / 2 >= 0 ? k3 - mu / 2 : nf3 + k3 - mu / 2; int outidx = w1 + w2 * nf1 + w3 * nf1 * nf2; T kervalue = fwkerhalf1[abs(k1 - ms / 2)] * fwkerhalf2[abs(k2 - mt / 2)] * fwkerhalf3[abs(k3 - mu / 2)]; fw[outidx].x = fk[inidx].x / kervalue; fw[outidx].y = fk[inidx].y / kervalue; } } template <typename T> int cudeconvolve1d(cufinufft_plan_t<T> *d_plan, int blksize) /* wrapper for deconvolution & amplication in 1D. Melody Shih 11/21/21 */ { int ms = d_plan->ms; int nf1 = d_plan->nf1; int nmodes = ms; int maxbatchsize = d_plan->maxbatchsize; if (d_plan->spopts.spread_direction == 1) { for (int t = 0; t < blksize; t++) { deconvolve_1d<<<(nmodes + 256 - 1) / 256, 256>>>(ms, nf1, d_plan->fw + t * nf1, d_plan->fk + t * nmodes, d_plan->fwkerhalf1); } } else { checkCudaErrors(cudaMemset(d_plan->fw, 0, maxbatchsize * nf1 * sizeof(cuda_complex<T>))); for (int t = 0; t < blksize; t++) { amplify_1d<<<(nmodes + 256 - 1) / 256, 256>>>(ms, nf1, d_plan->fw + t * nf1, d_plan->fk + t * nmodes, d_plan->fwkerhalf1); } } return 0; } template <typename T> int cudeconvolve2d(cufinufft_plan_t<T> *d_plan, int blksize) /* wrapper for deconvolution & amplication in 2D. Melody Shih 07/25/19 */ { int ms = d_plan->ms; int mt = d_plan->mt; int nf1 = d_plan->nf1; int nf2 = d_plan->nf2; int nmodes = ms * mt; int maxbatchsize = d_plan->maxbatchsize; if (d_plan->spopts.spread_direction == 1) { for (int t = 0; t < blksize; t++) { deconvolve_2d<<<(nmodes + 256 - 1) / 256, 256>>>(ms, mt, nf1, nf2, d_plan->fw + t * nf1 * nf2, d_plan->fk + t * nmodes, d_plan->fwkerhalf1, d_plan->fwkerhalf2); } } else { checkCudaErrors(cudaMemset(d_plan->fw, 0, maxbatchsize * nf1 * nf2 * sizeof(cuda_complex<T>))); for (int t = 0; t < blksize; t++) { amplify_2d<<<(nmodes + 256 - 1) / 256, 256>>>(ms, mt, nf1, nf2, d_plan->fw + t * nf1 * nf2, d_plan->fk + t * nmodes, d_plan->fwkerhalf1, d_plan->fwkerhalf2); } } return 0; } template <typename T> int cudeconvolve3d(cufinufft_plan_t<T> *d_plan, int blksize) /* wrapper for deconvolution & amplication in 3D. 
Melody Shih 07/25/19 */ { int ms = d_plan->ms; int mt = d_plan->mt; int mu = d_plan->mu; int nf1 = d_plan->nf1; int nf2 = d_plan->nf2; int nf3 = d_plan->nf3; int nmodes = ms * mt * mu; int maxbatchsize = d_plan->maxbatchsize; if (d_plan->spopts.spread_direction == 1) { for (int t = 0; t < blksize; t++) { deconvolve_3d<<<(nmodes + 256 - 1) / 256, 256>>>( ms, mt, mu, nf1, nf2, nf3, d_plan->fw + t * nf1 * nf2 * nf3, d_plan->fk + t * nmodes, d_plan->fwkerhalf1, d_plan->fwkerhalf2, d_plan->fwkerhalf3); } } else { checkCudaErrors(cudaMemset(d_plan->fw, 0, maxbatchsize * nf1 * nf2 * nf3 * sizeof(cuda_complex<T>))); for (int t = 0; t < blksize; t++) { amplify_3d<<<(nmodes + 256 - 1) / 256, 256>>>(ms, mt, mu, nf1, nf2, nf3, d_plan->fw + t * nf1 * nf2 * nf3, d_plan->fk + t * nmodes, d_plan->fwkerhalf1, d_plan->fwkerhalf2, d_plan->fwkerhalf3); } } return 0; } template int cudeconvolve1d<float>(cufinufft_plan_t<float> *d_plan, int blksize); template int cudeconvolve1d<double>(cufinufft_plan_t<double> *d_plan, int blksize); template int cudeconvolve2d<float>(cufinufft_plan_t<float> *d_plan, int blksize); template int cudeconvolve2d<double>(cufinufft_plan_t<double> *d_plan, int blksize); template int cudeconvolve3d<float>(cufinufft_plan_t<float> *d_plan, int blksize); template int cudeconvolve3d<double>(cufinufft_plan_t<double> *d_plan, int blksize); } // namespace deconvolve } // namespace cufinufft
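The only non-obvious indexing in these kernels is the fold between CMCL mode ordering (modes -N/2 .. N/2-1 laid out in fk) and the FFT-ordered fine grid of length nf: the grid index is k - N/2 when that is non-negative and nf + k - N/2 otherwise, while the kernel factor is looked up at abs(k - N/2). A tiny host-only check of that mapping (plain C++, illustrative sizes):

// Sketch: the CMCL-to-FFT index fold used by deconvolve_1d/amplify_1d above.
#include <cstdio>
#include <cstdlib>

int main() {
    const int ms  = 8;   // number of modes
    const int nf1 = 16;  // fine-grid size (>= ms)
    for (int k = 0; k < ms; ++k) {
        int w1   = (k - ms / 2 >= 0) ? k - ms / 2 : nf1 + k - ms / 2;  // position in the FFT-ordered array
        int kidx = abs(k - ms / 2);                                    // index into fwkerhalf1
        std::printf("mode %2d (frequency %3d) -> fw[%2d], kernel[%d]\n", k, k - ms / 2, w1, kidx);
    }
    return 0;
}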
c22ec4718686b816301c9531ba72c6e298b893bd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<math.h> #include "algebra.h" namespace Algebra{ __global__ void filter_real(int N,const std::complex<double> *inArray, double *outArray){ int i = blockIdx.x; if (fabs(inArray[i].imag()) < TOLERANCE) { outArray[i] = inArray[i].real(); } } int cuda_FilterRealNumbers(int numComplexValues, const std::complex<double> inArray[], double outArray[]){ std::complex<double> *a; double *b; int N=numComplexValues; hipMalloc(&a,N*sizeof(std::complex<double>)); hipMalloc(&b,N*sizeof(double)); hipMemcpy(a, inArray, (N)*sizeof(std::complex<double>), hipMemcpyHostToDevice); hipLaunchKernelGGL(( filter_real), dim3(N),dim3(1), 0, 0, N,a,b); hipMemcpy(outArray, b, (N)*sizeof(double), hipMemcpyDeviceToHost); hipFree(a); hipFree(b); return N; } }
c22ec4718686b816301c9531ba72c6e298b893bd.cu
#include<stdio.h> #include<math.h> #include "algebra.h" namespace Algebra{ __global__ void filter_real(int N,const std::complex<double> *inArray, double *outArray){ int i = blockIdx.x; if (fabs(inArray[i].imag()) < TOLERANCE) { outArray[i] = inArray[i].real(); } } int cuda_FilterRealNumbers(int numComplexValues, const std::complex<double> inArray[], double outArray[]){ std::complex<double> *a; double *b; int N=numComplexValues; cudaMalloc(&a,N*sizeof(std::complex<double>)); cudaMalloc(&b,N*sizeof(double)); cudaMemcpy(a, inArray, (N)*sizeof(std::complex<double>), cudaMemcpyHostToDevice); filter_real<<<N,1>>>(N,a,b); cudaMemcpy(outArray, b, (N)*sizeof(double), cudaMemcpyDeviceToHost); cudaFree(a); cudaFree(b); return N; } }
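Note that this pair passes std::complex<double> into a __global__ kernel and calls .real()/.imag() on the device, which relies on toolchain-specific leniency; std::complex is not generally supported in device code. A more conventional formulation uses cuDoubleComplex from <cuComplex.h>. The sketch below is an assumed alternative, not Algebra's actual API, with TOLERANCE replaced by a local constant:

// Sketch: the same real-filter kernel written with cuDoubleComplex (assumed alternative).
#include <cmath>
#include <cuComplex.h>
#include <cuda_runtime.h>

#define TOL 1e-9   // stand-in for the library's TOLERANCE constant

__global__ void filter_real(int n, const cuDoubleComplex *in, double *out) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n && fabs(cuCimag(in[i])) < TOL)   // keep entries whose imaginary part is negligible
        out[i] = cuCreal(in[i]);               // slots that fail the test are left untouched, as in the original
}

As in the original, entries that fail the test are never written and the function still reports numComplexValues, so a caller has to pre-initialise the output buffer or track separately which slots were filled.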
f08bd1db35aec217c61035aed2b786e4d22e4993.hip
// !!! This is a file automatically generated by hipify!!! // // Created by Jacob Austin on 5/17/18. // #include "mass.h" namespace titan { Mass::Mass() { m = 1.0; T = 0; valid = true; arrayptr = nullptr; ref_count = 0; og_idx = -1; // idx is negative, as in not set #ifdef GRAPHICS color = Vec(1.0, 0.2, 0.2); #endif } // constructor TODO fix timing void Mass::operator=(CUDA_MASS & mass) { m = mass.m; T = mass.T; pos = mass.pos; vel = mass.vel; valid = mass.valid; rad = mass.rad; // magnet_sphere radius stiffness = mass.stiffness; max_mag_force = mass.max_mag_force; // maximum pull force excerted by the magnet mag_scale_factor = mass.mag_scale_factor; // scales susceptibility to magnetic flux constraints.fixed = mass.constraints.fixed; acc = mass.acc; extern_force = mass.extern_force; og_idx = mass.og_idx; ref_count = this -> ref_count; arrayptr = this -> arrayptr; #ifdef CONSTRAINTS constraints = this -> constraints; #endif #ifdef GRAPHICS color = mass.color; #endif } Mass::Mass(const Vec & position, double mass, bool fixed, double radius, double mag_k, double maximum_magnet_force, double magnet_scale_factor) { m = mass; pos = position; rad = radius; stiffness = mag_k; max_mag_force = maximum_magnet_force; mag_scale_factor = magnet_scale_factor; constraints.fixed = fixed; T = 0; valid = true; arrayptr = nullptr; ref_count = 0; #ifdef GRAPHICS color = Vec(1.0, 0.2, 0.2); #endif } CUDA_MASS::CUDA_MASS(Mass &mass) { m = mass.m; T = mass.T; pos = mass.pos; vel = mass.vel; extern_force = mass.extern_force; og_idx = mass.og_idx; rad = mass.rad; stiffness = mass.stiffness; // spring constant of the magnet shell max_mag_force = mass.max_mag_force; mag_scale_factor = mass.mag_scale_factor; constraints.fixed = mass.constraints.fixed; valid = true; #ifdef CONSTRAINTS constraints = CUDA_LOCAL_CONSTRAINTS(mass.constraints); #endif #ifdef GRAPHICS color = mass.color; #endif } #ifdef CONSTRAINTS void Mass::addConstraint(CONSTRAINT_TYPE type, const Vec & vec, double num) { // TODO make this more efficient if (type == 0) { this -> constraints.constraint_plane.push_back(CudaConstraintPlane(vec, num)); this -> constraints.num_constraint_planes++; this -> constraints.constraint_plane_ptr = thrust::raw_pointer_cast(constraints.constraint_plane.data()); } else if (type == 1) { this -> constraints.contact_plane.push_back(CudaContactPlane(vec, num)); this -> constraints.num_contact_planes++; this -> constraints.contact_plane_ptr = thrust::raw_pointer_cast(constraints.contact_plane.data()); } else if (type == 2) { this -> constraints.ball.push_back(CudaBall(vec, num)); this -> constraints.num_balls++; this -> constraints.ball_ptr = thrust::raw_pointer_cast(constraints.ball.data()); } else if (type == 3) { this -> constraints.direction.push_back(CudaDirection(vec, num)); this -> constraints.num_directions++; this -> constraints.direction_ptr = thrust::raw_pointer_cast(constraints.direction.data()); } } void Mass::clearConstraints(CONSTRAINT_TYPE type) { if (type == 0) { this -> constraints.constraint_plane.clear(); this -> constraints.constraint_plane.shrink_to_fit(); this -> constraints.num_constraint_planes = 0; } else if (type == 1) { this -> constraints.contact_plane.clear(); this -> constraints.contact_plane.shrink_to_fit(); this -> constraints.num_contact_planes = 0; } else if (type == 2) { this -> constraints.ball.clear(); this -> constraints.ball.shrink_to_fit(); this -> constraints.num_balls = 0; } else if (type == 3) { this -> constraints.direction.clear(); this -> constraints.direction.shrink_to_fit(); 
this -> constraints.num_directions = 0; } } void Mass::clearConstraints() { clearConstraints(CONSTRAINT_PLANE); clearConstraints(CONTACT_PLANE); clearConstraints(DIRECTION); clearConstraints(BALL); } void Mass::fix() { this -> constraints.fixed = true; } void Mass::unfix() { this -> constraints.fixed = false; } void Mass::setDrag(double C) { this -> constraints.drag_coefficient = C; } #endif void Mass::decrementRefCount() { if (--ref_count == 0) { if (arrayptr) { hipFree(arrayptr); } delete this; } } } // namespace titan
f08bd1db35aec217c61035aed2b786e4d22e4993.cu
// // Created by Jacob Austin on 5/17/18. // #include "mass.h" namespace titan { Mass::Mass() { m = 1.0; T = 0; valid = true; arrayptr = nullptr; ref_count = 0; og_idx = -1; // idx is negative, as in not set #ifdef GRAPHICS color = Vec(1.0, 0.2, 0.2); #endif } // constructor TODO fix timing void Mass::operator=(CUDA_MASS & mass) { m = mass.m; T = mass.T; pos = mass.pos; vel = mass.vel; valid = mass.valid; rad = mass.rad; // magnet_sphere radius stiffness = mass.stiffness; max_mag_force = mass.max_mag_force; // maximum pull force excerted by the magnet mag_scale_factor = mass.mag_scale_factor; // scales susceptibility to magnetic flux constraints.fixed = mass.constraints.fixed; acc = mass.acc; extern_force = mass.extern_force; og_idx = mass.og_idx; ref_count = this -> ref_count; arrayptr = this -> arrayptr; #ifdef CONSTRAINTS constraints = this -> constraints; #endif #ifdef GRAPHICS color = mass.color; #endif } Mass::Mass(const Vec & position, double mass, bool fixed, double radius, double mag_k, double maximum_magnet_force, double magnet_scale_factor) { m = mass; pos = position; rad = radius; stiffness = mag_k; max_mag_force = maximum_magnet_force; mag_scale_factor = magnet_scale_factor; constraints.fixed = fixed; T = 0; valid = true; arrayptr = nullptr; ref_count = 0; #ifdef GRAPHICS color = Vec(1.0, 0.2, 0.2); #endif } CUDA_MASS::CUDA_MASS(Mass &mass) { m = mass.m; T = mass.T; pos = mass.pos; vel = mass.vel; extern_force = mass.extern_force; og_idx = mass.og_idx; rad = mass.rad; stiffness = mass.stiffness; // spring constant of the magnet shell max_mag_force = mass.max_mag_force; mag_scale_factor = mass.mag_scale_factor; constraints.fixed = mass.constraints.fixed; valid = true; #ifdef CONSTRAINTS constraints = CUDA_LOCAL_CONSTRAINTS(mass.constraints); #endif #ifdef GRAPHICS color = mass.color; #endif } #ifdef CONSTRAINTS void Mass::addConstraint(CONSTRAINT_TYPE type, const Vec & vec, double num) { // TODO make this more efficient if (type == 0) { this -> constraints.constraint_plane.push_back(CudaConstraintPlane(vec, num)); this -> constraints.num_constraint_planes++; this -> constraints.constraint_plane_ptr = thrust::raw_pointer_cast(constraints.constraint_plane.data()); } else if (type == 1) { this -> constraints.contact_plane.push_back(CudaContactPlane(vec, num)); this -> constraints.num_contact_planes++; this -> constraints.contact_plane_ptr = thrust::raw_pointer_cast(constraints.contact_plane.data()); } else if (type == 2) { this -> constraints.ball.push_back(CudaBall(vec, num)); this -> constraints.num_balls++; this -> constraints.ball_ptr = thrust::raw_pointer_cast(constraints.ball.data()); } else if (type == 3) { this -> constraints.direction.push_back(CudaDirection(vec, num)); this -> constraints.num_directions++; this -> constraints.direction_ptr = thrust::raw_pointer_cast(constraints.direction.data()); } } void Mass::clearConstraints(CONSTRAINT_TYPE type) { if (type == 0) { this -> constraints.constraint_plane.clear(); this -> constraints.constraint_plane.shrink_to_fit(); this -> constraints.num_constraint_planes = 0; } else if (type == 1) { this -> constraints.contact_plane.clear(); this -> constraints.contact_plane.shrink_to_fit(); this -> constraints.num_contact_planes = 0; } else if (type == 2) { this -> constraints.ball.clear(); this -> constraints.ball.shrink_to_fit(); this -> constraints.num_balls = 0; } else if (type == 3) { this -> constraints.direction.clear(); this -> constraints.direction.shrink_to_fit(); this -> constraints.num_directions = 0; } } void 
Mass::clearConstraints() { clearConstraints(CONSTRAINT_PLANE); clearConstraints(CONTACT_PLANE); clearConstraints(DIRECTION); clearConstraints(BALL); } void Mass::fix() { this -> constraints.fixed = true; } void Mass::unfix() { this -> constraints.fixed = false; } void Mass::setDrag(double C) { this -> constraints.drag_coefficient = C; } #endif void Mass::decrementRefCount() { if (--ref_count == 0) { if (arrayptr) { cudaFree(arrayptr); } delete this; } } } // namespace titan
e32bc59783a09a9f7252d7708a98ec92eb937730.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { #ifdef USE_ROCM template<typename Dtype> __global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data, int* mask) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype maxval = -FLT_MAX; int maxidx = -1; if (bottom_data_a[index] > bottom_data_b[index]) { // only update for very first bottom_data blob (blob_idx == 0) if (blob_idx == 0) { maxval = bottom_data_a[index]; top_data[index] = maxval; maxidx = blob_idx; mask[index] = maxidx; } } else { maxval = bottom_data_b[index]; top_data[index] = maxval; maxidx = blob_idx + 1; mask[index] = maxidx; } } } #endif // USE_ROCM template<typename Dtype> void EltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int* mask = NULL; const int count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM switch (op_) { case EltwiseParameter_EltwiseOp_PROD: caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), top_data); for (int i = 2; i < bottom.size(); ++i) { caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_SUM: caffe_gpu_set(count, Dtype(0.), top_data); // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? for (int i = 0; i < bottom.size(); ++i) { caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) MaxForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask); for (int i = 2; i < bottom.size(); ++i) { // NOLINT_NEXT_LINE(whitespace/operators) MaxForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask); } break; default: { LOG(FATAL)<< "Unknown elementwise operation."; } } #endif // USE_ROCM } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_->id()); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: { greentea_gpu_mul<Dtype>(this->device_context_->id(), count, (cl_mem)(bottom[0]->gpu_data()), 0, (cl_mem)(bottom[1]->gpu_data()), 0, (cl_mem)top_data, 0); for (int i = 2; i < bottom.size(); ++i) { greentea_gpu_mul<Dtype>(this->device_context_->id(), count, (cl_mem)top_data, 0, (cl_mem)(bottom[i]->gpu_data()), 0, (cl_mem)top_data, 0); } } break; case EltwiseParameter_EltwiseOp_SUM: { greentea_gpu_set<Dtype>(this->device_context_->id(), count, 0, (cl_mem)top_data, 0); for (int i = 0; i < bottom.size(); ++i) { greentea_gpu_axpy<Dtype>(this->device_context_->id(), count, coeffs_[i], (cl_mem)(bottom[i]->gpu_data()), 0, (cl_mem)top_data, 0); } } break; case EltwiseParameter_EltwiseOp_MAX: { mask = max_idx_.mutable_gpu_data(); viennacl::ocl::kernel &oclk_max_forward = program.get_kernel( CL_KERNEL_SELECT("eltwise_max_forward")); viennacl::ocl::enqueue( oclk_max_forward(count, 
WrapHandle((cl_mem)(bottom[0]->gpu_data()), &ctx), WrapHandle((cl_mem)(bottom[1]->gpu_data()), &ctx), 0, WrapHandle((cl_mem)top_data, &ctx), WrapHandle((cl_mem)mask, &ctx)), ctx.get_queue()); for (int i = 2; i < bottom.size(); ++i) { viennacl::ocl::enqueue( oclk_max_forward(count, WrapHandle((cl_mem)(top_data), &ctx), WrapHandle((cl_mem)(bottom[i]->gpu_data()), &ctx), i-1, WrapHandle((cl_mem)top_data, &ctx), WrapHandle((cl_mem)mask, &ctx)), ctx.get_queue()); } } break; default: { LOG(FATAL)<< "Unknown elementwise operation."; } } #endif // USE_GREENTEA } } #ifdef USE_ROCM template<typename Dtype> __global__ void MaxBackward(const int nthreads, const Dtype* top_diff, const int blob_idx, const int* mask, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype gradient = 0; if (mask[index] == blob_idx) { gradient += top_diff[index]; } bottom_diff[index] = gradient; } } #endif // USE_ROCM template<typename Dtype> void EltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int* mask = NULL; const int count = top[0]->count(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_ROCM for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: if (stable_prod_grad_) { bool initialized = false; for (int j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { caffe_copy(count, bottom[j]->gpu_data(), bottom_diff); initialized = true; } else { caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff, bottom_diff); } } } else { caffe_gpu_div(count, top_data, bottom_data, bottom_diff); } caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); break; case EltwiseParameter_EltwiseOp_SUM: if (coeffs_[i] == Dtype(1.)) { caffe_copy(count, top_diff, bottom_diff); } else { caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff); } break; case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.gpu_data(); MaxBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, top_diff, i, mask, bottom_diff); break; default: { LOG(FATAL)<< "Unknown elementwise operation."; } } } } #endif // USE_ROCM } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_->id()); for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: { if (stable_prod_grad_) { bool initialized = false; for (int j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { greentea_copy<Dtype>(count, (cl_mem)(bottom[j]->gpu_data()), 0, (cl_mem)(bottom_diff), 0, &ctx); initialized = true; } else { greentea_gpu_mul<Dtype>(this->device_context_->id(), count, (cl_mem)bottom[j]->gpu_data(), 0, (cl_mem)bottom_diff, 0, (cl_mem)bottom_diff, 0); } } } else { greentea_gpu_div<Dtype>(this->device_context_->id(), count, (cl_mem)top_data, 0, (cl_mem)bottom_data, 0, (cl_mem)bottom_diff, 0); } greentea_gpu_mul<Dtype>(this->device_context_->id(), count, (cl_mem)bottom_diff, 0, (cl_mem)top_diff, 0, 
(cl_mem)bottom_diff, 0); } break; case EltwiseParameter_EltwiseOp_SUM: { if (coeffs_[i] == Dtype(1.)) { greentea_copy<Dtype>(count, (cl_mem)top_diff, 0, (cl_mem)bottom_diff, 0, &ctx); } else { greentea_gpu_scale<Dtype>(this->device_context_->id(), count, coeffs_[i], (cl_mem)top_diff, 0, (cl_mem)bottom_diff, 0); } } break; case EltwiseParameter_EltwiseOp_MAX: { mask = max_idx_.gpu_data(); viennacl::ocl::kernel &oclk_max_backward = program.get_kernel( CL_KERNEL_SELECT("eltwise_max_backward")); viennacl::ocl::enqueue( oclk_max_backward(count, WrapHandle((cl_mem)top_diff, &ctx), i, WrapHandle((cl_mem)mask, &ctx), WrapHandle((cl_mem)bottom_diff, &ctx)), ctx.get_queue()); } break; default: { LOG(FATAL)<< "Unknown elementwise operation."; } } } } #endif } } INSTANTIATE_LAYER_GPU_FUNCS(EltwiseLayer); } // namespace caffe
e32bc59783a09a9f7252d7708a98ec92eb937730.cu
#include <cfloat> #include <vector> #include "caffe/layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" #ifdef USE_GREENTEA #include "caffe/greentea/greentea.hpp" #include "caffe/greentea/greentea_math_functions.hpp" #endif namespace caffe { #ifdef USE_CUDA template<typename Dtype> __global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, const Dtype* bottom_data_b, const int blob_idx, Dtype* top_data, int* mask) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype maxval = -FLT_MAX; int maxidx = -1; if (bottom_data_a[index] > bottom_data_b[index]) { // only update for very first bottom_data blob (blob_idx == 0) if (blob_idx == 0) { maxval = bottom_data_a[index]; top_data[index] = maxval; maxidx = blob_idx; mask[index] = maxidx; } } else { maxval = bottom_data_b[index]; top_data[index] = maxval; maxidx = blob_idx + 1; mask[index] = maxidx; } } } #endif // USE_CUDA template<typename Dtype> void EltwiseLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { int* mask = NULL; const int count = top[0]->count(); Dtype* top_data = top[0]->mutable_gpu_data(); if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA switch (op_) { case EltwiseParameter_EltwiseOp_PROD: caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), top_data); for (int i = 2; i < bottom.size(); ++i) { caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_SUM: caffe_gpu_set(count, Dtype(0.), top_data); // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? for (int i = 0; i < bottom.size(); ++i) { caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.mutable_gpu_data(); // NOLINT_NEXT_LINE(whitespace/operators) MaxForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data, mask); for (int i = 2; i < bottom.size(); ++i) { // NOLINT_NEXT_LINE(whitespace/operators) MaxForward<Dtype> CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, top_data, bottom[i]->gpu_data(), i-1, top_data, mask); } break; default: { LOG(FATAL)<< "Unknown elementwise operation."; } } #endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_->id()); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: { greentea_gpu_mul<Dtype>(this->device_context_->id(), count, (cl_mem)(bottom[0]->gpu_data()), 0, (cl_mem)(bottom[1]->gpu_data()), 0, (cl_mem)top_data, 0); for (int i = 2; i < bottom.size(); ++i) { greentea_gpu_mul<Dtype>(this->device_context_->id(), count, (cl_mem)top_data, 0, (cl_mem)(bottom[i]->gpu_data()), 0, (cl_mem)top_data, 0); } } break; case EltwiseParameter_EltwiseOp_SUM: { greentea_gpu_set<Dtype>(this->device_context_->id(), count, 0, (cl_mem)top_data, 0); for (int i = 0; i < bottom.size(); ++i) { greentea_gpu_axpy<Dtype>(this->device_context_->id(), count, coeffs_[i], (cl_mem)(bottom[i]->gpu_data()), 0, (cl_mem)top_data, 0); } } break; case EltwiseParameter_EltwiseOp_MAX: { mask = max_idx_.mutable_gpu_data(); viennacl::ocl::kernel &oclk_max_forward = program.get_kernel( CL_KERNEL_SELECT("eltwise_max_forward")); viennacl::ocl::enqueue( oclk_max_forward(count, WrapHandle((cl_mem)(bottom[0]->gpu_data()), &ctx), 
WrapHandle((cl_mem)(bottom[1]->gpu_data()), &ctx), 0, WrapHandle((cl_mem)top_data, &ctx), WrapHandle((cl_mem)mask, &ctx)), ctx.get_queue()); for (int i = 2; i < bottom.size(); ++i) { viennacl::ocl::enqueue( oclk_max_forward(count, WrapHandle((cl_mem)(top_data), &ctx), WrapHandle((cl_mem)(bottom[i]->gpu_data()), &ctx), i-1, WrapHandle((cl_mem)top_data, &ctx), WrapHandle((cl_mem)mask, &ctx)), ctx.get_queue()); } } break; default: { LOG(FATAL)<< "Unknown elementwise operation."; } } #endif // USE_GREENTEA } } #ifdef USE_CUDA template<typename Dtype> __global__ void MaxBackward(const int nthreads, const Dtype* top_diff, const int blob_idx, const int* mask, Dtype* bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { Dtype gradient = 0; if (mask[index] == blob_idx) { gradient += top_diff[index]; } bottom_diff[index] = gradient; } } #endif // USE_CUDA template<typename Dtype> void EltwiseLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const int* mask = NULL; const int count = top[0]->count(); const Dtype* top_data = top[0]->gpu_data(); const Dtype* top_diff = top[0]->gpu_diff(); if (this->device_context_->backend() == BACKEND_CUDA) { #ifdef USE_CUDA for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: if (stable_prod_grad_) { bool initialized = false; for (int j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { caffe_copy(count, bottom[j]->gpu_data(), bottom_diff); initialized = true; } else { caffe_gpu_mul(count, bottom[j]->gpu_data(), bottom_diff, bottom_diff); } } } else { caffe_gpu_div(count, top_data, bottom_data, bottom_diff); } caffe_gpu_mul(count, bottom_diff, top_diff, bottom_diff); break; case EltwiseParameter_EltwiseOp_SUM: if (coeffs_[i] == Dtype(1.)) { caffe_copy(count, top_diff, bottom_diff); } else { caffe_gpu_scale(count, coeffs_[i], top_diff, bottom_diff); } break; case EltwiseParameter_EltwiseOp_MAX: mask = max_idx_.gpu_data(); MaxBackward<Dtype> // NOLINT_NEXT_LINE(whitespace/operators) CUDA_KERNEL(CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS)( count, top_diff, i, mask, bottom_diff); break; default: { LOG(FATAL)<< "Unknown elementwise operation."; } } } } #endif // USE_CUDA } else { #ifdef USE_GREENTEA viennacl::ocl::context &ctx = viennacl::ocl::get_context( this->device_context_->id()); viennacl::ocl::program &program = Caffe::Get().GetDeviceProgram( this->device_context_->id()); for (int i = 0; i < bottom.size(); ++i) { if (propagate_down[i]) { const Dtype* bottom_data = bottom[i]->gpu_data(); Dtype* bottom_diff = bottom[i]->mutable_gpu_diff(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: { if (stable_prod_grad_) { bool initialized = false; for (int j = 0; j < bottom.size(); ++j) { if (i == j) { continue; } if (!initialized) { greentea_copy<Dtype>(count, (cl_mem)(bottom[j]->gpu_data()), 0, (cl_mem)(bottom_diff), 0, &ctx); initialized = true; } else { greentea_gpu_mul<Dtype>(this->device_context_->id(), count, (cl_mem)bottom[j]->gpu_data(), 0, (cl_mem)bottom_diff, 0, (cl_mem)bottom_diff, 0); } } } else { greentea_gpu_div<Dtype>(this->device_context_->id(), count, (cl_mem)top_data, 0, (cl_mem)bottom_data, 0, (cl_mem)bottom_diff, 0); } greentea_gpu_mul<Dtype>(this->device_context_->id(), count, (cl_mem)bottom_diff, 0, (cl_mem)top_diff, 0, (cl_mem)bottom_diff, 0); } break; case 
EltwiseParameter_EltwiseOp_SUM: { if (coeffs_[i] == Dtype(1.)) { greentea_copy<Dtype>(count, (cl_mem)top_diff, 0, (cl_mem)bottom_diff, 0, &ctx); } else { greentea_gpu_scale<Dtype>(this->device_context_->id(), count, coeffs_[i], (cl_mem)top_diff, 0, (cl_mem)bottom_diff, 0); } } break; case EltwiseParameter_EltwiseOp_MAX: { mask = max_idx_.gpu_data(); viennacl::ocl::kernel &oclk_max_backward = program.get_kernel( CL_KERNEL_SELECT("eltwise_max_backward")); viennacl::ocl::enqueue( oclk_max_backward(count, WrapHandle((cl_mem)top_diff, &ctx), i, WrapHandle((cl_mem)mask, &ctx), WrapHandle((cl_mem)bottom_diff, &ctx)), ctx.get_queue()); } break; default: { LOG(FATAL)<< "Unknown elementwise operation."; } } } } #endif } } INSTANTIATE_LAYER_GPU_FUNCS(EltwiseLayer); } // namespace caffe
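The MAX branch above stores an argmax mask during the forward pass so that Backward_gpu can route each gradient element only to the bottom blob that produced the corresponding output. Below is a minimal, self-contained CUDA sketch of that pattern, reduced to a single pairwise step and stripped of the Caffe/GreenTea plumbing; kernel and variable names are illustrative, not part of Caffe's API.

#include <cstdio>
#include <cuda_runtime.h>

// Forward: keep the elementwise max of a and b and remember which input won.
__global__ void maxForwardPair(int n, const float* a, const float* b,
                               int idx_a, int idx_b, float* top, int* mask) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    if (a[i] > b[i]) { top[i] = a[i]; mask[i] = idx_a; }
    else             { top[i] = b[i]; mask[i] = idx_b; }
  }
}

// Backward: only the input recorded in the mask receives the gradient.
__global__ void maxBackwardMasked(int n, const float* top_diff, int blob_idx,
                                  const int* mask, float* bottom_diff) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) {
    bottom_diff[i] = (mask[i] == blob_idx) ? top_diff[i] : 0.0f;
  }
}

int main() {
  const int n = 4;
  float ha[n] = {1.f, 5.f, 2.f, 0.f};
  float hb[n] = {3.f, 4.f, 2.f, 7.f};
  float hones[n] = {1.f, 1.f, 1.f, 1.f};  // incoming top gradient, all ones
  float *da, *db, *dtop, *dtop_diff, *dbottom0_diff;
  int *dmask;
  cudaMalloc(&da, n * sizeof(float));
  cudaMalloc(&db, n * sizeof(float));
  cudaMalloc(&dtop, n * sizeof(float));
  cudaMalloc(&dtop_diff, n * sizeof(float));
  cudaMalloc(&dbottom0_diff, n * sizeof(float));
  cudaMalloc(&dmask, n * sizeof(int));
  cudaMemcpy(da, ha, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(db, hb, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dtop_diff, hones, n * sizeof(float), cudaMemcpyHostToDevice);

  maxForwardPair<<<1, 64>>>(n, da, db, 0, 1, dtop, dmask);
  maxBackwardMasked<<<1, 64>>>(n, dtop_diff, 0, dmask, dbottom0_diff);

  float htop[n], hdiff[n];
  int hmask[n];
  cudaMemcpy(htop, dtop, n * sizeof(float), cudaMemcpyDeviceToHost);
  cudaMemcpy(hmask, dmask, n * sizeof(int), cudaMemcpyDeviceToHost);
  cudaMemcpy(hdiff, dbottom0_diff, n * sizeof(float), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i)
    printf("top=%g mask=%d grad_to_bottom0=%g\n", htop[i], hmask[i], hdiff[i]);

  cudaFree(da); cudaFree(db); cudaFree(dtop);
  cudaFree(dtop_diff); cudaFree(dbottom0_diff); cudaFree(dmask);
  return 0;
}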
91950714dbfcdd5c7c3dad32016c87925243119a.hip
// !!! This is a file automatically generated by hipify!!! /* thrust::device_vector<float> td_A(nr_rows_A * nr_cols_A), td_B(nr_rows_B * nr_cols_B), td_C(nr_rows_C * nr_cols_C); float *h_A = (float *)malloc(nr_rows_A * nr_cols_A * sizeof(float)); float *h_B = (float *)malloc(nr_rows_B * nr_cols_B * sizeof(float)); float *h_C = (float *)malloc(nr_rows_C * nr_cols_C * sizeof(float)); // Allocate 3 arrays on GPU float *d_A, *d_B, *d_C; hipMalloc(&d_A,nr_rows_A * nr_cols_A * sizeof(float)); hipMalloc(&d_B,nr_rows_B * nr_cols_B * sizeof(float)); hipMalloc(&d_C,nr_rows_C * nr_cols_C * sizeof(float)); // Fill the arrays A and B on GPU with random numbers GPU_fill_rand(d_A, nr_rows_A, nr_cols_A); GPU_fill_rand(d_B, nr_rows_B, nr_cols_B); GPU_fill_rand(thrust::raw_pointer_cast(&td_A[0]), nr_rows_A, nr_cols_A); GPU_fill_rand(thrust::raw_pointer_cast(&td_B[0]), nr_rows_B, nr_cols_B); */ /* // Optionally we can print the data std::cout << "A =" << std::endl; print_matrix(td_A, nr_rows_A, nr_cols_A); std::cout << "B =" << std::endl; print_matrix(td_B, nr_rows_B, nr_cols_B); // Optionally we can copy the data back on CPU and print the arrays hipMemcpy(h_A,thrust::raw_pointer_cast(&td_A[0]),nr_rows_A * nr_cols_A * sizeof(float),hipMemcpyDeviceToHost); hipMemcpy(h_B,thrust::raw_pointer_cast(&td_B[0]),nr_rows_B * nr_cols_B * sizeof(float),hipMemcpyDeviceToHost); std::cout << "A =" << std::endl; print_matrix(h_A, nr_rows_A, nr_cols_A); std::cout << "B =" << std::endl; print_matrix(h_B, nr_rows_B, nr_cols_B); */ /* //Print the result std::cout << "C =" << std::endl; print_matrix(td_C, nr_rows_C, nr_cols_C); // Copy (and print) the result on host memory hipMemcpy(h_C,thrust::raw_pointer_cast(&td_C[0]),nr_rows_C * nr_cols_C * sizeof(float),hipMemcpyDeviceToHost); std::cout << "C =" << std::endl; print_matrix(h_C, nr_rows_C, nr_cols_C); //Free GPU memory hipFree(d_A); hipFree(d_B); hipFree(d_C); // Free CPU memory free(h_A); free(h_B); free(h_C); */ // Multiply A and B on GPU //gpu_blas_mmul(handle, tmA.devicePointer(), tmB.devicePointer(), tmC.devicePointer(), tmA.numRows(), tmA.numColumns(), tmB.numColumns()); //gpu_blas_mmul(handle, thrust::raw_pointer_cast(&td_A[0]), thrust::raw_pointer_cast(&td_A[0]), thrust::raw_pointer_cast(&td_C[0]), nr_rows_A, nr_cols_A, nr_cols_B); /* float constalpha = 1; float constbeta = 0; unsigned int newChunk = 4, oldChunk = 6, size = 5; hipblasSgeam(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, newChunk, size, &constalpha, thrust::raw_pointer_cast(&W1[0]), oldChunk, &constbeta, thrust::raw_pointer_cast(&W1[0]), oldChunk, thrust::raw_pointer_cast(&W2[0]), newChunk); */ /* // summing up columns thrust::device_vector<float> x(M); thrust::fill(x.begin(), x.end(), 1); hipblasSgemv(handle, HIPBLAS_OP_N, tmB.numRows(), tmB.numColumns(), &alpha, tmB.devicePointer(), tmB.numRows(), thrust::raw_pointer_cast(&x[0]), 1, &beta, thrust::raw_pointer_cast(&y[0]), 1); */ //tmC.multiplyByConstant(10.0); //tmC.printBlasMajor(); // correct p //tmC.printRowMajor(); /* //C = alpha*op(A)*op(B) + beta*C void matrixMatrixMultiply( hipblasHandle_t &handle, float alpha, hipblasOperation_t operationOnA, ThrustMatrix &A, hipblasOperation_t operationOnB, ThrustMatrix &B, float beta, ThrustMatrix &C ) { if (A.numColumns() != B.numRows()) { cout << "k does not match for matrix A and B, exiting\n"; return; } if (beta !=0 and !(C.numRows() == A.numRows() and C.numColumns() == B.numColumns())) { cout << "size mismatch in C, exiting\n"; return; } // hipblasSgemm(hipblasHandle_t handle, hipblasOperation_t transa, 
hipblasOperation_t transb, int m, int n, int k, // float *alpha, float *A, int lda, float *B, int ldb, float *beta, float *C, int ldc) unsigned int m = A.numRows(), n = B.numColumns(), k = A.numColumns(); unsigned int &lda = m, &ldb = k, &ldc = m; if (operationOnA == HIPBLAS_OP_T) { m = A.numColumns(); k = A.numRows(); } if (operationOnB == HIPBLAS_OP_T) { m = A.numRows(); n = B.numRows(); } //if (beta == 0) C.resize(m, n); hipblasSgemm(handle, operationOnA, operationOnB, m, n, k, &alpha, A.devicePointer(), lda, B.devicePointer(), ldb, &beta, C.devicePointer(), ldc); } */ /* void matrixVectorMultiply( hipblasHandle_t &handle, float alpha, hipblasOperation_t operationOnA, ThrustMatrix &A, float beta, thrust::device_vector<float> &x ) { hipblasSgemv(handle, operationOnA, A.numRows(), A.numColumns(), &alpha, A.devicePointer(), A.numRows(), thrust::raw_pointer_cast(x.data()), 1, &beta, float *y, int incy) } hipblasStatus_t hipblasSgemv(hipblasHandle_t handle, hipblasOperation_t trans, int m, int n, const float *alpha, const float *A, int lda, const float *x, int incx, const float *beta, float *y, int incy) y = alpha*op(A)*x + beta*y; */ /* //cout << "A in row major =" << endl; //tmA.printRowMajor(); //cout << "A in col major (what blas sees) =" << endl; //tmA.printBlasMajor(); //cout << "B in col major =" << endl; //tmB.printBlasMajor(); float alpha = 1.0/M, beta = 0; //int m = ; n; k; hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_T, tmA.numRows(), tmB.numRows(), tmA.numColumns(), &alpha, tmA.devicePointer(), tmA.numRows(), tmB.devicePointer(), tmB.numRows(), &beta, tmC.devicePointer(), tmA.numRows()); */ /* unsigned int oldchunkSize = R+K-1; thrust::device_vector<float> U(ZTable.size() * oldchunkSize); // M is by definition equal to ZTable, or the list of data values, normalized thrust::counting_iterator<unsigned int> countBegin(0); thrust::counting_iterator<unsigned int> countEnd = countBegin + U.size(); // generate B for k=1 (root case) thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(countBegin, U.begin())), thrust::make_zip_iterator(thrust::make_tuple(countEnd, U.end())), RootCaseBSplineFunctor<float>(TTable, ZTable, oldchunkSize)); //printDeviceVector(U); print_matrix_rowMajor<float>(TTable, R+K); print_matrix_rowMajor<float>(U, oldchunkSize); thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(countBegin, U.begin(), U.begin()+1, V.begin())), thrust::make_zip_iterator(thrust::make_tuple(countEnd, U.end()-1, U.end(), V.end())), BSplineFunctor<T>(TTable, ZTable, oldchunkSize, tempK)); */ /* for (map<string, thrust::device_vector<float> >::iterator it = listOfDeviceVectors.begin(); it != listOfDeviceVectors.end(); ++it) { cout << it->first << ": " << it->second.size() << endl; } cout << "values for YPR199C:\t\t"; printDeviceVector<float>(listOfDeviceVectors["YPR199C"]); */
91950714dbfcdd5c7c3dad32016c87925243119a.cu
/* thrust::device_vector<float> td_A(nr_rows_A * nr_cols_A), td_B(nr_rows_B * nr_cols_B), td_C(nr_rows_C * nr_cols_C); float *h_A = (float *)malloc(nr_rows_A * nr_cols_A * sizeof(float)); float *h_B = (float *)malloc(nr_rows_B * nr_cols_B * sizeof(float)); float *h_C = (float *)malloc(nr_rows_C * nr_cols_C * sizeof(float)); // Allocate 3 arrays on GPU float *d_A, *d_B, *d_C; cudaMalloc(&d_A,nr_rows_A * nr_cols_A * sizeof(float)); cudaMalloc(&d_B,nr_rows_B * nr_cols_B * sizeof(float)); cudaMalloc(&d_C,nr_rows_C * nr_cols_C * sizeof(float)); // Fill the arrays A and B on GPU with random numbers GPU_fill_rand(d_A, nr_rows_A, nr_cols_A); GPU_fill_rand(d_B, nr_rows_B, nr_cols_B); GPU_fill_rand(thrust::raw_pointer_cast(&td_A[0]), nr_rows_A, nr_cols_A); GPU_fill_rand(thrust::raw_pointer_cast(&td_B[0]), nr_rows_B, nr_cols_B); */ /* // Optionally we can print the data std::cout << "A =" << std::endl; print_matrix(td_A, nr_rows_A, nr_cols_A); std::cout << "B =" << std::endl; print_matrix(td_B, nr_rows_B, nr_cols_B); // Optionally we can copy the data back on CPU and print the arrays cudaMemcpy(h_A,thrust::raw_pointer_cast(&td_A[0]),nr_rows_A * nr_cols_A * sizeof(float),cudaMemcpyDeviceToHost); cudaMemcpy(h_B,thrust::raw_pointer_cast(&td_B[0]),nr_rows_B * nr_cols_B * sizeof(float),cudaMemcpyDeviceToHost); std::cout << "A =" << std::endl; print_matrix(h_A, nr_rows_A, nr_cols_A); std::cout << "B =" << std::endl; print_matrix(h_B, nr_rows_B, nr_cols_B); */ /* //Print the result std::cout << "C =" << std::endl; print_matrix(td_C, nr_rows_C, nr_cols_C); // Copy (and print) the result on host memory cudaMemcpy(h_C,thrust::raw_pointer_cast(&td_C[0]),nr_rows_C * nr_cols_C * sizeof(float),cudaMemcpyDeviceToHost); std::cout << "C =" << std::endl; print_matrix(h_C, nr_rows_C, nr_cols_C); //Free GPU memory cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); // Free CPU memory free(h_A); free(h_B); free(h_C); */ // Multiply A and B on GPU //gpu_blas_mmul(handle, tmA.devicePointer(), tmB.devicePointer(), tmC.devicePointer(), tmA.numRows(), tmA.numColumns(), tmB.numColumns()); //gpu_blas_mmul(handle, thrust::raw_pointer_cast(&td_A[0]), thrust::raw_pointer_cast(&td_A[0]), thrust::raw_pointer_cast(&td_C[0]), nr_rows_A, nr_cols_A, nr_cols_B); /* float constalpha = 1; float constbeta = 0; unsigned int newChunk = 4, oldChunk = 6, size = 5; cublasSgeam(handle, CUBLAS_OP_N, CUBLAS_OP_N, newChunk, size, &constalpha, thrust::raw_pointer_cast(&W1[0]), oldChunk, &constbeta, thrust::raw_pointer_cast(&W1[0]), oldChunk, thrust::raw_pointer_cast(&W2[0]), newChunk); */ /* // summing up columns thrust::device_vector<float> x(M); thrust::fill(x.begin(), x.end(), 1); cublasSgemv(handle, CUBLAS_OP_N, tmB.numRows(), tmB.numColumns(), &alpha, tmB.devicePointer(), tmB.numRows(), thrust::raw_pointer_cast(&x[0]), 1, &beta, thrust::raw_pointer_cast(&y[0]), 1); */ //tmC.multiplyByConstant(10.0); //tmC.printBlasMajor(); // correct p //tmC.printRowMajor(); /* //C = alpha*op(A)*op(B) + beta*C void matrixMatrixMultiply( cublasHandle_t &handle, float alpha, cublasOperation_t operationOnA, ThrustMatrix &A, cublasOperation_t operationOnB, ThrustMatrix &B, float beta, ThrustMatrix &C ) { if (A.numColumns() != B.numRows()) { cout << "k does not match for matrix A and B, exiting\n"; return; } if (beta !=0 and !(C.numRows() == A.numRows() and C.numColumns() == B.numColumns())) { cout << "size mismatch in C, exiting\n"; return; } // cublasSgemm(cublasHandle_t handle, cublasOperation_t transa, cublasOperation_t transb, int m, int n, int k, // float *alpha, 
float *A, int lda, float *B, int ldb, float *beta, float *C, int ldc) unsigned int m = A.numRows(), n = B.numColumns(), k = A.numColumns(); unsigned int &lda = m, &ldb = k, &ldc = m; if (operationOnA == CUBLAS_OP_T) { m = A.numColumns(); k = A.numRows(); } if (operationOnB == CUBLAS_OP_T) { m = A.numRows(); n = B.numRows(); } //if (beta == 0) C.resize(m, n); cublasSgemm(handle, operationOnA, operationOnB, m, n, k, &alpha, A.devicePointer(), lda, B.devicePointer(), ldb, &beta, C.devicePointer(), ldc); } */ /* void matrixVectorMultiply( cublasHandle_t &handle, float alpha, cublasOperation_t operationOnA, ThrustMatrix &A, float beta, thrust::device_vector<float> &x ) { cublasSgemv(handle, operationOnA, A.numRows(), A.numColumns(), &alpha, A.devicePointer(), A.numRows(), thrust::raw_pointer_cast(x.data()), 1, &beta, float *y, int incy) } cublasStatus_t cublasSgemv(cublasHandle_t handle, cublasOperation_t trans, int m, int n, const float *alpha, const float *A, int lda, const float *x, int incx, const float *beta, float *y, int incy) y = alpha*op(A)*x + beta*y; */ /* //cout << "A in row major =" << endl; //tmA.printRowMajor(); //cout << "A in col major (what blas sees) =" << endl; //tmA.printBlasMajor(); //cout << "B in col major =" << endl; //tmB.printBlasMajor(); float alpha = 1.0/M, beta = 0; //int m = ; n; k; cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_T, tmA.numRows(), tmB.numRows(), tmA.numColumns(), &alpha, tmA.devicePointer(), tmA.numRows(), tmB.devicePointer(), tmB.numRows(), &beta, tmC.devicePointer(), tmA.numRows()); */ /* unsigned int oldchunkSize = R+K-1; thrust::device_vector<float> U(ZTable.size() * oldchunkSize); // M is by definition equal to ZTable, or the list of data values, normalized thrust::counting_iterator<unsigned int> countBegin(0); thrust::counting_iterator<unsigned int> countEnd = countBegin + U.size(); // generate B for k=1 (root case) thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(countBegin, U.begin())), thrust::make_zip_iterator(thrust::make_tuple(countEnd, U.end())), RootCaseBSplineFunctor<float>(TTable, ZTable, oldchunkSize)); //printDeviceVector(U); print_matrix_rowMajor<float>(TTable, R+K); print_matrix_rowMajor<float>(U, oldchunkSize); thrust::for_each(thrust::make_zip_iterator(thrust::make_tuple(countBegin, U.begin(), U.begin()+1, V.begin())), thrust::make_zip_iterator(thrust::make_tuple(countEnd, U.end()-1, U.end(), V.end())), BSplineFunctor<T>(TTable, ZTable, oldchunkSize, tempK)); */ /* for (map<string, thrust::device_vector<float> >::iterator it = listOfDeviceVectors.begin(); it != listOfDeviceVectors.end(); ++it) { cout << it->first << ": " << it->second.size() << endl; } cout << "values for YPR199C:\t\t"; printDeviceVector<float>(listOfDeviceVectors["YPR199C"]); */
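The commented-out wrappers above revolve around cuBLAS's column-major conventions; note in particular that lda, ldb and ldc are declared as references to m and k, so the transpose branches that reassign m and k also silently change the leading dimensions. The sketch below is a minimal, runnable cublasSgemm call for the non-transposed case with the leading dimensions spelled out explicitly; matrix sizes and values are made up for illustration, and the HIP version of this scratch file would use hipblasSgemm/HIPBLAS_OP_N in the same positions.

#include <cstdio>
#include <vector>
#include <cublas_v2.h>
#include <cuda_runtime.h>

int main() {
  const int m = 2, k = 3, n = 2;                 // A is m x k, B is k x n, C is m x n
  std::vector<float> A = {1, 4, 2, 5, 3, 6};     // column-major 2x3
  std::vector<float> B = {7, 9, 11, 8, 10, 12};  // column-major 3x2
  std::vector<float> C(m * n, 0.0f);

  float *dA, *dB, *dC;
  cudaMalloc(&dA, A.size() * sizeof(float));
  cudaMalloc(&dB, B.size() * sizeof(float));
  cudaMalloc(&dC, C.size() * sizeof(float));
  cudaMemcpy(dA, A.data(), A.size() * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(dB, B.data(), B.size() * sizeof(float), cudaMemcpyHostToDevice);

  cublasHandle_t handle;
  cublasCreate(&handle);
  const float alpha = 1.0f, beta = 0.0f;
  // lda = m, ldb = k, ldc = m: the allocated row counts of the stored matrices.
  // These stay the same even when CUBLAS_OP_T is requested; only m, n, k change meaning.
  cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, m, n, k,
              &alpha, dA, m, dB, k, &beta, dC, m);

  cudaMemcpy(C.data(), dC, C.size() * sizeof(float), cudaMemcpyDeviceToHost);
  for (int col = 0; col < n; ++col)
    for (int row = 0; row < m; ++row)
      printf("C(%d,%d) = %g\n", row, col, C[col * m + row]);

  cublasDestroy(handle);
  cudaFree(dA); cudaFree(dB); cudaFree(dC);
  return 0;
}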
431d13df0d7912b3c4cc3ceaed0da2a8fff28cb3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "unary_op_grad_impl.cuh" template <typename T> __global__ void SqrtGradKernel(const T *input, const T *dout, T *output, const size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { float input_f = static_cast<float>(input[i]); float dout_f = static_cast<float>(dout[i]); float res_vmul = dout_f / (2.0 * input_f); output[i] = static_cast<T>(res_vmul); } return; } template <typename T> __global__ void RsqrtGradKernel(const T *input, const T *dout, T *output, const size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { float input_f = static_cast<float>(input[i]); float dout_f = static_cast<float>(dout[i]); float res_vmul = input_f * input_f * input_f; res_vmul = -0.5 * res_vmul * dout_f; output[i] = static_cast<T>(res_vmul); } return; } template <typename T> __global__ void AsinGradKernel(const T *input, const T *dout, T *output, const size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { T one = 1; T sqt = sqrtf(one - input[i] * input[i]); output[i] = dout[i] / sqt; } return; } template <> __global__ void AsinGradKernel(const half *input, const half *dout, half *output, const size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { half one = 1; half sqt = hsqrt(one - input[i] * input[i]); output[i] = dout[i] / sqt; } return; } template <typename T> __global__ void ACosGradKernel(const T *input, const T *dout, T *output, const size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { T neg_one = -1; T one = 1; T sqt = sqrtf(one - input[i] * input[i]); output[i] = neg_one * dout[i] / sqt; } return; } template <> __global__ void ACosGradKernel(const half *input, const half *dout, half *output, const size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { half neg_one = -1; half one = 1; half sqt = hsqrt(one - input[i] * input[i]); output[i] = neg_one * dout[i] / sqt; } return; } template <typename T> __global__ void AtanGradKernel(const T *input, const T *dout, T *output, const size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { T one = 1; T divisor = one + input[i] * input[i]; output[i] = dout[i] / divisor; } return; } template <typename T> __global__ void AsinhGradKernel(const T *input, const T *dout, T *output, const size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { float inputf = static_cast<float>(input[i]); T coshy = static_cast<T>(coshf(inputf)); output[i] = dout[i] / coshy; } return; } template <typename T> __global__ void 
AcoshGradKernel(const T *input, const T *dout, T *output, const size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { float inputf = static_cast<float>(input[i]); T sinhy = static_cast<T>(sinhf(inputf)); output[i] = dout[i] / sinhy; } return; } template <typename T> void SqrtGrad(const T *input, const T *dout, T *output, const size_t count, hipStream_t cuda_stream) { hipLaunchKernelGGL(( SqrtGradKernel), dim3(GET_BLOCKS(count)), dim3(GET_THREADS), 0, cuda_stream, input, dout, output, count); return; } template <typename T> void RsqrtGrad(const T *input, const T *dout, T *output, const size_t count, hipStream_t cuda_stream) { hipLaunchKernelGGL(( RsqrtGradKernel), dim3(GET_BLOCKS(count)), dim3(GET_THREADS), 0, cuda_stream, input, dout, output, count); return; } template <typename T> void AsinGrad(const T *input, const T *dout, T *output, const size_t count, hipStream_t cuda_stream) { hipLaunchKernelGGL(( AsinGradKernel), dim3(GET_BLOCKS(count)), dim3(GET_THREADS), 0, cuda_stream, input, dout, output, count); return; } template <typename T> void ACosGrad(const T *input, const T *dout, T *output, const size_t count, hipStream_t cuda_stream) { hipLaunchKernelGGL(( ACosGradKernel), dim3(GET_BLOCKS(count)), dim3(GET_THREADS), 0, cuda_stream, input, dout, output, count); return; } template <typename T> void AtanGrad(const T *input, const T *dout, T *output, const size_t count, hipStream_t cuda_stream) { hipLaunchKernelGGL(( AtanGradKernel), dim3(GET_BLOCKS(count)), dim3(GET_THREADS), 0, cuda_stream, input, dout, output, count); return; } template <typename T> void AsinhGrad(const T *input, const T *dout, T *output, const size_t count, hipStream_t cuda_stream) { hipLaunchKernelGGL(( AsinhGradKernel), dim3(GET_BLOCKS(count)), dim3(GET_THREADS), 0, cuda_stream, input, dout, output, count); return; } template <typename T> void AcoshGrad(const T *input, const T *dout, T *output, const size_t count, hipStream_t cuda_stream) { hipLaunchKernelGGL(( AcoshGradKernel), dim3(GET_BLOCKS(count)), dim3(GET_THREADS), 0, cuda_stream, input, dout, output, count); return; } template void SqrtGrad<float>(const float *input, const float *dout, float *output, const size_t count, hipStream_t cuda_stream); template void RsqrtGrad<float>(const float *input, const float *dout, float *output, const size_t count, hipStream_t cuda_stream); template void AsinGrad<float>(const float *input, const float *dout, float *output, const size_t count, hipStream_t cuda_stream); template void ACosGrad<float>(const float *input, const float *dout, float *output, const size_t count, hipStream_t cuda_stream); template void AtanGrad<float>(const float *input, const float *dout, float *output, const size_t count, hipStream_t cuda_stream); template void AsinhGrad<float>(const float *input, const float *dout, float *output, const size_t count, hipStream_t cuda_stream); template void AcoshGrad<float>(const float *input, const float *dout, float *output, const size_t count, hipStream_t cuda_stream); template void SqrtGrad<half>(const half *input, const half *dout, half *output, const size_t count, hipStream_t cuda_stream); template void RsqrtGrad<half>(const half *input, const half *dout, half *output, const size_t count, hipStream_t cuda_stream); template void AsinGrad<half>(const half *input, const half *dout, half *output, const size_t count, hipStream_t cuda_stream); template void ACosGrad<half>(const half *input, const half *dout, half *output, const size_t count, hipStream_t 
cuda_stream);
template void AtanGrad<half>(const half *input, const half *dout, half *output, const size_t count, hipStream_t cuda_stream);
template void AsinhGrad<half>(const half *input, const half *dout, half *output, const size_t count, hipStream_t cuda_stream);
template void AcoshGrad<half>(const half *input, const half *dout, half *output, const size_t count, hipStream_t cuda_stream);
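Compared with the CUDA original that follows, the changes hipify makes in this file pair are the generated banner comment, the cudaStream_t to hipStream_t rename, and the kernel launch syntax: kernel<<<grid, block, sharedMem, stream>>>(args) becomes hipLaunchKernelGGL(kernel, grid, block, sharedMem, stream, args). A minimal HIP sketch of that mapping, with a hypothetical kernel name and assuming a HIP toolchain:

#include <cstdio>
#include <hip/hip_runtime.h>

// Hypothetical kernel used only to demonstrate the launch macro.
__global__ void scaleBy2(float* data, size_t n) {
  size_t i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= 2.0f;
}

int main() {
  const size_t n = 8;
  float h[n] = {0, 1, 2, 3, 4, 5, 6, 7};
  float* d;
  hipMalloc(&d, n * sizeof(float));
  hipMemcpy(d, h, n * sizeof(float), hipMemcpyHostToDevice);
  // Equivalent to scaleBy2<<<1, 64, 0, 0>>>(d, n) in the CUDA source.
  hipLaunchKernelGGL(scaleBy2, dim3(1), dim3(64), 0, 0, d, n);
  hipMemcpy(h, d, n * sizeof(float), hipMemcpyDeviceToHost);
  for (size_t i = 0; i < n; ++i) printf("%g ", h[i]);
  printf("\n");
  hipFree(d);
  return 0;
}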
431d13df0d7912b3c4cc3ceaed0da2a8fff28cb3.cu
/** * Copyright 2020 Huawei Technologies Co., Ltd * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "unary_op_grad_impl.cuh" template <typename T> __global__ void SqrtGradKernel(const T *input, const T *dout, T *output, const size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { float input_f = static_cast<float>(input[i]); float dout_f = static_cast<float>(dout[i]); float res_vmul = dout_f / (2.0 * input_f); output[i] = static_cast<T>(res_vmul); } return; } template <typename T> __global__ void RsqrtGradKernel(const T *input, const T *dout, T *output, const size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { float input_f = static_cast<float>(input[i]); float dout_f = static_cast<float>(dout[i]); float res_vmul = input_f * input_f * input_f; res_vmul = -0.5 * res_vmul * dout_f; output[i] = static_cast<T>(res_vmul); } return; } template <typename T> __global__ void AsinGradKernel(const T *input, const T *dout, T *output, const size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { T one = 1; T sqt = sqrtf(one - input[i] * input[i]); output[i] = dout[i] / sqt; } return; } template <> __global__ void AsinGradKernel(const half *input, const half *dout, half *output, const size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { half one = 1; half sqt = hsqrt(one - input[i] * input[i]); output[i] = dout[i] / sqt; } return; } template <typename T> __global__ void ACosGradKernel(const T *input, const T *dout, T *output, const size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { T neg_one = -1; T one = 1; T sqt = sqrtf(one - input[i] * input[i]); output[i] = neg_one * dout[i] / sqt; } return; } template <> __global__ void ACosGradKernel(const half *input, const half *dout, half *output, const size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { half neg_one = -1; half one = 1; half sqt = hsqrt(one - input[i] * input[i]); output[i] = neg_one * dout[i] / sqt; } return; } template <typename T> __global__ void AtanGradKernel(const T *input, const T *dout, T *output, const size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { T one = 1; T divisor = one + input[i] * input[i]; output[i] = dout[i] / divisor; } return; } template <typename T> __global__ void AsinhGradKernel(const T *input, const T *dout, T *output, const size_t count) { for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { float inputf = static_cast<float>(input[i]); T coshy = static_cast<T>(coshf(inputf)); output[i] = dout[i] / coshy; } return; } template <typename T> __global__ void AcoshGradKernel(const T *input, const T *dout, T *output, const size_t count) { for (size_t i = 
blockIdx.x * blockDim.x + threadIdx.x; i < (count); i += blockDim.x * gridDim.x) { float inputf = static_cast<float>(input[i]); T sinhy = static_cast<T>(sinhf(inputf)); output[i] = dout[i] / sinhy; } return; } template <typename T> void SqrtGrad(const T *input, const T *dout, T *output, const size_t count, cudaStream_t cuda_stream) { SqrtGradKernel<<<GET_BLOCKS(count), GET_THREADS, 0, cuda_stream>>>(input, dout, output, count); return; } template <typename T> void RsqrtGrad(const T *input, const T *dout, T *output, const size_t count, cudaStream_t cuda_stream) { RsqrtGradKernel<<<GET_BLOCKS(count), GET_THREADS, 0, cuda_stream>>>(input, dout, output, count); return; } template <typename T> void AsinGrad(const T *input, const T *dout, T *output, const size_t count, cudaStream_t cuda_stream) { AsinGradKernel<<<GET_BLOCKS(count), GET_THREADS, 0, cuda_stream>>>(input, dout, output, count); return; } template <typename T> void ACosGrad(const T *input, const T *dout, T *output, const size_t count, cudaStream_t cuda_stream) { ACosGradKernel<<<GET_BLOCKS(count), GET_THREADS, 0, cuda_stream>>>(input, dout, output, count); return; } template <typename T> void AtanGrad(const T *input, const T *dout, T *output, const size_t count, cudaStream_t cuda_stream) { AtanGradKernel<<<GET_BLOCKS(count), GET_THREADS, 0, cuda_stream>>>(input, dout, output, count); return; } template <typename T> void AsinhGrad(const T *input, const T *dout, T *output, const size_t count, cudaStream_t cuda_stream) { AsinhGradKernel<<<GET_BLOCKS(count), GET_THREADS, 0, cuda_stream>>>(input, dout, output, count); return; } template <typename T> void AcoshGrad(const T *input, const T *dout, T *output, const size_t count, cudaStream_t cuda_stream) { AcoshGradKernel<<<GET_BLOCKS(count), GET_THREADS, 0, cuda_stream>>>(input, dout, output, count); return; } template void SqrtGrad<float>(const float *input, const float *dout, float *output, const size_t count, cudaStream_t cuda_stream); template void RsqrtGrad<float>(const float *input, const float *dout, float *output, const size_t count, cudaStream_t cuda_stream); template void AsinGrad<float>(const float *input, const float *dout, float *output, const size_t count, cudaStream_t cuda_stream); template void ACosGrad<float>(const float *input, const float *dout, float *output, const size_t count, cudaStream_t cuda_stream); template void AtanGrad<float>(const float *input, const float *dout, float *output, const size_t count, cudaStream_t cuda_stream); template void AsinhGrad<float>(const float *input, const float *dout, float *output, const size_t count, cudaStream_t cuda_stream); template void AcoshGrad<float>(const float *input, const float *dout, float *output, const size_t count, cudaStream_t cuda_stream); template void SqrtGrad<half>(const half *input, const half *dout, half *output, const size_t count, cudaStream_t cuda_stream); template void RsqrtGrad<half>(const half *input, const half *dout, half *output, const size_t count, cudaStream_t cuda_stream); template void AsinGrad<half>(const half *input, const half *dout, half *output, const size_t count, cudaStream_t cuda_stream); template void ACosGrad<half>(const half *input, const half *dout, half *output, const size_t count, cudaStream_t cuda_stream); template void AtanGrad<half>(const half *input, const half *dout, half *output, const size_t count, cudaStream_t cuda_stream); template void AsinhGrad<half>(const half *input, const half *dout, half *output, const size_t count, cudaStream_t cuda_stream); template void 
AcoshGrad<half>(const half *input, const half *dout, half *output, const size_t count, cudaStream_t cuda_stream);
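A detail worth making explicit: the formula dout / (2.0 * input) in SqrtGradKernel only equals the sqrt derivative if input holds the forward output y = sqrt(x), since d sqrt(x)/dx = 1 / (2 * sqrt(x)); by the same reading, AsinGrad's dout / sqrt(1 - input * input) expects the forward input instead. The standalone sketch below checks the SqrtGrad convention for the float case; GET_BLOCKS/GET_THREADS from the real header are replaced by an explicit launch configuration, and the kernel is a local copy rather than the library's.

#include <cmath>
#include <cstdio>
#include <cuda_runtime.h>

// Local copy of the SqrtGrad formula: dx = dout / (2 * y), with y = sqrt(x).
__global__ void SqrtGradSketch(const float* y, const float* dout, float* dx, size_t n) {
  for (size_t i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
    dx[i] = dout[i] / (2.0f * y[i]);
  }
}

int main() {
  const size_t n = 3;
  float x[n] = {1.0f, 4.0f, 9.0f};
  float y[n], dout[n] = {1.0f, 1.0f, 1.0f}, dx[n];
  for (size_t i = 0; i < n; ++i) y[i] = std::sqrt(x[i]);

  float *dy, *ddout, *ddx;
  cudaMalloc(&dy, n * sizeof(float));
  cudaMalloc(&ddout, n * sizeof(float));
  cudaMalloc(&ddx, n * sizeof(float));
  cudaMemcpy(dy, y, n * sizeof(float), cudaMemcpyHostToDevice);
  cudaMemcpy(ddout, dout, n * sizeof(float), cudaMemcpyHostToDevice);

  SqrtGradSketch<<<1, 128>>>(dy, ddout, ddx, n);
  cudaMemcpy(dx, ddx, n * sizeof(float), cudaMemcpyDeviceToHost);

  for (size_t i = 0; i < n; ++i)
    printf("x=%g  computed=%g  analytic=%g\n", x[i], dx[i], 1.0f / (2.0f * std::sqrt(x[i])));
  cudaFree(dy); cudaFree(ddout); cudaFree(ddx);
  return 0;
}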
0172e535e957aef64fcae9fcfeb7f2d5fe15d232.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/inference/tensorrt/plugin/skip_groupnorm_act_op_plugin.h" #include <hipcub/hipcub.hpp> #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/common/layout.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { nvinfer1::DimsExprs SkipGroupnormActPluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputDims, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT { return inputDims[0]; } bool SkipGroupnormActPluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) TRT_NOEXCEPT { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of SkipGroupnormAct plugin shoule not be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); const nvinfer1::PluginTensorDesc &in = in_out[pos]; if (pos == 0) { if (with_fp16_) { return ((in.type == nvinfer1::DataType::kHALF) && (in.format == nvinfer1::PluginFormat::kHWC8)); } else { PADDLE_THROW(platform::errors::Fatal( "SkipGroupnormAct TRT Plugin is fp16 only so far")); return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR); } } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; // output return in.type == prev.type && in.format == prev.format; } nvinfer1::DataType SkipGroupnormActPluginDynamic::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const TRT_NOEXCEPT { PADDLE_ENFORCE_EQ( index, 0, platform::errors::InvalidArgument( "The SkipGroupnormAct Plugin only has one input, so the " "index value should be 0, but get %d.", index)); PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT || input_types[0] == nvinfer1::DataType::kHALF), true, platform::errors::InvalidArgument( "The input type should be half or float")); return input_types[0]; } int SkipGroupnormActPluginDynamic::initialize() TRT_NOEXCEPT { return 0; } static inline int32_t divUp(int32_t m, int32_t n) { return (m + n - 1) / n; } static int32_t findMaxDivisor(int32_t n, int32_t maxAllowedDivisor) { int32_t maxDivisor = -1; for (int32_t i = 1; i <= std::sqrt(n); i++) { if (n % i == 0) { int32_t divisor1 = n / i; int32_t divisor2 = i; if (divisor1 > maxDivisor && divisor1 < maxAllowedDivisor) { maxDivisor = divisor1; } if (divisor2 > maxDivisor && divisor2 < maxAllowedDivisor) { maxDivisor = divisor2; } } } return maxDivisor; } static inline __device__ __host__ float sigmoid(float x) { return 1.F / (1.F + expf(-x)); } struct GroupSums { // Is it the 
1st element of the group? int32_t flag; // The sum. float sum; // The sum of squares. float sumSq; }; struct GroupSumsOp { inline __device__ GroupSums operator()(GroupSums const &a, GroupSums const &b) { GroupSums dst; dst.sum = b.flag ? b.sum : (a.sum + b.sum); dst.sumSq = b.flag ? b.sumSq : (a.sumSq + b.sumSq); dst.flag = a.flag + b.flag; return dst; } }; template <int32_t tTHREADS_PER_BLOCK> __global__ void skipGroupNormNHWCSumKernel(GroupNormNHWCParams params) { // The object in charge of doing the sums for the different blocks. typedef hipcub::BlockScan<GroupSums, tTHREADS_PER_BLOCK> BlockScan; // Allocate shared memory for BlockScan. __shared__ typename BlockScan::TempStorage tempStorage; // Allocate shared memory for the groups. We could reduce the amount of shared // memory reserved. __shared__ float2 smem[tTHREADS_PER_BLOCK]; // The instance in the batch. int32_t ni = blockIdx.z; // The channel loaded by that thread (2 channels per thread for F16x2). int32_t ci = blockIdx.x * params.cPerBlock + threadIdx.x * 2; // The first activation loaded by that block. int32_t hwBegin = blockIdx.y * params.hwPerBlock; // The last activation loaded by that block. int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw); // The sums. float sum = 0.F; float sumSq = 0.F; // Iterate over the activations to compute the sums. for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) { // The offset. int64_t offset = static_cast<int64_t>(ni) * params.hwc + static_cast<int64_t>(hwi) * params.c + ci; // Fetch two channels per thread. __half2 h2(0, 0); if (ci < params.c) { // W = 1, H = 1 int64_t offsetY = static_cast<int64_t>(ni) * params.c + ci; __half2 y = *reinterpret_cast<__half2 const *>(&params.srcY[offsetY]); h2 = *reinterpret_cast<__half2 const *>(&params.srcX[offset]); #if __CUDA_ARCH__ >= 530 h2 = __hadd2(h2, y); #else float2 out{}; out.x = __half2float(h2.x) + __half2float(y.x); out.y = __half2float(h2.y) + __half2float(y.y); h2 = __float22half2_rn(out); #endif // elementwise_add *reinterpret_cast<__half2 *>(&params.dst[offset]) = h2; } // Extract the two half values. float2 f2 = __half22float2(h2); // Update the sum. sum += f2.x + f2.y; // Update the sum of squares. sumSq += f2.x * f2.x + f2.y * f2.y; } // The group that thread works on and the channel in the group (modulus). int32_t gi = threadIdx.x * 2 / params.cPerGroup; int32_t cj = threadIdx.x * 2 - params.cPerGroup * gi; // The data for the summations. GroupSums inp{cj == 0 ? 1 : 0, sum, sumSq}; // Do the segmented scan. GroupSums out; BlockScan(tempStorage).InclusiveScan(inp, out, GroupSumsOp()); // Store the results for the groups in shared memory (to produce coalesced // stores later). if (cj == params.cPerGroup - 2 /* 2 channels per thread */) { smem[gi] = make_float2(out.sum, out.sumSq); } // Make sure the data is in shared memory. __syncthreads(); // The global group index. int32_t gj = blockIdx.x * params.groupsPerBlock + threadIdx.x; // Threads that have nothing left to do, exit. if (threadIdx.x >= params.groupsPerBlock || gj >= params.groups) { return; } // The first threads (those storing to global memory, load the values). float2 sums = smem[threadIdx.x]; // Store to global memory. atomicAdd(&params.redBuffer[(2 * ni + 0) * params.groups + gj], sums.x); atomicAdd(&params.redBuffer[(2 * ni + 1) * params.groups + gj], sums.y); } void skipGroupNormNHWCSum(GroupNormNHWCParams const &params, hipStream_t stream) { // Make sure the values are as we expect. 
PADDLE_ENFORCE_EQ( params.c % params.cPerBlock, 0, platform::errors::InvalidArgument( "The groupNormNHWCSum of SkipGroupnormAct Plugin got wrong parameters" "params.c %% params.cPerBlock should be 0, but get %d.", params.c % params.cPerBlock)); PADDLE_ENFORCE_EQ( params.hw % params.hwPerBlock, 0, platform::errors::InvalidArgument( "The groupNormNHWCSum of SkipGroupnormAct Plugin got wrong parameters" "params.hw %% params.hwPerBlock should be 0, but get %d.", params.hw % params.hwPerBlock)); // Make sure a group does not span multiple blocks. PADDLE_ENFORCE_EQ( params.cPerBlock % params.cPerGroup, 0, platform::errors::InvalidArgument( "The groupNormNHWCSum of SkipGroupnormAct Plugin got wrong parameters" "params.cPerBlock %% params.cPerGroup should be 0, but get %d.", params.cPerBlock % params.cPerGroup)); dim3 grid; // The number of blocks to compute all the channels. grid.x = params.c / params.cPerBlock; // The number of blocks to compute all the activations in a given instance. grid.y = divUp(params.hw, params.hwPerBlock); // The number of instances. grid.z = params.n; switch (params.cPerBlock) { case 320: hipLaunchKernelGGL(( skipGroupNormNHWCSumKernel<160>), dim3(grid), dim3(160), 0, stream, params); break; case 480: hipLaunchKernelGGL(( skipGroupNormNHWCSumKernel<256>), dim3(grid), dim3(256), 0, stream, params); break; case 256: hipLaunchKernelGGL(( skipGroupNormNHWCSumKernel<128>), dim3(grid), dim3(128), 0, stream, params); break; case 128: hipLaunchKernelGGL(( skipGroupNormNHWCSumKernel<64>), dim3(grid), dim3(64), 0, stream, params); break; case 8: hipLaunchKernelGGL(( skipGroupNormNHWCSumKernel<4>), dim3(grid), dim3(4), 0, stream, params); break; default: PADDLE_THROW(platform::errors::Fatal( "The function groupNormNHWCSum of SkipGroupnormAct TRT Plugin " "encounter error")); } } template <int32_t tTHREADS_PER_BLOCK> __global__ void skipGroupNormNHWCScaleKernel(GroupNormNHWCParams params) { // The instance in the batch. int32_t ni = blockIdx.z; // The channel loaded by that thread (2 channels per thread for F16x2). int32_t ci = blockIdx.x * params.cPerBlock + threadIdx.x * 2; // The group that thread works on and the channel in the group (modulus). int32_t gi = ci / params.cPerGroup; // Load the sum and sum of squares for the group. float sum = 0.F, sumSq = 0.F; if (gi < params.groups) { sum = params.redBuffer[(2 * ni + 0) * params.groups + gi]; sumSq = params.redBuffer[(2 * ni + 1) * params.groups + gi]; } // Load gamma/beta. float2 gammaF2, betaF2; if (ci < params.c) { gammaF2 = *reinterpret_cast<float2 const *>( reinterpret_cast<float const *>(params.gamma) + ci); betaF2 = *reinterpret_cast<float2 const *>( reinterpret_cast<float const *>(params.beta) + ci); } // Compute the mean. float mean = sum * params.invHWC; // Compute the variance. float var = sumSq * params.invHWC - (mean * mean); // Compute the inverse of the stddev. float invStdDev = rsqrtf(var + params.eps); // The first activation loaded by that block. int32_t hwBegin = blockIdx.y * params.hwPerBlock; // The last activation loaded by that block. int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw); // Iterate over the activations to compute the sums. for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) { // The src/dst offset. int64_t offset = (int64_t)ni * params.hwc + hwi * params.c + ci; // Fetch two channels per thread. __half2 h2(0, 0); if (ci < params.c) { h2 = *reinterpret_cast<__half2 const *>(&params.dst[offset]); } // Extract the two half values. float2 f2 = __half22float2(h2); // Normalize the channels. 
f2.x = (f2.x - mean) * invStdDev; f2.y = (f2.y - mean) * invStdDev; // Scale by gamma and add beta. f2.x = gammaF2.x * f2.x + betaF2.x; f2.y = gammaF2.y * f2.y + betaF2.y; // Apply Swish if needed. if (params.withSwish) { f2.x = f2.x * sigmoid(f2.x); f2.y = f2.y * sigmoid(f2.y); } // Store the scaled values. if (ci < params.c) { *reinterpret_cast<__half2 *>(&params.dst[offset]) = __float22half2_rn(f2); } } } void skipGroupNormNHWCScale(GroupNormNHWCParams const &params, hipStream_t stream) { // Make sure the dimensions are aligned with what we expect. PADDLE_ENFORCE_EQ(params.c % params.cPerBlock, 0, platform::errors::InvalidArgument( "The groupNormNHWCScale of SkipGroupnormAct Plugin got " "wrong parameters" "params.c %% params.cPerBlock should be 0, but get %d.", params.c % params.cPerBlock)); // Make sure a group does not span multiple blocks. PADDLE_ENFORCE_EQ( params.cPerBlock % params.cPerGroup, 0, platform::errors::InvalidArgument( "The groupNormNHWCScale of SkipGroupnormAct Plugin got wrong " "parameters" "params.cPerBlock %% params.cPerGroup should be 0, but get %d.", params.cPerBlock % params.cPerGroup)); dim3 grid; // The number of blocks to compute all the channels. grid.x = params.c / params.cPerBlock; // The number of blocks to compute all the activations in a given instance. grid.y = divUp(params.hw, params.hwPerBlock); // The number of instances. grid.z = params.n; switch (params.cPerBlock) { case 320: hipLaunchKernelGGL(( skipGroupNormNHWCScaleKernel<160>), dim3(grid), dim3(160), 0, stream, params); break; case 480: hipLaunchKernelGGL(( skipGroupNormNHWCScaleKernel<256>), dim3(grid), dim3(256), 0, stream, params); break; case 256: hipLaunchKernelGGL(( skipGroupNormNHWCScaleKernel<128>), dim3(grid), dim3(128), 0, stream, params); break; case 128: hipLaunchKernelGGL(( skipGroupNormNHWCScaleKernel<64>), dim3(grid), dim3(64), 0, stream, params); break; case 8: hipLaunchKernelGGL(( skipGroupNormNHWCScaleKernel<4>), dim3(grid), dim3(4), 0, stream, params); break; default: PADDLE_THROW(platform::errors::Fatal( "The function groupNormNHWCSum of SkipGroupnormAct TRT Plugin " "encounter error")); } } int SkipGroupnormActPluginDynamic::enqueue( const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, hipStream_t stream) TRT_NOEXCEPT { auto input_type = input_desc[0].type; if (input_type == nvinfer1::DataType::kFLOAT) { VLOG(1) << "TRT Plugin DataType selected. SkipGroupnormAct-->fp32"; PADDLE_THROW(platform::errors::Fatal( "The SkipGroupnormAct TRT Plugin's only support fp16 input")); } else if (input_type == nvinfer1::DataType::kHALF) { VLOG(1) << "TRT Plugin DataType selected. 
SkipGroupnormAct-->fp16"; int32_t cPerBlock = 320; int32_t maxBlocksPerHW = 1024; switch (input_desc[0].dims.d[1]) { case 960: case 1920: cPerBlock = 480; break; case 512: case 256: cPerBlock = 256; break; case 128: cPerBlock = 128; break; default: cPerBlock = 320; } if (cPerBlock > input_desc[0].dims.d[1]) { cPerBlock = 8; } params_.withSwish = true; params_.dst = static_cast<half *>(outputs[0]); params_.srcX = static_cast<half const *>(inputs[0]); params_.srcY = static_cast<half const *>(inputs[1]); params_.gamma = scale_gpu_.get(); params_.beta = bias_gpu_.get(); params_.redBuffer = static_cast<float *>(workspace); params_.n = input_desc[0].dims.d[0]; params_.h = input_desc[0].dims.d[2]; params_.w = input_desc[0].dims.d[3]; params_.c = input_desc[0].dims.d[1]; params_.groups = groups_; params_.hw = params_.h * params_.w; const int32_t blocksPerHW = findMaxDivisor(params_.hw, maxBlocksPerHW); params_.hwPerBlock = divUp(params_.hw, blocksPerHW); params_.cPerBlock = cPerBlock; params_.cPerGroup = params_.c / params_.groups; params_.hwc = params_.hw * params_.c; params_.invHWC = 1.F / static_cast<float>(params_.hw * params_.cPerGroup); params_.groupsPerBlock = cPerBlock / params_.cPerGroup; params_.eps = eps_; hipMemsetAsync(params_.redBuffer, 0, ws_, stream); skipGroupNormNHWCSum(params_, stream); skipGroupNormNHWCScale(params_, stream); } else { // input not fp16 PADDLE_THROW(platform::errors::Fatal( "The SkipGroupnormAct TRT Plugin's only support fp16 input")); } return hipGetLastError() != hipSuccess; } } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
0172e535e957aef64fcae9fcfeb7f2d5fe15d232.cu
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. SPDX-FileCopyrightText: Copyright (c) 1993-2022 NVIDIA CORPORATION & AFFILIATES. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/inference/tensorrt/plugin/skip_groupnorm_act_op_plugin.h" #include <cub/cub.cuh> #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/common/layout.h" #include "paddle/phi/kernels/funcs/math_function.h" namespace paddle { namespace inference { namespace tensorrt { namespace plugin { nvinfer1::DimsExprs SkipGroupnormActPluginDynamic::getOutputDimensions( int output_index, const nvinfer1::DimsExprs *inputDims, int nb_inputs, nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT { return inputDims[0]; } bool SkipGroupnormActPluginDynamic::supportsFormatCombination( int pos, const nvinfer1::PluginTensorDesc *in_out, int nb_inputs, int nb_outputs) TRT_NOEXCEPT { PADDLE_ENFORCE_NOT_NULL( in_out, platform::errors::InvalidArgument( "The input of SkipGroupnormAct plugin shoule not be nullptr.")); PADDLE_ENFORCE_LT( pos, nb_inputs + nb_outputs, platform::errors::InvalidArgument("The pos(%d) should be less than the " "num(%d) of the input and the output.", pos, nb_inputs + nb_outputs)); const nvinfer1::PluginTensorDesc &in = in_out[pos]; if (pos == 0) { if (with_fp16_) { return ((in.type == nvinfer1::DataType::kHALF) && (in.format == nvinfer1::PluginFormat::kHWC8)); } else { PADDLE_THROW(platform::errors::Fatal( "SkipGroupnormAct TRT Plugin is fp16 only so far")); return (in.type == nvinfer1::DataType::kFLOAT) && (in.format == nvinfer1::TensorFormat::kLINEAR); } } const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1]; // output return in.type == prev.type && in.format == prev.format; } nvinfer1::DataType SkipGroupnormActPluginDynamic::getOutputDataType( int index, const nvinfer1::DataType *input_types, int nb_inputs) const TRT_NOEXCEPT { PADDLE_ENFORCE_EQ( index, 0, platform::errors::InvalidArgument( "The SkipGroupnormAct Plugin only has one input, so the " "index value should be 0, but get %d.", index)); PADDLE_ENFORCE_EQ((input_types[0] == nvinfer1::DataType::kFLOAT || input_types[0] == nvinfer1::DataType::kHALF), true, platform::errors::InvalidArgument( "The input type should be half or float")); return input_types[0]; } int SkipGroupnormActPluginDynamic::initialize() TRT_NOEXCEPT { return 0; } static inline int32_t divUp(int32_t m, int32_t n) { return (m + n - 1) / n; } static int32_t findMaxDivisor(int32_t n, int32_t maxAllowedDivisor) { int32_t maxDivisor = -1; for (int32_t i = 1; i <= std::sqrt(n); i++) { if (n % i == 0) { int32_t divisor1 = n / i; int32_t divisor2 = i; if (divisor1 > maxDivisor && divisor1 < maxAllowedDivisor) { maxDivisor = divisor1; } if (divisor2 > maxDivisor && divisor2 < maxAllowedDivisor) { maxDivisor = divisor2; } } } return maxDivisor; } static inline __device__ __host__ float sigmoid(float x) { return 1.F / (1.F + expf(-x)); } struct GroupSums { // Is it the 1st element of the group? int32_t flag; // The sum. float sum; // The sum of squares. 
float sumSq; }; struct GroupSumsOp { inline __device__ GroupSums operator()(GroupSums const &a, GroupSums const &b) { GroupSums dst; dst.sum = b.flag ? b.sum : (a.sum + b.sum); dst.sumSq = b.flag ? b.sumSq : (a.sumSq + b.sumSq); dst.flag = a.flag + b.flag; return dst; } }; template <int32_t tTHREADS_PER_BLOCK> __global__ void skipGroupNormNHWCSumKernel(GroupNormNHWCParams params) { // The object in charge of doing the sums for the different blocks. typedef cub::BlockScan<GroupSums, tTHREADS_PER_BLOCK> BlockScan; // Allocate shared memory for BlockScan. __shared__ typename BlockScan::TempStorage tempStorage; // Allocate shared memory for the groups. We could reduce the amount of shared // memory reserved. __shared__ float2 smem[tTHREADS_PER_BLOCK]; // The instance in the batch. int32_t ni = blockIdx.z; // The channel loaded by that thread (2 channels per thread for F16x2). int32_t ci = blockIdx.x * params.cPerBlock + threadIdx.x * 2; // The first activation loaded by that block. int32_t hwBegin = blockIdx.y * params.hwPerBlock; // The last activation loaded by that block. int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw); // The sums. float sum = 0.F; float sumSq = 0.F; // Iterate over the activations to compute the sums. for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) { // The offset. int64_t offset = static_cast<int64_t>(ni) * params.hwc + static_cast<int64_t>(hwi) * params.c + ci; // Fetch two channels per thread. __half2 h2(0, 0); if (ci < params.c) { // W = 1, H = 1 int64_t offsetY = static_cast<int64_t>(ni) * params.c + ci; __half2 y = *reinterpret_cast<__half2 const *>(&params.srcY[offsetY]); h2 = *reinterpret_cast<__half2 const *>(&params.srcX[offset]); #if __CUDA_ARCH__ >= 530 h2 = __hadd2(h2, y); #else float2 out{}; out.x = __half2float(h2.x) + __half2float(y.x); out.y = __half2float(h2.y) + __half2float(y.y); h2 = __float22half2_rn(out); #endif // elementwise_add *reinterpret_cast<__half2 *>(&params.dst[offset]) = h2; } // Extract the two half values. float2 f2 = __half22float2(h2); // Update the sum. sum += f2.x + f2.y; // Update the sum of squares. sumSq += f2.x * f2.x + f2.y * f2.y; } // The group that thread works on and the channel in the group (modulus). int32_t gi = threadIdx.x * 2 / params.cPerGroup; int32_t cj = threadIdx.x * 2 - params.cPerGroup * gi; // The data for the summations. GroupSums inp{cj == 0 ? 1 : 0, sum, sumSq}; // Do the segmented scan. GroupSums out; BlockScan(tempStorage).InclusiveScan(inp, out, GroupSumsOp()); // Store the results for the groups in shared memory (to produce coalesced // stores later). if (cj == params.cPerGroup - 2 /* 2 channels per thread */) { smem[gi] = make_float2(out.sum, out.sumSq); } // Make sure the data is in shared memory. __syncthreads(); // The global group index. int32_t gj = blockIdx.x * params.groupsPerBlock + threadIdx.x; // Threads that have nothing left to do, exit. if (threadIdx.x >= params.groupsPerBlock || gj >= params.groups) { return; } // The first threads (those storing to global memory, load the values). float2 sums = smem[threadIdx.x]; // Store to global memory. atomicAdd(&params.redBuffer[(2 * ni + 0) * params.groups + gj], sums.x); atomicAdd(&params.redBuffer[(2 * ni + 1) * params.groups + gj], sums.y); } void skipGroupNormNHWCSum(GroupNormNHWCParams const &params, cudaStream_t stream) { // Make sure the values are as we expect. 
PADDLE_ENFORCE_EQ( params.c % params.cPerBlock, 0, platform::errors::InvalidArgument( "The groupNormNHWCSum of SkipGroupnormAct Plugin got wrong parameters" "params.c %% params.cPerBlock should be 0, but get %d.", params.c % params.cPerBlock)); PADDLE_ENFORCE_EQ( params.hw % params.hwPerBlock, 0, platform::errors::InvalidArgument( "The groupNormNHWCSum of SkipGroupnormAct Plugin got wrong parameters" "params.hw %% params.hwPerBlock should be 0, but get %d.", params.hw % params.hwPerBlock)); // Make sure a group does not span multiple blocks. PADDLE_ENFORCE_EQ( params.cPerBlock % params.cPerGroup, 0, platform::errors::InvalidArgument( "The groupNormNHWCSum of SkipGroupnormAct Plugin got wrong parameters" "params.cPerBlock %% params.cPerGroup should be 0, but get %d.", params.cPerBlock % params.cPerGroup)); dim3 grid; // The number of blocks to compute all the channels. grid.x = params.c / params.cPerBlock; // The number of blocks to compute all the activations in a given instance. grid.y = divUp(params.hw, params.hwPerBlock); // The number of instances. grid.z = params.n; switch (params.cPerBlock) { case 320: skipGroupNormNHWCSumKernel<160><<<grid, 160, 0, stream>>>(params); break; case 480: skipGroupNormNHWCSumKernel<256><<<grid, 256, 0, stream>>>(params); break; case 256: skipGroupNormNHWCSumKernel<128><<<grid, 128, 0, stream>>>(params); break; case 128: skipGroupNormNHWCSumKernel<64><<<grid, 64, 0, stream>>>(params); break; case 8: skipGroupNormNHWCSumKernel<4><<<grid, 4, 0, stream>>>(params); break; default: PADDLE_THROW(platform::errors::Fatal( "The function groupNormNHWCSum of SkipGroupnormAct TRT Plugin " "encounter error")); } } template <int32_t tTHREADS_PER_BLOCK> __global__ void skipGroupNormNHWCScaleKernel(GroupNormNHWCParams params) { // The instance in the batch. int32_t ni = blockIdx.z; // The channel loaded by that thread (2 channels per thread for F16x2). int32_t ci = blockIdx.x * params.cPerBlock + threadIdx.x * 2; // The group that thread works on and the channel in the group (modulus). int32_t gi = ci / params.cPerGroup; // Load the sum and sum of squares for the group. float sum = 0.F, sumSq = 0.F; if (gi < params.groups) { sum = params.redBuffer[(2 * ni + 0) * params.groups + gi]; sumSq = params.redBuffer[(2 * ni + 1) * params.groups + gi]; } // Load gamma/beta. float2 gammaF2, betaF2; if (ci < params.c) { gammaF2 = *reinterpret_cast<float2 const *>( reinterpret_cast<float const *>(params.gamma) + ci); betaF2 = *reinterpret_cast<float2 const *>( reinterpret_cast<float const *>(params.beta) + ci); } // Compute the mean. float mean = sum * params.invHWC; // Compute the variance. float var = sumSq * params.invHWC - (mean * mean); // Compute the inverse of the stddev. float invStdDev = rsqrtf(var + params.eps); // The first activation loaded by that block. int32_t hwBegin = blockIdx.y * params.hwPerBlock; // The last activation loaded by that block. int32_t hwEnd = min(hwBegin + params.hwPerBlock, params.hw); // Iterate over the activations to compute the sums. for (int32_t hwi = hwBegin; hwi < hwEnd; ++hwi) { // The src/dst offset. int64_t offset = (int64_t)ni * params.hwc + hwi * params.c + ci; // Fetch two channels per thread. __half2 h2(0, 0); if (ci < params.c) { h2 = *reinterpret_cast<__half2 const *>(&params.dst[offset]); } // Extract the two half values. float2 f2 = __half22float2(h2); // Normalize the channels. f2.x = (f2.x - mean) * invStdDev; f2.y = (f2.y - mean) * invStdDev; // Scale by gamma and add beta. 
f2.x = gammaF2.x * f2.x + betaF2.x; f2.y = gammaF2.y * f2.y + betaF2.y; // Apply Swish if needed. if (params.withSwish) { f2.x = f2.x * sigmoid(f2.x); f2.y = f2.y * sigmoid(f2.y); } // Store the scaled values. if (ci < params.c) { *reinterpret_cast<__half2 *>(&params.dst[offset]) = __float22half2_rn(f2); } } } void skipGroupNormNHWCScale(GroupNormNHWCParams const &params, cudaStream_t stream) { // Make sure the dimensions are aligned with what we expect. PADDLE_ENFORCE_EQ(params.c % params.cPerBlock, 0, platform::errors::InvalidArgument( "The groupNormNHWCScale of SkipGroupnormAct Plugin got " "wrong parameters" "params.c %% params.cPerBlock should be 0, but get %d.", params.c % params.cPerBlock)); // Make sure a group does not span multiple blocks. PADDLE_ENFORCE_EQ( params.cPerBlock % params.cPerGroup, 0, platform::errors::InvalidArgument( "The groupNormNHWCScale of SkipGroupnormAct Plugin got wrong " "parameters" "params.cPerBlock %% params.cPerGroup should be 0, but get %d.", params.cPerBlock % params.cPerGroup)); dim3 grid; // The number of blocks to compute all the channels. grid.x = params.c / params.cPerBlock; // The number of blocks to compute all the activations in a given instance. grid.y = divUp(params.hw, params.hwPerBlock); // The number of instances. grid.z = params.n; switch (params.cPerBlock) { case 320: skipGroupNormNHWCScaleKernel<160><<<grid, 160, 0, stream>>>(params); break; case 480: skipGroupNormNHWCScaleKernel<256><<<grid, 256, 0, stream>>>(params); break; case 256: skipGroupNormNHWCScaleKernel<128><<<grid, 128, 0, stream>>>(params); break; case 128: skipGroupNormNHWCScaleKernel<64><<<grid, 64, 0, stream>>>(params); break; case 8: skipGroupNormNHWCScaleKernel<4><<<grid, 4, 0, stream>>>(params); break; default: PADDLE_THROW(platform::errors::Fatal( "The function groupNormNHWCSum of SkipGroupnormAct TRT Plugin " "encounter error")); } } int SkipGroupnormActPluginDynamic::enqueue( const nvinfer1::PluginTensorDesc *input_desc, const nvinfer1::PluginTensorDesc *output_desc, const void *const *inputs, void *const *outputs, void *workspace, cudaStream_t stream) TRT_NOEXCEPT { auto input_type = input_desc[0].type; if (input_type == nvinfer1::DataType::kFLOAT) { VLOG(1) << "TRT Plugin DataType selected. SkipGroupnormAct-->fp32"; PADDLE_THROW(platform::errors::Fatal( "The SkipGroupnormAct TRT Plugin's only support fp16 input")); } else if (input_type == nvinfer1::DataType::kHALF) { VLOG(1) << "TRT Plugin DataType selected. 
SkipGroupnormAct-->fp16"; int32_t cPerBlock = 320; int32_t maxBlocksPerHW = 1024; switch (input_desc[0].dims.d[1]) { case 960: case 1920: cPerBlock = 480; break; case 512: case 256: cPerBlock = 256; break; case 128: cPerBlock = 128; break; default: cPerBlock = 320; } if (cPerBlock > input_desc[0].dims.d[1]) { cPerBlock = 8; } params_.withSwish = true; params_.dst = static_cast<half *>(outputs[0]); params_.srcX = static_cast<half const *>(inputs[0]); params_.srcY = static_cast<half const *>(inputs[1]); params_.gamma = scale_gpu_.get(); params_.beta = bias_gpu_.get(); params_.redBuffer = static_cast<float *>(workspace); params_.n = input_desc[0].dims.d[0]; params_.h = input_desc[0].dims.d[2]; params_.w = input_desc[0].dims.d[3]; params_.c = input_desc[0].dims.d[1]; params_.groups = groups_; params_.hw = params_.h * params_.w; const int32_t blocksPerHW = findMaxDivisor(params_.hw, maxBlocksPerHW); params_.hwPerBlock = divUp(params_.hw, blocksPerHW); params_.cPerBlock = cPerBlock; params_.cPerGroup = params_.c / params_.groups; params_.hwc = params_.hw * params_.c; params_.invHWC = 1.F / static_cast<float>(params_.hw * params_.cPerGroup); params_.groupsPerBlock = cPerBlock / params_.cPerGroup; params_.eps = eps_; cudaMemsetAsync(params_.redBuffer, 0, ws_, stream); skipGroupNormNHWCSum(params_, stream); skipGroupNormNHWCScale(params_, stream); } else { // input not fp16 PADDLE_THROW(platform::errors::Fatal( "The SkipGroupnormAct TRT Plugin's only support fp16 input")); } return cudaGetLastError() != cudaSuccess; } } // namespace plugin } // namespace tensorrt } // namespace inference } // namespace paddle
7dce3173431fe672e9778b5aa117ef54c450f5b1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <hipcub/hipcub.hpp> #include "adagrad_op.h" #include "caffe2/core/common_gpu.h" #include "caffe2/core/context_gpu.h" namespace caffe2 { __global__ void AdagradUpdate( int N, const float* w, const float* g, const float* h, float* nw, float* nh, float epsilon, float decay, const float* lr) { CUDA_1D_KERNEL_LOOP(i, N) { float gi = g[i]; float hi = nh[i] = decay * h[i] + gi * gi; nw[i] = w[i] + lr[0] * gi / (std::sqrt(hi) + epsilon); } } template <> void adagrad_update<CUDAContext>( int N, const float* w, const float* g, const float* h, float* nw, float* nh, float epsilon, float decay, const float* lr, CUDAContext* context) { hipLaunchKernelGGL(( AdagradUpdate), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context->cuda_stream(), N, w, g, h, nw, nh, epsilon, decay, lr); } template <typename SIndex> __global__ void SparseAdagradKernel( const size_t N, const size_t grad_slice_sz, const float epsilon, float *param, float *param_mom, const SIndex *indices, const float *grad, const float *lr) { const float LR = lr[0]; CUDA_1D_KERNEL_LOOP(i, N) { const size_t gradIdx = i; const SIndex index = indices[i / grad_slice_sz]; const size_t paramIdx = index * grad_slice_sz + (i % grad_slice_sz); const float mom_new = param_mom[paramIdx] + grad[gradIdx] * grad[gradIdx]; param_mom[paramIdx] = mom_new; param[paramIdx] += LR * grad[gradIdx] / (sqrt(mom_new) + epsilon); } } /** * Calculate RowwiseSparseAdagrad * M: gradients.dims[0] * N: gradients.size_from_dim(1) * grad: pointer to the gradients * param: pointer to weights * param_mom: pointer to the momentum * indices: keys */ template <typename SIndex> __global__ void RowWiseSparseAdagradKernel( const int M, const int N, const float epsilon, float* param, float* param_mom, const SIndex* indices, const float* grad, const float* lr) { typedef hipcub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ BlockReduce::TempStorage temp_storage; // in case gridDim is smaller than M for (int i = blockIdx.x; i < M; i += gridDim.x) { const SIndex index = indices[i]; float sum_squares = 0.0; __shared__ float row_sum_squares_avg; // in case N is bigger than block size which is 512 by default for (int j = threadIdx.x; j < N; j += blockDim.x) { const float x_ij = grad[i * N + j]; sum_squares += x_ij * x_ij; } float reduce_result = BlockReduce(temp_storage).Sum(sum_squares); if (threadIdx.x == 0) { row_sum_squares_avg = reduce_result / (float)N; param_mom[index] += row_sum_squares_avg; } __syncthreads(); // update param float step = lr[0] / (std::sqrt(param_mom[index]) + epsilon); for (int j = threadIdx.x; j < N; j += blockDim.x) { param[index * N + j] = param[index * N + j] + grad[i * N + j] * step; } } } template<> template<typename SIndex> bool SparseAdagradOp<float, CUDAContext>::DoRunWithType() { auto N = Input(GRAD).size(); auto grad_slice_sz = 
Input(GRAD).size_from_dim(Input(INDICES).ndim()); if (N == 0) { // empty grad, nothing to do here, not even launching the kernel return true; } hipLaunchKernelGGL(( SparseAdagradKernel<SIndex>), dim3(CAFFE_GET_BLOCKS(N)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), N, grad_slice_sz, epsilon_, Output(OUTPUT_PARAM)->template mutable_data<float>(), Output(OUTPUT_MOMENT_1)->template mutable_data<float>(), Input(INDICES).template data<SIndex>(), Input(GRAD).template data<float>(), Input(LR).template data<float>()); return true; } template <> template <typename SIndex> bool RowWiseSparseAdagradOp<float, CUDAContext>::DoRunWithType() { auto N = Input(GRAD).size(); if (N == 0) { // empty grad, nothing to do here, not even launching the kernel return true; } // size of the 1st dimension of the input gradient auto GRAD_M = Input(GRAD).dim32(0); auto GRAD_N = N / GRAD_M; // each thread block will handle multiple rows of the input and output hipLaunchKernelGGL(( RowWiseSparseAdagradKernel), dim3(min(GRAD_M, CAFFE_MAXIMUM_NUM_BLOCKS)), dim3(CAFFE_CUDA_NUM_THREADS), 0, context_.cuda_stream(), GRAD_M, GRAD_N, epsilon_, Output(OUTPUT_PARAM)->template mutable_data<float>(), Output(OUTPUT_MOMENT_1)->template mutable_data<float>(), Input(INDICES).template data<SIndex>(), Input(GRAD).template data<float>(), Input(LR).template data<float>()); return true; } REGISTER_CUDA_OPERATOR(Adagrad, AdagradOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(SparseAdagrad, SparseAdagradOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( RowWiseSparseAdagrad, RowWiseSparseAdagradOp<float, CUDAContext>); }
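The scatter pattern used by SparseAdagradKernel above, written as a plain host loop for reference: gradient element i belongs to slice i / grad_slice_sz, and that slice updates parameter row indices[i / grad_slice_sz]. The function name and the scalar lr parameter are illustrative (the kernel reads lr[0] from device memory).

#include <cmath>
#include <cstddef>

// Host reference (sketch) of SparseAdagradKernel's index mapping and update rule.
template <typename SIndex>
void sparse_adagrad_ref(size_t n, size_t grad_slice_sz, float epsilon,
                        float *param, float *param_mom,
                        const SIndex *indices, const float *grad, float lr) {
  for (size_t i = 0; i < n; ++i) {
    // Row of the parameter table touched by this gradient element.
    const SIndex row = indices[i / grad_slice_sz];
    const size_t param_idx =
        static_cast<size_t>(row) * grad_slice_sz + (i % grad_slice_sz);
    const float mom_new = param_mom[param_idx] + grad[i] * grad[i];
    param_mom[param_idx] = mom_new;
    param[param_idx] += lr * grad[i] / (std::sqrt(mom_new) + epsilon);
  }
}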
7dce3173431fe672e9778b5aa117ef54c450f5b1.cu
/** * Copyright (c) 2016-present, Facebook, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cub/block/block_reduce.cuh> #include "adagrad_op.h" #include "caffe2/core/common_gpu.h" #include "caffe2/core/context_gpu.h" namespace caffe2 { __global__ void AdagradUpdate( int N, const float* w, const float* g, const float* h, float* nw, float* nh, float epsilon, float decay, const float* lr) { CUDA_1D_KERNEL_LOOP(i, N) { float gi = g[i]; float hi = nh[i] = decay * h[i] + gi * gi; nw[i] = w[i] + lr[0] * gi / (std::sqrt(hi) + epsilon); } } template <> void adagrad_update<CUDAContext>( int N, const float* w, const float* g, const float* h, float* nw, float* nh, float epsilon, float decay, const float* lr, CUDAContext* context) { AdagradUpdate<<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context->cuda_stream()>>>(N, w, g, h, nw, nh, epsilon, decay, lr); } template <typename SIndex> __global__ void SparseAdagradKernel( const size_t N, const size_t grad_slice_sz, const float epsilon, float *param, float *param_mom, const SIndex *indices, const float *grad, const float *lr) { const float LR = lr[0]; CUDA_1D_KERNEL_LOOP(i, N) { const size_t gradIdx = i; const SIndex index = indices[i / grad_slice_sz]; const size_t paramIdx = index * grad_slice_sz + (i % grad_slice_sz); const float mom_new = param_mom[paramIdx] + grad[gradIdx] * grad[gradIdx]; param_mom[paramIdx] = mom_new; param[paramIdx] += LR * grad[gradIdx] / (sqrt(mom_new) + epsilon); } } /** * Calculate RowwiseSparseAdagrad * M: gradients.dims[0] * N: gradients.size_from_dim(1) * grad: pointer to the gradients * param: pointer to weights * param_mom: pointer to the momentum * indices: keys */ template <typename SIndex> __global__ void RowWiseSparseAdagradKernel( const int M, const int N, const float epsilon, float* param, float* param_mom, const SIndex* indices, const float* grad, const float* lr) { typedef cub::BlockReduce<float, CAFFE_CUDA_NUM_THREADS> BlockReduce; __shared__ BlockReduce::TempStorage temp_storage; // in case gridDim is smaller than M for (int i = blockIdx.x; i < M; i += gridDim.x) { const SIndex index = indices[i]; float sum_squares = 0.0; __shared__ float row_sum_squares_avg; // in case N is bigger than block size which is 512 by default for (int j = threadIdx.x; j < N; j += blockDim.x) { const float x_ij = grad[i * N + j]; sum_squares += x_ij * x_ij; } float reduce_result = BlockReduce(temp_storage).Sum(sum_squares); if (threadIdx.x == 0) { row_sum_squares_avg = reduce_result / (float)N; param_mom[index] += row_sum_squares_avg; } __syncthreads(); // update param float step = lr[0] / (std::sqrt(param_mom[index]) + epsilon); for (int j = threadIdx.x; j < N; j += blockDim.x) { param[index * N + j] = param[index * N + j] + grad[i * N + j] * step; } } } template<> template<typename SIndex> bool SparseAdagradOp<float, CUDAContext>::DoRunWithType() { auto N = Input(GRAD).size(); auto grad_slice_sz = Input(GRAD).size_from_dim(Input(INDICES).ndim()); if (N == 0) { // empty grad, nothing to do here, not even launching the kernel 
return true; } SparseAdagradKernel<SIndex><<< CAFFE_GET_BLOCKS(N), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( N, grad_slice_sz, epsilon_, Output(OUTPUT_PARAM)->template mutable_data<float>(), Output(OUTPUT_MOMENT_1)->template mutable_data<float>(), Input(INDICES).template data<SIndex>(), Input(GRAD).template data<float>(), Input(LR).template data<float>()); return true; } template <> template <typename SIndex> bool RowWiseSparseAdagradOp<float, CUDAContext>::DoRunWithType() { auto N = Input(GRAD).size(); if (N == 0) { // empty grad, nothing to do here, not even launching the kernel return true; } // size of the 1st dimension of the input gradient auto GRAD_M = Input(GRAD).dim32(0); auto GRAD_N = N / GRAD_M; // each thread block will handle multiple rows of the input and output RowWiseSparseAdagradKernel<<< min(GRAD_M, CAFFE_MAXIMUM_NUM_BLOCKS), CAFFE_CUDA_NUM_THREADS, 0, context_.cuda_stream()>>>( GRAD_M, GRAD_N, epsilon_, Output(OUTPUT_PARAM)->template mutable_data<float>(), Output(OUTPUT_MOMENT_1)->template mutable_data<float>(), Input(INDICES).template data<SIndex>(), Input(GRAD).template data<float>(), Input(LR).template data<float>()); return true; } REGISTER_CUDA_OPERATOR(Adagrad, AdagradOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR(SparseAdagrad, SparseAdagradOp<float, CUDAContext>); REGISTER_CUDA_OPERATOR( RowWiseSparseAdagrad, RowWiseSparseAdagradOp<float, CUDAContext>); }
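For the row-wise variant above, each indexed parameter row shares a single accumulator that is updated with the mean of the squared gradients of that row. A host sketch of the same rule follows; the name is illustrative, lr is taken as a scalar, and the step is added rather than subtracted, matching the kernel.

#include <cmath>

// Host reference (sketch) of RowWiseSparseAdagradKernel: M gradient rows of width N,
// one shared Adagrad accumulator per indexed parameter row.
template <typename SIndex>
void rowwise_sparse_adagrad_ref(int m, int n, float epsilon,
                                float *param, float *param_mom,
                                const SIndex *indices, const float *grad, float lr) {
  for (int i = 0; i < m; ++i) {
    const SIndex row = indices[i];
    float sum_squares = 0.f;
    for (int j = 0; j < n; ++j) {
      sum_squares += grad[i * n + j] * grad[i * n + j];
    }
    param_mom[row] += sum_squares / static_cast<float>(n);  // mean squared gradient
    const float step = lr / (std::sqrt(param_mom[row]) + epsilon);
    for (int j = 0; j < n; ++j) {
      param[row * n + j] += grad[i * n + j] * step;
    }
  }
}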
1964469de19d403982eedc48385422f993687490.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/fill_diagonal_tensor_op.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using CUDADeviceContext = paddle::platform::CUDADeviceContext; template <typename T> __global__ void fill_diagonal_tensor_kernel(int64_t size, T *out_data, const T *fill_data, int64_t *strides, int64_t *matdim, int64_t offset, int64_t fill_dims0, int64_t fill_dims1) { int64_t i = blockIdx.x; auto sumoff = matdim[i] + offset; for (int64_t j = threadIdx.x; j < fill_dims1; j += blockDim.x) { auto fill_index = j * (strides[1] + strides[0]) + sumoff; if (fill_index < size) { out_data[fill_index] = fill_data[i * fill_dims1 + j]; } } } template <typename T> __global__ void fill_grad_kernel(int64_t size, T *out_data, int64_t *strides, int64_t *matdim, int64_t offset, int64_t fill_dims0, int64_t fill_dims1) { int64_t i = blockIdx.x; auto sumoff = matdim[i] + offset; for (int64_t j = threadIdx.x; j < fill_dims1; j += blockDim.x) { auto fill_index = j * (strides[1] + strides[0]) + sumoff; if (fill_index < size) { out_data[fill_index] = T(0); } } } template <typename T> class FillDiagonalTensorCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { #ifdef __HIPCC__ const int64_t kMaxBlockDim = 256; #else const int64_t kMaxBlockDim = 512; #endif auto *out = ctx.Output<framework::Tensor>("Out"); auto *srctensor = ctx.Input<framework::Tensor>("Y"); auto dim1 = ctx.Attr<int>("dim1"); auto dim2 = ctx.Attr<int>("dim2"); auto offset = ctx.Attr<int64_t>("offset"); auto *xin = ctx.Input<framework::Tensor>("X"); framework::TensorCopy(*xin, ctx.GetPlace(), out); T *out_data = out->mutable_data<T>(ctx.GetPlace()); const T *fill_data = srctensor->data<T>(); auto out_dims = out->dims(); auto matdims = srctensor->dims(); auto fill_dims = phi::flatten_to_2d(matdims, matdims.size() - 1); int64_t new_dims[2]; std::vector<int64_t> memory_block; memory_block.resize(2 + fill_dims[0]); int64_t *strides = &(memory_block[0]); int64_t *matdim = &(memory_block[2]); CalMatDims(out_dims, dim1, dim2, &offset, new_dims, strides, matdim); PADDLE_ENFORCE_EQ( new_dims[0], fill_dims[0], platform::errors::InvalidArgument("The dims should be %d x %d, but get " "%d x %d in fill tensor Y", new_dims[0], new_dims[1], fill_dims[0], fill_dims[1])); PADDLE_ENFORCE_EQ( new_dims[1], fill_dims[1], platform::errors::InvalidArgument("The dims should be %d x %d, but get " "%d x %d in fill tensor Y", new_dims[0], new_dims[1], fill_dims[0], fill_dims[1])); auto size = out->numel(); auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto stream = dev_ctx.stream(); Tensor tensor_tmp; int64_t *memory_block_cu = tensor_tmp.mutable_data<int64_t>({2 + fill_dims[0]}, ctx.GetPlace()); const auto gpu_place = ctx.GetPlace(); memory::Copy(gpu_place, memory_block_cu, 
platform::CPUPlace(), memory_block.data(), sizeof(int64_t) * (2 + fill_dims[0]), stream); int64_t *strides_cu = &memory_block_cu[0], *matdim_cu = &memory_block_cu[2]; auto kGridDim = new_dims[0]; auto kBlockDim = ::min(int64_t(new_dims[1]), kMaxBlockDim); hipLaunchKernelGGL(( fill_diagonal_tensor_kernel<T>), dim3(kGridDim), dim3(kBlockDim), 0, stream, size, out_data, fill_data, strides_cu, matdim_cu, offset, fill_dims[0], fill_dims[1]); } }; template <typename T> class FillDiagonalTensorGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { #ifdef __HIPCC__ const int64_t kMaxBlockDim = 256; #else const int64_t kMaxBlockDim = 512; #endif auto *dx = ctx.Output<framework::Tensor>(framework::GradVarName("X")); auto *dout = ctx.Input<framework::Tensor>(framework::GradVarName("Out")); auto dim1 = ctx.Attr<int>("dim1"); auto dim2 = ctx.Attr<int>("dim2"); auto offset = ctx.Attr<int64_t>("offset"); auto matrows = 1; if (dx) { auto *data = dx->mutable_data<T>(ctx.GetPlace()); auto dx_dims = dx->dims(); framework::TensorCopy(*dout, ctx.GetPlace(), dx); for (int i = 0; i < dx_dims.size(); i++) { if (i != dim1 && i != dim2) { matrows *= dx_dims[i]; } } int64_t new_dims[2]; std::vector<int64_t> memory_block; memory_block.resize(2 + matrows); int64_t *strides = &memory_block[0]; int64_t *matdim = &memory_block[2]; CalMatDims(dx_dims, dim1, dim2, &offset, new_dims, strides, matdim); auto size = dx->numel(); auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto stream = dev_ctx.stream(); Tensor tensor_tmp; int64_t *memory_block_cu = tensor_tmp.mutable_data<int64_t>({2 + matrows}, ctx.GetPlace()); const auto gpu_place = ctx.GetPlace(); memory::Copy(gpu_place, memory_block_cu, platform::CPUPlace(), memory_block.data(), sizeof(int64_t) * (2 + matrows), stream); int64_t *strides_cu = &memory_block_cu[0], *matdim_cu = &memory_block_cu[2]; auto kGridDim = new_dims[0]; auto kBlockDim = ::min(int64_t(new_dims[1]), kMaxBlockDim); hipLaunchKernelGGL(( fill_grad_kernel<T>), dim3(kGridDim), dim3(kBlockDim), 0, stream, size, data, strides_cu, matdim_cu, offset, new_dims[0], new_dims[1]); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( fill_diagonal_tensor, ops::FillDiagonalTensorCUDAKernel<float>, ops::FillDiagonalTensorCUDAKernel<double>, ops::FillDiagonalTensorCUDAKernel<plat::float16>, ops::FillDiagonalTensorCUDAKernel<int>, ops::FillDiagonalTensorCUDAKernel<int64_t>, ops::FillDiagonalTensorCUDAKernel<int8_t>, ops::FillDiagonalTensorCUDAKernel<uint8_t>, ops::FillDiagonalTensorCUDAKernel<paddle::platform::complex<float>>, ops::FillDiagonalTensorCUDAKernel<paddle::platform::complex<double>>, ops::FillDiagonalTensorCUDAKernel<bool>); REGISTER_OP_CUDA_KERNEL( fill_diagonal_tensor_grad, ops::FillDiagonalTensorGradCUDAKernel<float>, ops::FillDiagonalTensorGradCUDAKernel<double>, ops::FillDiagonalTensorGradCUDAKernel<int>, ops::FillDiagonalTensorGradCUDAKernel<int64_t>, ops::FillDiagonalTensorGradCUDAKernel<plat::float16>, ops::FillDiagonalTensorGradCUDAKernel<int8_t>, ops::FillDiagonalTensorGradCUDAKernel<uint8_t>, ops::FillDiagonalTensorGradCUDAKernel<paddle::platform::complex<float>>, ops::FillDiagonalTensorGradCUDAKernel<paddle::platform::complex<double>>, ops::FillDiagonalTensorGradCUDAKernel<bool>);
1964469de19d403982eedc48385422f993687490.cu
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "paddle/fluid/operators/fill_diagonal_tensor_op.h" namespace paddle { namespace operators { using Tensor = framework::Tensor; using CUDADeviceContext = paddle::platform::CUDADeviceContext; template <typename T> __global__ void fill_diagonal_tensor_kernel(int64_t size, T *out_data, const T *fill_data, int64_t *strides, int64_t *matdim, int64_t offset, int64_t fill_dims0, int64_t fill_dims1) { int64_t i = blockIdx.x; auto sumoff = matdim[i] + offset; for (int64_t j = threadIdx.x; j < fill_dims1; j += blockDim.x) { auto fill_index = j * (strides[1] + strides[0]) + sumoff; if (fill_index < size) { out_data[fill_index] = fill_data[i * fill_dims1 + j]; } } } template <typename T> __global__ void fill_grad_kernel(int64_t size, T *out_data, int64_t *strides, int64_t *matdim, int64_t offset, int64_t fill_dims0, int64_t fill_dims1) { int64_t i = blockIdx.x; auto sumoff = matdim[i] + offset; for (int64_t j = threadIdx.x; j < fill_dims1; j += blockDim.x) { auto fill_index = j * (strides[1] + strides[0]) + sumoff; if (fill_index < size) { out_data[fill_index] = T(0); } } } template <typename T> class FillDiagonalTensorCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { #ifdef __HIPCC__ const int64_t kMaxBlockDim = 256; #else const int64_t kMaxBlockDim = 512; #endif auto *out = ctx.Output<framework::Tensor>("Out"); auto *srctensor = ctx.Input<framework::Tensor>("Y"); auto dim1 = ctx.Attr<int>("dim1"); auto dim2 = ctx.Attr<int>("dim2"); auto offset = ctx.Attr<int64_t>("offset"); auto *xin = ctx.Input<framework::Tensor>("X"); framework::TensorCopy(*xin, ctx.GetPlace(), out); T *out_data = out->mutable_data<T>(ctx.GetPlace()); const T *fill_data = srctensor->data<T>(); auto out_dims = out->dims(); auto matdims = srctensor->dims(); auto fill_dims = phi::flatten_to_2d(matdims, matdims.size() - 1); int64_t new_dims[2]; std::vector<int64_t> memory_block; memory_block.resize(2 + fill_dims[0]); int64_t *strides = &(memory_block[0]); int64_t *matdim = &(memory_block[2]); CalMatDims(out_dims, dim1, dim2, &offset, new_dims, strides, matdim); PADDLE_ENFORCE_EQ( new_dims[0], fill_dims[0], platform::errors::InvalidArgument("The dims should be %d x %d, but get " "%d x %d in fill tensor Y", new_dims[0], new_dims[1], fill_dims[0], fill_dims[1])); PADDLE_ENFORCE_EQ( new_dims[1], fill_dims[1], platform::errors::InvalidArgument("The dims should be %d x %d, but get " "%d x %d in fill tensor Y", new_dims[0], new_dims[1], fill_dims[0], fill_dims[1])); auto size = out->numel(); auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto stream = dev_ctx.stream(); Tensor tensor_tmp; int64_t *memory_block_cu = tensor_tmp.mutable_data<int64_t>({2 + fill_dims[0]}, ctx.GetPlace()); const auto gpu_place = ctx.GetPlace(); memory::Copy(gpu_place, memory_block_cu, platform::CPUPlace(), memory_block.data(), sizeof(int64_t) * (2 + fill_dims[0]), stream); int64_t 
*strides_cu = &memory_block_cu[0], *matdim_cu = &memory_block_cu[2]; auto kGridDim = new_dims[0]; auto kBlockDim = std::min(int64_t(new_dims[1]), kMaxBlockDim); fill_diagonal_tensor_kernel<T><<<kGridDim, kBlockDim, 0, stream>>>( size, out_data, fill_data, strides_cu, matdim_cu, offset, fill_dims[0], fill_dims[1]); } }; template <typename T> class FillDiagonalTensorGradCUDAKernel : public framework::OpKernel<T> { public: void Compute(const framework::ExecutionContext &ctx) const override { #ifdef __HIPCC__ const int64_t kMaxBlockDim = 256; #else const int64_t kMaxBlockDim = 512; #endif auto *dx = ctx.Output<framework::Tensor>(framework::GradVarName("X")); auto *dout = ctx.Input<framework::Tensor>(framework::GradVarName("Out")); auto dim1 = ctx.Attr<int>("dim1"); auto dim2 = ctx.Attr<int>("dim2"); auto offset = ctx.Attr<int64_t>("offset"); auto matrows = 1; if (dx) { auto *data = dx->mutable_data<T>(ctx.GetPlace()); auto dx_dims = dx->dims(); framework::TensorCopy(*dout, ctx.GetPlace(), dx); for (int i = 0; i < dx_dims.size(); i++) { if (i != dim1 && i != dim2) { matrows *= dx_dims[i]; } } int64_t new_dims[2]; std::vector<int64_t> memory_block; memory_block.resize(2 + matrows); int64_t *strides = &memory_block[0]; int64_t *matdim = &memory_block[2]; CalMatDims(dx_dims, dim1, dim2, &offset, new_dims, strides, matdim); auto size = dx->numel(); auto &dev_ctx = ctx.template device_context<platform::CUDADeviceContext>(); auto stream = dev_ctx.stream(); Tensor tensor_tmp; int64_t *memory_block_cu = tensor_tmp.mutable_data<int64_t>({2 + matrows}, ctx.GetPlace()); const auto gpu_place = ctx.GetPlace(); memory::Copy(gpu_place, memory_block_cu, platform::CPUPlace(), memory_block.data(), sizeof(int64_t) * (2 + matrows), stream); int64_t *strides_cu = &memory_block_cu[0], *matdim_cu = &memory_block_cu[2]; auto kGridDim = new_dims[0]; auto kBlockDim = std::min(int64_t(new_dims[1]), kMaxBlockDim); fill_grad_kernel<T><<<kGridDim, kBlockDim, 0, stream>>>( size, data, strides_cu, matdim_cu, offset, new_dims[0], new_dims[1]); } } }; } // namespace operators } // namespace paddle namespace ops = paddle::operators; namespace plat = paddle::platform; REGISTER_OP_CUDA_KERNEL( fill_diagonal_tensor, ops::FillDiagonalTensorCUDAKernel<float>, ops::FillDiagonalTensorCUDAKernel<double>, ops::FillDiagonalTensorCUDAKernel<plat::float16>, ops::FillDiagonalTensorCUDAKernel<int>, ops::FillDiagonalTensorCUDAKernel<int64_t>, ops::FillDiagonalTensorCUDAKernel<int8_t>, ops::FillDiagonalTensorCUDAKernel<uint8_t>, ops::FillDiagonalTensorCUDAKernel<paddle::platform::complex<float>>, ops::FillDiagonalTensorCUDAKernel<paddle::platform::complex<double>>, ops::FillDiagonalTensorCUDAKernel<bool>); REGISTER_OP_CUDA_KERNEL( fill_diagonal_tensor_grad, ops::FillDiagonalTensorGradCUDAKernel<float>, ops::FillDiagonalTensorGradCUDAKernel<double>, ops::FillDiagonalTensorGradCUDAKernel<int>, ops::FillDiagonalTensorGradCUDAKernel<int64_t>, ops::FillDiagonalTensorGradCUDAKernel<plat::float16>, ops::FillDiagonalTensorGradCUDAKernel<int8_t>, ops::FillDiagonalTensorGradCUDAKernel<uint8_t>, ops::FillDiagonalTensorGradCUDAKernel<paddle::platform::complex<float>>, ops::FillDiagonalTensorGradCUDAKernel<paddle::platform::complex<double>>, ops::FillDiagonalTensorGradCUDAKernel<bool>);
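The addressing used by fill_diagonal_tensor_kernel in both versions above reduces to a strided write: element j of diagonal row i lands at matdim[i] + offset + j * (strides[0] + strides[1]) in the flattened output, with one block per row and threads striding over j. Below is a host sketch, assuming CalMatDims (from the included op header) has already filled strides and matdim; the function name is illustrative.

#include <cstdint>

// Host reference (sketch) of the diagonal fill addressing; the grad kernel uses the
// same indices but writes T(0) instead of values from the fill tensor.
template <typename T>
void fill_diagonal_ref(int64_t size, T *out, const T *fill,
                       const int64_t *strides, const int64_t *matdim,
                       int64_t offset, int64_t rows, int64_t cols) {
  for (int64_t i = 0; i < rows; ++i) {      // one CUDA block per row in the kernel
    const int64_t base = matdim[i] + offset;
    for (int64_t j = 0; j < cols; ++j) {    // threads stride over j in the kernel
      const int64_t idx = j * (strides[0] + strides[1]) + base;
      if (idx < size) {
        out[idx] = fill[i * cols + j];
      }
    }
  }
}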
e7b365af574d296bd2fbdad950faf2dfb17c0873.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "lbp.hpp" #include "opencv2/core/cuda/vec_traits.hpp" #include "opencv2/core/cuda/saturate_cast.hpp" namespace cv { namespace gpu { namespace cudev { namespace lbp { struct LBP { __host__ __device__ __forceinline__ LBP() {} __device__ __forceinline__ int operator() (const int* integral, int ty, int fh, int fw, int& shift) const { int anchors[9]; anchors[0] = integral[ty]; anchors[1] = integral[ty + fw]; anchors[0] -= anchors[1]; anchors[2] = integral[ty + fw * 2]; anchors[1] -= anchors[2]; anchors[2] -= integral[ty + fw * 3]; ty += fh; anchors[3] = integral[ty]; anchors[4] = integral[ty + fw]; anchors[3] -= anchors[4]; anchors[5] = integral[ty + fw * 2]; anchors[4] -= anchors[5]; anchors[5] -= integral[ty + fw * 3]; anchors[0] -= anchors[3]; anchors[1] -= anchors[4]; anchors[2] -= anchors[5]; // 0 - 2 contains s0 - s2 ty += fh; anchors[6] = integral[ty]; anchors[7] = integral[ty + fw]; anchors[6] -= anchors[7]; anchors[8] = integral[ty + fw * 2]; anchors[7] -= anchors[8]; anchors[8] -= integral[ty + fw * 3]; anchors[3] -= anchors[6]; anchors[4] -= anchors[7]; anchors[5] -= anchors[8]; // 3 - 5 contains s3 - s5 anchors[0] -= anchors[4]; anchors[1] -= anchors[4]; anchors[2] -= anchors[4]; anchors[3] -= anchors[4]; anchors[5] -= anchors[4]; int response = (~(anchors[0] >> 31)) & 4; response |= (~(anchors[1] >> 31)) & 2;; response |= (~(anchors[2] >> 31)) & 1; shift = (~(anchors[5] >> 31)) & 16; shift |= (~(anchors[3] >> 31)) & 1; ty += fh; anchors[0] = integral[ty]; anchors[1] = integral[ty + fw]; anchors[0] -= anchors[1]; anchors[2] = integral[ty + fw * 2]; anchors[1] -= anchors[2]; anchors[2] -= integral[ty + fw * 3]; anchors[6] -= anchors[0]; anchors[7] -= anchors[1]; anchors[8] -= anchors[2]; // 0 -2 contains s6 - s8 anchors[6] -= anchors[4]; anchors[7] -= anchors[4]; anchors[8] -= anchors[4]; shift |= (~(anchors[6] >> 31)) & 2; shift |= (~(anchors[7] >> 31)) & 4; shift |= (~(anchors[8] >> 31)) & 8; return response; } }; template<typename Pr> __global__ void disjoin(int4* candidates, int4* objects, unsigned int n, int groupThreshold, float grouping_eps, unsigned int* nclasses) { unsigned int tid = threadIdx.x; extern __shared__ int sbuff[]; int* labels = sbuff; int* rrects = sbuff + n; Pr predicate(grouping_eps); partition(candidates, n, labels, predicate); rrects[tid * 4 + 0] = 0; rrects[tid * 4 + 1] = 0; rrects[tid * 4 + 2] = 0; rrects[tid * 4 + 3] = 0; __syncthreads(); int cls = labels[tid]; Emulation::smem::atomicAdd((rrects + cls * 4 + 0), candidates[tid].x); Emulation::smem::atomicAdd((rrects + cls * 4 + 1), candidates[tid].y); Emulation::smem::atomicAdd((rrects + cls * 4 + 2), candidates[tid].z); Emulation::smem::atomicAdd((rrects + cls * 4 + 3), candidates[tid].w); __syncthreads(); labels[tid] = 0; __syncthreads(); Emulation::smem::atomicInc((unsigned int*)labels + cls, n); __syncthreads(); *nclasses = 0; int active = labels[tid]; if (active) { int* r1 = rrects + tid * 4; float s = 1.f / active; r1[0] = saturate_cast<int>(r1[0] * s); r1[1] = saturate_cast<int>(r1[1] * s); r1[2] = saturate_cast<int>(r1[2] * s); r1[3] = saturate_cast<int>(r1[3] * s); } __syncthreads(); if (active && active >= groupThreshold) { int* r1 = rrects + tid * 4; int4 r_out = make_int4(r1[0], r1[1], r1[2], r1[3]); int aidx = Emulation::smem::atomicInc(nclasses, n); objects[aidx] = r_out; } } void connectedConmonents(PtrStepSz<int4> candidates, int ncandidates, PtrStepSz<int4> objects, int groupThreshold, float grouping_eps, unsigned int* nclasses) { if 
(!ncandidates) return; int block = ncandidates; int smem = block * ( sizeof(int) + sizeof(int4) ); hipLaunchKernelGGL(( disjoin<InSameComponint>), dim3(1), dim3(block), smem, 0, candidates, objects, ncandidates, groupThreshold, grouping_eps, nclasses); cudaSafeCall( hipGetLastError() ); } struct Cascade { __host__ __device__ __forceinline__ Cascade(const Stage* _stages, int _nstages, const ClNode* _nodes, const float* _leaves, const int* _subsets, const uchar4* _features, int _subsetSize) : stages(_stages), nstages(_nstages), nodes(_nodes), leaves(_leaves), subsets(_subsets), features(_features), subsetSize(_subsetSize){} __device__ __forceinline__ bool operator() (int y, int x, int* integral, const int pitch) const { int current_node = 0; int current_leave = 0; for (int s = 0; s < nstages; ++s) { float sum = 0; Stage stage = stages[s]; for (int t = 0; t < stage.ntrees; t++) { ClNode node = nodes[current_node]; uchar4 feature = features[node.featureIdx]; int shift; int c = evaluator(integral, (y + feature.y) * pitch + x + feature.x, feature.w * pitch, feature.z, shift); int idx = (subsets[ current_node * subsetSize + c] & ( 1 << shift)) ? current_leave : current_leave + 1; sum += leaves[idx]; current_node += 1; current_leave += 2; } if (sum < stage.threshold) return false; } return true; } const Stage* stages; const int nstages; const ClNode* nodes; const float* leaves; const int* subsets; const uchar4* features; const int subsetSize; const LBP evaluator; }; // stepShift, scale, width_k, sum_prev => y = sum_prev + tid_k / width_k, x = tid_k - tid_k / width_k __global__ void lbp_cascade(const Cascade cascade, int frameW, int frameH, int windowW, int windowH, float scale, const float factor, const int total, int* integral, const int pitch, PtrStepSz<int4> objects, unsigned int* classified) { int ftid = blockIdx.x * blockDim.x + threadIdx.x; if (ftid >= total) return; int step = (scale <= 2.f); int windowsForLine = (__float2int_rn( __fdividef(frameW, scale)) - windowW) >> step; int stotal = windowsForLine * ( (__float2int_rn( __fdividef(frameH, scale)) - windowH) >> step); int wshift = 0; int scaleTid = ftid; while (scaleTid >= stotal) { scaleTid -= stotal; wshift += __float2int_rn(__fdividef(frameW, scale)) + 1; scale *= factor; step = (scale <= 2.f); windowsForLine = ( ((__float2int_rn(__fdividef(frameW, scale)) - windowW) >> step)); stotal = windowsForLine * ( (__float2int_rn(__fdividef(frameH, scale)) - windowH) >> step); } int y = __fdividef(scaleTid, windowsForLine); int x = scaleTid - y * windowsForLine; x <<= step; y <<= step; if (cascade(y, x + wshift, integral, pitch)) { if(x >= __float2int_rn(__fdividef(frameW, scale)) - windowW) return; int4 rect; rect.x = __float2int_rn(x * scale); rect.y = __float2int_rn(y * scale); rect.z = __float2int_rn(windowW * scale); rect.w = __float2int_rn(windowH * scale); int res = atomicInc(classified, (unsigned int)objects.cols); objects(0, res) = rect; } } void classifyPyramid(int frameW, int frameH, int windowW, int windowH, float initialScale, float factor, int workAmount, const PtrStepSzb& mstages, const int nstages, const PtrStepSzi& mnodes, const PtrStepSzf& mleaves, const PtrStepSzi& msubsets, const PtrStepSzb& mfeatures, const int subsetSize, PtrStepSz<int4> objects, unsigned int* classified, PtrStepSzi integral) { const int block = 128; int grid = divUp(workAmount, block); hipFuncSetCacheConfig(lbp_cascade, hipFuncCachePreferL1); Cascade cascade((Stage*)mstages.ptr(), nstages, (ClNode*)mnodes.ptr(), mleaves.ptr(), msubsets.ptr(), 
(uchar4*)mfeatures.ptr(), subsetSize); hipLaunchKernelGGL(( lbp_cascade), dim3(grid), dim3(block), 0, 0, cascade, frameW, frameH, windowW, windowH, initialScale, factor, workAmount, integral.ptr(), (int)integral.step / sizeof(int), objects, classified); } } }}} #endif /* CUDA_DISABLER */
e7b365af574d296bd2fbdad950faf2dfb17c0873.cu
/*M/////////////////////////////////////////////////////////////////////////////////////// // // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. // // By downloading, copying, installing or using the software you agree to this license. // If you do not agree to this license, do not download, install, // copy or use the software. // // // License Agreement // For Open Source Computer Vision Library // // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. // Copyright (C) 2009, Willow Garage Inc., all rights reserved. // Third party copyrights are property of their respective owners. // // Redistribution and use in source and binary forms, with or without modification, // are permitted provided that the following conditions are met: // // * Redistribution's of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // * Redistribution's in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // * The name of the copyright holders may not be used to endorse or promote products // derived from this software without specific prior written permission. // // This software is provided by the copyright holders and contributors "as is" and // any express or implied warranties, including, but not limited to, the implied // warranties of merchantability and fitness for a particular purpose are disclaimed. // In no event shall the Intel Corporation or contributors be liable for any direct, // indirect, incidental, special, exemplary, or consequential damages // (including, but not limited to, procurement of substitute goods or services; // loss of use, data, or profits; or business interruption) however caused // and on any theory of liability, whether in contract, strict liability, // or tort (including negligence or otherwise) arising in any way out of // the use of this software, even if advised of the possibility of such damage. 
// //M*/ #if !defined CUDA_DISABLER #include "lbp.hpp" #include "opencv2/core/cuda/vec_traits.hpp" #include "opencv2/core/cuda/saturate_cast.hpp" namespace cv { namespace gpu { namespace cudev { namespace lbp { struct LBP { __host__ __device__ __forceinline__ LBP() {} __device__ __forceinline__ int operator() (const int* integral, int ty, int fh, int fw, int& shift) const { int anchors[9]; anchors[0] = integral[ty]; anchors[1] = integral[ty + fw]; anchors[0] -= anchors[1]; anchors[2] = integral[ty + fw * 2]; anchors[1] -= anchors[2]; anchors[2] -= integral[ty + fw * 3]; ty += fh; anchors[3] = integral[ty]; anchors[4] = integral[ty + fw]; anchors[3] -= anchors[4]; anchors[5] = integral[ty + fw * 2]; anchors[4] -= anchors[5]; anchors[5] -= integral[ty + fw * 3]; anchors[0] -= anchors[3]; anchors[1] -= anchors[4]; anchors[2] -= anchors[5]; // 0 - 2 contains s0 - s2 ty += fh; anchors[6] = integral[ty]; anchors[7] = integral[ty + fw]; anchors[6] -= anchors[7]; anchors[8] = integral[ty + fw * 2]; anchors[7] -= anchors[8]; anchors[8] -= integral[ty + fw * 3]; anchors[3] -= anchors[6]; anchors[4] -= anchors[7]; anchors[5] -= anchors[8]; // 3 - 5 contains s3 - s5 anchors[0] -= anchors[4]; anchors[1] -= anchors[4]; anchors[2] -= anchors[4]; anchors[3] -= anchors[4]; anchors[5] -= anchors[4]; int response = (~(anchors[0] >> 31)) & 4; response |= (~(anchors[1] >> 31)) & 2;; response |= (~(anchors[2] >> 31)) & 1; shift = (~(anchors[5] >> 31)) & 16; shift |= (~(anchors[3] >> 31)) & 1; ty += fh; anchors[0] = integral[ty]; anchors[1] = integral[ty + fw]; anchors[0] -= anchors[1]; anchors[2] = integral[ty + fw * 2]; anchors[1] -= anchors[2]; anchors[2] -= integral[ty + fw * 3]; anchors[6] -= anchors[0]; anchors[7] -= anchors[1]; anchors[8] -= anchors[2]; // 0 -2 contains s6 - s8 anchors[6] -= anchors[4]; anchors[7] -= anchors[4]; anchors[8] -= anchors[4]; shift |= (~(anchors[6] >> 31)) & 2; shift |= (~(anchors[7] >> 31)) & 4; shift |= (~(anchors[8] >> 31)) & 8; return response; } }; template<typename Pr> __global__ void disjoin(int4* candidates, int4* objects, unsigned int n, int groupThreshold, float grouping_eps, unsigned int* nclasses) { unsigned int tid = threadIdx.x; extern __shared__ int sbuff[]; int* labels = sbuff; int* rrects = sbuff + n; Pr predicate(grouping_eps); partition(candidates, n, labels, predicate); rrects[tid * 4 + 0] = 0; rrects[tid * 4 + 1] = 0; rrects[tid * 4 + 2] = 0; rrects[tid * 4 + 3] = 0; __syncthreads(); int cls = labels[tid]; Emulation::smem::atomicAdd((rrects + cls * 4 + 0), candidates[tid].x); Emulation::smem::atomicAdd((rrects + cls * 4 + 1), candidates[tid].y); Emulation::smem::atomicAdd((rrects + cls * 4 + 2), candidates[tid].z); Emulation::smem::atomicAdd((rrects + cls * 4 + 3), candidates[tid].w); __syncthreads(); labels[tid] = 0; __syncthreads(); Emulation::smem::atomicInc((unsigned int*)labels + cls, n); __syncthreads(); *nclasses = 0; int active = labels[tid]; if (active) { int* r1 = rrects + tid * 4; float s = 1.f / active; r1[0] = saturate_cast<int>(r1[0] * s); r1[1] = saturate_cast<int>(r1[1] * s); r1[2] = saturate_cast<int>(r1[2] * s); r1[3] = saturate_cast<int>(r1[3] * s); } __syncthreads(); if (active && active >= groupThreshold) { int* r1 = rrects + tid * 4; int4 r_out = make_int4(r1[0], r1[1], r1[2], r1[3]); int aidx = Emulation::smem::atomicInc(nclasses, n); objects[aidx] = r_out; } } void connectedConmonents(PtrStepSz<int4> candidates, int ncandidates, PtrStepSz<int4> objects, int groupThreshold, float grouping_eps, unsigned int* nclasses) { if 
(!ncandidates) return; int block = ncandidates; int smem = block * ( sizeof(int) + sizeof(int4) ); disjoin<InSameComponint><<<1, block, smem>>>(candidates, objects, ncandidates, groupThreshold, grouping_eps, nclasses); cudaSafeCall( cudaGetLastError() ); } struct Cascade { __host__ __device__ __forceinline__ Cascade(const Stage* _stages, int _nstages, const ClNode* _nodes, const float* _leaves, const int* _subsets, const uchar4* _features, int _subsetSize) : stages(_stages), nstages(_nstages), nodes(_nodes), leaves(_leaves), subsets(_subsets), features(_features), subsetSize(_subsetSize){} __device__ __forceinline__ bool operator() (int y, int x, int* integral, const int pitch) const { int current_node = 0; int current_leave = 0; for (int s = 0; s < nstages; ++s) { float sum = 0; Stage stage = stages[s]; for (int t = 0; t < stage.ntrees; t++) { ClNode node = nodes[current_node]; uchar4 feature = features[node.featureIdx]; int shift; int c = evaluator(integral, (y + feature.y) * pitch + x + feature.x, feature.w * pitch, feature.z, shift); int idx = (subsets[ current_node * subsetSize + c] & ( 1 << shift)) ? current_leave : current_leave + 1; sum += leaves[idx]; current_node += 1; current_leave += 2; } if (sum < stage.threshold) return false; } return true; } const Stage* stages; const int nstages; const ClNode* nodes; const float* leaves; const int* subsets; const uchar4* features; const int subsetSize; const LBP evaluator; }; // stepShift, scale, width_k, sum_prev => y = sum_prev + tid_k / width_k, x = tid_k - tid_k / width_k __global__ void lbp_cascade(const Cascade cascade, int frameW, int frameH, int windowW, int windowH, float scale, const float factor, const int total, int* integral, const int pitch, PtrStepSz<int4> objects, unsigned int* classified) { int ftid = blockIdx.x * blockDim.x + threadIdx.x; if (ftid >= total) return; int step = (scale <= 2.f); int windowsForLine = (__float2int_rn( __fdividef(frameW, scale)) - windowW) >> step; int stotal = windowsForLine * ( (__float2int_rn( __fdividef(frameH, scale)) - windowH) >> step); int wshift = 0; int scaleTid = ftid; while (scaleTid >= stotal) { scaleTid -= stotal; wshift += __float2int_rn(__fdividef(frameW, scale)) + 1; scale *= factor; step = (scale <= 2.f); windowsForLine = ( ((__float2int_rn(__fdividef(frameW, scale)) - windowW) >> step)); stotal = windowsForLine * ( (__float2int_rn(__fdividef(frameH, scale)) - windowH) >> step); } int y = __fdividef(scaleTid, windowsForLine); int x = scaleTid - y * windowsForLine; x <<= step; y <<= step; if (cascade(y, x + wshift, integral, pitch)) { if(x >= __float2int_rn(__fdividef(frameW, scale)) - windowW) return; int4 rect; rect.x = __float2int_rn(x * scale); rect.y = __float2int_rn(y * scale); rect.z = __float2int_rn(windowW * scale); rect.w = __float2int_rn(windowH * scale); int res = atomicInc(classified, (unsigned int)objects.cols); objects(0, res) = rect; } } void classifyPyramid(int frameW, int frameH, int windowW, int windowH, float initialScale, float factor, int workAmount, const PtrStepSzb& mstages, const int nstages, const PtrStepSzi& mnodes, const PtrStepSzf& mleaves, const PtrStepSzi& msubsets, const PtrStepSzb& mfeatures, const int subsetSize, PtrStepSz<int4> objects, unsigned int* classified, PtrStepSzi integral) { const int block = 128; int grid = divUp(workAmount, block); cudaFuncSetCacheConfig(lbp_cascade, cudaFuncCachePreferL1); Cascade cascade((Stage*)mstages.ptr(), nstages, (ClNode*)mnodes.ptr(), mleaves.ptr(), msubsets.ptr(), (uchar4*)mfeatures.ptr(), subsetSize); 
lbp_cascade<<<grid, block>>>(cascade, frameW, frameH, windowW, windowH, initialScale, factor, workAmount, integral.ptr(), (int)integral.step / sizeof(int), objects, classified); } } }}} #endif /* CUDA_DISABLER */
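The disjoin<> kernel in both files above groups candidate rectangles: candidates are partitioned into classes by the InSameComponint predicate, each class's coordinates are averaged, and only classes with at least groupThreshold members are emitted as objects. Below is a host sketch of that grouping step, assuming the class labels have already been computed; RectI, the helper name, and the lround rounding are illustrative (the kernel averages shared-memory sums with saturate_cast).

#include <cmath>
#include <cstddef>
#include <vector>

struct RectI { int x, y, z, w; };  // stand-in for int4 on the host

// Host reference (sketch) of the rectangle grouping performed by disjoin<>.
static std::vector<RectI> group_rects_ref(const std::vector<RectI> &candidates,
                                          const std::vector<int> &labels,  // class id per candidate
                                          int num_classes, int group_threshold) {
  std::vector<RectI> sums(num_classes, RectI{0, 0, 0, 0});
  std::vector<int> counts(num_classes, 0);
  for (std::size_t i = 0; i < candidates.size(); ++i) {
    const int cls = labels[i];
    sums[cls].x += candidates[i].x;
    sums[cls].y += candidates[i].y;
    sums[cls].z += candidates[i].z;
    sums[cls].w += candidates[i].w;
    ++counts[cls];
  }
  std::vector<RectI> objects;
  for (int cls = 0; cls < num_classes; ++cls) {
    if (counts[cls] > 0 && counts[cls] >= group_threshold) {
      const float s = 1.f / static_cast<float>(counts[cls]);
      objects.push_back(RectI{static_cast<int>(std::lround(sums[cls].x * s)),
                              static_cast<int>(std::lround(sums[cls].y * s)),
                              static_cast<int>(std::lround(sums[cls].z * s)),
                              static_cast<int>(std::lround(sums[cls].w * s))});
    }
  }
  return objects;
}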
2a02ca62538086a955c1e67f420a44728887ab68.hip
// !!! This is a file automatically generated by hipify!!! #include <time.h> #include <math.h> #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <mpi.h> //device parameter// #define ndim 2 #define ip 2 #define kp 3 #define DPN 3 //device per note #define stepall 100 #define iprint 10 #define idata_3d 100000 //droplet parameter// #define nx 120 #define ny 120 #define nz 120 #define radd 30.0 //1 #define thick 5.0 #define tau_h 0.5 #define tau_l 0.05 #define tau_g 0.5 #define rho_l 1.0 #define rho_g 0.001 #define sigma 0.001 #define bo 100.0 //0 one bubble rising ,1 two bubble rising #define condition 0 //condition 1 #define distance_x 0.0 #define distance_z 10.0 #define radd_t 30.0 #define radd_b 30.0 //constant parameter// #define thita 10 #define dx 1.0 #define dt 1.0 #define q 19 __constant__ double eex[q]; __constant__ double eey[q]; __constant__ double eez[q]; __constant__ double wwt[q]; __constant__ int eet[q]; void parameter (double *beta,double *zeta,double *mobi,double *kappa,double *phic,double *gravity,double *ex_h,double *ey_h,double *ez_h,double *wt_h,int *et_h) { *zeta =(double)thick*dx; *beta =(double)12.0*sigma/(*zeta); *kappa=(double)(*beta)*(*zeta)*(*zeta)/8.0; *mobi =(double)0.02/(*beta); double omega=-cos(thita*M_PI/180.0); *phic =omega*pow(2.0*(*kappa)*(*beta),0.5); *gravity = bo*sigma/(rho_l-rho_g)/(2*radd)/(2*radd); //ex ex_h[ 0]= 0.0; ex_h[ 1]= 1.0; ex_h[ 2]=-1.0; ex_h[ 3]= 0.0; ex_h[ 4]= 0.0; ex_h[ 5]= 0.0; ex_h[ 6]= 0.0; ex_h[ 7]= 1.0; ex_h[ 8]=-1.0; ex_h[ 9]= 1.0; ex_h[10]=-1.0; ex_h[11]= 1.0; ex_h[12]=-1.0; ex_h[13]=-1.0; ex_h[14]= 1.0; ex_h[15]= 0.0; ex_h[16]= 0.0; ex_h[17]= 0.0; ex_h[18]= 0.0; //ey ey_h[ 0]= 0.0; ey_h[ 1]= 0.0; ey_h[ 2]= 0.0; ey_h[ 3]= 1.0; ey_h[ 4]=-1.0; ey_h[ 5]= 0.0; ey_h[ 6]= 0.0; ey_h[ 7]= 1.0; ey_h[ 8]=-1.0; ey_h[ 9]=-1.0; ey_h[10]= 1.0; ey_h[11]= 0.0; ey_h[12]= 0.0; ey_h[13]= 0.0; ey_h[14]= 0.0; ey_h[15]= 1.0; ey_h[16]=-1.0; ey_h[17]= 1.0; ey_h[18]=-1.0; //ez ez_h[ 0]= 0.0; ez_h[ 1]= 0.0; ez_h[ 2]= 0.0; ez_h[ 3]= 0.0; ez_h[ 4]= 0.0; ez_h[ 5]= 1.0; ez_h[ 6]=-1.0; ez_h[ 7]= 0.0; ez_h[ 8]= 0.0; ez_h[ 9]= 0.0; ez_h[10]= 0.0; ez_h[11]= 1.0; ez_h[12]=-1.0; ez_h[13]= 1.0; ez_h[14]=-1.0; ez_h[15]= 1.0; ez_h[16]=-1.0; ez_h[17]=-1.0; ez_h[18]= 1.0; //wt wt_h[ 0]=1.0/ 3.0; wt_h[ 1]=1.0/18.0; wt_h[ 2]=1.0/18.0; wt_h[ 3]=1.0/18.0; wt_h[ 4]=1.0/18.0; wt_h[ 5]=1.0/18.0; wt_h[ 6]=1.0/18.0; wt_h[ 7]=1.0/36.0; wt_h[ 8]=1.0/36.0; wt_h[ 9]=1.0/36.0; wt_h[10]=1.0/36.0; wt_h[11]=1.0/36.0; wt_h[12]=1.0/36.0; wt_h[13]=1.0/36.0; wt_h[14]=1.0/36.0; wt_h[15]=1.0/36.0; wt_h[16]=1.0/36.0; wt_h[17]=1.0/36.0; wt_h[18]=1.0/36.0; int l; for(l=0;l<q;l++) { et_h[l]=(nx/ip+4)*((ny+4)*(int)ez_h[l]+(int)ey_h[l])+(int)ex_h[l]; } } void initial_macro (double *c,double *m,double *b,double *p,double *u,double *v,double *w) { int i,j,k,index; double icent,jcent,kcent; for(i=0;i<nx;i++){ for(j=0;j<ny;j++){ for(k=0;k<nz;k++){ index=nx*(k*ny+j)+i; c[index]=0.0; m[index]=0.0; b[index]=0.0; p[index]=0.0; u[index]=0.0; v[index]=0.0; w[index]=0.0; }}} icent=(double)(nx-1.0)/2.0; jcent=(double)(ny-1.0)/2.0; kcent=(double)(nz-1.0)/2.0; if(condition==1){ double icent_r=icent+0.5*distance_x; double icent_l=icent-0.5*distance_x; double kcent_b=50; double kcent_t=kcent_b+thick+(radd_t+radd_b)+distance_z; int mid =0.5*(distance_z+thick)+50+radd_b; double raddd=radd+thick/2.0+1.0; for(i=0;i<nx;i++){ for(j=0;j<ny;j++){ for(k=0;k<mid;k++){ double rad=sqrt( (i-icent_l)*(i-icent_l)+(j-jcent)*(j-jcent)+(k-kcent_b)*(k-kcent_b)); index=nx*(k*ny+j)+i; 
c[index]=(double)0.5-(double)0.5*tanh(2.0*(radd_b-rad)/thick); }}} for(i=0;i<nx;i++){ for(j=0;j<ny;j++){ for(k=mid;k<nz;k++){ double rad=sqrt( (i-icent_r)*(i-icent_r)+(j-jcent)*(j-jcent)+(k-kcent_t)*(k-kcent_t)); index=nx*(k*ny+j)+i; c[index]=(double)0.5-(double)0.5*tanh(2.0*(radd_t-rad)/thick); }}} } else{ for(i=0;i<nx;i++){ for(j=0;j<ny;j++){ for(k=0;k<nz;k++){ double rad=sqrt( (i-icent)*(i-icent)+(j-jcent)*(j-jcent)+(k-kcent)*(k-kcent)); index=nx*(k*ny+j)+i; c[index]=(double)0.5-(double)0.5*tanh(2.0*(radd-rad)/thick); }}}} } void array_2D_do (double *phi,double *phi_do) { int i,j,k,index; int ii,jj,kk,iindex; int iside; int xd=nx/ip; jj=-1; kk=0; iside=0; for(k=0;k<nz;k++){ for(j=0;j<ny;j++){ for(i=0;i<nx;i++){ index=nx*(k*ny+j)+i; ii=i%xd; if(ii == 0){ jj=jj+1; } if(jj == ny){ kk=kk+1; jj=0; } if(kk == nz){ iside=iside+1; kk=0; } ii=ii+xd*iside; iindex=nx*(kk*ny+jj)+ii; phi_do[index]=phi[iindex]; } } } } void array_2D_undo (double *phi,double *phi_do) { int i,j,k,index; int ii,jj,kk,iindex; int iside; int xd=nx/ip; jj=-1; kk=0; iside=0; for(k=0;k<nz;k++){ for(j=0;j<ny;j++){ for(i=0;i<nx;i++){ index=nx*(k*ny+j)+i; ii=i%xd; if(ii == 0){ jj=jj+1; } if(jj == ny){ kk=kk+1; jj=0; } if(kk == nz){ iside=iside+1; kk=0; } ii=ii+xd*iside; iindex=nx*(kk*ny+jj)+ii; phi[iindex]=phi_do[index]; } } } } void array_1D_undo (double *phi,double *phi_do) { int i,k,index; int ii,kk,iindex; int iside; int xd=nx/ip; kk=0; iside=0; for(k=0;k<nz;k++){ for(i=0;i<nx;i++){ index=nx*k+i; ii=i%xd; if(ii == 0){ kk=kk+1; } if(kk == nz){ iside=iside+1; kk=0; } ii=ii+xd*iside; iindex=nx*kk+ii; phi[iindex]=phi_do[index]; } } } __device__ int index_3d (int i, int j,int k) { int ans=(nx/ip+4)*((ny+4)*k+j)+i; return ans; } __device__ int index_3d_x (int i, int j,int k) { int ans=(ny+4)*((nz/kp+4)*i+k)+j; return ans; } __device__ int index_4d (int i, int j,int k,int l) { int ans=(nx/ip+4)*((ny+4)*((nz/kp+4)*l+k)+j)+i; return ans; } __global__ void array_do( double *phi_d, double *phi) { int ii=threadIdx.x; int jj= blockIdx.x%ny; int kk= blockIdx.x/ny; int iindex =(nx/ip)*(kk*ny+jj)+ii; int i=threadIdx.x+2; int j= blockIdx.x%ny+2; int k= blockIdx.x/ny+2; int index=index_3d(i,j,k); phi[index]=phi_d[iindex]; } __global__ void array_undo( double *phi_d, double *phi) { int ii=threadIdx.x; int jj= blockIdx.x%ny; int kk= blockIdx.x/ny; int iindex =(nx/ip)*(kk*ny+jj)+ii; int i=threadIdx.x+2; int j= blockIdx.x%ny+2; int k= blockIdx.x/ny+2; int index=index_3d(i,j,k); phi_d[iindex]=phi[index]; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // boundary // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void boundary_ym( double *phi) { int i= blockIdx.x; int k=threadIdx.x; int distance=(ny)*(nx/ip+4); for(int j=0;j<2;j++){ int index =index_3d(i,j,k); phi[index]=phi[index+distance]; } for(int j=ny+2;j<ny+4;j++){ int index =index_3d(i,j,k); phi[index]=phi[index-distance]; } } __global__ void boundary_zm1( double *phi, double *t_phi ) { int k,index,index_t; int i= blockIdx.x; int j=threadIdx.x; k=2; index =index_3d(i,j,k); index_t =index_3d(i,j,1); t_phi[index_t]=phi[index]; k=nz/kp+1; index =index_3d(i,j,k); index_t =index_3d(i,j,2); t_phi[index_t]=phi[index]; } __global__ void boundary_xm1( double *phi, double *t_phi ) { int i,index,index_t; int j= blockIdx.x; int k=threadIdx.x; i=2; index =index_3d(i,j,k); index_t =index_3d_x(1,j,k); t_phi[index_t]=phi[index]; i=nx/ip+1; index 
=index_3d(i,j,k); index_t =index_3d_x(2,j,k); t_phi[index_t]=phi[index]; } __global__ void boundary_zm1_undo( double *phi, double *t_phi) { int k,index,index_t; int i= blockIdx.x; int j=threadIdx.x; k=1; index =index_3d(i,j,k); index_t =index_3d(i,j,0); phi[index]=t_phi[index_t]; k=nz/kp+2; index =index_3d(i,j,k); index_t =index_3d(i,j,3); phi[index]=t_phi[index_t]; } __global__ void boundary_xm1_undo( double *phi, double *t_phi) { int i,index,index_t; int j= blockIdx.x; int k=threadIdx.x; i=1; index =index_3d(i,j,k); index_t =index_3d_x(0,j,k); phi[index]=t_phi[index_t]; i=nx/ip+2; index =index_3d(i,j,k); index_t =index_3d_x(3,j,k); phi[index]=t_phi[index_t]; } __global__ void boundary_zm2( double *phi, double *t_phi ) { int k,l,index,index_t; int i= blockIdx.x; int j=threadIdx.x; for(l=0;l<2;l++){ k=2; index =index_3d(i,j,k+l); index_t =index_3d(i,j,2+l); t_phi[index_t]=phi[index]; k=nz/kp; index =index_3d(i,j,k+l); index_t =index_3d(i,j,4+l); t_phi[index_t]=phi[index]; } } __global__ void boundary_xm2( double *phi, double *t_phi ) { int i,l,index,index_t; int j= blockIdx.x; int k=threadIdx.x; for(l=0;l<2;l++){ i=2; index =index_3d(i+l,j,k); index_t =index_3d_x(2+l,j,k); t_phi[index_t]=phi[index]; i=nx/ip; index =index_3d(i+l,j,k); index_t =index_3d_x(4+l,j,k); t_phi[index_t]=phi[index]; } } __global__ void boundary_zm2_undo( double *phi, double *t_phi) { int k,l,index,index_t; int i= blockIdx.x; int j=threadIdx.x; for(l=0;l<2;l++){ k=0; index =index_3d(i,j,k+l); index_t =index_3d(i,j,0+l); phi[index]=t_phi[index_t]; k=nz/kp+2; index =index_3d(i,j,k+l); index_t =index_3d(i,j,6+l); phi[index]=t_phi[index_t]; } } __global__ void boundary_xm2_undo( double *phi, double *t_phi) { int i,l,index,index_t; int j= blockIdx.x; int k=threadIdx.x; for(l=0;l<2;l++){ i=0; index =index_3d(i+l,j,k); index_t =index_3d_x(0+l,j,k); phi[index]=t_phi[index_t]; i=nx/ip+2; index =index_3d(i+l,j,k); index_t =index_3d_x(6+l,j,k); phi[index]=t_phi[index_t]; } } __global__ void boundary_yd_bc( double *g,double *h) { int i= blockIdx.x+2; int j,index_l; int zd=nz/kp; int l=threadIdx.x; int distance=(ny)*(nx/ip+4); for(int k=2;k<zd+2;k=k+zd-1){ j=1; index_l=index_4d(i,j,k,l); g[index_l]=g[index_l+distance]; h[index_l]=h[index_l+distance]; j=ny+2; index_l=index_4d(i,j,k,l); g[index_l]=g[index_l-distance]; h[index_l]=h[index_l-distance]; } } __global__ void boundary_yd_bc_x( double *g,double *h) { int k= blockIdx.x+2; int j,index_l; int xd=nx/ip; int l=threadIdx.x; int distance=(ny)*(nx/ip+4); for(int i=2;i<xd+2;i=i+xd-1){ j=1; index_l=index_4d(i,j,k,l); g[index_l]=g[index_l+distance]; h[index_l]=h[index_l+distance]; j=ny+2; index_l=index_4d(i,j,k,l); g[index_l]=g[index_l-distance]; h[index_l]=h[index_l-distance]; } } __global__ void boundary_zd( double *phi,double *t_phi ) { int i= blockIdx.x+1; int j=threadIdx.x+1; int k,index_l,index_l_t; int xd=nx/ip; int l_top[5]={5,11,13,15,18}; int l_bot[5]={6,12,14,16,17}; for(int l=0;l<5;l++){ k=2; index_l =index_4d(i,j,k,l_bot[l]); index_l_t=((xd+4)*(1*(ny+4)+j)+i)*5+l;//k=1;q=5 t_phi[index_l_t]=phi[index_l]; k=nz/kp+1; index_l =index_4d(i,j,k,l_top[l]); index_l_t=((xd+4)*(2*(ny+4)+j)+i)*5+l;//k=2;q=5 t_phi[index_l_t]=phi[index_l]; } } __global__ void boundary_xd( double *phi,double *t_phi ) { int j= blockIdx.x+1; int k=threadIdx.x+1; int i,index_l,index_l_t; int zd=nz/kp; int l_right[5]={1,7, 9,11,14}; int l_left[5] ={2,8,10,12,13}; for(int l=0;l<5;l++){ i=2; index_l =index_4d(i,j,k,l_left[l]); index_l_t=((ny+4)*(1*(zd+4)+k)+j)*5+l;//k=1;q=5 t_phi[index_l_t]=phi[index_l]; 
i=nx/ip+1; index_l =index_4d(i,j,k,l_right[l]); index_l_t=((ny+4)*(2*(zd+4)+k)+j)*5+l;//k=2;q=5 t_phi[index_l_t]=phi[index_l]; } } __global__ void boundary_zd_undo( double *phi,double *t_phi) { int i= blockIdx.x+1; int j=threadIdx.x+1; int k,index_l,index_l_t; int xd=nx/ip; int l_top[5]={5,11,13,15,18}; int l_bot[5]={6,12,14,16,17}; for(int l=0;l<5;l++){ k=1; index_l =index_4d(i,j,k,l_top[l]); index_l_t=((xd+4)*(0*(ny+4)+j)+i)*5+l; phi[index_l]=t_phi[index_l_t]; k=nz/kp+2; index_l =index_4d(i,j,k,l_bot[l]); index_l_t=((xd+4)*(3*(ny+4)+j)+i)*5+l; phi[index_l]=t_phi[index_l_t]; } } __global__ void boundary_xd_undo( double *phi,double *t_phi) { int j= blockIdx.x+1; int k=threadIdx.x+1; int i,index_l,index_l_t; int zd=nz/kp; int l_right[5]={1,7, 9,11,14}; int l_left[5] ={2,8,10,12,13}; for(int l=0;l<5;l++){ i=1; index_l =index_4d(i,j,k,l_right[l]); index_l_t=((ny+4)*(0*(zd+4)+k)+j)*5+l; phi[index_l]=t_phi[index_l_t]; i=nx/ip+2; index_l =index_4d(i,j,k,l_left[l]); index_l_t=((ny+4)*(3*(zd+4)+k)+j)*5+l; phi[index_l]=t_phi[index_l_t]; } } __global__ void boundary_yd_in( double *g,double *h) { int i= blockIdx.x+3; int k=threadIdx.x+3; int j,index_l; int distance=(ny)*(nx/ip+4); for(int l=0;l<q;l++){ j=1; index_l=index_4d(i,j,k,l); g[index_l]=g[index_l+distance]; h[index_l]=h[index_l+distance]; j=ny+2; index_l=index_4d(i,j,k,l); g[index_l]=g[index_l-distance]; h[index_l]=h[index_l-distance]; } } __global__ void boundary_ym_bc( double *phi) { int i =threadIdx.x+2; int zd=nz/kp; int kk[4]= {2,3,zd,zd+1}; int distance=(ny)*(nx/ip+4); for (int t=0;t<4;t++){ int k=kk[t]; for (int j=0;j<2;j++){ int index=index_3d(i,j,k); phi[index]=phi[index+distance]; } for (int j=ny+2;j<ny+4;j++){ int index=index_3d(i,j,k); phi[index]=phi[index-distance]; }} } __global__ void boundary_ym_bc_x( double *phi) { int k =threadIdx.x+2; int xd=nx/ip; int ii[4]= {2,3,xd,xd+1}; int distance=(ny)*(xd+4); for (int t=0;t<4;t++){ int i=ii[t]; for (int j=0;j<2;j++){ int index=index_3d(i,j,k); phi[index]=phi[index+distance]; } for (int j=ny+2;j<ny+4;j++){ int index=index_3d(i,j,k); phi[index]=phi[index-distance]; }} } __global__ void boundary_ym_in( double *phi) { int i= blockIdx.x+4; int k=threadIdx.x+4; int distance=(ny)*(nx/ip+4); for(int j=0;j<2;j++){ int index=index_3d(i,j,k); phi[index]=phi[index+distance]; } for(int j=ny+2;j<ny+4;j++){ int index=index_3d(i,j,k); phi[index]=phi[index-distance]; } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // gradient // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void gradient_cen ( double *gra_phi, double *phi) { const int i=threadIdx.x+2; const int j= blockIdx.x%ny+2; const int k= blockIdx.x/ny+2; const int index=index_3d(i,j,k); const double cs2_inv=3.0; double temp =0.0; double temp_x=0.0; double temp_y=0.0; double temp_z=0.0; for(int l=1;l<q;l=l+2){ double ex=eex[l]; double ey=eey[l]; double ez=eez[l]; double wt=wwt[l]; int et=eet[l]; temp=2.0*wt*(phi[index+et]-phi[index-et]); temp_x=ex*temp+temp_x; temp_y=ey*temp+temp_y; temp_z=ez*temp+temp_z; } gra_phi[index_4d(i,j,k,0)]=temp_x*0.5*cs2_inv; gra_phi[index_4d(i,j,k,1)]=temp_y*0.5*cs2_inv; gra_phi[index_4d(i,j,k,2)]=temp_z*0.5*cs2_inv; } __device__ double grad_phie_c(double *phi,int index,int et) { double ans; ans=(phi[index+et]-phi[index-et])*0.5; return ans; } __device__ double grad_phie_m(double *phi,int index,int et) { double ans; 
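// grad_phie_m: asymmetric four-point difference of phi along lattice direction et
// (second-order accurate, in contrast to the plain central difference in grad_phie_c above)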
 ans=(-phi[index+2*et]+5.0*phi[index+et]-3.0*phi[index]-phi[index-et])*0.25;
 return ans;
}
// Lattice-weighted central difference approximating d(phi)/dx; the loop visits one member of each
// +/- direction pair and the factor 2 accounts for the skipped opposite direction.
__device__ double gradient_cen_x ( double *phi, int index )
{
 double ans=0.0;
 double cs2_inv=3.0;
 #pragma unroll 9
 for(int l=1;l<q;l=l+2){
  double ex=eex[l];
  double wt=wwt[l];
  int et=eet[l];
  ans=ex*2.0*wt*(phi[index+et]-phi[index-et])+ans;
 }
 ans=ans*0.5*cs2_inv;
 return ans;
}
// Same central difference for d(phi)/dy.
__device__ double gradient_cen_y ( double *phi, int index )
{
 double ans=0.0;
 double cs2_inv=3.0;
 #pragma unroll 9
 for(int l=1;l<q;l=l+2){
  double ey=eey[l];
  double wt=wwt[l];
  int et=eet[l];
  ans=ey*2.0*wt*(phi[index+et]-phi[index-et])+ans;
 }
 ans=ans*0.5*cs2_inv;
 return ans;
}
// Same central difference for d(phi)/dz.
__device__ double gradient_cen_z ( double *phi, int index )
{
 double ans=0.0;
 double cs2_inv=3.0;
 #pragma unroll 9
 for(int l=1;l<q;l=l+2){
  double ez=eez[l];
  double wt=wwt[l];
  int et=eet[l];
  ans=ez*2.0*wt*(phi[index+et]-phi[index-et])+ans;
 }
 ans=ans*0.5*cs2_inv;
 return ans;
}
// Wider "mixed" difference for d(phi)/dx, using first and second neighbours along each direction.
__device__ double gradient_mix_x ( double *phi, int index )
{
 double ans=0.0;
 double cs2_inv=3.0;
 #pragma unroll 9
 for(int l=1;l<q;l=l+2){
  double ex=eex[l];
  double wt=wwt[l];
  int et=eet[l];
  ans=ex*wt*(-phi[index+2*et]+6.0*phi[index+et]-6.0*phi[index-et]+phi[index-2*et])+ans;
 }
 ans=ans*0.25*cs2_inv;
 return ans;
}
// Mixed difference for d(phi)/dy.
__device__ double gradient_mix_y ( double *phi, int index )
{
 double ans=0.0;
 double cs2_inv=3.0;
 #pragma unroll 9
 for(int l=1;l<q;l=l+2){
  double ey=eey[l];
  double wt=wwt[l];
  int et=eet[l];
  ans=ey*wt*(-phi[index+2*et]+6.0*phi[index+et]-6.0*phi[index-et]+phi[index-2*et])+ans;
 }
 ans=ans*0.25*cs2_inv;
 return ans;
}
// Mixed difference for d(phi)/dz.
__device__ double gradient_mix_z ( double *phi, int index )
{
 double ans=0.0;
 double cs2_inv=3.0;
 #pragma unroll 9
 for(int l=1;l<q;l=l+2){
  double ez=eez[l];
  double wt=wwt[l];
  int et=eet[l];
  ans=ez*wt*(-phi[index+2*et]+6.0*phi[index+et]-6.0*phi[index-et]+phi[index-2*et])+ans;
 }
 ans=ans*0.25*cs2_inv;
 return ans;
}
// Lattice-weighted Laplacian of phi (second differences over each direction pair, scaled by cs2_inv/dt).
__device__ double laplace_phi (double *phi,int index)
{
 double ans=0.0;
 double phi_index=phi[index];
 double cs2_inv =3.0;
 double dt_inv=1./dt;
 for(int l=1;l<q;l=l+2)
 {
  double wt=wwt[l];
  int et=eet[l];
  ans=2.0*wt*(phi[index+et]-2.0*phi_index+phi[index-et])+ans;
 }
 ans=ans*cs2_inv*dt_inv;
 return ans;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//                                                   chemical mu                                                   //
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
// Chemical potential mu = beta*(4c^3 - 6c^2 + 2c) - kappa*lap(c): derivative of the double-well
// bulk energy beta*c^2*(1-c)^2 plus the interfacial gradient contribution.
__global__ void chemical(double *c,double *m,double kappa,double beta )
{
 int i= threadIdx.x+2;
 int j=blockIdx.x%ny+2;
 int k=blockIdx.x/ny+2;
 int index=index_3d(i,j,k);
 double cl=c[index];
 m[index]=beta*(4.0*cl*cl*cl-6.0*cl*cl+2.0*cl)-kappa*laplace_phi( c,index );
}
// Same update restricted to the four z-slabs next to the bottom/top sub-domain faces (k = 2, 3, zd, zd+1).
__global__ void chemical_bc( double *c,double *m,double kappa,double beta )
{
 int i=threadIdx.x+2;
 int j=blockIdx.x +2;
 int zd=nz/kp;
 int kk[4]= {2,3,zd,zd+1};
 for (int t=0;t<4;t++){
  int k=kk[t];
  int index=index_3d(i,j,k);
  double cl=c[index];
  m[index]=beta*(4.0*cl*cl*cl-6.0*cl*cl+2.0*cl)-kappa*laplace_phi( c,index );
 }
}
// Same update restricted to the four x-columns next to the left/right sub-domain faces (i = 2, 3, xd, xd+1).
__global__ void chemical_bc_x( double *c,double *m,double kappa,double beta )
{
 int k=threadIdx.x+4;
 int j=blockIdx.x +2;
 int xd=nx/ip;
 int ii[4]= {2,3,xd,xd+1};
 for (int t=0;t<4;t++){
  int i=ii[t];
  int index=index_3d(i,j,k);
  double cl=c[index];
  m[index]=beta*(4.0*cl*cl*cl-6.0*cl*cl+2.0*cl)-kappa*laplace_phi( c,index );
 }
}
// Interior nodes (i and k offset by 4); in the time loop this kernel runs on a separate stream so it
// overlaps with the face updates and the MPI halo exchange.
__global__ void chemical_in( double *c,double *m,double kappa,double beta )
{
 int i=threadIdx.x+4;
 int j=blockIdx.x%ny+2;
 int k=blockIdx.x/ny+4;
 int index=index_3d(i,j,k);
 double cl=c[index];
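 // laplace_phi below reads the first ring of neighbours, so the c halo exchanged earlier
 // in the step must already be up to date here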
m[index]=beta*(4.0*cl*cl*cl-6.0*cl*cl+2.0*cl)-kappa*laplace_phi( c,index ); } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // eq collision // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void eq_collision(double *g,double *h,double *c,double *m,double *p,double gravity,double *gra_c, double *gra_m,double *u,double *v,double *w,double mobi) { int i=threadIdx.x+2; int j= blockIdx.x%ny+2; int k= blockIdx.x/ny+2; int index=index_3d(i,j,k); double cs2_inv =3.0; const double cs2=1.0/cs2_inv; double uu=u[index]; double vv=v[index]; double ww=w[index]; double cc=c[index]; double rr=cc*rho_l+(1.0-cc)*rho_g; // double tt=cc*tau_l+(1.0-cc)*tau_g; const double rr_inv=1.0/rr; double pp=p[index]; double dr = rho_l-rho_g; double gr_cx_c=gra_c[index_4d(i,j,k,0)]; double gr_cy_c=gra_c[index_4d(i,j,k,1)]; double gr_cz_c=gra_c[index_4d(i,j,k,2)]; double gr_mx_c=gra_m[index_4d(i,j,k,0)]; double gr_my_c=gra_m[index_4d(i,j,k,1)]; double gr_mz_c=gra_m[index_4d(i,j,k,2)]; double gr_px_c=gradient_cen_x ( p,index ); double gr_py_c=gradient_cen_y ( p,index ); double gr_pz_c=gradient_cen_z ( p,index ); double gr_cx_m=gradient_mix_x ( c,index ); double gr_cy_m=gradient_mix_y ( c,index ); double gr_cz_m=gradient_mix_z ( c,index ); double gr_mx_m=gradient_mix_x ( m,index ); double gr_my_m=gradient_mix_y ( m,index ); double gr_mz_m=gradient_mix_z ( m,index ); double gr_px_m=gradient_mix_x ( p,index ); double gr_py_m=gradient_mix_y ( p,index ); double gr_pz_m=gradient_mix_z ( p,index ); double lap_mu =laplace_phi( m,index ); double udotu=uu*uu+vv*vv+ww*ww; for(int l=0;l<q;l++) { int index_l=index_4d(i,j,k,l); double ex=eex[l]; double ey=eey[l]; double ez=eez[l]; double wt=wwt[l]; int et=eet[l]; double edotu=ex*uu+ey*vv+ez*ww; double uugly=edotu*cs2_inv+edotu*edotu*0.5*cs2_inv*cs2_inv-udotu*0.5*cs2_inv; double gamma=wt*(1.0+uugly); double u_et=u[index+et]; double v_et=v[index+et]; double w_et=w[index+et]; double lap_mu_et=laplace_phi( m,index+et ); double udotu_et=u_et*u_et+v_et*v_et+w_et*w_et; double edotu_et=ex*u_et+ey*v_et+ez*w_et; double uugly_et=edotu_et*cs2_inv+edotu_et*edotu_et*0.5*cs2_inv*cs2_inv-udotu_et*0.5*cs2_inv; double gamma_et=wt*(1.0+uugly_et); /////////////////////////////////////////////////////// double geq_t=wt*(pp+rr*cs2*uugly);//geq double heq_t=cc*gamma;//heq /////////////////////////////////////////////////////// double temp_cc = grad_phie_c( c,index,et ) - ( uu * gr_cx_c + vv * gr_cy_c + ww * gr_cz_c ); double temp_mc = grad_phie_c( m,index,et ) - ( uu * gr_mx_c + vv * gr_my_c + ww * gr_mz_c ); double temp_pc = grad_phie_c( p,index,et ) - ( uu * gr_px_c + vv * gr_py_c + ww * gr_pz_c ); double temp_cm = grad_phie_m( c,index,et ) - ( uu * gr_cx_m + vv * gr_cy_m + ww * gr_cz_m ); double temp_mm = grad_phie_m( m,index,et ) - ( uu * gr_mx_m + vv * gr_my_m + ww * gr_mz_m ); double temp_pm = grad_phie_m( p,index,et ) - ( uu * gr_px_m + vv * gr_py_m + ww * gr_pz_m ); double temp_z = ez*gravity-ww*gravity; /////////////////////////////////////////////////////// double temp_gc = cs2*wt*uugly*temp_cc*dr-(cc*temp_mc+rr*temp_z)*gamma+ez*rho_l*gravity*wt; double temp_hc = temp_cc-cc*rr_inv*cs2_inv*(temp_pc+cc*temp_mc+(rr-rho_l)*temp_z); geq_t=geq_t-0.5*temp_gc;//geq_bar heq_t=heq_t-0.5*gamma*temp_hc;//heq_bar /////////////////////////////////////////////////////// double temp_gm = cs2*wt*uugly*temp_cm*dr-(cc*temp_mm+rr*temp_z)*gamma+ez*rho_l*gravity*wt; double 
temp_hm = temp_cm-cc*rr_inv*cs2_inv*(temp_pm+cc*temp_mm+(rr-rho_l)*temp_z); temp_hm = 0.5*dt*mobi*( gamma*lap_mu + gamma_et*lap_mu_et )+temp_hm*gamma; ////////////////////////collision////////////////////////////// g[index_l] = geq_t+temp_gm; h[index_l] = heq_t+temp_hm; } } __global__ void eq_collision_bc(double *g,double *h,double *c,double *m,double *p,double gravity,double *gra_c, double *gra_m,double *u,double *v,double *w,double mobi) { int i=threadIdx.x+2; int j= blockIdx.x+2; int zd=nz/kp; double cs2_inv =3.0; double cs2=1.0/cs2_inv; double dr = rho_l-rho_g; for(int k=2;k<zd+2;k=k+zd-1) { int index=index_3d(i,j,k); double uu=u[index]; double vv=v[index]; double ww=w[index]; double cc=c[index]; double ceq=cc; if (cc < 0)ceq=0; else if(cc > 1)ceq=1; else ceq=cc; double rr=cc*rho_l+(1.0-cc)*rho_g; double tt=cc*tau_l+(1.0-cc)*tau_g; double rr_inv=1.0/rr; double pp=p[index]; double gr_cx_c=gra_c[index_4d(i,j,k,0)]; double gr_cy_c=gra_c[index_4d(i,j,k,1)]; double gr_cz_c=gra_c[index_4d(i,j,k,2)]; double gr_mx_c=gra_m[index_4d(i,j,k,0)]; double gr_my_c=gra_m[index_4d(i,j,k,1)]; double gr_mz_c=gra_m[index_4d(i,j,k,2)]; double gr_px_c=gradient_cen_x ( p,index ); double gr_py_c=gradient_cen_y ( p,index ); double gr_pz_c=gradient_cen_z ( p,index ); double gr_cx_m=gradient_mix_x ( c,index ); double gr_cy_m=gradient_mix_y ( c,index ); double gr_cz_m=gradient_mix_z ( c,index ); double gr_mx_m=gradient_mix_x ( m,index ); double gr_my_m=gradient_mix_y ( m,index ); double gr_mz_m=gradient_mix_z ( m,index ); double gr_px_m=gradient_mix_x ( p,index ); double gr_py_m=gradient_mix_y ( p,index ); double gr_pz_m=gradient_mix_z ( p,index ); double lap_mu =laplace_phi( m,index ); double udotu=uu*uu+vv*vv+ww*ww; for(int l=0;l<q;l++) { int index_l=index_4d(i,j,k,l); double ex=eex[l]; double ey=eey[l]; double ez=eez[l]; double wt=wwt[l]; int et=eet[l]; double edotu=ex*uu+ey*vv+ez*ww; double uugly=edotu*cs2_inv+edotu*edotu*0.5*cs2_inv*cs2_inv-udotu*0.5*cs2_inv; double gamma=wt*(1.0+uugly); double u_et=u[index+et]; double v_et=v[index+et]; double w_et=w[index+et]; double lap_mu_et=laplace_phi( m,index+et ); double udotu_et=u_et*u_et+v_et*v_et+w_et*w_et; double edotu_et=ex*u_et+ey*v_et+ez*w_et; double uugly_et=edotu_et*cs2_inv+edotu_et*edotu_et*0.5*cs2_inv*cs2_inv-udotu_et*0.5*cs2_inv; double gamma_et=wt*(1.0+uugly_et); /////////////////////////////////////////////////////// double geq_t=wt*(pp+rr*cs2*uugly);//geq double heq_t=ceq*gamma;//heq /////////////////////////////////////////////////////// double temp_cc = grad_phie_c( c,index,et ) - ( uu * gr_cx_c + vv * gr_cy_c + ww * gr_cz_c ); double temp_mc = grad_phie_c( m,index,et ) - ( uu * gr_mx_c + vv * gr_my_c + ww * gr_mz_c ); double temp_pc = grad_phie_c( p,index,et ) - ( uu * gr_px_c + vv * gr_py_c + ww * gr_pz_c ); double temp_cm = grad_phie_m( c,index,et ) - ( uu * gr_cx_m + vv * gr_cy_m + ww * gr_cz_m ); double temp_mm = grad_phie_m( m,index,et ) - ( uu * gr_mx_m + vv * gr_my_m + ww * gr_mz_m ); double temp_pm = grad_phie_m( p,index,et ) - ( uu * gr_px_m + vv * gr_py_m + ww * gr_pz_m ); double temp_z = ez*gravity-ww*gravity; /////////////////////////////////////////////////////// double temp_gc = cs2*wt*uugly*temp_cc*dr-(cc*temp_mc+rr*temp_z)*gamma+ez*rho_l*gravity*wt; double temp_hc = temp_cc-cc*rr_inv*cs2_inv*(temp_pc+cc*temp_mc+rr*temp_z); geq_t=geq_t-0.5*temp_gc;//geq_bar heq_t=heq_t-0.5*temp_hc*gamma;//heq_bar /////////////////////////////////////////////////////// double temp_gm = 
cs2*wt*uugly*temp_cm*dr-(cc*temp_mm+rr*temp_z)*gamma+ez*rho_l*gravity*wt; double temp_hm = temp_cm-cc*rr_inv*cs2_inv*(temp_pm+cc*temp_mm+rr*temp_z); temp_hm = 0.5*dt*mobi*( gamma*lap_mu + gamma_et*lap_mu_et )+temp_hm*gamma; ////////////////////////collision////////////////////////////// g[index_l] = g[index_l]*(1.0-1.0/(tt +0.5))+geq_t/(tt +0.5)+temp_gm; h[index_l] = h[index_l]*(1.0-1.0/(tau_h +0.5))+heq_t/(tau_h +0.5)+temp_hm; } } } __global__ void eq_collision_bc_x(double *g,double *h,double *c,double *m,double *p,double gravity,double *gra_c, double *gra_m,double *u,double *v,double *w,double mobi) { int k=threadIdx.x+3; int j= blockIdx.x+2; int xd=nx/ip; double cs2_inv =3.0; double cs2 =1.0/cs2_inv; double dr = rho_l-rho_g; for(int i=2;i<xd+2;i=i+xd-1) { int index=index_3d(i,j,k); double uu=u[index]; double vv=v[index]; double ww=w[index]; double cc=c[index]; double ceq=cc; if (cc < 0)ceq=0; else if(cc > 1)ceq=1; else ceq=cc; double rr=cc*rho_l+(1.0-cc)*rho_g; double tt=cc*tau_l+(1.0-cc)*tau_g; double rr_inv=1.0/rr; double pp=p[index]; double gr_cx_c=gra_c[index_4d(i,j,k,0)]; double gr_cy_c=gra_c[index_4d(i,j,k,1)]; double gr_cz_c=gra_c[index_4d(i,j,k,2)]; double gr_mx_c=gra_m[index_4d(i,j,k,0)]; double gr_my_c=gra_m[index_4d(i,j,k,1)]; double gr_mz_c=gra_m[index_4d(i,j,k,2)]; double gr_px_c=gradient_cen_x ( p,index ); double gr_py_c=gradient_cen_y ( p,index ); double gr_pz_c=gradient_cen_z ( p,index ); double gr_cx_m=gradient_mix_x ( c,index ); double gr_cy_m=gradient_mix_y ( c,index ); double gr_cz_m=gradient_mix_z ( c,index ); double gr_mx_m=gradient_mix_x ( m,index ); double gr_my_m=gradient_mix_y ( m,index ); double gr_mz_m=gradient_mix_z ( m,index ); double gr_px_m=gradient_mix_x ( p,index ); double gr_py_m=gradient_mix_y ( p,index ); double gr_pz_m=gradient_mix_z ( p,index ); double lap_mu =laplace_phi( m,index ); double udotu=uu*uu+vv*vv+ww*ww; for(int l=0;l<q;l++) { int index_l=index_4d(i,j,k,l); double ex=eex[l]; double ey=eey[l]; double ez=eez[l]; double wt=wwt[l]; int et=eet[l]; double edotu=ex*uu+ey*vv+ez*ww; double uugly=edotu*cs2_inv+edotu*edotu*0.5*cs2_inv*cs2_inv-udotu*0.5*cs2_inv; double gamma=wt*(1.0+uugly); double u_et=u[index+et]; double v_et=v[index+et]; double w_et=w[index+et]; double lap_mu_et=laplace_phi( m,index+et ); double udotu_et=u_et*u_et+v_et*v_et+w_et*w_et; double edotu_et=ex*u_et+ey*v_et+ez*w_et; double uugly_et=edotu_et*cs2_inv+edotu_et*edotu_et*0.5*cs2_inv*cs2_inv-udotu_et*0.5*cs2_inv; double gamma_et=wt*(1.0+uugly_et); /////////////////////////////////////////////////////// double geq_t=wt*(pp+rr*cs2*uugly);//geq double heq_t=ceq*gamma;//heq /////////////////////////////////////////////////////// double temp_cc = grad_phie_c( c,index,et ) - ( uu * gr_cx_c + vv * gr_cy_c + ww * gr_cz_c ); double temp_mc = grad_phie_c( m,index,et ) - ( uu * gr_mx_c + vv * gr_my_c + ww * gr_mz_c ); double temp_pc = grad_phie_c( p,index,et ) - ( uu * gr_px_c + vv * gr_py_c + ww * gr_pz_c ); double temp_cm = grad_phie_m( c,index,et ) - ( uu * gr_cx_m + vv * gr_cy_m + ww * gr_cz_m ); double temp_mm = grad_phie_m( m,index,et ) - ( uu * gr_mx_m + vv * gr_my_m + ww * gr_mz_m ); double temp_pm = grad_phie_m( p,index,et ) - ( uu * gr_px_m + vv * gr_py_m + ww * gr_pz_m ); double temp_z = ez*gravity-ww*gravity; /////////////////////////////////////////////////////// double temp_gc = cs2*wt*uugly*temp_cc*dr-(cc*temp_mc+rr*temp_z)*gamma+ez*rho_l*gravity*wt; double temp_hc = temp_cc-cc*rr_inv*cs2_inv*(temp_pc+cc*temp_mc+rr*temp_z); geq_t=geq_t-0.5*temp_gc;//geq_bar 
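// the order-parameter equilibrium receives the same half-step forcing correction below (heq_bar)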
heq_t=heq_t-0.5*temp_hc*gamma;//heq_bar /////////////////////////////////////////////////////// double temp_gm = cs2*wt*uugly*temp_cm*dr-(cc*temp_mm+rr*temp_z)*gamma+ez*rho_l*gravity*wt; double temp_hm = temp_cm-cc*rr_inv*cs2_inv*(temp_pm+cc*temp_mm+rr*temp_z); temp_hm = 0.5*dt*mobi*( gamma*lap_mu + gamma_et*lap_mu_et )+temp_hm*gamma; ////////////////////////collision////////////////////////////// g[index_l] = g[index_l]*(1.0-1.0/(tt +0.5))+geq_t/(tt +0.5)+temp_gm; h[index_l] = h[index_l]*(1.0-1.0/(tau_h +0.5))+heq_t/(tau_h +0.5)+temp_hm; } } } __global__ void eq_collision_in(double *g,double *h,double *c,double *m,double *p,double gravity,double *gra_c, double *gra_m,double *u,double *v,double *w,double mobi) { int i=threadIdx.x+3; int j= blockIdx.x%ny+2; int k= blockIdx.x/ny+3; int index=index_3d(i,j,k); double cs2_inv =3.0; double cs2=1.0/cs2_inv; double uu=u[index]; double vv=v[index]; double ww=w[index]; double cc=c[index]; double ceq=cc; if (cc < 0)ceq=0; else if(cc > 1)ceq=1; else ceq=cc; double rr=cc*rho_l+(1.0-cc)*rho_g; double tt=cc*tau_l+(1.0-cc)*tau_g; double rr_inv=1.0/rr; double pp=p[index]; double dr = rho_l-rho_g; double gr_cx_c=gra_c[index_4d(i,j,k,0)]; double gr_cy_c=gra_c[index_4d(i,j,k,1)]; double gr_cz_c=gra_c[index_4d(i,j,k,2)]; double gr_mx_c=gra_m[index_4d(i,j,k,0)]; double gr_my_c=gra_m[index_4d(i,j,k,1)]; double gr_mz_c=gra_m[index_4d(i,j,k,2)]; double gr_px_c=gradient_cen_x ( p,index ); double gr_py_c=gradient_cen_y ( p,index ); double gr_pz_c=gradient_cen_z ( p,index ); double gr_cx_m=gradient_mix_x ( c,index ); double gr_cy_m=gradient_mix_y ( c,index ); double gr_cz_m=gradient_mix_z ( c,index ); double gr_mx_m=gradient_mix_x ( m,index ); double gr_my_m=gradient_mix_y ( m,index ); double gr_mz_m=gradient_mix_z ( m,index ); double gr_px_m=gradient_mix_x ( p,index ); double gr_py_m=gradient_mix_y ( p,index ); double gr_pz_m=gradient_mix_z ( p,index ); double lap_mu =laplace_phi( m,index ); double udotu=uu*uu+vv*vv+ww*ww; for(int l=0;l<q;l++) { int index_l=index_4d(i,j,k,l); double ex=eex[l]; double ey=eey[l]; double ez=eez[l]; double wt=wwt[l]; int et=eet[l]; double edotu=ex*uu+ey*vv+ez*ww; double uugly=edotu*cs2_inv+edotu*edotu*0.5*cs2_inv*cs2_inv-udotu*0.5*cs2_inv; double gamma=wt*(1.0+uugly); double u_et=u[index+et]; double v_et=v[index+et]; double w_et=w[index+et]; double lap_mu_et=laplace_phi( m,index+et ); double udotu_et=u_et*u_et+v_et*v_et+w_et*w_et; double edotu_et=ex*u_et+ey*v_et+ez*w_et; double uugly_et=edotu_et*cs2_inv+edotu_et*edotu_et*0.5*cs2_inv*cs2_inv-udotu_et*0.5*cs2_inv; double gamma_et=wt*(1.0+uugly_et); /////////////////////////////////////////////////////// double geq_t=wt*(pp+rr*cs2*uugly);//geq double heq_t=ceq*gamma;//heq /////////////////////////////////////////////////////// double temp_cc = grad_phie_c( c,index,et ) - ( uu * gr_cx_c + vv * gr_cy_c + ww * gr_cz_c ); double temp_mc = grad_phie_c( m,index,et ) - ( uu * gr_mx_c + vv * gr_my_c + ww * gr_mz_c ); double temp_pc = grad_phie_c( p,index,et ) - ( uu * gr_px_c + vv * gr_py_c + ww * gr_pz_c ); double temp_cm = grad_phie_m( c,index,et ) - ( uu * gr_cx_m + vv * gr_cy_m + ww * gr_cz_m ); double temp_mm = grad_phie_m( m,index,et ) - ( uu * gr_mx_m + vv * gr_my_m + ww * gr_mz_m ); double temp_pm = grad_phie_m( p,index,et ) - ( uu * gr_px_m + vv * gr_py_m + ww * gr_pz_m ); double temp_z = ez*gravity-ww*gravity; /////////////////////////////////////////////////////// double temp_gc = cs2*wt*uugly*temp_cc*dr-(cc*temp_mc+rr*temp_z)*gamma+ez*rho_l*gravity*wt; double temp_hc = 
temp_cc-cc*rr_inv*cs2_inv*(temp_pc+cc*temp_mc+rr*temp_z); geq_t=geq_t-0.5*temp_gc;//geq_bar heq_t=heq_t-0.5*temp_hc*gamma;//heq_bar /////////////////////////////////////////////////////// double temp_gm = cs2*wt*uugly*temp_cm*dr-(cc*temp_mm+rr*temp_z)*gamma+ez*rho_l*gravity*wt; double temp_hm = temp_cm-cc*rr_inv*cs2_inv*(temp_pm+cc*temp_mm+rr*temp_z); temp_hm = 0.5*dt*mobi*( gamma*lap_mu + gamma_et*lap_mu_et )+temp_hm*gamma; ////////////////////////collision////////////////////////////// g[index_l] = g[index_l]*(1.0-1.0/(tt +0.5))+geq_t/(tt +0.5)+temp_gm; h[index_l] = h[index_l]*(1.0-1.0/(tau_h +0.5))+heq_t/(tau_h +0.5)+temp_hm; } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // macro // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void macro_h_bc(double *h,double *h_next,double *c) { int i=threadIdx.x+2; int j= blockIdx.x+2; int zd=nz/kp; int kk[4]= {2,3,zd,zd+1}; for (int t=0;t<4;t++){ int k=kk[t]; int index=index_3d(i,j,k); double sum_c=0.0; for(int l=0;l<q;l++){ int index_l =index_4d(i,j,k,l); int et=eet[l]; sum_c=h[index_l-et]+sum_c; h_next[index_l]=h[index_l-et]; } c[index]=sum_c; } } __global__ void macro_h_bc_x(double *h,double *h_next,double *c) { int k=threadIdx.x+4; int j= blockIdx.x+2; int xd=nx/ip; int ii[4]= {2,3,xd,xd+1}; for (int t=0;t<4;t++){ int i=ii[t]; int index=index_3d(i,j,k); double sum_c=0.0; for(int l=0;l<q;l++){ int index_l =index_4d(i,j,k,l); int et=eet[l]; sum_c=h[index_l-et]+sum_c; h_next[index_l]=h[index_l-et]; } c[index]=sum_c; } } __global__ void macro_h_in(double *h,double *h_next,double *c) { int i=threadIdx.x+4; int j= blockIdx.x%ny+2; int k= blockIdx.x/ny+4; int index=index_3d(i,j,k); double sum_c=0.0; for(int l=0;l<q;l++){ int index_l =index_4d(i,j,k,l); int et=eet[l]; sum_c=h[index_l-et]+sum_c; h_next[index_l]=h[index_l-et]; } c[index]=sum_c; } __global__ void macro_g_bc(double *g,double *g_next,double *c,double *m,double *p,double *gra_c,double *gra_m,double *u,double *v,double *w) { int i=threadIdx.x+2; int j= blockIdx.x+2; int zd=nz/kp; int kk[4]= {2,3,zd,zd+1}; double dr=rho_l-rho_g; double cs2_inv=3.0; double cs2=1.0/cs2_inv; for (int t=0;t<4;t++){ int k=kk[t]; int index=index_3d(i,j,k); double cc=c[index]; double rr=cc*rho_l+((double)1.0-cc)*rho_g; double gr_rx_c=gra_c[index_4d(i,j,k,0)]*dr; double gr_ry_c=gra_c[index_4d(i,j,k,1)]*dr; double gr_rz_c=gra_c[index_4d(i,j,k,2)]*dr; double gr_mx_c=gra_m[index_4d(i,j,k,0)]; double gr_my_c=gra_m[index_4d(i,j,k,1)]; double gr_mz_c=gra_m[index_4d(i,j,k,2)]; double sum_u=0.0; double sum_v=0.0; double sum_w=0.0; double sum_p=0.0; for(int l=0;l<q;l++) { int index_l=index_4d(i,j,k,l); double ex=eex[l]; double ey=eey[l]; double ez=eez[l]; int et=eet[l]; double temp_g=g[index_l-et]; sum_u=ex*temp_g+sum_u; sum_v=ey*temp_g+sum_v; sum_w=ez*temp_g+sum_w; sum_p= temp_g+sum_p; g_next[index_l]=temp_g; } double uu=(sum_u*cs2_inv-0.5*dt*cc*gr_mx_c)/rr; double vv=(sum_v*cs2_inv-0.5*dt*cc*gr_my_c)/rr; double ww=(sum_w*cs2_inv-0.5*dt*cc*gr_mz_c)/rr; u[index]=uu; v[index]=vv; w[index]=ww; p[index]=sum_p+0.5*dt*(uu*gr_rx_c+vv*gr_ry_c+ww*gr_rz_c)*cs2; } } __global__ void macro_g_bc_x(double *g,double *g_next,double *c,double *m,double *p,double *gra_c,double *gra_m,double *u,double *v,double *w) { int k=threadIdx.x+4; int j= blockIdx.x+2; int xd=nx/ip; int ii[4]= {2,3,xd,xd+1}; double cs2_inv=3.0; double cs2=1.0/cs2_inv; double dr=rho_l-rho_g; for (int t=0;t<4;t++){ int 
i=ii[t]; int index=index_3d(i,j,k); double cc=c[index]; double rr=cc*rho_l+((double)1.0-cc)*rho_g; double gr_rx_c=gra_c[index_4d(i,j,k,0)]*dr; double gr_ry_c=gra_c[index_4d(i,j,k,1)]*dr; double gr_rz_c=gra_c[index_4d(i,j,k,2)]*dr; double gr_mx_c=gra_m[index_4d(i,j,k,0)]; double gr_my_c=gra_m[index_4d(i,j,k,1)]; double gr_mz_c=gra_m[index_4d(i,j,k,2)]; double sum_u=0.0; double sum_v=0.0; double sum_w=0.0; double sum_p=0.0; for(int l=0;l<q;l++) { int index_l=index_4d(i,j,k,l); double ex=eex[l]; double ey=eey[l]; double ez=eez[l]; int et=eet[l]; double temp_g=g[index_l-et]; sum_u=ex*temp_g+sum_u; sum_v=ey*temp_g+sum_v; sum_w=ez*temp_g+sum_w; sum_p= temp_g+sum_p; g_next[index_l]=temp_g; } double uu=(sum_u*cs2_inv-0.5*dt*cc*gr_mx_c)/rr; double vv=(sum_v*cs2_inv-0.5*dt*cc*gr_my_c)/rr; double ww=(sum_w*cs2_inv-0.5*dt*cc*gr_mz_c)/rr; u[index]=uu; v[index]=vv; w[index]=ww; p[index]=sum_p+0.5*dt*(uu*gr_rx_c+vv*gr_ry_c+ww*gr_rz_c)*cs2; } } __global__ void macro_g_in( double *g, double *g_next,double *c,double *m,double *p,double *gra_c,double *gra_m,double *u,double *v,double *w) { int i=threadIdx.x+4; int j= blockIdx.x%ny+2; int k= blockIdx.x/ny+4; int index=index_3d(i,j,k); double cs2_inv=3.0; double cs2=1.0/cs2_inv; double cc=c[index]; double rr=cc*rho_l+((double)1.0-cc)*rho_g; double dr=rho_l-rho_g; double gr_rx_c=gra_c[index_4d(i,j,k,0)]*dr; double gr_ry_c=gra_c[index_4d(i,j,k,1)]*dr; double gr_rz_c=gra_c[index_4d(i,j,k,2)]*dr; double gr_mx_c=gra_m[index_4d(i,j,k,0)]; double gr_my_c=gra_m[index_4d(i,j,k,1)]; double gr_mz_c=gra_m[index_4d(i,j,k,2)]; double sum_u=0.0; double sum_v=0.0; double sum_w=0.0; double sum_p=0.0; for(int l=0;l<q;l++){ int index_l =index_4d(i,j,k,l); double ex=eex[l]; double ey=eey[l]; double ez=eez[l]; int et=eet[l]; double temp_g=g[index_l-et]; sum_u=ex*temp_g+sum_u; sum_v=ey*temp_g+sum_v; sum_w=ez*temp_g+sum_w; sum_p= temp_g+sum_p; g_next[index_l]=temp_g; } double uu=(sum_u*cs2_inv-0.5*dt*cc*gr_mx_c)/rr; double vv=(sum_v*cs2_inv-0.5*dt*cc*gr_my_c)/rr; double ww=(sum_w*cs2_inv-0.5*dt*cc*gr_mz_c)/rr; u[index]=uu; v[index]=vv; w[index]=ww; p[index]=sum_p+0.5*dt*(uu*gr_rx_c+vv*gr_ry_c+ww*gr_rz_c)*cs2; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // post // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void p_real(double *c,double *p,double *a,double beta,double kappa,double *gra_c) { int i=threadIdx.x+2; int j= blockIdx.x%ny+2; int k= blockIdx.x/ny+2; int index=index_3d(i,j,k); double gr_cx_c=gra_c[index_4d(i,j,k,0)]; double gr_cy_c=gra_c[index_4d(i,j,k,1)]; double gr_cz_c=gra_c[index_4d(i,j,k,2)]; double la_c =laplace_phi(c,index ); double cc=c[index]; double pp=p[index]; double th,cu,e0; e0=beta*cc*cc*(cc-1)*(cc-1); th=cc*beta*(4*cc*cc*cc-6*cc*cc+2*cc)-e0; cu=-kappa*cc*la_c+0.5*kappa*(gr_cx_c*gr_cx_c+gr_cy_c*gr_cy_c+gr_cz_c*gr_cz_c); a[index]=pp+th+cu; } double maxvalue(double *phi, int* indexx) { double max=0.0; int i,j,k; for(i=0;i<nx;i++){ for(j=0;j<ny;j++){ for(k=0;k<nz;k++){ int index=nx*(k*ny+j)+i; if(max < phi[index]){ max=phi[index]; *indexx=index; }}}} return max; } void max_w(double *c,double *w,double *max) { *max=0.0; int i,j,k; for(i=0;i<nx;i++){ for(j=0;j<ny;j++){ for(k=0;k<nz;k++){ int index=nx*(k*ny+j)+i; if(*max < w[index]){ *max = w[index]; }}}} } double minvalue(double *phi, int* indexx) { double min=100.0; int i,j,k; for(i=0;i<nx;i++){ for(j=0;j<ny;j++){ for(k=0;k<nz;k++){ int index=nx*(k*ny+j)+i; 
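// keep the smallest value of phi seen so far and remember its flat index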
if(min > phi[index]){ min=phi[index]; *indexx=index; }}}} return min; } void Reynolds_Time(double w, double *Re, int step) { Re[step/2-1]=2*radd*3/tau_l*w; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // main // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// int main(int argc, char *argv[]) { int i,j,k,index; //define matrix() double *c_d_h,*c_f_h,*c_fdo_h,*c_d,*c; // dicom & final & transfered on host/ orifinal & transfered on device double *m_d_h,*m_f_h,*m_fdo_h,*m_d,*m; double *b_d_h,*b_f_h,*b_fdo_h,*b_d,*b; // wettability double *p_d_h,*p_f_h,*p_fdo_h,*p_d,*p; double *u_d_h,*u_f_h,*u_fdo_h,*u_d,*u; double *v_d_h,*v_f_h,*v_fdo_h,*v_d,*v; double *w_d_h,*w_f_h,*w_fdo_h,*w_d,*w; double *a_d_h,*a_f_h,*a_fdo_h,*a_d,*a; //total pressure double *xz_d_h,*xz_f_h,*xz_fdo_h,*xz_d; //define matrix() int *et_h;// double *ex_h,*ey_h,*ez_h,*wt_h; double *h,*h_t; double *g,*g_t; //gradient matrix double *gra_c; double *gra_m; //define matrix() double *t_c_h,*t_c; double *t_m_h,*t_m; double *t_b_h,*t_b; double *t_p_h,*t_p; double *t_u_h,*t_u; double *t_v_h,*t_v; double *t_w_h,*t_w; double *t_g_h,*t_g; double *t_h_h,*t_h; double *t_c_x_h,*t_c_x; double *t_m_x_h,*t_m_x; double *t_b_x_h,*t_b_x; double *t_p_x_h,*t_p_x; double *t_u_x_h,*t_u_x; double *t_v_x_h,*t_v_x; double *t_w_x_h,*t_w_x; double *t_g_x_h,*t_g_x; double *t_h_x_h,*t_h_x; double *lx,*lz; double *Re; ////mpi int nproc,myid; int l_nbr, b_nbr, r_nbr, t_nbr, my_coord[ndim], iroot, itag; int ipart[ndim],periods[ndim],sideways,updown,right,up,reorder; int n_f; MPI_Status istat[8]; MPI_Comm comm; MPI_Init( &argc, &argv ); MPI_Comm_size(MPI_COMM_WORLD, &nproc); comm = MPI_COMM_WORLD; ipart[0]=ip; ipart[1]=kp; periods[0]=1; periods[1]=1; reorder=1; MPI_Cart_create(MPI_COMM_WORLD,ndim,ipart,periods,reorder,&comm); MPI_Comm_rank(comm,&myid); MPI_Cart_coords(comm,myid,ndim,my_coord); sideways=0; updown=1; right=1; up=1; MPI_Cart_shift(comm,sideways,right,&l_nbr,&r_nbr); MPI_Cart_shift(comm,updown ,up ,&b_nbr,&t_nbr); n_f=nx/ip*ny*nz/kp; if(myid==0){ printf("===============================================================\n"); printf("Checking devices...\n"); } MPI_Barrier(MPI_COMM_WORLD); printf("NPROC,MYID,i,k=%d\t%d\t%d\t%d\t\n",nproc,myid,my_coord[0],my_coord[1]); MPI_Barrier(MPI_COMM_WORLD); hipSetDevice(myid%DPN); ////memory allocate on cpu int size_final = nx*ny*nz; int size_dicom = (nx/ip+4)*(ny+4)*(nz/kp+4); int size_difun = (nx/ip+4)*(ny+4)*(nz/kp+4)*q; int size_allgr = (nx/ip+4)*(ny+4)*(nz/kp+4)*3;//(x+y+z) int tran_mac_1 = (nx/ip+4)*(ny+4)*4*1; //u,v,w int tran_mac_2 = (nx/ip+4)*(ny+4)*4*2; //c,m,b,p int tran_difun = (nx/ip+4)*(ny+4)*4*5;//5 int tran_mac_1_x = (nz/kp+4)*(ny+4)*4*1; //u,v,w x face int tran_mac_2_x = (nz/kp+4)*(ny+4)*4*2; //c,m,b,p x face int tran_difun_x = (nz/kp+4)*(ny+4)*4*5;//5 x face hipHostMalloc((void**)&c_d_h ,sizeof(double)*size_dicom); hipHostMalloc((void**)&m_d_h ,sizeof(double)*size_dicom); hipHostMalloc((void**)&b_d_h ,sizeof(double)*size_dicom); hipHostMalloc((void**)&p_d_h ,sizeof(double)*size_dicom); hipHostMalloc((void**)&u_d_h ,sizeof(double)*size_dicom); hipHostMalloc((void**)&v_d_h ,sizeof(double)*size_dicom); hipHostMalloc((void**)&w_d_h ,sizeof(double)*size_dicom); hipHostMalloc((void**)&a_d_h ,sizeof(double)*size_dicom); hipHostMalloc((void**)&et_h ,sizeof(double)* q ); hipHostMalloc((void**)&ex_h ,sizeof(double)* q ); hipHostMalloc((void**)&ey_h ,sizeof(double)* q 
); hipHostMalloc((void**)&ez_h ,sizeof(double)* q ); hipHostMalloc((void**)&wt_h ,sizeof(double)* q ); hipHostMalloc((void**)&t_c_h ,sizeof(double)* tran_mac_2 ); hipHostMalloc((void**)&t_m_h ,sizeof(double)* tran_mac_2 ); hipHostMalloc((void**)&t_b_h ,sizeof(double)* tran_mac_2 ); hipHostMalloc((void**)&t_p_h ,sizeof(double)* tran_mac_2 ); hipHostMalloc((void**)&t_u_h ,sizeof(double)* tran_mac_1 ); hipHostMalloc((void**)&t_v_h ,sizeof(double)* tran_mac_1 ); hipHostMalloc((void**)&t_w_h ,sizeof(double)* tran_mac_1 ); hipHostMalloc((void**)&t_g_h ,sizeof(double)* tran_difun ); hipHostMalloc((void**)&t_h_h ,sizeof(double)* tran_difun ); hipHostMalloc((void**)&t_c_x_h ,sizeof(double)* tran_mac_2_x ); hipHostMalloc((void**)&t_m_x_h ,sizeof(double)* tran_mac_2_x ); hipHostMalloc((void**)&t_b_x_h ,sizeof(double)* tran_mac_2_x ); hipHostMalloc((void**)&t_p_x_h ,sizeof(double)* tran_mac_2_x ); hipHostMalloc((void**)&t_u_x_h ,sizeof(double)* tran_mac_1_x ); hipHostMalloc((void**)&t_v_x_h ,sizeof(double)* tran_mac_1_x ); hipHostMalloc((void**)&t_w_x_h ,sizeof(double)* tran_mac_1_x ); hipHostMalloc((void**)&t_g_x_h ,sizeof(double)* tran_difun_x ); hipHostMalloc((void**)&t_h_x_h ,sizeof(double)* tran_difun_x ); hipHostMalloc((void**)&xz_d_h ,sizeof(double)*(nx/ip+4)*(nz/kp+4)); /////////////////////////////////////////////////////////////////////////////////////////// // zz // /////////////////////////////////////////////////////////////////////////////////////////// int step=0; double beta,zeta,mobi,kappa,phic,gravity; parameter (&beta,&zeta,&mobi,&kappa,&phic,&gravity,ex_h,ey_h,ez_h,wt_h,et_h); FILE *data_2d_t; FILE *data_3d_t; FILE *data_2d; FILE *data_3d; FILE *properties; FILE *final_2d; FILE *final_3d; if(myid == 0){ hipHostMalloc((void**)&c_f_h ,sizeof(double)*size_final); hipHostMalloc((void**)&m_f_h ,sizeof(double)*size_final); hipHostMalloc((void**)&b_f_h ,sizeof(double)*size_final); hipHostMalloc((void**)&p_f_h ,sizeof(double)*size_final); hipHostMalloc((void**)&u_f_h ,sizeof(double)*size_final); hipHostMalloc((void**)&v_f_h ,sizeof(double)*size_final); hipHostMalloc((void**)&w_f_h ,sizeof(double)*size_final); hipHostMalloc((void**)&a_f_h ,sizeof(double)*size_final); hipHostMalloc((void**)&xz_f_h ,sizeof(double)* nx*nz ); hipHostMalloc((void**)&c_fdo_h ,sizeof(double)*size_final); hipHostMalloc((void**)&m_fdo_h ,sizeof(double)*size_final); hipHostMalloc((void**)&b_fdo_h ,sizeof(double)*size_final); hipHostMalloc((void**)&p_fdo_h ,sizeof(double)*size_final); hipHostMalloc((void**)&u_fdo_h ,sizeof(double)*size_final); hipHostMalloc((void**)&v_fdo_h ,sizeof(double)*size_final); hipHostMalloc((void**)&w_fdo_h ,sizeof(double)*size_final); hipHostMalloc((void**)&a_fdo_h ,sizeof(double)*size_final); hipHostMalloc((void**)&xz_fdo_h ,sizeof(double)* nx*nz ); hipHostMalloc((void**)&lx ,sizeof(double)* stepall/2 ); hipHostMalloc((void**)&lz ,sizeof(double)* stepall/2 ); hipHostMalloc((void**)&Re ,sizeof(double)* stepall/2 ); initial_macro(c_f_h,m_f_h,b_f_h,p_f_h,u_f_h,v_f_h,w_f_h); array_2D_do(c_f_h,c_fdo_h); array_2D_do(m_f_h,m_fdo_h); array_2D_do(b_f_h,b_fdo_h); array_2D_do(p_f_h,p_fdo_h); array_2D_do(u_f_h,u_fdo_h); array_2D_do(v_f_h,v_fdo_h); array_2D_do(w_f_h,w_fdo_h); array_2D_do(a_f_h,a_fdo_h); //writing data properties = fopen("properties.txt","w"); if(condition==0){ double mo=gravity*(rho_l-rho_g)*pow(tau_l,4)*rho_l*rho_l/81.0/pow(sigma,3); printf("===============================================================\n"); fprintf( properties, "Three dimensional droplets - Bubble rising\n"); 
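// log grid size, bubble radius, interface thickness and the Bond / Morton numbers to properties.txt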
fprintf( properties, "Grid size nx=%d, ny=%d, nz=%d\n",nx,ny,nz); fprintf( properties, "Radius=%f, Thickness=%f\n",radd, thick); fprintf( properties, "Bo=%f\n",bo); fprintf( properties, "Mo=%f\n",mo); printf ("Bo=%f\n",bo); printf ("Mo=%f\n",mo); printf("Three dimensional droplets - One Bubble rising\n"); printf("===============================================================\n"); } else if(condition==1){ double mo=gravity*(rho_l-rho_g)*pow(tau_l,4)*rho_l*rho_l/81.0/pow(sigma,3); printf("===============================================================\n"); fprintf( properties, "Three dimensional droplets - Bubble rising\n"); fprintf( properties, "Grid size nx=%d, ny=%d, nz=%d\n",nx,ny,nz); fprintf( properties, "Radius=%f, Thickness=%f\n",radd, thick); fprintf( properties, "Bo=%f\n",bo); fprintf( properties, "Mo=%f\n",mo); printf ("Bo=%f\n",bo); printf ("Mo=%f\n",mo); printf("Three dimensional droplets - Two Bubble rising\n"); printf("===============================================================\n"); } printf("Initializing..."); fprintf( properties, "Tau_h =%f, Tau_g=%f, Tau_l=%f\n", tau_h,tau_g,tau_l); fprintf( properties, "rho_l =%f, rho_g=%f, sigma=%f\n", rho_l,rho_g,sigma); fclose(properties); data_2d = fopen("data_2d.dat","w"); fprintf( data_2d, "VARIABLES=\"X\",\"Z\",\"c\",\"u\",\"v\",\"w\",\"p\"\n"); fprintf( data_2d, "ZONE T=\"STEP=%d\" F=POINT\n",step); fprintf( data_2d, "I=%d, J=%d\n", nx,nz); j=ny/2; for(k=0;k<nz;k++){ for(i=0;i<nx;i++){ index=nx*(k*ny+j)+i; fprintf( data_2d, "%d\t%d\t%e\t%e\t%e\t%e\t%e\t\n", i,k,c_f_h[index],u_f_h[index],v_f_h[index],w_f_h[index],p_f_h[index]); }} fclose(data_2d); data_3d = fopen("data_3d.dat","w"); fprintf( data_3d, "VARIABLES=\"X\",\"Y\",\"Z\",\"c\"\n"); fprintf( data_3d, "ZONE T=\"STEP=%d\" F=POINT\n",step); fprintf( data_3d, "I=%d, J=%d, K=%d\n", nx,ny,nz); for(k=0;k<nz;k++){ for(j=0;j<ny;j++){ for(i=0;i<nx;i++){ index=(nx)*(k*(ny)+j)+i; fprintf( data_3d, "%d\t%d\t%d\t%e\t\n", i,j,k,c_f_h[index]); }}} fclose(data_3d); printf("done\n"); printf("===============================================================\n"); printf("Iterating...\n"); } MPI_Barrier(MPI_COMM_WORLD); //scatter iroot = 0; MPI_Scatter((void *)&c_fdo_h[0],n_f, MPI_DOUBLE,(void *)&c_d_h[0],n_f, MPI_DOUBLE,iroot,comm); MPI_Scatter((void *)&m_fdo_h[0],n_f, MPI_DOUBLE,(void *)&m_d_h[0],n_f, MPI_DOUBLE,iroot,comm); MPI_Scatter((void *)&b_fdo_h[0],n_f, MPI_DOUBLE,(void *)&b_d_h[0],n_f, MPI_DOUBLE,iroot,comm); MPI_Scatter((void *)&p_fdo_h[0],n_f, MPI_DOUBLE,(void *)&p_d_h[0],n_f, MPI_DOUBLE,iroot,comm); MPI_Scatter((void *)&u_fdo_h[0],n_f, MPI_DOUBLE,(void *)&u_d_h[0],n_f, MPI_DOUBLE,iroot,comm); MPI_Scatter((void *)&v_fdo_h[0],n_f, MPI_DOUBLE,(void *)&v_d_h[0],n_f, MPI_DOUBLE,iroot,comm); MPI_Scatter((void *)&w_fdo_h[0],n_f, MPI_DOUBLE,(void *)&w_d_h[0],n_f, MPI_DOUBLE,iroot,comm); MPI_Scatter((void *)&a_fdo_h[0],n_f, MPI_DOUBLE,(void *)&a_d_h[0],n_f, MPI_DOUBLE,iroot,comm); MPI_Barrier(MPI_COMM_WORLD); //memory allocation on gpu hipMalloc((void**)&c_d ,sizeof(double)* size_dicom ); hipMalloc((void**)&m_d ,sizeof(double)* size_dicom ); hipMalloc((void**)&b_d ,sizeof(double)* size_dicom ); hipMalloc((void**)&p_d ,sizeof(double)* size_dicom ); hipMalloc((void**)&u_d ,sizeof(double)* size_dicom ); hipMalloc((void**)&v_d ,sizeof(double)* size_dicom ); hipMalloc((void**)&w_d ,sizeof(double)* size_dicom ); hipMalloc((void**)&a_d ,sizeof(double)* size_dicom ); hipMalloc((void**)&h ,sizeof(double)* size_difun ); hipMalloc((void**)&g ,sizeof(double)* size_difun ); hipMalloc((void**)&h_t 
,sizeof(double)* size_difun ); hipMalloc((void**)&g_t ,sizeof(double)* size_difun ); hipMalloc((void**)&t_c ,sizeof(double)* tran_mac_2 ); hipMalloc((void**)&t_m ,sizeof(double)* tran_mac_2 ); hipMalloc((void**)&t_b ,sizeof(double)* tran_mac_2 ); hipMalloc((void**)&t_p ,sizeof(double)* tran_mac_2 ); hipMalloc((void**)&t_u ,sizeof(double)* tran_mac_1 ); hipMalloc((void**)&t_v ,sizeof(double)* tran_mac_1 ); hipMalloc((void**)&t_w ,sizeof(double)* tran_mac_1 ); hipMalloc((void**)&t_g ,sizeof(double)* tran_difun ); hipMalloc((void**)&t_h ,sizeof(double)* tran_difun ); hipMalloc((void**)&t_c_x ,sizeof(double)* tran_mac_2_x ); hipMalloc((void**)&t_m_x ,sizeof(double)* tran_mac_2_x ); hipMalloc((void**)&t_b_x ,sizeof(double)* tran_mac_2_x ); hipMalloc((void**)&t_p_x ,sizeof(double)* tran_mac_2_x ); hipMalloc((void**)&t_u_x ,sizeof(double)* tran_mac_1_x ); hipMalloc((void**)&t_v_x ,sizeof(double)* tran_mac_1_x ); hipMalloc((void**)&t_w_x ,sizeof(double)* tran_mac_1_x ); hipMalloc((void**)&t_g_x ,sizeof(double)* tran_difun_x ); hipMalloc((void**)&t_h_x ,sizeof(double)* tran_difun_x ); hipMalloc((void**)&gra_c ,sizeof(double)* size_allgr ); hipMalloc((void**)&gra_m ,sizeof(double)* size_allgr ); hipMalloc((void**)&xz_d,sizeof(double)*(nx/ip+4)*(nz/kp+4)); MPI_Barrier(MPI_COMM_WORLD); //cpu to gpu hipMemcpy(c_d, c_d_h, sizeof(double)* size_dicom , hipMemcpyHostToDevice); hipMemcpy(m_d, m_d_h, sizeof(double)* size_dicom , hipMemcpyHostToDevice); hipMemcpy(b_d, b_d_h, sizeof(double)* size_dicom , hipMemcpyHostToDevice); hipMemcpy(p_d, p_d_h, sizeof(double)* size_dicom , hipMemcpyHostToDevice); hipMemcpy(u_d, u_d_h, sizeof(double)* size_dicom , hipMemcpyHostToDevice); hipMemcpy(v_d, v_d_h, sizeof(double)* size_dicom , hipMemcpyHostToDevice); hipMemcpy(w_d, w_d_h, sizeof(double)* size_dicom , hipMemcpyHostToDevice); hipMemcpy(a_d, a_d_h, sizeof(double)* size_dicom , hipMemcpyHostToDevice); hipMemcpy(t_c, t_c_h, sizeof(double)* tran_mac_2 , hipMemcpyHostToDevice); hipMemcpy(t_m, t_m_h, sizeof(double)* tran_mac_2 , hipMemcpyHostToDevice); hipMemcpy(t_b, t_b_h, sizeof(double)* tran_mac_2 , hipMemcpyHostToDevice); hipMemcpy(t_p, t_p_h, sizeof(double)* tran_mac_2 , hipMemcpyHostToDevice); hipMemcpy(t_u, t_u_h, sizeof(double)* tran_mac_1 , hipMemcpyHostToDevice); hipMemcpy(t_v, t_v_h, sizeof(double)* tran_mac_1 , hipMemcpyHostToDevice); hipMemcpy(t_w, t_w_h, sizeof(double)* tran_mac_1 , hipMemcpyHostToDevice); hipMemcpy(t_g, t_g_h, sizeof(double)* tran_difun , hipMemcpyHostToDevice); hipMemcpy(t_h, t_h_h, sizeof(double)* tran_difun , hipMemcpyHostToDevice); hipMemcpy(t_c_x, t_c_x_h, sizeof(double)* tran_mac_2_x , hipMemcpyHostToDevice); hipMemcpy(t_m_x, t_m_x_h, sizeof(double)* tran_mac_2_x , hipMemcpyHostToDevice); hipMemcpy(t_b_x, t_b_x_h, sizeof(double)* tran_mac_2_x , hipMemcpyHostToDevice); hipMemcpy(t_p_x, t_p_x_h, sizeof(double)* tran_mac_2_x , hipMemcpyHostToDevice); hipMemcpy(t_u_x, t_u_x_h, sizeof(double)* tran_mac_1_x , hipMemcpyHostToDevice); hipMemcpy(t_v_x, t_v_x_h, sizeof(double)* tran_mac_1_x , hipMemcpyHostToDevice); hipMemcpy(t_w_x, t_w_x_h, sizeof(double)* tran_mac_1_x , hipMemcpyHostToDevice); hipMemcpy(t_g_x, t_g_x_h, sizeof(double)* tran_difun_x , hipMemcpyHostToDevice); hipMemcpy(t_h_x, t_h_x_h, sizeof(double)* tran_difun_x , hipMemcpyHostToDevice); hipMemcpyToSymbol ( eex , ex_h, sizeof(double)*q ); hipMemcpyToSymbol ( eey , ey_h, sizeof(double)*q ); hipMemcpyToSymbol ( eez , ez_h, sizeof(double)*q ); hipMemcpyToSymbol ( wwt , wt_h, sizeof(double)*q ); hipMemcpyToSymbol ( eet , et_h, 
sizeof(int )*q ); MPI_Barrier(MPI_COMM_WORLD); int xd=nx/ip; //x decomposition int zd=nz/kp; //z decomposition int grid_t0 =ny*zd; int block_t0 =xd; int grid_bc =ny; int block_t0_x =zd-2; int grid_in =ny*(zd-2); int grid_in2 =ny*(zd-4); hipMalloc((void**)&c ,sizeof(double)* size_dicom ); hipMalloc((void**)&m ,sizeof(double)* size_dicom ); hipMalloc((void**)&b ,sizeof(double)* size_dicom ); hipMalloc((void**)&p ,sizeof(double)* size_dicom ); hipMalloc((void**)&u ,sizeof(double)* size_dicom ); hipMalloc((void**)&v ,sizeof(double)* size_dicom ); hipMalloc((void**)&w ,sizeof(double)* size_dicom ); hipMalloc((void**)&a ,sizeof(double)* size_dicom ); hipLaunchKernelGGL(( array_do) , dim3(grid_t0) , dim3(block_t0), 0, 0, c_d,c ); hipLaunchKernelGGL(( array_do) , dim3(grid_t0) , dim3(block_t0), 0, 0, m_d,m ); hipLaunchKernelGGL(( array_do) , dim3(grid_t0) , dim3(block_t0), 0, 0, b_d,b ); hipLaunchKernelGGL(( array_do) , dim3(grid_t0) , dim3(block_t0), 0, 0, p_d,p ); hipLaunchKernelGGL(( array_do) , dim3(grid_t0) , dim3(block_t0), 0, 0, u_d,u ); hipLaunchKernelGGL(( array_do) , dim3(grid_t0) , dim3(block_t0), 0, 0, v_d,v ); hipLaunchKernelGGL(( array_do) , dim3(grid_t0) , dim3(block_t0), 0, 0, w_d,w ); hipLaunchKernelGGL(( array_do) , dim3(grid_t0) , dim3(block_t0), 0, 0, a_d,a ); MPI_Barrier(MPI_COMM_WORLD); /////////////////////////////////////////////////////////////////////////////////////////// int num_trans_m_2 =(xd+4)*(ny+4)*2; int num_trans_m_1 =(xd+4)*(ny+4)*1; int startb =(xd+4)*( 0 *(ny+4)+0)+0; int start =(xd+4)*( 2 *(ny+4)+0)+0; int end =(xd+4)*( 4 *(ny+4)+0)+0; int endb =(xd+4)*( 6 *(ny+4)+0)+0; int startb_1 =(xd+4)*( 0 *(ny+4)+0)+0; int start_1 =(xd+4)*( 1 *(ny+4)+0)+0; int end_1 =(xd+4)*( 2 *(ny+4)+0)+0; int endb_1 =(xd+4)*( 3 *(ny+4)+0)+0; int num_trans_d =(xd+4)*(ny+4)*5; int startb_d =((xd+4)*( 0 *(ny+4)+0)+0)*5; int start_d =((xd+4)*( 1 *(ny+4)+0)+0)*5; int end_d =((xd+4)*( 2 *(ny+4)+0)+0)*5; int endb_d =((xd+4)*( 3 *(ny+4)+0)+0)*5; int num_trans_m_2_x =(ny+4)*(zd+4)*2; int num_trans_m_1_x =(ny+4)*(zd+4)*1; int startb_x =(ny+4)*( 0 *(zd+4)+0)+0; int start_x =(ny+4)*( 2 *(zd+4)+0)+0; int end_x =(ny+4)*( 4 *(zd+4)+0)+0; int endb_x =(ny+4)*( 6 *(zd+4)+0)+0; int startb_1_x =(ny+4)*( 0 *(zd+4)+0)+0; int start_1_x =(ny+4)*( 1 *(zd+4)+0)+0; int end_1_x =(ny+4)*( 2 *(zd+4)+0)+0; int endb_1_x =(ny+4)*( 3 *(zd+4)+0)+0; int num_trans_d_x =(ny+4)*(zd+4)*5; int startb_d_x =((ny+4)*( 0 *(zd+4)+0)+0)*5; int start_d_x =((ny+4)*( 1 *(zd+4)+0)+0)*5; int end_d_x =((ny+4)*( 2 *(zd+4)+0)+0)*5; int endb_d_x =((ny+4)*( 3 *(zd+4)+0)+0)*5; /////////////////////////////////////////////////////////////////////////////////////////// /* checkk <<<grid_t2 , block_t2>>>( c_d,c ); hipMemcpy(c_d_h,c_d,sizeof(double)*size_dicom,hipMemcpyDeviceToHost); FILE *check; if(myid==1){ check = fopen("check.dat","w"); fprintf( check, "VARIABLES=\"X\",\"Z\",\"c\"\n"); fprintf( check, "ZONE T=\"gpu\" F=POINT\n"); fprintf( check, "I=%d, J=%d\n", nx+4,zd+4); j=ny/2; for(k=0;k<zd+4;k++){ for(i=0;i<nx+4;i++){ index_3d(i,j,k); fprintf( check, "%d\t%d\t%e\t\n", i,k,c_d_h[index]); }} fclose(check); } */ /////////////////////////////////////////////////////////////////////////////////////////// ////y hipLaunchKernelGGL(( boundary_ym) , dim3(xd+4) , dim3(zd+4) , 0, 0, c ); ////z hipLaunchKernelGGL(( boundary_zm2) , dim3(xd+4) , dim3(ny+4) , 0, 0, c,t_c ); hipMemcpy(t_c_h, t_c, sizeof(double)* tran_mac_2 , hipMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=110; MPI_Sendrecv ((void *)&t_c_h[end ], num_trans_m_2, MPI_DOUBLE, 
t_nbr, itag, ( void *)&t_c_h[startb], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=120; MPI_Sendrecv ((void *)&t_c_h[start ], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, ( void *)&t_c_h[endb ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, comm, istat); hipMemcpy(t_c, t_c_h, sizeof(double)* tran_mac_2 , hipMemcpyHostToDevice); hipLaunchKernelGGL(( boundary_zm2_undo) , dim3(xd+4) , dim3(ny+4) , 0, 0, c,t_c ); ////x hipLaunchKernelGGL(( boundary_xm2) , dim3(ny+4) , dim3(zd+4) , 0, 0, c,t_c_x ); hipMemcpy(t_c_x_h, t_c_x, sizeof(double)* tran_mac_2_x , hipMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=11; MPI_Sendrecv ((void *)&t_c_x_h[end_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_c_x_h[startb_x], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=12; MPI_Sendrecv ((void *)&t_c_x_h[start_x ], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_c_x_h[endb_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, comm, istat); hipMemcpy(t_c_x, t_c_x_h, sizeof(double)* tran_mac_2_x , hipMemcpyHostToDevice); hipLaunchKernelGGL(( boundary_xm2_undo) , dim3(ny+4) , dim3(zd+4) , 0, 0, c,t_c_x ); /////////////////////////////////////////////////////////////////////////////////////////// hipLaunchKernelGGL(( chemical) , dim3(grid_t0), dim3(block_t0), 0, 0, c,m,kappa,beta ); // chemical_b <<<grid_t0, block_t0>>>( c,m,b,kappa,beta,phic );//wettability /////////////////////////////////////////////////////////////////////////////////////////// ////y hipLaunchKernelGGL(( boundary_ym) , dim3(xd+4) , dim3(zd+4) , 0, 0, m ); ////z hipLaunchKernelGGL(( boundary_zm2), dim3(xd+4) , dim3(ny+4) , 0, 0, m,t_m ); hipMemcpy(t_m_h, t_m, sizeof(double)* tran_mac_2 , hipMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=110; MPI_Sendrecv ((void *)&t_m_h[end ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, ( void *)&t_m_h[startb], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=120; MPI_Sendrecv ((void *)&t_m_h[start ], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, ( void *)&t_m_h[endb ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, comm, istat); hipMemcpy(t_m, t_m_h, sizeof(double)* tran_mac_2 , hipMemcpyHostToDevice); hipLaunchKernelGGL(( boundary_zm2_undo) , dim3(xd+4) , dim3(ny+4) , 0, 0, m,t_m ); ////x hipLaunchKernelGGL(( boundary_xm2), dim3(ny+4) , dim3(zd+4) , 0, 0, m,t_m_x ); hipMemcpy(t_m_x_h, t_m_x, sizeof(double)* tran_mac_2_x , hipMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=11; MPI_Sendrecv ((void *)&t_m_x_h[end_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_m_x_h[startb_x], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=12; MPI_Sendrecv ((void *)&t_m_x_h[start_x ], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_m_x_h[endb_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, comm, istat); hipMemcpy(t_m_x, t_m_x_h, sizeof(double)* tran_mac_2_x , hipMemcpyHostToDevice); hipLaunchKernelGGL(( boundary_xm2_undo) , dim3(ny+4) , dim3(zd+4) , 0, 0, m,t_m_x ); /////////////////////////////////////////////////////////////////////////////////////////// ////y hipLaunchKernelGGL(( boundary_ym) , dim3(xd+4) , dim3(zd+4) , 0, 0, b ); ////z hipLaunchKernelGGL(( boundary_zm2) , dim3(xd+4) , dim3(ny+4) , 0, 0, b,t_b ); hipMemcpy(t_b_h, t_b, sizeof(double)* tran_mac_2 , hipMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=130; MPI_Sendrecv ((void *)&t_b_h[end ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, ( void *)&t_b_h[startb], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=140; MPI_Sendrecv ((void *)&t_b_h[start ], num_trans_m_2, MPI_DOUBLE, b_nbr, 
itag, ( void *)&t_b_h[endb ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, comm, istat); hipMemcpy(t_b, t_b_h, sizeof(double)* tran_mac_2 , hipMemcpyHostToDevice); hipLaunchKernelGGL(( boundary_zm2_undo) , dim3(xd+4) , dim3(ny+4) , 0, 0, b,t_b ); ////x hipLaunchKernelGGL(( boundary_xm2) , dim3(ny+4) , dim3(zd+4) , 0, 0, b,t_b_x ); hipMemcpy(t_b_x_h, t_b_x, sizeof(double)* tran_mac_2_x , hipMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=13; MPI_Sendrecv ((void *)&t_b_x_h[end_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_b_x_h[startb_x], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=14; MPI_Sendrecv ((void *)&t_b_x_h[start_x ], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_b_x_h[endb_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, comm, istat); hipMemcpy(t_b_x, t_b_x_h, sizeof(double)* tran_mac_2_x , hipMemcpyHostToDevice); hipLaunchKernelGGL(( boundary_xm2_undo) , dim3(ny+4) , dim3(zd+4) , 0, 0, b,t_b_x ); /////////////////////////////////////////////////////////////////////////////////////////// ////y hipLaunchKernelGGL(( boundary_ym) , dim3(xd+4) , dim3(zd+4) , 0, 0, p ); ////z hipLaunchKernelGGL(( boundary_zm2) , dim3(xd+4) , dim3(ny+4) , 0, 0, p,t_p ); hipMemcpy(t_p_h, t_p, sizeof(double)* tran_mac_2 , hipMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=150; MPI_Sendrecv ((void *)&t_p_h[end ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, ( void *)&t_p_h[startb], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=160; MPI_Sendrecv ((void *)&t_p_h[start ], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, ( void *)&t_p_h[endb ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, comm, istat); hipMemcpy(t_p, t_p_h, sizeof(double)* tran_mac_2 , hipMemcpyHostToDevice); hipLaunchKernelGGL(( boundary_zm2_undo) , dim3(xd+4) , dim3(ny+4) , 0, 0, p,t_p ); ////x hipLaunchKernelGGL(( boundary_xm2) , dim3(ny+4) , dim3(zd+4) , 0, 0, p,t_p_x ); hipMemcpy(t_p_x_h, t_p_x, sizeof(double)* tran_mac_2_x , hipMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=15; MPI_Sendrecv ((void *)&t_p_x_h[end_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_p_x_h[startb_x], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=16; MPI_Sendrecv ((void *)&t_p_x_h[start_x ], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_p_x_h[endb_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, comm, istat); hipMemcpy(t_p_x, t_p_x_h, sizeof(double)* tran_mac_2_x , hipMemcpyHostToDevice); hipLaunchKernelGGL(( boundary_xm2_undo) , dim3(ny+4) , dim3(zd+4) , 0, 0, p,t_p_x ); /////////////////////////////////////////////////////////////////////////////////////////// ////y hipLaunchKernelGGL(( boundary_ym) , dim3(xd+4) , dim3(zd+4) , 0, 0, u ); ////z hipLaunchKernelGGL(( boundary_zm1) , dim3(xd+4) , dim3(ny+4) , 0, 0, u,t_u ); hipMemcpy(t_u_h, t_u, sizeof(double)* tran_mac_1 , hipMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=170; MPI_Sendrecv ((void *)&t_u_h[end_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, ( void *)&t_u_h[startb_1], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=180; MPI_Sendrecv ((void *)&t_u_h[start_1 ], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, ( void *)&t_u_h[endb_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, comm, istat); hipMemcpy(t_u, t_u_h, sizeof(double)* tran_mac_1 , hipMemcpyHostToDevice); hipLaunchKernelGGL(( boundary_zm1_undo) , dim3(xd+4) , dim3(ny+4) , 0, 0, u,t_u ); ////x hipLaunchKernelGGL(( boundary_xm1) , dim3(ny+4) , dim3(zd+4) , 0, 0, u,t_u_x ); hipMemcpy(t_u_x_h, t_u_x, sizeof(double)* tran_mac_1_x , hipMemcpyDeviceToHost); 
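// exchange the one-cell-wide u halo across the x faces with the left/right ranks
// (MPI_Sendrecv both ways), then copy it back and unpack it on the device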
MPI_Barrier(MPI_COMM_WORLD); itag=17; MPI_Sendrecv ((void *)&t_u_x_h[end_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_u_x_h[startb_1_x], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=18; MPI_Sendrecv ((void *)&t_u_x_h[start_1_x ], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_u_x_h[endb_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, comm, istat); hipMemcpy(t_u_x, t_u_x_h, sizeof(double)* tran_mac_1_x , hipMemcpyHostToDevice); hipLaunchKernelGGL(( boundary_xm1_undo) , dim3(ny+4) , dim3(zd+4) , 0, 0, u,t_u_x ); /////////////////////////////////////////////////////////////////////////////////////////// ////y hipLaunchKernelGGL(( boundary_ym) , dim3(xd+4) , dim3(zd+4) , 0, 0, v ); ////z hipLaunchKernelGGL(( boundary_zm1) , dim3(xd+4) , dim3(ny+4) , 0, 0, v,t_v ); hipMemcpy(t_v_h, t_v, sizeof(double)* tran_mac_1 , hipMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=190; MPI_Sendrecv ((void *)&t_v_h[end_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, ( void *)&t_v_h[startb_1], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=200; MPI_Sendrecv ((void *)&t_v_h[start_1 ], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, ( void *)&t_v_h[endb_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, comm, istat); hipMemcpy(t_v, t_v_h, sizeof(double)* tran_mac_1 , hipMemcpyHostToDevice); hipLaunchKernelGGL(( boundary_zm1_undo) , dim3(xd+4) , dim3(ny+4) , 0, 0, v,t_v ); ////x hipLaunchKernelGGL(( boundary_xm1) , dim3(ny+4) , dim3(zd+4) , 0, 0, v,t_v_x ); hipMemcpy(t_v_x_h, t_v_x, sizeof(double)* tran_mac_1_x , hipMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=19; MPI_Sendrecv ((void *)&t_v_x_h[end_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_v_x_h[startb_1_x], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=20; MPI_Sendrecv ((void *)&t_v_x_h[start_1_x ], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_v_x_h[endb_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, comm, istat); hipMemcpy(t_v_x, t_v_x_h, sizeof(double)* tran_mac_1_x , hipMemcpyHostToDevice); hipLaunchKernelGGL(( boundary_xm1_undo) , dim3(ny+4) , dim3(zd+4) , 0, 0, v,t_v_x ); /////////////////////////////////////////////////////////////////////////////////////////// ////y hipLaunchKernelGGL(( boundary_ym) , dim3(xd+4) , dim3(zd+4) , 0, 0, w ); ////z hipLaunchKernelGGL(( boundary_zm1) , dim3(xd+4) , dim3(ny+4) , 0, 0, w,t_w ); hipMemcpy(t_w_h, t_w, sizeof(double)* tran_mac_1 , hipMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=210; MPI_Sendrecv ((void *)&t_w_h[end_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, ( void *)&t_w_h[startb_1], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=220; MPI_Sendrecv ((void *)&t_w_h[start_1 ], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, ( void *)&t_w_h[endb_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, comm, istat); hipMemcpy(t_w, t_w_h, sizeof(double)* tran_mac_1 , hipMemcpyHostToDevice); hipLaunchKernelGGL(( boundary_zm1_undo) , dim3(xd+4) , dim3(ny+4) , 0, 0, w,t_w ); ////x hipLaunchKernelGGL(( boundary_xm1) , dim3(ny+4) , dim3(zd+4) , 0, 0, w,t_w_x ); hipMemcpy(t_w_x_h, t_w_x, sizeof(double)* tran_mac_1_x , hipMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=21; MPI_Sendrecv ((void *)&t_w_x_h[end_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_w_x_h[startb_1_x], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=22; MPI_Sendrecv ((void *)&t_w_x_h[start_1_x ], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_w_x_h[endb_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, comm, istat); 
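// copy the received w halo back to the device and unpack it into the x ghost layer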
hipMemcpy(t_w_x, t_w_x_h, sizeof(double)* tran_mac_1_x , hipMemcpyHostToDevice); hipLaunchKernelGGL(( boundary_xm1_undo) , dim3(ny+4) , dim3(zd+4) , 0, 0, w,t_w_x ); MPI_Barrier(MPI_COMM_WORLD); /////////////////////////////////////////////////////////////////////////////////////////// hipLaunchKernelGGL(( gradient_cen) , dim3(grid_t0), dim3(block_t0),0, 0, gra_c,c); hipLaunchKernelGGL(( gradient_cen) , dim3(grid_t0), dim3(block_t0),0, 0, gra_m,m); hipDeviceSynchronize(); hipLaunchKernelGGL(( eq_collision) , dim3(grid_t0), dim3(block_t0) , 0, 0, g,h,c,m,p,gravity,gra_c,gra_m,u,v,w,mobi); hipDeviceSynchronize(); hipStream_t stream0,stream1; int leastPriority; int greatestPriority; hipDeviceGetStreamPriorityRange (&leastPriority,&greatestPriority); int priority=greatestPriority; hipStreamCreateWithPriority(&stream0,0,priority); hipStreamCreate(&stream1); //time hipEvent_t gpu_start,gpu_start_temp,gpu_stop,gpu_stop_temp; hipEventCreate(&gpu_start); hipEventCreate(&gpu_stop); hipEventCreate(&gpu_start_temp); hipEventCreate(&gpu_stop_temp); hipEventRecord(gpu_start_temp,0); hipEventRecord(gpu_start,0); /////////////////////////////////////////////////////////////////////////////////////////// // sstart // /////////////////////////////////////////////////////////////////////////////////////////// for(step=1;step<=stepall;step++){ hipLaunchKernelGGL(( eq_collision_bc) , dim3(grid_bc) , dim3(block_t0) , 0, stream0 , g,h,c,m,p,gravity,gra_c,gra_m,u,v,w,mobi ); hipLaunchKernelGGL(( eq_collision_bc_x) , dim3(grid_bc) , dim3(block_t0_x) , 0, stream0 , g,h,c,m,p,gravity,gra_c,gra_m,u,v,w,mobi ); hipLaunchKernelGGL(( boundary_yd_bc) , dim3(xd) , dim3(q) , 0, stream0 , g,h ); hipLaunchKernelGGL(( boundary_yd_bc_x) , dim3(zd) , dim3(q) , 0, stream0 , g,h ); ////z... hipLaunchKernelGGL(( boundary_zd) , dim3(xd+2) , dim3(ny+2) , 0, stream0 , g,t_g ); hipLaunchKernelGGL(( boundary_zd) , dim3(xd+2) , dim3(ny+2) , 0, stream0 , h,t_h ); hipLaunchKernelGGL(( eq_collision_in) , dim3(grid_in) , dim3(xd-2) , 0, stream1 , g,h,c,m,p,gravity,gra_c,gra_m,u,v,w,mobi ); ////...z hipMemcpyAsync(t_g_h, t_g, sizeof(double)*tran_difun , hipMemcpyDeviceToHost,stream0); hipMemcpyAsync(t_h_h, t_h, sizeof(double)*tran_difun , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=230; MPI_Sendrecv ((void *)&t_g_h[end_d ], num_trans_d, MPI_DOUBLE, t_nbr, itag, ( void *)&t_g_h[startb_d], num_trans_d, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=231; MPI_Sendrecv ((void *)&t_g_h[start_d ], num_trans_d, MPI_DOUBLE, b_nbr, itag, ( void *)&t_g_h[endb_d ], num_trans_d, MPI_DOUBLE, t_nbr, itag, comm, istat); itag=232; MPI_Sendrecv ((void *)&t_h_h[end_d ], num_trans_d, MPI_DOUBLE, t_nbr, itag, ( void *)&t_h_h[startb_d], num_trans_d, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=233; MPI_Sendrecv ((void *)&t_h_h[start_d ], num_trans_d, MPI_DOUBLE, b_nbr, itag, ( void *)&t_h_h[endb_d ], num_trans_d, MPI_DOUBLE, t_nbr, itag, comm, istat); hipStreamSynchronize(stream0); hipMemcpyAsync(t_g, t_g_h, sizeof(double)*tran_difun , hipMemcpyHostToDevice,stream0); hipMemcpyAsync(t_h, t_h_h, sizeof(double)*tran_difun , hipMemcpyHostToDevice,stream0); hipLaunchKernelGGL(( boundary_zd_undo) , dim3(xd+2) , dim3(ny+2) , 0, stream0 , g,t_g ); hipLaunchKernelGGL(( boundary_zd_undo) , dim3(xd+2) , dim3(ny+2) , 0, stream0 , h,t_h ); ////x... 
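// x-direction exchange of the packed distribution functions g and h on stream0,
// overlapped with the interior y-boundary kernel (boundary_yd_in) on stream1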
hipLaunchKernelGGL(( boundary_xd) , dim3(ny+2) , dim3(zd+2) , 0, stream0 , g,t_g_x ); hipLaunchKernelGGL(( boundary_xd) , dim3(ny+2) , dim3(zd+2) , 0, stream0 , h,t_h_x ); hipLaunchKernelGGL(( boundary_yd_in) , dim3(xd-2) , dim3(zd-2) , 0, stream1 , g,h ); ////...x hipMemcpyAsync(t_g_x_h, t_g_x, sizeof(double)*tran_difun_x , hipMemcpyDeviceToHost,stream0); hipMemcpyAsync(t_h_x_h, t_h_x, sizeof(double)*tran_difun_x , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=23; MPI_Sendrecv ((void *)&t_g_x_h[end_d_x ], num_trans_d_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_g_x_h[startb_d_x], num_trans_d_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=24; MPI_Sendrecv ((void *)&t_g_x_h[start_d_x ], num_trans_d_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_g_x_h[endb_d_x ], num_trans_d_x, MPI_DOUBLE, r_nbr, itag, comm, istat); itag=25; MPI_Sendrecv ((void *)&t_h_x_h[end_d_x ], num_trans_d_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_h_x_h[startb_d_x], num_trans_d_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=26; MPI_Sendrecv ((void *)&t_h_x_h[start_d_x ], num_trans_d_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_h_x_h[endb_d_x ], num_trans_d_x, MPI_DOUBLE, r_nbr, itag, comm, istat); hipStreamSynchronize(stream0); hipMemcpyAsync(t_g_x, t_g_x_h, sizeof(double)*tran_difun_x , hipMemcpyHostToDevice,stream0); hipMemcpyAsync(t_h_x, t_h_x_h, sizeof(double)*tran_difun_x , hipMemcpyHostToDevice,stream0); hipLaunchKernelGGL(( boundary_xd_undo) , dim3(ny+2) , dim3(zd+2) , 0, stream0 , g,t_g_x ); hipLaunchKernelGGL(( boundary_xd_undo) , dim3(ny+2) , dim3(zd+2) , 0, stream0 , h,t_h_x ); /////////////////////////////////////////////////////////////////////////////////////////// hipDeviceSynchronize(); hipLaunchKernelGGL(( macro_h_bc) , dim3(grid_bc ) , dim3(block_t0) , 0, stream0 , h,h_t,c ); hipLaunchKernelGGL(( macro_h_bc_x) , dim3(grid_bc) , dim3(zd-4 ) , 0, stream0 , h,h_t,c ); hipLaunchKernelGGL(( boundary_ym_bc) , dim3(1 ) , dim3(xd ) , 0, stream0 , c ); hipLaunchKernelGGL(( boundary_ym_bc_x) , dim3(1 ) , dim3(zd ) , 0, stream0 , c ); ////z... hipLaunchKernelGGL(( boundary_zm2) , dim3(xd+4 ) , dim3(ny+4) , 0, stream0 , c,t_c ); hipLaunchKernelGGL(( macro_h_in) , dim3(grid_in2 ) , dim3(xd-4) , 0, stream1 , h,h_t,c ); ////...z hipMemcpyAsync(t_c_h, t_c, sizeof(double)* tran_mac_2 , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=110; MPI_Sendrecv ((void *)&t_c_h[end ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, ( void *)&t_c_h[startb], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=120; MPI_Sendrecv ((void *)&t_c_h[start ], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, ( void *)&t_c_h[endb ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, comm, istat); hipStreamSynchronize(stream0); hipMemcpyAsync(t_c, t_c_h, sizeof(double)* tran_mac_2 , hipMemcpyHostToDevice,stream0); hipLaunchKernelGGL(( boundary_zm2_undo) , dim3(xd+4) , dim3(ny+4) , 0, stream0 , c,t_c ); ////x... 
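// x-direction halo exchange for the order parameter c, then the chemical-potential (m) update: boundary cells on stream0, interior cells on stream1.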
hipLaunchKernelGGL(( boundary_xm2) , dim3(ny+4) , dim3(zd+4 ) , 0, stream0 , c,t_c_x ); hipLaunchKernelGGL(( boundary_ym_in) , dim3(xd-4) ,dim3( zd-4) , 0, stream1 , c ); ////...x hipMemcpyAsync(t_c_x_h, t_c_x, sizeof(double)* tran_mac_2_x , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=11; MPI_Sendrecv ((void *)&t_c_x_h[end_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_c_x_h[startb_x], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=12; MPI_Sendrecv ((void *)&t_c_x_h[start_x ], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_c_x_h[endb_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, comm, istat); hipStreamSynchronize(stream0); hipMemcpyAsync(t_c_x, t_c_x_h, sizeof(double)* tran_mac_2_x , hipMemcpyHostToDevice,stream0); hipLaunchKernelGGL(( boundary_xm2_undo) , dim3(ny+4) , dim3(zd+4) , 0, stream0 , c,t_c_x ); /////////////////////////////////////////////////////////////////////////////////////////// hipDeviceSynchronize(); hipLaunchKernelGGL(( chemical_bc) , dim3(grid_bc ) , dim3(block_t0) , 0, stream0 , c,m,kappa,beta ); hipLaunchKernelGGL(( chemical_bc_x) , dim3(grid_bc ) , dim3(zd-4 ) , 0, stream0 , c,m,kappa,beta ); hipLaunchKernelGGL(( boundary_ym_bc) , dim3(1 ) , dim3(xd ) , 0, stream0 , m ); hipLaunchKernelGGL(( boundary_ym_bc_x) , dim3(1 ) , dim3(zd ) , 0, stream0 , m ); ////z... hipLaunchKernelGGL(( boundary_zm2) , dim3(xd+4 ) , dim3(ny+4 ) , 0, stream0 , m,t_m ); hipLaunchKernelGGL(( chemical_in) , dim3(grid_in2 ) , dim3(xd-4 ) , 0, stream1 , c,m,kappa,beta ); ////...z hipMemcpyAsync(t_m_h, t_m, sizeof(double)* tran_mac_2 , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=110; MPI_Sendrecv ((void *)&t_m_h[end ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, ( void *)&t_m_h[startb], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=120; MPI_Sendrecv ((void *)&t_m_h[start ], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, ( void *)&t_m_h[endb ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, comm, istat); hipStreamSynchronize(stream0); hipMemcpyAsync(t_m, t_m_h, sizeof(double)* tran_mac_2 , hipMemcpyHostToDevice,stream0); hipLaunchKernelGGL(( boundary_zm2_undo) , dim3(xd+4) , dim3(ny+4) , 0, stream0 , m,t_m ); ////x... 
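// x-direction halo exchange for the chemical potential m, overlapped on stream1 with the interior m update and the recomputation of the c gradient.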
hipLaunchKernelGGL(( boundary_xm2) , dim3(ny+4) , dim3(zd+4 ) , 0, stream0 , m,t_m_x ); hipLaunchKernelGGL(( boundary_ym_in) , dim3(xd-4 ) , dim3(zd-4 ) , 0, stream1 , m ); hipLaunchKernelGGL(( gradient_cen) , dim3(grid_t0 ) , dim3(block_t0) , 0, stream1 , gra_c,c ); ////...x hipMemcpyAsync(t_m_x_h, t_m_x, sizeof(double)* tran_mac_2_x , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=11; MPI_Sendrecv ((void *)&t_m_x_h[end_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_m_x_h[startb_x], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=12; MPI_Sendrecv ((void *)&t_m_x_h[start_x ], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_m_x_h[endb_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, comm, istat); hipStreamSynchronize(stream0); hipMemcpyAsync(t_m_x, t_m_x_h, sizeof(double)* tran_mac_2_x , hipMemcpyHostToDevice,stream0); hipLaunchKernelGGL(( boundary_xm2_undo) , dim3(ny+4) , dim3(zd+4 ) , 0, stream0 , m,t_m_x ); /////////////////////////////////////////////////////////////////////////////////////////// hipDeviceSynchronize(); hipLaunchKernelGGL(( gradient_cen) , dim3(grid_t0 ) , dim3(block_t0) , 0, 0, gra_m,m ); hipLaunchKernelGGL(( macro_g_bc) , dim3(grid_bc ) , dim3(block_t0) , 0, stream0 , g,g_t,c,m,p,gra_c,gra_m,u,v,w ); hipLaunchKernelGGL(( macro_g_bc_x) , dim3(grid_bc ) , dim3(zd-4) , 0, stream0 , g,g_t,c,m,p,gra_c,gra_m,u,v,w ); ////y bc hipLaunchKernelGGL(( boundary_ym_bc) , dim3(1 ) , dim3(xd) , 0, stream0 , u ); hipLaunchKernelGGL(( boundary_ym_bc) , dim3(1 ) , dim3(xd) , 0, stream0 , v ); hipLaunchKernelGGL(( boundary_ym_bc) , dim3(1 ) , dim3(xd) , 0, stream0 , w ); hipLaunchKernelGGL(( boundary_ym_bc) , dim3(1 ) , dim3(xd) , 0, stream0 , p ); hipLaunchKernelGGL(( boundary_ym_bc_x) , dim3(1 ) , dim3(zd) , 0, stream0 , u ); hipLaunchKernelGGL(( boundary_ym_bc_x) , dim3(1 ) , dim3(zd) , 0, stream0 , v ); hipLaunchKernelGGL(( boundary_ym_bc_x) , dim3(1 ) , dim3(zd) , 0, stream0 , w ); hipLaunchKernelGGL(( boundary_ym_bc_x) , dim3(1 ) , dim3(zd) , 0, stream0 , p ); ////z... 
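// z-direction halo exchange for the macroscopic fields p, u, v, w with the top/bottom ranks, overlapped with the interior momentum/pressure update (macro_g_in) on stream1.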
hipLaunchKernelGGL(( boundary_zm2) , dim3(xd+4) , dim3(ny+4 ) , 0, stream0 , p,t_p ); hipLaunchKernelGGL(( boundary_zm1) , dim3(xd+4) , dim3(ny+4 ) , 0, stream0 , u,t_u ); hipLaunchKernelGGL(( boundary_zm1) , dim3(xd+4) , dim3(ny+4 ) , 0, stream0 , v,t_v ); hipLaunchKernelGGL(( boundary_zm1) , dim3(xd+4) , dim3(ny+4 ) , 0, stream0 , w,t_w ); hipLaunchKernelGGL(( macro_g_in) , dim3(grid_in2 ) , dim3(xd-4) , 0, stream1 , g,g_t,c,m,p,gra_c,gra_m,u,v,w); ////...z hipMemcpyAsync(t_p_h, t_p, sizeof(double)* tran_mac_2 , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=150; MPI_Sendrecv ((void *)&t_p_h[end ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, ( void *)&t_p_h[startb], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=160; MPI_Sendrecv ((void *)&t_p_h[start ], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, ( void *)&t_p_h[endb ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, comm, istat); hipMemcpyAsync(t_u_h, t_u, sizeof(double)* tran_mac_1 , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=170; MPI_Sendrecv ((void *)&t_u_h[end_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, ( void *)&t_u_h[startb_1], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=180; MPI_Sendrecv ((void *)&t_u_h[start_1 ], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, ( void *)&t_u_h[endb_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, comm, istat); hipMemcpyAsync(t_v_h, t_v, sizeof(double)* tran_mac_1 , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=190; MPI_Sendrecv ((void *)&t_v_h[end_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, ( void *)&t_v_h[startb_1], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=200; MPI_Sendrecv ((void *)&t_v_h[start_1 ], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, ( void *)&t_v_h[endb_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, comm, istat); hipMemcpyAsync(t_w_h, t_w, sizeof(double)* tran_mac_1 , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=210; MPI_Sendrecv ((void *)&t_w_h[end_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, ( void *)&t_w_h[startb_1], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=220; MPI_Sendrecv ((void *)&t_w_h[start_1 ], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, ( void *)&t_w_h[endb_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, comm, istat); hipStreamSynchronize(stream0); hipMemcpyAsync(t_p, t_p_h, sizeof(double)* tran_mac_2 , hipMemcpyHostToDevice,stream0); hipMemcpyAsync(t_u, t_u_h, sizeof(double)* tran_mac_1 , hipMemcpyHostToDevice,stream0); hipMemcpyAsync(t_v, t_v_h, sizeof(double)* tran_mac_1 , hipMemcpyHostToDevice,stream0); hipMemcpyAsync(t_w, t_w_h, sizeof(double)* tran_mac_1 , hipMemcpyHostToDevice,stream0); hipStreamSynchronize(stream0); hipLaunchKernelGGL(( boundary_zm2_undo) , dim3(xd+4) , dim3(ny+4) , 0, stream0 , p,t_p ); hipLaunchKernelGGL(( boundary_zm1_undo) , dim3(xd+4) , dim3(ny+4) , 0, stream0 , u,t_u ); hipLaunchKernelGGL(( boundary_zm1_undo) , dim3(xd+4) , dim3(ny+4) , 0, stream0 , v,t_v ); hipLaunchKernelGGL(( boundary_zm1_undo) , dim3(xd+4) , dim3(ny+4) , 0, stream0 , w,t_w ); ////x... 
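// x-direction halo exchange for p, u, v, w with the left/right ranks; this completes the first half of the two-step scheme that alternates between the (g, h) and (g_t, h_t) buffers.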
hipLaunchKernelGGL(( boundary_xm2) , dim3(ny+4) , dim3(zd+4) , 0, stream0 , p,t_p_x ); hipLaunchKernelGGL(( boundary_xm1) , dim3(ny+4) , dim3(zd+4) , 0, stream0 , u,t_u_x ); hipLaunchKernelGGL(( boundary_xm1) , dim3(ny+4) , dim3(zd+4) , 0, stream0 , v,t_v_x ); hipLaunchKernelGGL(( boundary_xm1) , dim3(ny+4) , dim3(zd+4) , 0, stream0 , w,t_w_x ); hipLaunchKernelGGL(( boundary_ym_in) , dim3(xd-4) ,dim3( zd-4) , 0, stream1 , p ); hipLaunchKernelGGL(( boundary_ym_in) , dim3(xd-4) ,dim3( zd-4) , 0, stream1 , u ); hipLaunchKernelGGL(( boundary_ym_in) , dim3(xd-4) ,dim3( zd-4) , 0, stream1 , v ); hipLaunchKernelGGL(( boundary_ym_in) , dim3(xd-4) ,dim3( zd-4) , 0, stream1 , w ); ////...x hipMemcpyAsync(t_p_x_h, t_p_x, sizeof(double)* tran_mac_2_x , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=15; MPI_Sendrecv ((void *)&t_p_x_h[end_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_p_x_h[startb_x], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=16; MPI_Sendrecv ((void *)&t_p_x_h[start_x ], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_p_x_h[endb_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, comm, istat); hipMemcpyAsync(t_u_x_h, t_u_x, sizeof(double)* tran_mac_1_x , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=17; MPI_Sendrecv ((void *)&t_u_x_h[end_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_u_x_h[startb_1_x], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=18; MPI_Sendrecv ((void *)&t_u_x_h[start_1_x ], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_u_x_h[endb_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, comm, istat); hipMemcpyAsync(t_v_x_h, t_v_x, sizeof(double)* tran_mac_1_x , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=19; MPI_Sendrecv ((void *)&t_v_x_h[end_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_v_x_h[startb_1_x], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=20; MPI_Sendrecv ((void *)&t_v_x_h[start_1_x ], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_v_x_h[endb_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, comm, istat); hipMemcpyAsync(t_w_x_h, t_w_x, sizeof(double)* tran_mac_1_x , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=21; MPI_Sendrecv ((void *)&t_w_x_h[end_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_w_x_h[startb_1_x], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=22; MPI_Sendrecv ((void *)&t_w_x_h[start_1_x ], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_w_x_h[endb_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, comm, istat); hipStreamSynchronize(stream0); hipMemcpyAsync(t_p_x, t_p_x_h, sizeof(double)* tran_mac_2_x , hipMemcpyHostToDevice,stream0); hipMemcpyAsync(t_u_x, t_u_x_h, sizeof(double)* tran_mac_1_x , hipMemcpyHostToDevice,stream0); hipMemcpyAsync(t_v_x, t_v_x_h, sizeof(double)* tran_mac_1_x , hipMemcpyHostToDevice,stream0); hipMemcpyAsync(t_w_x, t_w_x_h, sizeof(double)* tran_mac_1_x , hipMemcpyHostToDevice,stream0); hipStreamSynchronize(stream0); hipLaunchKernelGGL(( boundary_xm2_undo) , dim3(ny+4) , dim3(zd+4) , 0, stream0 , p,t_p_x ); hipLaunchKernelGGL(( boundary_xm1_undo) , dim3(ny+4) , dim3(zd+4) , 0, stream0 , u,t_u_x ); hipLaunchKernelGGL(( boundary_xm1_undo) , dim3(ny+4) , dim3(zd+4) , 0, stream0 , v,t_v_x ); hipLaunchKernelGGL(( boundary_xm1_undo) , dim3(ny+4) , dim3(zd+4) , 0, stream0 , w,t_w_x ); /////////////////////////////////////////////////////////////////////////////////////////// // nnext time step // 
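// Second half of the time step: step is advanced again and the same collision / halo-exchange / macroscopic-update sequence is repeated with the roles of (g, h) and (g_t, h_t) swapped.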
/////////////////////////////////////////////////////////////////////////////////////////// step=step+1; hipDeviceSynchronize(); hipLaunchKernelGGL(( eq_collision_bc) , dim3(grid_bc) , dim3(block_t0) , 0, stream0 , g_t,h_t,c,m,p,gravity,gra_c,gra_m,u,v,w,mobi ); hipLaunchKernelGGL(( eq_collision_bc_x) , dim3(grid_bc) , dim3(block_t0_x) , 0, stream0 , g_t,h_t,c,m,p,gravity,gra_c,gra_m,u,v,w,mobi ); hipLaunchKernelGGL(( boundary_yd_bc) , dim3(xd) , dim3(q) , 0, stream0 , g_t,h_t ); hipLaunchKernelGGL(( boundary_yd_bc_x) , dim3(zd) , dim3(q) , 0, stream0 , g_t,h_t ); ////z... hipLaunchKernelGGL(( boundary_zd) , dim3(xd+2) , dim3(ny+2) , 0, stream0 , g_t,t_g ); hipLaunchKernelGGL(( boundary_zd) , dim3(xd+2) , dim3(ny+2) , 0, stream0 , h_t,t_h ); hipLaunchKernelGGL(( eq_collision_in) , dim3(grid_in) , dim3(xd-2) , 0, stream1 , g_t,h_t,c,m,p,gravity,gra_c,gra_m,u,v,w,mobi ); ////...z hipMemcpyAsync(t_g_h, t_g, sizeof(double)*tran_difun , hipMemcpyDeviceToHost,stream0); hipMemcpyAsync(t_h_h, t_h, sizeof(double)*tran_difun , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=230; MPI_Sendrecv ((void *)&t_g_h[end_d ], num_trans_d, MPI_DOUBLE, t_nbr, itag, ( void *)&t_g_h[startb_d], num_trans_d, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=231; MPI_Sendrecv ((void *)&t_g_h[start_d ], num_trans_d, MPI_DOUBLE, b_nbr, itag, ( void *)&t_g_h[endb_d ], num_trans_d, MPI_DOUBLE, t_nbr, itag, comm, istat); itag=232; MPI_Sendrecv ((void *)&t_h_h[end_d ], num_trans_d, MPI_DOUBLE, t_nbr, itag, ( void *)&t_h_h[startb_d], num_trans_d, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=233; MPI_Sendrecv ((void *)&t_h_h[start_d ], num_trans_d, MPI_DOUBLE, b_nbr, itag, ( void *)&t_h_h[endb_d ], num_trans_d, MPI_DOUBLE, t_nbr, itag, comm, istat); hipStreamSynchronize(stream0); hipMemcpyAsync(t_g, t_g_h, sizeof(double)*tran_difun , hipMemcpyHostToDevice,stream0); hipMemcpyAsync(t_h, t_h_h, sizeof(double)*tran_difun , hipMemcpyHostToDevice,stream0); hipLaunchKernelGGL(( boundary_zd_undo) , dim3(xd+2) , dim3(ny+2) , 0, stream0 , g_t,t_g ); hipLaunchKernelGGL(( boundary_zd_undo) , dim3(xd+2) , dim3(ny+2) , 0, stream0 , h_t,t_h ); ////x... 
hipLaunchKernelGGL(( boundary_xd) , dim3(ny+2) , dim3(zd+2) , 0, stream0 , g_t,t_g_x ); hipLaunchKernelGGL(( boundary_xd) , dim3(ny+2) , dim3(zd+2) , 0, stream0 , h_t,t_h_x ); hipLaunchKernelGGL(( boundary_yd_in) , dim3(xd-2) , dim3(zd-2) , 0, stream1 , g_t,h_t ); ////...x hipMemcpyAsync(t_g_x_h, t_g_x, sizeof(double)*tran_difun_x , hipMemcpyDeviceToHost,stream0); hipMemcpyAsync(t_h_x_h, t_h_x, sizeof(double)*tran_difun_x , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=23; MPI_Sendrecv ((void *)&t_g_x_h[end_d_x ], num_trans_d_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_g_x_h[startb_d_x], num_trans_d_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=24; MPI_Sendrecv ((void *)&t_g_x_h[start_d_x ], num_trans_d_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_g_x_h[endb_d_x ], num_trans_d_x, MPI_DOUBLE, r_nbr, itag, comm, istat); itag=25; MPI_Sendrecv ((void *)&t_h_x_h[end_d_x ], num_trans_d_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_h_x_h[startb_d_x], num_trans_d_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=26; MPI_Sendrecv ((void *)&t_h_x_h[start_d_x ], num_trans_d_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_h_x_h[endb_d_x ], num_trans_d_x, MPI_DOUBLE, r_nbr, itag, comm, istat); hipStreamSynchronize(stream0); hipMemcpyAsync(t_g_x, t_g_x_h, sizeof(double)*tran_difun_x , hipMemcpyHostToDevice,stream0); hipMemcpyAsync(t_h_x, t_h_x_h, sizeof(double)*tran_difun_x , hipMemcpyHostToDevice,stream0); hipLaunchKernelGGL(( boundary_xd_undo) , dim3(ny+2) , dim3(zd+2) , 0, stream0 , g_t,t_g_x ); hipLaunchKernelGGL(( boundary_xd_undo) , dim3(ny+2) , dim3(zd+2) , 0, stream0 , h_t,t_h_x ); /////////////////////////////////////////////////////////////////////////////////////////// hipDeviceSynchronize(); hipLaunchKernelGGL(( macro_h_bc) , dim3(grid_bc ) , dim3(block_t0) , 0, stream0 , h_t,h,c ); hipLaunchKernelGGL(( macro_h_bc_x) , dim3(grid_bc) , dim3(zd-4 ) , 0, stream0 , h_t,h,c ); hipLaunchKernelGGL(( boundary_ym_bc) , dim3(1 ) , dim3(xd ) , 0, stream0 , c ); hipLaunchKernelGGL(( boundary_ym_bc_x) , dim3(1 ) , dim3(zd ) , 0, stream0 , c ); ////z... hipLaunchKernelGGL(( boundary_zm2) , dim3(xd+4 ) , dim3(ny+4) , 0, stream0 , c,t_c ); hipLaunchKernelGGL(( macro_h_in) , dim3(grid_in2 ) , dim3(xd-4) , 0, stream1 , h_t,h,c ); ////...z hipMemcpyAsync(t_c_h, t_c, sizeof(double)* tran_mac_2 , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=110; MPI_Sendrecv ((void *)&t_c_h[end ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, ( void *)&t_c_h[startb], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=120; MPI_Sendrecv ((void *)&t_c_h[start ], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, ( void *)&t_c_h[endb ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, comm, istat); hipStreamSynchronize(stream0); hipMemcpyAsync(t_c, t_c_h, sizeof(double)* tran_mac_2 , hipMemcpyHostToDevice,stream0); hipLaunchKernelGGL(( boundary_zm2_undo) , dim3(xd+4) , dim3(ny+4) , 0, stream0 , c,t_c ); ////x... 
hipLaunchKernelGGL(( boundary_xm2) , dim3(ny+4) , dim3(zd+4 ) , 0, stream0 , c,t_c_x ); hipLaunchKernelGGL(( boundary_ym_in) , dim3(xd-4) ,dim3( zd-4) , 0, stream1 , c ); ////...x hipMemcpyAsync(t_c_x_h, t_c_x, sizeof(double)* tran_mac_2_x , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=11; MPI_Sendrecv ((void *)&t_c_x_h[end_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_c_x_h[startb_x], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=12; MPI_Sendrecv ((void *)&t_c_x_h[start_x ], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_c_x_h[endb_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, comm, istat); hipStreamSynchronize(stream0); hipMemcpyAsync(t_c_x, t_c_x_h, sizeof(double)* tran_mac_2_x , hipMemcpyHostToDevice,stream0); hipLaunchKernelGGL(( boundary_xm2_undo) , dim3(ny+4) , dim3(zd+4) , 0, stream0 , c,t_c_x ); /////////////////////////////////////////////////////////////////////////////////////////// hipDeviceSynchronize(); hipLaunchKernelGGL(( chemical_bc) , dim3(grid_bc ) , dim3(block_t0) , 0, stream0 , c,m,kappa,beta ); hipLaunchKernelGGL(( chemical_bc_x) , dim3(grid_bc ) , dim3(zd-4 ) , 0, stream0 , c,m,kappa,beta ); hipLaunchKernelGGL(( boundary_ym_bc) , dim3(1 ) , dim3(xd ) , 0, stream0 , m ); hipLaunchKernelGGL(( boundary_ym_bc_x) , dim3(1 ) , dim3(zd ) , 0, stream0 , m ); ////z... hipLaunchKernelGGL(( boundary_zm2) , dim3(xd+4 ) , dim3(ny+4 ) , 0, stream0 , m,t_m ); hipLaunchKernelGGL(( chemical_in) , dim3(grid_in2 ) , dim3(xd-4 ) , 0, stream1 , c,m,kappa,beta ); ////...z hipMemcpyAsync(t_m_h, t_m, sizeof(double)* tran_mac_2 , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=110; MPI_Sendrecv ((void *)&t_m_h[end ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, ( void *)&t_m_h[startb], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=120; MPI_Sendrecv ((void *)&t_m_h[start ], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, ( void *)&t_m_h[endb ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, comm, istat); hipStreamSynchronize(stream0); hipMemcpyAsync(t_m, t_m_h, sizeof(double)* tran_mac_2 , hipMemcpyHostToDevice,stream0); hipLaunchKernelGGL(( boundary_zm2_undo) , dim3(xd+4) , dim3(ny+4) , 0, stream0 , m,t_m ); ////x... 
hipLaunchKernelGGL(( boundary_xm2) , dim3(ny+4) , dim3(zd+4 ) , 0, stream0 , m,t_m_x ); hipLaunchKernelGGL(( boundary_ym_in) , dim3(xd-4 ) , dim3(zd-4 ) , 0, stream1 , m ); hipLaunchKernelGGL(( gradient_cen) , dim3(grid_t0 ) , dim3(block_t0) , 0, stream1 , gra_c,c ); ////...x hipMemcpyAsync(t_m_x_h, t_m_x, sizeof(double)* tran_mac_2_x , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=11; MPI_Sendrecv ((void *)&t_m_x_h[end_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_m_x_h[startb_x], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=12; MPI_Sendrecv ((void *)&t_m_x_h[start_x ], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_m_x_h[endb_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, comm, istat); hipStreamSynchronize(stream0); hipMemcpyAsync(t_m_x, t_m_x_h, sizeof(double)* tran_mac_2_x , hipMemcpyHostToDevice,stream0); hipLaunchKernelGGL(( boundary_xm2_undo) , dim3(ny+4) , dim3(zd+4 ) , 0, stream0 , m,t_m_x ); /////////////////////////////////////////////////////////////////////////////////////////// hipDeviceSynchronize(); hipLaunchKernelGGL(( gradient_cen) , dim3(grid_t0 ) , dim3(block_t0) , 0, 0, gra_m,m ); hipLaunchKernelGGL(( macro_g_bc) , dim3(grid_bc ) , dim3(block_t0) , 0, stream0 , g_t,g,c,m,p,gra_c,gra_m,u,v,w ); hipLaunchKernelGGL(( macro_g_bc_x) , dim3(grid_bc ) , dim3(zd-4) , 0, stream0 , g_t,g,c,m,p,gra_c,gra_m,u,v,w ); ////y bc hipLaunchKernelGGL(( boundary_ym_bc) , dim3(1 ) , dim3(xd) , 0, stream0 , u ); hipLaunchKernelGGL(( boundary_ym_bc) , dim3(1 ) , dim3(xd) , 0, stream0 , v ); hipLaunchKernelGGL(( boundary_ym_bc) , dim3(1 ) , dim3(xd) , 0, stream0 , w ); hipLaunchKernelGGL(( boundary_ym_bc) , dim3(1 ) , dim3(xd) , 0, stream0 , p ); hipLaunchKernelGGL(( boundary_ym_bc_x) , dim3(1 ) , dim3(zd) , 0, stream0 , u ); hipLaunchKernelGGL(( boundary_ym_bc_x) , dim3(1 ) , dim3(zd) , 0, stream0 , v ); hipLaunchKernelGGL(( boundary_ym_bc_x) , dim3(1 ) , dim3(zd) , 0, stream0 , w ); hipLaunchKernelGGL(( boundary_ym_bc_x) , dim3(1 ) , dim3(zd) , 0, stream0 , p ); ////z... 
hipLaunchKernelGGL(( boundary_zm2) , dim3(xd+4) , dim3(ny+4 ) , 0, stream0 , p,t_p ); hipLaunchKernelGGL(( boundary_zm1) , dim3(xd+4) , dim3(ny+4 ) , 0, stream0 , u,t_u ); hipLaunchKernelGGL(( boundary_zm1) , dim3(xd+4) , dim3(ny+4 ) , 0, stream0 , v,t_v ); hipLaunchKernelGGL(( boundary_zm1) , dim3(xd+4) , dim3(ny+4 ) , 0, stream0 , w,t_w ); hipLaunchKernelGGL(( macro_g_in) , dim3(grid_in2 ) , dim3(xd-4) , 0, stream1 , g_t,g,c,m,p,gra_c,gra_m,u,v,w); ////...z hipMemcpyAsync(t_p_h, t_p, sizeof(double)* tran_mac_2 , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=150; MPI_Sendrecv ((void *)&t_p_h[end ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, ( void *)&t_p_h[startb], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=160; MPI_Sendrecv ((void *)&t_p_h[start ], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, ( void *)&t_p_h[endb ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, comm, istat); hipMemcpyAsync(t_u_h, t_u, sizeof(double)* tran_mac_1 , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=170; MPI_Sendrecv ((void *)&t_u_h[end_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, ( void *)&t_u_h[startb_1], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=180; MPI_Sendrecv ((void *)&t_u_h[start_1 ], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, ( void *)&t_u_h[endb_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, comm, istat); hipMemcpyAsync(t_v_h, t_v, sizeof(double)* tran_mac_1 , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=190; MPI_Sendrecv ((void *)&t_v_h[end_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, ( void *)&t_v_h[startb_1], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=200; MPI_Sendrecv ((void *)&t_v_h[start_1 ], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, ( void *)&t_v_h[endb_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, comm, istat); hipMemcpyAsync(t_w_h, t_w, sizeof(double)* tran_mac_1 , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=210; MPI_Sendrecv ((void *)&t_w_h[end_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, ( void *)&t_w_h[startb_1], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=220; MPI_Sendrecv ((void *)&t_w_h[start_1 ], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, ( void *)&t_w_h[endb_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, comm, istat); hipStreamSynchronize(stream0); hipMemcpyAsync(t_p, t_p_h, sizeof(double)* tran_mac_2 , hipMemcpyHostToDevice,stream0); hipMemcpyAsync(t_u, t_u_h, sizeof(double)* tran_mac_1 , hipMemcpyHostToDevice,stream0); hipMemcpyAsync(t_v, t_v_h, sizeof(double)* tran_mac_1 , hipMemcpyHostToDevice,stream0); hipMemcpyAsync(t_w, t_w_h, sizeof(double)* tran_mac_1 , hipMemcpyHostToDevice,stream0); hipStreamSynchronize(stream0); hipLaunchKernelGGL(( boundary_zm2_undo) , dim3(xd+4) , dim3(ny+4) , 0, stream0 , p,t_p ); hipLaunchKernelGGL(( boundary_zm1_undo) , dim3(xd+4) , dim3(ny+4) , 0, stream0 , u,t_u ); hipLaunchKernelGGL(( boundary_zm1_undo) , dim3(xd+4) , dim3(ny+4) , 0, stream0 , v,t_v ); hipLaunchKernelGGL(( boundary_zm1_undo) , dim3(xd+4) , dim3(ny+4) , 0, stream0 , w,t_w ); ////x... 
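// x-direction halo exchange for p, u, v, w in the second half-step, followed by the Reynolds-number diagnostic (condition == 0) and the field output written every iprint steps.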
hipLaunchKernelGGL(( boundary_xm2) , dim3(ny+4) , dim3(zd+4) , 0, stream0 , p,t_p_x ); hipLaunchKernelGGL(( boundary_xm1) , dim3(ny+4) , dim3(zd+4) , 0, stream0 , u,t_u_x ); hipLaunchKernelGGL(( boundary_xm1) , dim3(ny+4) , dim3(zd+4) , 0, stream0 , v,t_v_x ); hipLaunchKernelGGL(( boundary_xm1) , dim3(ny+4) , dim3(zd+4) , 0, stream0 , w,t_w_x ); hipLaunchKernelGGL(( boundary_ym_in) , dim3(xd-4) ,dim3( zd-4) , 0, stream1 , p ); hipLaunchKernelGGL(( boundary_ym_in) , dim3(xd-4) ,dim3( zd-4) , 0, stream1 , u ); hipLaunchKernelGGL(( boundary_ym_in) , dim3(xd-4) ,dim3( zd-4) , 0, stream1 , v ); hipLaunchKernelGGL(( boundary_ym_in) , dim3(xd-4) ,dim3( zd-4) , 0, stream1 , w ); ////...x hipMemcpyAsync(t_p_x_h, t_p_x, sizeof(double)* tran_mac_2_x , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=15; MPI_Sendrecv ((void *)&t_p_x_h[end_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_p_x_h[startb_x], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=16; MPI_Sendrecv ((void *)&t_p_x_h[start_x ], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_p_x_h[endb_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, comm, istat); hipMemcpyAsync(t_u_x_h, t_u_x, sizeof(double)* tran_mac_1_x , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=17; MPI_Sendrecv ((void *)&t_u_x_h[end_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_u_x_h[startb_1_x], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=18; MPI_Sendrecv ((void *)&t_u_x_h[start_1_x ], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_u_x_h[endb_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, comm, istat); hipMemcpyAsync(t_v_x_h, t_v_x, sizeof(double)* tran_mac_1_x , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=19; MPI_Sendrecv ((void *)&t_v_x_h[end_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_v_x_h[startb_1_x], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=20; MPI_Sendrecv ((void *)&t_v_x_h[start_1_x ], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_v_x_h[endb_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, comm, istat); hipMemcpyAsync(t_w_x_h, t_w_x, sizeof(double)* tran_mac_1_x , hipMemcpyDeviceToHost,stream0); hipStreamSynchronize(stream0); itag=21; MPI_Sendrecv ((void *)&t_w_x_h[end_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_w_x_h[startb_1_x], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=22; MPI_Sendrecv ((void *)&t_w_x_h[start_1_x ], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_w_x_h[endb_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, comm, istat); hipStreamSynchronize(stream0); hipMemcpyAsync(t_p_x, t_p_x_h, sizeof(double)* tran_mac_2_x , hipMemcpyHostToDevice,stream0); hipMemcpyAsync(t_u_x, t_u_x_h, sizeof(double)* tran_mac_1_x , hipMemcpyHostToDevice,stream0); hipMemcpyAsync(t_v_x, t_v_x_h, sizeof(double)* tran_mac_1_x , hipMemcpyHostToDevice,stream0); hipMemcpyAsync(t_w_x, t_w_x_h, sizeof(double)* tran_mac_1_x , hipMemcpyHostToDevice,stream0); hipStreamSynchronize(stream0); hipLaunchKernelGGL(( boundary_xm2_undo) , dim3(ny+4) , dim3(zd+4) , 0, stream0 , p,t_p_x ); hipLaunchKernelGGL(( boundary_xm1_undo) , dim3(ny+4) , dim3(zd+4) , 0, stream0 , u,t_u_x ); hipLaunchKernelGGL(( boundary_xm1_undo) , dim3(ny+4) , dim3(zd+4) , 0, stream0 , v,t_v_x ); hipLaunchKernelGGL(( boundary_xm1_undo) , dim3(ny+4) , dim3(zd+4) , 0, stream0 , w,t_w_x ); if(condition == 0){ hipLaunchKernelGGL(( array_undo) , dim3(grid_t0) , dim3(block_t0), 0, 0, c_d,c ); hipLaunchKernelGGL(( 
array_undo) , dim3(grid_t0) , dim3(block_t0), 0, 0, w_d,w ); MPI_Barrier(MPI_COMM_WORLD); hipMemcpy(c_d_h,c_d,sizeof(double)*size_dicom,hipMemcpyDeviceToHost); hipMemcpy(w_d_h,w_d,sizeof(double)*size_dicom,hipMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); MPI_Gather((void *)&c_d_h[0], n_f, MPI_DOUBLE,(void *)&c_f_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&w_d_h[0], n_f, MPI_DOUBLE,(void *)&w_f_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Barrier(MPI_COMM_WORLD); if(myid==0){ double maxw; max_w(c_f_h,w_f_h,&maxw); Reynolds_Time( maxw, Re, step ); }} if(step%iprint ==0){ hipLaunchKernelGGL(( p_real) , dim3(grid_t0) , dim3(block_t0), 0, 0, c,p,a,beta,kappa,gra_c); hipLaunchKernelGGL(( array_undo) , dim3(grid_t0) , dim3(block_t0), 0, 0, c_d,c ); hipLaunchKernelGGL(( array_undo) , dim3(grid_t0) , dim3(block_t0), 0, 0, m_d,m ); hipLaunchKernelGGL(( array_undo) , dim3(grid_t0) , dim3(block_t0), 0, 0, b_d,b ); hipLaunchKernelGGL(( array_undo) , dim3(grid_t0) , dim3(block_t0), 0, 0, p_d,p ); hipLaunchKernelGGL(( array_undo) , dim3(grid_t0) , dim3(block_t0), 0, 0, u_d,u ); hipLaunchKernelGGL(( array_undo) , dim3(grid_t0) , dim3(block_t0), 0, 0, v_d,v ); hipLaunchKernelGGL(( array_undo) , dim3(grid_t0) , dim3(block_t0), 0, 0, w_d,w ); hipLaunchKernelGGL(( array_undo) , dim3(grid_t0) , dim3(block_t0), 0, 0, a_d,a ); MPI_Barrier(MPI_COMM_WORLD); hipMemcpy(c_d_h,c_d,sizeof(double)*size_dicom,hipMemcpyDeviceToHost); hipMemcpy(m_d_h,m_d,sizeof(double)*size_dicom,hipMemcpyDeviceToHost); hipMemcpy(b_d_h,b_d,sizeof(double)*size_dicom,hipMemcpyDeviceToHost); hipMemcpy(p_d_h,p_d,sizeof(double)*size_dicom,hipMemcpyDeviceToHost); hipMemcpy(u_d_h,u_d,sizeof(double)*size_dicom,hipMemcpyDeviceToHost); hipMemcpy(v_d_h,v_d,sizeof(double)*size_dicom,hipMemcpyDeviceToHost); hipMemcpy(w_d_h,w_d,sizeof(double)*size_dicom,hipMemcpyDeviceToHost); hipMemcpy(a_d_h,a_d,sizeof(double)*size_dicom,hipMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); MPI_Gather((void *)&c_d_h[0], n_f, MPI_DOUBLE,(void *)&c_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&m_d_h[0], n_f, MPI_DOUBLE,(void *)&m_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&b_d_h[0], n_f, MPI_DOUBLE,(void *)&b_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&p_d_h[0], n_f, MPI_DOUBLE,(void *)&p_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&u_d_h[0], n_f, MPI_DOUBLE,(void *)&u_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&v_d_h[0], n_f, MPI_DOUBLE,(void *)&v_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&w_d_h[0], n_f, MPI_DOUBLE,(void *)&w_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&a_d_h[0], n_f, MPI_DOUBLE,(void *)&a_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Barrier(MPI_COMM_WORLD); if(myid==0){ array_2D_undo(c_f_h,c_fdo_h); array_2D_undo(m_f_h,m_fdo_h); array_2D_undo(b_f_h,b_fdo_h); array_2D_undo(p_f_h,p_fdo_h); array_2D_undo(u_f_h,u_fdo_h); array_2D_undo(v_f_h,v_fdo_h); array_2D_undo(w_f_h,w_fdo_h); array_2D_undo(a_f_h,a_fdo_h); printf("step=%d\n",step); hipEventRecord(gpu_stop_temp,0); hipEventSynchronize(gpu_stop_temp); float cudatime_temp; hipEventElapsedTime(&cudatime_temp,gpu_start_temp,gpu_stop_temp); cudatime_temp=cudatime_temp/1000.0;//unit sec int remain_time=(int)(cudatime_temp/iprint*(stepall-step)); printf("time remaining: %d hr,%d min,%d sec\n",(int)remain_time/3600,(int)(remain_time%3600)/60,(int)remain_time%60); int indexx; printf("c max=%lf\n",maxvalue(c_f_h,&indexx)); printf("c min=%lf\n",minvalue(c_f_h,&indexx)); printf("p max=%e\n" 
,maxvalue(p_f_h,&indexx)); printf("u max=%e\n" ,maxvalue(u_f_h,&indexx)); printf("v max=%e\n" ,maxvalue(v_f_h,&indexx)); printf("w max=%e\n" ,maxvalue(w_f_h,&indexx)); data_2d = fopen("data_2d.dat","a"); fprintf( data_2d, "VARIABLES=\"X\",\"Z\",\"c\",\"u\",\"v\",\"w\",\"p\"\n"); fprintf( data_2d, "ZONE T=\"STEP=%d\" F=POINT\n",step); fprintf( data_2d, "I=%d, J=%d\n", nx,nz); j=ny/2; for(k=0;k<nz;k++){ for(i=0;i<nx;i++){ index=nx*(k*ny+j)+i; fprintf( data_2d, "%d\t%d\t%e\t%e\t%e\t%e\t%e\t\n", i,k,c_f_h[index],u_f_h[index],v_f_h[index],w_f_h[index],p_f_h[index]); }} fclose(data_2d); data_2d_t = fopen("data_2d_t.dat","w"); fprintf( data_2d_t, "VARIABLES=\"X\",\"Z\",\"c\",\"u\",\"v\",\"w\",\"p\"\n"); fprintf( data_2d_t, "ZONE T=\"STEP=%d\" F=POINT\n",step); fprintf( data_2d_t, "I=%d, J=%d\n", nx,nz); j=ny/2; for(k=0;k<nz;k++){ for(i=0;i<nx;i++){ index=nx*(k*ny+j)+i; fprintf( data_2d_t, "%d\t%d\t%e\t%e\t%e\t%e\t%e\t\n", i,k,c_f_h[index],u_f_h[index],v_f_h[index],w_f_h[index],p_f_h[index]); }} fclose(data_2d_t); if(step%idata_3d ==0){ data_3d = fopen("data_3d.dat","a"); fprintf( data_3d, "VARIABLES=\"X\",\"Y\",\"Z\",\"c\"\n"); fprintf( data_3d, "ZONE T=\"STEP=%d\" F=POINT\n",step); fprintf( data_3d, "I=%d, J=%d, K=%d\n", nx,ny,nz); for(k=0;k<nz;k++){ for(j=0;j<ny;j++){ for(i=0;i<nx;i++){ index=(nx)*(k*(ny)+j)+i; fprintf( data_3d, "%d\t%d\t%d\t%e\t\n", i,j,k,c_f_h[index]); }}} fclose(data_3d); data_3d_t = fopen("data_3d_t.dat","w"); fprintf( data_3d_t, "VARIABLES=\"X\",\"Y\",\"Z\",\"c\"\n"); fprintf( data_3d_t, "ZONE T=\"STEP=%d\" F=POINT\n",step); fprintf( data_3d_t, "I=%d, J=%d, K=%d\n", nx,ny,nz); for(k=0;k<nz;k++){ for(j=0;j<ny;j++){ for(i=0;i<nx;i++){ index=(nx)*(k*(ny)+j)+i; fprintf( data_3d_t, "%d\t%d\t%d\t%e\t\n", i,j,k,c_f_h[index]); }}} fclose(data_3d_t); } printf("===============================================================\n"); } hipEventRecord(gpu_start_temp,0); } } /////////////////////////////////////////////////////////////////////////////////////////// // eend // /////////////////////////////////////////////////////////////////////////////////////////// MPI_Barrier(MPI_COMM_WORLD); hipEventRecord(gpu_stop,0); hipEventSynchronize(gpu_stop); float cudatime; if(myid==0){ printf("===============================================================\n"); printf("Iteration terminated!\n"); hipEventElapsedTime(&cudatime,gpu_start,gpu_stop); printf("GPU total time = %f ms\n",cudatime); //unit = ms printf("mlups=%lf \n",(double)(nx*ny*nz)*stepall*pow(10.0,-6.0)/(cudatime/1000.0)); printf("===============================================================\n"); } hipEventDestroy(gpu_start); hipEventDestroy(gpu_stop); hipLaunchKernelGGL(( array_undo) , dim3(grid_t0) , dim3(block_t0), 0, 0, c_d,c ); hipLaunchKernelGGL(( array_undo) , dim3(grid_t0) , dim3(block_t0), 0, 0, m_d,m ); hipLaunchKernelGGL(( array_undo) , dim3(grid_t0) , dim3(block_t0), 0, 0, b_d,b ); hipLaunchKernelGGL(( array_undo) , dim3(grid_t0) , dim3(block_t0), 0, 0, p_d,p ); hipLaunchKernelGGL(( array_undo) , dim3(grid_t0) , dim3(block_t0), 0, 0, u_d,u ); hipLaunchKernelGGL(( array_undo) , dim3(grid_t0) , dim3(block_t0), 0, 0, v_d,v ); hipLaunchKernelGGL(( array_undo) , dim3(grid_t0) , dim3(block_t0), 0, 0, w_d,w ); hipLaunchKernelGGL(( array_undo) , dim3(grid_t0) , dim3(block_t0), 0, 0, a_d,a ); MPI_Barrier(MPI_COMM_WORLD); hipMemcpy(c_d_h,c_d,sizeof(double)*size_dicom,hipMemcpyDeviceToHost); hipMemcpy(m_d_h,m_d,sizeof(double)*size_dicom,hipMemcpyDeviceToHost); hipMemcpy(b_d_h,b_d,sizeof(double)*size_dicom,hipMemcpyDeviceToHost); 
hipMemcpy(p_d_h,p_d,sizeof(double)*size_dicom,hipMemcpyDeviceToHost); hipMemcpy(u_d_h,u_d,sizeof(double)*size_dicom,hipMemcpyDeviceToHost); hipMemcpy(v_d_h,v_d,sizeof(double)*size_dicom,hipMemcpyDeviceToHost); hipMemcpy(w_d_h,w_d,sizeof(double)*size_dicom,hipMemcpyDeviceToHost); hipMemcpy(a_d_h,a_d,sizeof(double)*size_dicom,hipMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); //////////////////////////////////////////////////////////////////////////////////////////////////////////////// MPI_Gather((void *)&c_d_h[0], n_f, MPI_DOUBLE,(void *)&c_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&m_d_h[0], n_f, MPI_DOUBLE,(void *)&m_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&b_d_h[0], n_f, MPI_DOUBLE,(void *)&b_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&p_d_h[0], n_f, MPI_DOUBLE,(void *)&p_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&u_d_h[0], n_f, MPI_DOUBLE,(void *)&u_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&v_d_h[0], n_f, MPI_DOUBLE,(void *)&v_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&w_d_h[0], n_f, MPI_DOUBLE,(void *)&w_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&a_d_h[0], n_f, MPI_DOUBLE,(void *)&a_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Barrier(MPI_COMM_WORLD); //////////////////////////////////////////////////////////////////////////////////////////////////////////////// if(myid==0){ array_2D_undo(c_f_h,c_fdo_h); array_2D_undo(m_f_h,m_fdo_h); array_2D_undo(b_f_h,b_fdo_h); array_2D_undo(p_f_h,p_fdo_h); array_2D_undo(u_f_h,u_fdo_h); array_2D_undo(v_f_h,v_fdo_h); array_2D_undo(w_f_h,w_fdo_h); array_2D_undo(a_f_h,a_fdo_h); final_2d = fopen("final_2d.dat","w"); fprintf( final_2d, "VARIABLES=\"X\",\"Z\",\"c\",\"u\",\"v\",\"w\",\"p\",\"p_real\"\n"); fprintf( final_2d, "ZONE T=\"STEP=%d\" F=POINT\n",step); fprintf( final_2d, "I=%d, J=%d\n", nx,nz); j=ny/2; for(k=0;k<nz;k++){ for(i=0;i<nx;i++){ index=nx*(k*ny+j)+i; fprintf( final_2d, "%d\t%d\t%e\t%e\t%e\t%e\t%e\t%e\t\n", i,k,c_f_h[index],u_f_h[index],v_f_h[index],w_f_h[index],p_f_h[index],a_f_h[index]); }} fclose(final_2d); final_3d = fopen("final_3d.dat","w"); fprintf( final_3d, "VARIABLES=\"X\",\"Y\",\"Z\",\"c\",\"u\",\"v\",\"w\",\"p\",\"p_real\"\n"); fprintf( final_3d, "ZONE T=\"STEP=%d\" F=POINT\n",step); fprintf( final_3d, "I=%d, J=%d, K=%d\n", nx,ny,nz); for(k=0;k<nz;k++){ for(j=0;j<ny;j++){ for(i=0;i<nx;i++){ index=(nx)*(k*(ny)+j)+i; fprintf( final_3d, "%d\t%d\t%d\t%e\t%e\t%e\t%e\t%e\t%e\t\n", i,j,k,c_f_h[index],u_f_h[index],v_f_h[index],w_f_h[index],p_f_h[index],a_f_h[index]); }}} fclose(final_3d); properties = fopen("properties.txt","a"); fprintf( properties,"MLUPS =%e\n",(double)(nx*ny*nz)*stepall*pow(10.0,-6.0)/(cudatime/1000.0)); if(condition == 0){ FILE *Reynolds; double T=sqrt(radd*2/gravity); Reynolds = fopen("Reynolds.dat","w"); fprintf( Reynolds, "VARIABLES=\"T\",\"Reynolds\"\n"); fprintf( Reynolds, "ZONE T=\"Reynolds\" F=POINT\n"); fprintf( Reynolds, "I=%d\n", stepall/2); for(i=0;i<stepall/2;i++){ fprintf( Reynolds, "%e\t%e\n",(double)2*(i+1)/T,Re[i]);} fclose ( Reynolds); } } // Free memory hipHostFree( c_d_h ); hipHostFree( m_d_h ); hipHostFree( b_d_h ); hipHostFree( p_d_h ); hipHostFree( u_d_h ); hipHostFree( v_d_h ); hipHostFree( w_d_h ); hipHostFree( a_d_h ); hipHostFree( et_h ); hipHostFree( ex_h ); hipHostFree( ey_h ); hipHostFree( ez_h ); hipHostFree( wt_h ); hipHostFree( t_c_h ); hipHostFree( t_m_h ); hipHostFree( t_b_h ); hipHostFree( t_p_h ); hipHostFree( t_u_h ); hipHostFree( t_v_h ); hipHostFree( t_w_h ); 
hipHostFree( t_g_h ); hipHostFree( t_h_h ); if(myid==0){ hipHostFree( c_f_h ); hipHostFree( m_f_h ); hipHostFree( b_f_h ); hipHostFree( p_f_h ); hipHostFree( u_f_h ); hipHostFree( v_f_h ); hipHostFree( w_f_h ); hipHostFree( a_f_h ); hipHostFree( xz_f_h ); hipHostFree( lx ); hipHostFree( lz ); } hipHostFree( xz_d_h ); hipFree( xz_d ); hipFree( c_d ); hipFree( m_d ); hipFree( b_d ); hipFree( p_d ); hipFree( u_d ); hipFree( v_d ); hipFree( w_d ); hipFree( a_d ); hipFree( h ); hipFree( g ); hipFree( h_t ); hipFree( g_t ); hipFree( gra_c ); hipFree( gra_m ); hipFree( t_c ); hipFree( t_m ); hipFree( t_b ); hipFree( t_p ); hipFree( t_u ); hipFree( t_v ); hipFree( t_w ); hipFree( t_g ); hipFree( t_h ); hipFree( c ); hipFree( m ); hipFree( b ); hipFree( p ); hipFree( u ); hipFree( v ); hipFree( w ); hipFree( a ); MPI_Finalize(); return 0; }
2a02ca62538086a955c1e67f420a44728887ab68.cu
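// 3-D phase-field (D3Q19) lattice Boltzmann solver for a rising bubble, decomposed over an ip x kp MPI process grid in x and z with periodic boundaries in y.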
#include <time.h> #include <math.h> #include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <mpi.h> //device parameter// #define ndim 2 #define ip 2 #define kp 3 #define DPN 3 //device per note #define stepall 100 #define iprint 10 #define idata_3d 100000 //droplet parameter// #define nx 120 #define ny 120 #define nz 120 #define radd 30.0 //1不用調 #define thick 5.0 #define tau_h 0.5 #define tau_l 0.05 #define tau_g 0.5 #define rho_l 1.0 #define rho_g 0.001 #define sigma 0.001 #define bo 100.0 //0 one bubble rising ,1 two bubble rising #define condition 0 //condition 1 #define distance_x 0.0 #define distance_z 10.0 #define radd_t 30.0 #define radd_b 30.0 //constant parameter// #define thita 10 #define dx 1.0 #define dt 1.0 #define q 19 __constant__ double eex[q]; __constant__ double eey[q]; __constant__ double eez[q]; __constant__ double wwt[q]; __constant__ int eet[q]; void parameter (double *beta,double *zeta,double *mobi,double *kappa,double *phic,double *gravity,double *ex_h,double *ey_h,double *ez_h,double *wt_h,int *et_h) { *zeta =(double)thick*dx; *beta =(double)12.0*sigma/(*zeta); *kappa=(double)(*beta)*(*zeta)*(*zeta)/8.0; *mobi =(double)0.02/(*beta); double omega=-cos(thita*M_PI/180.0); *phic =omega*pow(2.0*(*kappa)*(*beta),0.5); *gravity = bo*sigma/(rho_l-rho_g)/(2*radd)/(2*radd); //ex ex_h[ 0]= 0.0; ex_h[ 1]= 1.0; ex_h[ 2]=-1.0; ex_h[ 3]= 0.0; ex_h[ 4]= 0.0; ex_h[ 5]= 0.0; ex_h[ 6]= 0.0; ex_h[ 7]= 1.0; ex_h[ 8]=-1.0; ex_h[ 9]= 1.0; ex_h[10]=-1.0; ex_h[11]= 1.0; ex_h[12]=-1.0; ex_h[13]=-1.0; ex_h[14]= 1.0; ex_h[15]= 0.0; ex_h[16]= 0.0; ex_h[17]= 0.0; ex_h[18]= 0.0; //ey ey_h[ 0]= 0.0; ey_h[ 1]= 0.0; ey_h[ 2]= 0.0; ey_h[ 3]= 1.0; ey_h[ 4]=-1.0; ey_h[ 5]= 0.0; ey_h[ 6]= 0.0; ey_h[ 7]= 1.0; ey_h[ 8]=-1.0; ey_h[ 9]=-1.0; ey_h[10]= 1.0; ey_h[11]= 0.0; ey_h[12]= 0.0; ey_h[13]= 0.0; ey_h[14]= 0.0; ey_h[15]= 1.0; ey_h[16]=-1.0; ey_h[17]= 1.0; ey_h[18]=-1.0; //ez ez_h[ 0]= 0.0; ez_h[ 1]= 0.0; ez_h[ 2]= 0.0; ez_h[ 3]= 0.0; ez_h[ 4]= 0.0; ez_h[ 5]= 1.0; ez_h[ 6]=-1.0; ez_h[ 7]= 0.0; ez_h[ 8]= 0.0; ez_h[ 9]= 0.0; ez_h[10]= 0.0; ez_h[11]= 1.0; ez_h[12]=-1.0; ez_h[13]= 1.0; ez_h[14]=-1.0; ez_h[15]= 1.0; ez_h[16]=-1.0; ez_h[17]=-1.0; ez_h[18]= 1.0; //wt wt_h[ 0]=1.0/ 3.0; wt_h[ 1]=1.0/18.0; wt_h[ 2]=1.0/18.0; wt_h[ 3]=1.0/18.0; wt_h[ 4]=1.0/18.0; wt_h[ 5]=1.0/18.0; wt_h[ 6]=1.0/18.0; wt_h[ 7]=1.0/36.0; wt_h[ 8]=1.0/36.0; wt_h[ 9]=1.0/36.0; wt_h[10]=1.0/36.0; wt_h[11]=1.0/36.0; wt_h[12]=1.0/36.0; wt_h[13]=1.0/36.0; wt_h[14]=1.0/36.0; wt_h[15]=1.0/36.0; wt_h[16]=1.0/36.0; wt_h[17]=1.0/36.0; wt_h[18]=1.0/36.0; int l; for(l=0;l<q;l++) { et_h[l]=(nx/ip+4)*((ny+4)*(int)ez_h[l]+(int)ey_h[l])+(int)ex_h[l]; } } void initial_macro (double *c,double *m,double *b,double *p,double *u,double *v,double *w) { int i,j,k,index; double icent,jcent,kcent; for(i=0;i<nx;i++){ for(j=0;j<ny;j++){ for(k=0;k<nz;k++){ index=nx*(k*ny+j)+i; c[index]=0.0; m[index]=0.0; b[index]=0.0; p[index]=0.0; u[index]=0.0; v[index]=0.0; w[index]=0.0; }}} icent=(double)(nx-1.0)/2.0; jcent=(double)(ny-1.0)/2.0; kcent=(double)(nz-1.0)/2.0; if(condition==1){ double icent_r=icent+0.5*distance_x; double icent_l=icent-0.5*distance_x; double kcent_b=50; double kcent_t=kcent_b+thick+(radd_t+radd_b)+distance_z; int mid =0.5*(distance_z+thick)+50+radd_b; double raddd=radd+thick/2.0+1.0; for(i=0;i<nx;i++){ for(j=0;j<ny;j++){ for(k=0;k<mid;k++){ double rad=sqrt( (i-icent_l)*(i-icent_l)+(j-jcent)*(j-jcent)+(k-kcent_b)*(k-kcent_b)); index=nx*(k*ny+j)+i; c[index]=(double)0.5-(double)0.5*tanh(2.0*(radd_b-rad)/thick); }}} for(i=0;i<nx;i++){ 
for(j=0;j<ny;j++){ for(k=mid;k<nz;k++){ double rad=sqrt( (i-icent_r)*(i-icent_r)+(j-jcent)*(j-jcent)+(k-kcent_t)*(k-kcent_t)); index=nx*(k*ny+j)+i; c[index]=(double)0.5-(double)0.5*tanh(2.0*(radd_t-rad)/thick); }}} } else{ for(i=0;i<nx;i++){ for(j=0;j<ny;j++){ for(k=0;k<nz;k++){ double rad=sqrt( (i-icent)*(i-icent)+(j-jcent)*(j-jcent)+(k-kcent)*(k-kcent)); index=nx*(k*ny+j)+i; c[index]=(double)0.5-(double)0.5*tanh(2.0*(radd-rad)/thick); }}}} } void array_2D_do (double *phi,double *phi_do) { int i,j,k,index; int ii,jj,kk,iindex; int iside; int xd=nx/ip; jj=-1; kk=0; iside=0; for(k=0;k<nz;k++){ for(j=0;j<ny;j++){ for(i=0;i<nx;i++){ index=nx*(k*ny+j)+i; ii=i%xd; if(ii == 0){ jj=jj+1; } if(jj == ny){ kk=kk+1; jj=0; } if(kk == nz){ iside=iside+1; kk=0; } ii=ii+xd*iside; iindex=nx*(kk*ny+jj)+ii; phi_do[index]=phi[iindex]; } } } } void array_2D_undo (double *phi,double *phi_do) { int i,j,k,index; int ii,jj,kk,iindex; int iside; int xd=nx/ip; jj=-1; kk=0; iside=0; for(k=0;k<nz;k++){ for(j=0;j<ny;j++){ for(i=0;i<nx;i++){ index=nx*(k*ny+j)+i; ii=i%xd; if(ii == 0){ jj=jj+1; } if(jj == ny){ kk=kk+1; jj=0; } if(kk == nz){ iside=iside+1; kk=0; } ii=ii+xd*iside; iindex=nx*(kk*ny+jj)+ii; phi[iindex]=phi_do[index]; } } } } void array_1D_undo (double *phi,double *phi_do) { int i,k,index; int ii,kk,iindex; int iside; int xd=nx/ip; kk=0; iside=0; for(k=0;k<nz;k++){ for(i=0;i<nx;i++){ index=nx*k+i; ii=i%xd; if(ii == 0){ kk=kk+1; } if(kk == nz){ iside=iside+1; kk=0; } ii=ii+xd*iside; iindex=nx*kk+ii; phi[iindex]=phi_do[index]; } } } __device__ int index_3d (int i, int j,int k) { int ans=(nx/ip+4)*((ny+4)*k+j)+i; return ans; } __device__ int index_3d_x (int i, int j,int k) { int ans=(ny+4)*((nz/kp+4)*i+k)+j; return ans; } __device__ int index_4d (int i, int j,int k,int l) { int ans=(nx/ip+4)*((ny+4)*((nz/kp+4)*l+k)+j)+i; return ans; } __global__ void array_do( double *phi_d, double *phi) { int ii=threadIdx.x; int jj= blockIdx.x%ny; int kk= blockIdx.x/ny; int iindex =(nx/ip)*(kk*ny+jj)+ii; int i=threadIdx.x+2; int j= blockIdx.x%ny+2; int k= blockIdx.x/ny+2; int index=index_3d(i,j,k); phi[index]=phi_d[iindex]; } __global__ void array_undo( double *phi_d, double *phi) { int ii=threadIdx.x; int jj= blockIdx.x%ny; int kk= blockIdx.x/ny; int iindex =(nx/ip)*(kk*ny+jj)+ii; int i=threadIdx.x+2; int j= blockIdx.x%ny+2; int k= blockIdx.x/ny+2; int index=index_3d(i,j,k); phi_d[iindex]=phi[index]; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // boundary // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void boundary_ym( double *phi) { int i= blockIdx.x; int k=threadIdx.x; int distance=(ny)*(nx/ip+4); for(int j=0;j<2;j++){ int index =index_3d(i,j,k); phi[index]=phi[index+distance]; } for(int j=ny+2;j<ny+4;j++){ int index =index_3d(i,j,k); phi[index]=phi[index-distance]; } } __global__ void boundary_zm1( double *phi, double *t_phi ) { int k,index,index_t; int i= blockIdx.x; int j=threadIdx.x; k=2; index =index_3d(i,j,k); index_t =index_3d(i,j,1); t_phi[index_t]=phi[index]; k=nz/kp+1; index =index_3d(i,j,k); index_t =index_3d(i,j,2); t_phi[index_t]=phi[index]; } __global__ void boundary_xm1( double *phi, double *t_phi ) { int i,index,index_t; int j= blockIdx.x; int k=threadIdx.x; i=2; index =index_3d(i,j,k); index_t =index_3d_x(1,j,k); t_phi[index_t]=phi[index]; i=nx/ip+1; index =index_3d(i,j,k); index_t =index_3d_x(2,j,k); t_phi[index_t]=phi[index]; } __global__ void 
boundary_zm1_undo( double *phi, double *t_phi) { int k,index,index_t; int i= blockIdx.x; int j=threadIdx.x; k=1; index =index_3d(i,j,k); index_t =index_3d(i,j,0); phi[index]=t_phi[index_t]; k=nz/kp+2; index =index_3d(i,j,k); index_t =index_3d(i,j,3); phi[index]=t_phi[index_t]; } __global__ void boundary_xm1_undo( double *phi, double *t_phi) { int i,index,index_t; int j= blockIdx.x; int k=threadIdx.x; i=1; index =index_3d(i,j,k); index_t =index_3d_x(0,j,k); phi[index]=t_phi[index_t]; i=nx/ip+2; index =index_3d(i,j,k); index_t =index_3d_x(3,j,k); phi[index]=t_phi[index_t]; } __global__ void boundary_zm2( double *phi, double *t_phi ) { int k,l,index,index_t; int i= blockIdx.x; int j=threadIdx.x; for(l=0;l<2;l++){ k=2; index =index_3d(i,j,k+l); index_t =index_3d(i,j,2+l); t_phi[index_t]=phi[index]; k=nz/kp; index =index_3d(i,j,k+l); index_t =index_3d(i,j,4+l); t_phi[index_t]=phi[index]; } } __global__ void boundary_xm2( double *phi, double *t_phi ) { int i,l,index,index_t; int j= blockIdx.x; int k=threadIdx.x; for(l=0;l<2;l++){ i=2; index =index_3d(i+l,j,k); index_t =index_3d_x(2+l,j,k); t_phi[index_t]=phi[index]; i=nx/ip; index =index_3d(i+l,j,k); index_t =index_3d_x(4+l,j,k); t_phi[index_t]=phi[index]; } } __global__ void boundary_zm2_undo( double *phi, double *t_phi) { int k,l,index,index_t; int i= blockIdx.x; int j=threadIdx.x; for(l=0;l<2;l++){ k=0; index =index_3d(i,j,k+l); index_t =index_3d(i,j,0+l); phi[index]=t_phi[index_t]; k=nz/kp+2; index =index_3d(i,j,k+l); index_t =index_3d(i,j,6+l); phi[index]=t_phi[index_t]; } } __global__ void boundary_xm2_undo( double *phi, double *t_phi) { int i,l,index,index_t; int j= blockIdx.x; int k=threadIdx.x; for(l=0;l<2;l++){ i=0; index =index_3d(i+l,j,k); index_t =index_3d_x(0+l,j,k); phi[index]=t_phi[index_t]; i=nx/ip+2; index =index_3d(i+l,j,k); index_t =index_3d_x(6+l,j,k); phi[index]=t_phi[index_t]; } } __global__ void boundary_yd_bc( double *g,double *h) { int i= blockIdx.x+2; int j,index_l; int zd=nz/kp; int l=threadIdx.x; int distance=(ny)*(nx/ip+4); for(int k=2;k<zd+2;k=k+zd-1){ j=1; index_l=index_4d(i,j,k,l); g[index_l]=g[index_l+distance]; h[index_l]=h[index_l+distance]; j=ny+2; index_l=index_4d(i,j,k,l); g[index_l]=g[index_l-distance]; h[index_l]=h[index_l-distance]; } } __global__ void boundary_yd_bc_x( double *g,double *h) { int k= blockIdx.x+2; int j,index_l; int xd=nx/ip; int l=threadIdx.x; int distance=(ny)*(nx/ip+4); for(int i=2;i<xd+2;i=i+xd-1){ j=1; index_l=index_4d(i,j,k,l); g[index_l]=g[index_l+distance]; h[index_l]=h[index_l+distance]; j=ny+2; index_l=index_4d(i,j,k,l); g[index_l]=g[index_l-distance]; h[index_l]=h[index_l-distance]; } } __global__ void boundary_zd( double *phi,double *t_phi ) { int i= blockIdx.x+1; int j=threadIdx.x+1; int k,index_l,index_l_t; int xd=nx/ip; int l_top[5]={5,11,13,15,18}; int l_bot[5]={6,12,14,16,17}; for(int l=0;l<5;l++){ k=2; index_l =index_4d(i,j,k,l_bot[l]); index_l_t=((xd+4)*(1*(ny+4)+j)+i)*5+l;//k=1;q=5 t_phi[index_l_t]=phi[index_l]; k=nz/kp+1; index_l =index_4d(i,j,k,l_top[l]); index_l_t=((xd+4)*(2*(ny+4)+j)+i)*5+l;//k=2;q=5 t_phi[index_l_t]=phi[index_l]; } } __global__ void boundary_xd( double *phi,double *t_phi ) { int j= blockIdx.x+1; int k=threadIdx.x+1; int i,index_l,index_l_t; int zd=nz/kp; int l_right[5]={1,7, 9,11,14}; int l_left[5] ={2,8,10,12,13}; for(int l=0;l<5;l++){ i=2; index_l =index_4d(i,j,k,l_left[l]); index_l_t=((ny+4)*(1*(zd+4)+k)+j)*5+l;//k=1;q=5 t_phi[index_l_t]=phi[index_l]; i=nx/ip+1; index_l =index_4d(i,j,k,l_right[l]); 
index_l_t=((ny+4)*(2*(zd+4)+k)+j)*5+l;//k=2;q=5 t_phi[index_l_t]=phi[index_l]; } } __global__ void boundary_zd_undo( double *phi,double *t_phi) { int i= blockIdx.x+1; int j=threadIdx.x+1; int k,index_l,index_l_t; int xd=nx/ip; int l_top[5]={5,11,13,15,18}; int l_bot[5]={6,12,14,16,17}; for(int l=0;l<5;l++){ k=1; index_l =index_4d(i,j,k,l_top[l]); index_l_t=((xd+4)*(0*(ny+4)+j)+i)*5+l; phi[index_l]=t_phi[index_l_t]; k=nz/kp+2; index_l =index_4d(i,j,k,l_bot[l]); index_l_t=((xd+4)*(3*(ny+4)+j)+i)*5+l; phi[index_l]=t_phi[index_l_t]; } } __global__ void boundary_xd_undo( double *phi,double *t_phi) { int j= blockIdx.x+1; int k=threadIdx.x+1; int i,index_l,index_l_t; int zd=nz/kp; int l_right[5]={1,7, 9,11,14}; int l_left[5] ={2,8,10,12,13}; for(int l=0;l<5;l++){ i=1; index_l =index_4d(i,j,k,l_right[l]); index_l_t=((ny+4)*(0*(zd+4)+k)+j)*5+l; phi[index_l]=t_phi[index_l_t]; i=nx/ip+2; index_l =index_4d(i,j,k,l_left[l]); index_l_t=((ny+4)*(3*(zd+4)+k)+j)*5+l; phi[index_l]=t_phi[index_l_t]; } } __global__ void boundary_yd_in( double *g,double *h) { int i= blockIdx.x+3; int k=threadIdx.x+3; int j,index_l; int distance=(ny)*(nx/ip+4); for(int l=0;l<q;l++){ j=1; index_l=index_4d(i,j,k,l); g[index_l]=g[index_l+distance]; h[index_l]=h[index_l+distance]; j=ny+2; index_l=index_4d(i,j,k,l); g[index_l]=g[index_l-distance]; h[index_l]=h[index_l-distance]; } } __global__ void boundary_ym_bc( double *phi) { int i =threadIdx.x+2; int zd=nz/kp; int kk[4]= {2,3,zd,zd+1}; int distance=(ny)*(nx/ip+4); for (int t=0;t<4;t++){ int k=kk[t]; for (int j=0;j<2;j++){ int index=index_3d(i,j,k); phi[index]=phi[index+distance]; } for (int j=ny+2;j<ny+4;j++){ int index=index_3d(i,j,k); phi[index]=phi[index-distance]; }} } __global__ void boundary_ym_bc_x( double *phi) { int k =threadIdx.x+2; int xd=nx/ip; int ii[4]= {2,3,xd,xd+1}; int distance=(ny)*(xd+4); for (int t=0;t<4;t++){ int i=ii[t]; for (int j=0;j<2;j++){ int index=index_3d(i,j,k); phi[index]=phi[index+distance]; } for (int j=ny+2;j<ny+4;j++){ int index=index_3d(i,j,k); phi[index]=phi[index-distance]; }} } __global__ void boundary_ym_in( double *phi) { int i= blockIdx.x+4; int k=threadIdx.x+4; int distance=(ny)*(nx/ip+4); for(int j=0;j<2;j++){ int index=index_3d(i,j,k); phi[index]=phi[index+distance]; } for(int j=ny+2;j<ny+4;j++){ int index=index_3d(i,j,k); phi[index]=phi[index-distance]; } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // gradient // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void gradient_cen ( double *gra_phi, double *phi) { const int i=threadIdx.x+2; const int j= blockIdx.x%ny+2; const int k= blockIdx.x/ny+2; const int index=index_3d(i,j,k); const double cs2_inv=3.0; double temp =0.0; double temp_x=0.0; double temp_y=0.0; double temp_z=0.0; for(int l=1;l<q;l=l+2){ double ex=eex[l]; double ey=eey[l]; double ez=eez[l]; double wt=wwt[l]; int et=eet[l]; temp=2.0*wt*(phi[index+et]-phi[index-et]); temp_x=ex*temp+temp_x; temp_y=ey*temp+temp_y; temp_z=ez*temp+temp_z; } gra_phi[index_4d(i,j,k,0)]=temp_x*0.5*cs2_inv; gra_phi[index_4d(i,j,k,1)]=temp_y*0.5*cs2_inv; gra_phi[index_4d(i,j,k,2)]=temp_z*0.5*cs2_inv; } __device__ double grad_phie_c(double *phi,int index,int et) { double ans; ans=(phi[index+et]-phi[index-et])*0.5; return ans; } __device__ double grad_phie_m(double *phi,int index,int et) { double ans; ans=(-phi[index+2*et]+5.0*phi[index+et]-3.0*phi[index]-phi[index-et])*0.25; return ans; } 
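// Directional finite-difference gradients (central and mixed stencils) evaluated along the D3Q19 velocity set, plus the lattice Laplacian used by the chemical-potential and collision kernels.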
__device__ double gradient_cen_x ( double *phi, int index ) { double ans=0.0; double cs2_inv=3.0; #pragma unroll 9 for(int l=1;l<q;l=l+2){ double ex=eex[l]; double wt=wwt[l]; int et=eet[l]; ans=ex*2.0*wt*(phi[index+et]-phi[index-et])+ans; } ans=ans*0.5*cs2_inv; return ans; } __device__ double gradient_cen_y ( double *phi, int index ) { double ans=0.0; double cs2_inv=3.0; #pragma unroll 9 for(int l=1;l<q;l=l+2){ double ey=eey[l]; double wt=wwt[l]; int et=eet[l]; ans=ey*2.0*wt*(phi[index+et]-phi[index-et])+ans; } ans=ans*0.5*cs2_inv; return ans; } __device__ double gradient_cen_z ( double *phi, int index ) { double ans=0.0; double cs2_inv=3.0; #pragma unroll 9 for(int l=1;l<q;l=l+2){ double ez=eez[l]; double wt=wwt[l]; int et=eet[l]; ans=ez*2.0*wt*(phi[index+et]-phi[index-et])+ans; } ans=ans*0.5*cs2_inv; return ans; } __device__ double gradient_mix_x ( double *phi, int index ) { double ans=0.0; double cs2_inv=3.0; #pragma unroll 9 for(int l=1;l<q;l=l+2){ double ex=eex[l]; double wt=wwt[l]; int et=eet[l]; ans=ex*wt*(-phi[index+2*et]+6.0*phi[index+et]-6.0*phi[index-et]+phi[index-2*et])+ans; } ans=ans*0.25*cs2_inv; return ans; } __device__ double gradient_mix_y ( double *phi, int index ) { double ans=0.0; double cs2_inv=3.0; #pragma unroll 9 for(int l=1;l<q;l=l+2){ double ey=eey[l]; double wt=wwt[l]; int et=eet[l]; ans=ey*wt*(-phi[index+2*et]+6.0*phi[index+et]-6.0*phi[index-et]+phi[index-2*et])+ans; } ans=ans*0.25*cs2_inv; return ans; } __device__ double gradient_mix_z ( double *phi, int index ) { double ans=0.0; double cs2_inv=3.0; #pragma unroll 9 for(int l=1;l<q;l=l+2){ double ez=eez[l]; double wt=wwt[l]; int et=eet[l]; ans=ez*wt*(-phi[index+2*et]+6.0*phi[index+et]-6.0*phi[index-et]+phi[index-2*et])+ans; } ans=ans*0.25*cs2_inv; return ans; } __device__ double laplace_phi (double *phi,int index) { double ans=0.0; double phi_index=phi[index]; double cs2_inv =3.0; double dt_inv=1./dt; for(int l=1;l<q;l=l+2) { double wt=wwt[l]; int et=eet[l]; ans=2.0*wt*(phi[index+et]-2.0*phi_index+phi[index-et])+ans; } ans=ans*cs2_inv*dt_inv; return ans; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // chemical mu // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void chemical(double *c,double *m,double kappa,double beta ) { int i= threadIdx.x+2; int j=blockIdx.x%ny+2; int k=blockIdx.x/ny+2; int index=index_3d(i,j,k); double cl=c[index]; m[index]=beta*(4.0*cl*cl*cl-6.0*cl*cl+2.0*cl)-kappa*laplace_phi( c,index ); } __global__ void chemical_bc( double *c,double *m,double kappa,double beta ) { int i=threadIdx.x+2; int j=blockIdx.x +2; int zd=nz/kp; int kk[4]= {2,3,zd,zd+1}; for (int t=0;t<4;t++){ int k=kk[t]; int index=index_3d(i,j,k); double cl=c[index]; m[index]=beta*(4.0*cl*cl*cl-6.0*cl*cl+2.0*cl)-kappa*laplace_phi( c,index ); } } __global__ void chemical_bc_x( double *c,double *m,double kappa,double beta ) { int k=threadIdx.x+4; int j=blockIdx.x +2; int xd=nx/ip; int ii[4]= {2,3,xd,xd+1}; for (int t=0;t<4;t++){ int i=ii[t]; int index=index_3d(i,j,k); double cl=c[index]; m[index]=beta*(4.0*cl*cl*cl-6.0*cl*cl+2.0*cl)-kappa*laplace_phi( c,index ); } } __global__ void chemical_in( double *c,double *m,double kappa,double beta ) { int i=threadIdx.x+4; int j=blockIdx.x%ny+2; int k=blockIdx.x/ny+4; int index=index_3d(i,j,k); double cl=c[index]; m[index]=beta*(4.0*cl*cl*cl-6.0*cl*cl+2.0*cl)-kappa*laplace_phi( c,index ); } 
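// Equilibrium and collision kernels: g carries pressure/momentum, h carries the order parameter c; the source terms combine the chemical-potential force and gravity using both central and mixed gradients.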
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // eq collision // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void eq_collision(double *g,double *h,double *c,double *m,double *p,double gravity,double *gra_c, double *gra_m,double *u,double *v,double *w,double mobi) { int i=threadIdx.x+2; int j= blockIdx.x%ny+2; int k= blockIdx.x/ny+2; int index=index_3d(i,j,k); double cs2_inv =3.0; const double cs2=1.0/cs2_inv; double uu=u[index]; double vv=v[index]; double ww=w[index]; double cc=c[index]; double rr=cc*rho_l+(1.0-cc)*rho_g; // double tt=cc*tau_l+(1.0-cc)*tau_g; const double rr_inv=1.0/rr; double pp=p[index]; double dr = rho_l-rho_g; double gr_cx_c=gra_c[index_4d(i,j,k,0)]; double gr_cy_c=gra_c[index_4d(i,j,k,1)]; double gr_cz_c=gra_c[index_4d(i,j,k,2)]; double gr_mx_c=gra_m[index_4d(i,j,k,0)]; double gr_my_c=gra_m[index_4d(i,j,k,1)]; double gr_mz_c=gra_m[index_4d(i,j,k,2)]; double gr_px_c=gradient_cen_x ( p,index ); double gr_py_c=gradient_cen_y ( p,index ); double gr_pz_c=gradient_cen_z ( p,index ); double gr_cx_m=gradient_mix_x ( c,index ); double gr_cy_m=gradient_mix_y ( c,index ); double gr_cz_m=gradient_mix_z ( c,index ); double gr_mx_m=gradient_mix_x ( m,index ); double gr_my_m=gradient_mix_y ( m,index ); double gr_mz_m=gradient_mix_z ( m,index ); double gr_px_m=gradient_mix_x ( p,index ); double gr_py_m=gradient_mix_y ( p,index ); double gr_pz_m=gradient_mix_z ( p,index ); double lap_mu =laplace_phi( m,index ); double udotu=uu*uu+vv*vv+ww*ww; for(int l=0;l<q;l++) { int index_l=index_4d(i,j,k,l); double ex=eex[l]; double ey=eey[l]; double ez=eez[l]; double wt=wwt[l]; int et=eet[l]; double edotu=ex*uu+ey*vv+ez*ww; double uugly=edotu*cs2_inv+edotu*edotu*0.5*cs2_inv*cs2_inv-udotu*0.5*cs2_inv; double gamma=wt*(1.0+uugly); double u_et=u[index+et]; double v_et=v[index+et]; double w_et=w[index+et]; double lap_mu_et=laplace_phi( m,index+et ); double udotu_et=u_et*u_et+v_et*v_et+w_et*w_et; double edotu_et=ex*u_et+ey*v_et+ez*w_et; double uugly_et=edotu_et*cs2_inv+edotu_et*edotu_et*0.5*cs2_inv*cs2_inv-udotu_et*0.5*cs2_inv; double gamma_et=wt*(1.0+uugly_et); /////////////////////////////////////////////////////// double geq_t=wt*(pp+rr*cs2*uugly);//geq double heq_t=cc*gamma;//heq /////////////////////////////////////////////////////// double temp_cc = grad_phie_c( c,index,et ) - ( uu * gr_cx_c + vv * gr_cy_c + ww * gr_cz_c ); double temp_mc = grad_phie_c( m,index,et ) - ( uu * gr_mx_c + vv * gr_my_c + ww * gr_mz_c ); double temp_pc = grad_phie_c( p,index,et ) - ( uu * gr_px_c + vv * gr_py_c + ww * gr_pz_c ); double temp_cm = grad_phie_m( c,index,et ) - ( uu * gr_cx_m + vv * gr_cy_m + ww * gr_cz_m ); double temp_mm = grad_phie_m( m,index,et ) - ( uu * gr_mx_m + vv * gr_my_m + ww * gr_mz_m ); double temp_pm = grad_phie_m( p,index,et ) - ( uu * gr_px_m + vv * gr_py_m + ww * gr_pz_m ); double temp_z = ez*gravity-ww*gravity; /////////////////////////////////////////////////////// double temp_gc = cs2*wt*uugly*temp_cc*dr-(cc*temp_mc+rr*temp_z)*gamma+ez*rho_l*gravity*wt; double temp_hc = temp_cc-cc*rr_inv*cs2_inv*(temp_pc+cc*temp_mc+(rr-rho_l)*temp_z); geq_t=geq_t-0.5*temp_gc;//geq_bar heq_t=heq_t-0.5*gamma*temp_hc;//heq_bar /////////////////////////////////////////////////////// double temp_gm = cs2*wt*uugly*temp_cm*dr-(cc*temp_mm+rr*temp_z)*gamma+ez*rho_l*gravity*wt; double temp_hm = temp_cm-cc*rr_inv*cs2_inv*(temp_pm+cc*temp_mm+(rr-rho_l)*temp_z); 
temp_hm = 0.5*dt*mobi*( gamma*lap_mu + gamma_et*lap_mu_et )+temp_hm*gamma; ////////////////////////collision////////////////////////////// g[index_l] = geq_t+temp_gm; h[index_l] = heq_t+temp_hm; } } __global__ void eq_collision_bc(double *g,double *h,double *c,double *m,double *p,double gravity,double *gra_c, double *gra_m,double *u,double *v,double *w,double mobi) { int i=threadIdx.x+2; int j= blockIdx.x+2; int zd=nz/kp; double cs2_inv =3.0; double cs2=1.0/cs2_inv; double dr = rho_l-rho_g; for(int k=2;k<zd+2;k=k+zd-1) { int index=index_3d(i,j,k); double uu=u[index]; double vv=v[index]; double ww=w[index]; double cc=c[index]; double ceq=cc; if (cc < 0)ceq=0; else if(cc > 1)ceq=1; else ceq=cc; double rr=cc*rho_l+(1.0-cc)*rho_g; double tt=cc*tau_l+(1.0-cc)*tau_g; double rr_inv=1.0/rr; double pp=p[index]; double gr_cx_c=gra_c[index_4d(i,j,k,0)]; double gr_cy_c=gra_c[index_4d(i,j,k,1)]; double gr_cz_c=gra_c[index_4d(i,j,k,2)]; double gr_mx_c=gra_m[index_4d(i,j,k,0)]; double gr_my_c=gra_m[index_4d(i,j,k,1)]; double gr_mz_c=gra_m[index_4d(i,j,k,2)]; double gr_px_c=gradient_cen_x ( p,index ); double gr_py_c=gradient_cen_y ( p,index ); double gr_pz_c=gradient_cen_z ( p,index ); double gr_cx_m=gradient_mix_x ( c,index ); double gr_cy_m=gradient_mix_y ( c,index ); double gr_cz_m=gradient_mix_z ( c,index ); double gr_mx_m=gradient_mix_x ( m,index ); double gr_my_m=gradient_mix_y ( m,index ); double gr_mz_m=gradient_mix_z ( m,index ); double gr_px_m=gradient_mix_x ( p,index ); double gr_py_m=gradient_mix_y ( p,index ); double gr_pz_m=gradient_mix_z ( p,index ); double lap_mu =laplace_phi( m,index ); double udotu=uu*uu+vv*vv+ww*ww; for(int l=0;l<q;l++) { int index_l=index_4d(i,j,k,l); double ex=eex[l]; double ey=eey[l]; double ez=eez[l]; double wt=wwt[l]; int et=eet[l]; double edotu=ex*uu+ey*vv+ez*ww; double uugly=edotu*cs2_inv+edotu*edotu*0.5*cs2_inv*cs2_inv-udotu*0.5*cs2_inv; double gamma=wt*(1.0+uugly); double u_et=u[index+et]; double v_et=v[index+et]; double w_et=w[index+et]; double lap_mu_et=laplace_phi( m,index+et ); double udotu_et=u_et*u_et+v_et*v_et+w_et*w_et; double edotu_et=ex*u_et+ey*v_et+ez*w_et; double uugly_et=edotu_et*cs2_inv+edotu_et*edotu_et*0.5*cs2_inv*cs2_inv-udotu_et*0.5*cs2_inv; double gamma_et=wt*(1.0+uugly_et); /////////////////////////////////////////////////////// double geq_t=wt*(pp+rr*cs2*uugly);//geq double heq_t=ceq*gamma;//heq /////////////////////////////////////////////////////// double temp_cc = grad_phie_c( c,index,et ) - ( uu * gr_cx_c + vv * gr_cy_c + ww * gr_cz_c ); double temp_mc = grad_phie_c( m,index,et ) - ( uu * gr_mx_c + vv * gr_my_c + ww * gr_mz_c ); double temp_pc = grad_phie_c( p,index,et ) - ( uu * gr_px_c + vv * gr_py_c + ww * gr_pz_c ); double temp_cm = grad_phie_m( c,index,et ) - ( uu * gr_cx_m + vv * gr_cy_m + ww * gr_cz_m ); double temp_mm = grad_phie_m( m,index,et ) - ( uu * gr_mx_m + vv * gr_my_m + ww * gr_mz_m ); double temp_pm = grad_phie_m( p,index,et ) - ( uu * gr_px_m + vv * gr_py_m + ww * gr_pz_m ); double temp_z = ez*gravity-ww*gravity; /////////////////////////////////////////////////////// double temp_gc = cs2*wt*uugly*temp_cc*dr-(cc*temp_mc+rr*temp_z)*gamma+ez*rho_l*gravity*wt; double temp_hc = temp_cc-cc*rr_inv*cs2_inv*(temp_pc+cc*temp_mc+rr*temp_z); geq_t=geq_t-0.5*temp_gc;//geq_bar heq_t=heq_t-0.5*temp_hc*gamma;//heq_bar /////////////////////////////////////////////////////// double temp_gm = cs2*wt*uugly*temp_cm*dr-(cc*temp_mm+rr*temp_z)*gamma+ez*rho_l*gravity*wt; double temp_hm = 
temp_cm-cc*rr_inv*cs2_inv*(temp_pm+cc*temp_mm+rr*temp_z); temp_hm = 0.5*dt*mobi*( gamma*lap_mu + gamma_et*lap_mu_et )+temp_hm*gamma; ////////////////////////collision////////////////////////////// g[index_l] = g[index_l]*(1.0-1.0/(tt +0.5))+geq_t/(tt +0.5)+temp_gm; h[index_l] = h[index_l]*(1.0-1.0/(tau_h +0.5))+heq_t/(tau_h +0.5)+temp_hm; } } } __global__ void eq_collision_bc_x(double *g,double *h,double *c,double *m,double *p,double gravity,double *gra_c, double *gra_m,double *u,double *v,double *w,double mobi) { int k=threadIdx.x+3; int j= blockIdx.x+2; int xd=nx/ip; double cs2_inv =3.0; double cs2 =1.0/cs2_inv; double dr = rho_l-rho_g; for(int i=2;i<xd+2;i=i+xd-1) { int index=index_3d(i,j,k); double uu=u[index]; double vv=v[index]; double ww=w[index]; double cc=c[index]; double ceq=cc; if (cc < 0)ceq=0; else if(cc > 1)ceq=1; else ceq=cc; double rr=cc*rho_l+(1.0-cc)*rho_g; double tt=cc*tau_l+(1.0-cc)*tau_g; double rr_inv=1.0/rr; double pp=p[index]; double gr_cx_c=gra_c[index_4d(i,j,k,0)]; double gr_cy_c=gra_c[index_4d(i,j,k,1)]; double gr_cz_c=gra_c[index_4d(i,j,k,2)]; double gr_mx_c=gra_m[index_4d(i,j,k,0)]; double gr_my_c=gra_m[index_4d(i,j,k,1)]; double gr_mz_c=gra_m[index_4d(i,j,k,2)]; double gr_px_c=gradient_cen_x ( p,index ); double gr_py_c=gradient_cen_y ( p,index ); double gr_pz_c=gradient_cen_z ( p,index ); double gr_cx_m=gradient_mix_x ( c,index ); double gr_cy_m=gradient_mix_y ( c,index ); double gr_cz_m=gradient_mix_z ( c,index ); double gr_mx_m=gradient_mix_x ( m,index ); double gr_my_m=gradient_mix_y ( m,index ); double gr_mz_m=gradient_mix_z ( m,index ); double gr_px_m=gradient_mix_x ( p,index ); double gr_py_m=gradient_mix_y ( p,index ); double gr_pz_m=gradient_mix_z ( p,index ); double lap_mu =laplace_phi( m,index ); double udotu=uu*uu+vv*vv+ww*ww; for(int l=0;l<q;l++) { int index_l=index_4d(i,j,k,l); double ex=eex[l]; double ey=eey[l]; double ez=eez[l]; double wt=wwt[l]; int et=eet[l]; double edotu=ex*uu+ey*vv+ez*ww; double uugly=edotu*cs2_inv+edotu*edotu*0.5*cs2_inv*cs2_inv-udotu*0.5*cs2_inv; double gamma=wt*(1.0+uugly); double u_et=u[index+et]; double v_et=v[index+et]; double w_et=w[index+et]; double lap_mu_et=laplace_phi( m,index+et ); double udotu_et=u_et*u_et+v_et*v_et+w_et*w_et; double edotu_et=ex*u_et+ey*v_et+ez*w_et; double uugly_et=edotu_et*cs2_inv+edotu_et*edotu_et*0.5*cs2_inv*cs2_inv-udotu_et*0.5*cs2_inv; double gamma_et=wt*(1.0+uugly_et); /////////////////////////////////////////////////////// double geq_t=wt*(pp+rr*cs2*uugly);//geq double heq_t=ceq*gamma;//heq /////////////////////////////////////////////////////// double temp_cc = grad_phie_c( c,index,et ) - ( uu * gr_cx_c + vv * gr_cy_c + ww * gr_cz_c ); double temp_mc = grad_phie_c( m,index,et ) - ( uu * gr_mx_c + vv * gr_my_c + ww * gr_mz_c ); double temp_pc = grad_phie_c( p,index,et ) - ( uu * gr_px_c + vv * gr_py_c + ww * gr_pz_c ); double temp_cm = grad_phie_m( c,index,et ) - ( uu * gr_cx_m + vv * gr_cy_m + ww * gr_cz_m ); double temp_mm = grad_phie_m( m,index,et ) - ( uu * gr_mx_m + vv * gr_my_m + ww * gr_mz_m ); double temp_pm = grad_phie_m( p,index,et ) - ( uu * gr_px_m + vv * gr_py_m + ww * gr_pz_m ); double temp_z = ez*gravity-ww*gravity; /////////////////////////////////////////////////////// double temp_gc = cs2*wt*uugly*temp_cc*dr-(cc*temp_mc+rr*temp_z)*gamma+ez*rho_l*gravity*wt; double temp_hc = temp_cc-cc*rr_inv*cs2_inv*(temp_pc+cc*temp_mc+rr*temp_z); geq_t=geq_t-0.5*temp_gc;//geq_bar heq_t=heq_t-0.5*temp_hc*gamma;//heq_bar /////////////////////////////////////////////////////// double 
temp_gm = cs2*wt*uugly*temp_cm*dr-(cc*temp_mm+rr*temp_z)*gamma+ez*rho_l*gravity*wt; double temp_hm = temp_cm-cc*rr_inv*cs2_inv*(temp_pm+cc*temp_mm+rr*temp_z); temp_hm = 0.5*dt*mobi*( gamma*lap_mu + gamma_et*lap_mu_et )+temp_hm*gamma; ////////////////////////collision////////////////////////////// g[index_l] = g[index_l]*(1.0-1.0/(tt +0.5))+geq_t/(tt +0.5)+temp_gm; h[index_l] = h[index_l]*(1.0-1.0/(tau_h +0.5))+heq_t/(tau_h +0.5)+temp_hm; } } } __global__ void eq_collision_in(double *g,double *h,double *c,double *m,double *p,double gravity,double *gra_c, double *gra_m,double *u,double *v,double *w,double mobi) { int i=threadIdx.x+3; int j= blockIdx.x%ny+2; int k= blockIdx.x/ny+3; int index=index_3d(i,j,k); double cs2_inv =3.0; double cs2=1.0/cs2_inv; double uu=u[index]; double vv=v[index]; double ww=w[index]; double cc=c[index]; double ceq=cc; if (cc < 0)ceq=0; else if(cc > 1)ceq=1; else ceq=cc; double rr=cc*rho_l+(1.0-cc)*rho_g; double tt=cc*tau_l+(1.0-cc)*tau_g; double rr_inv=1.0/rr; double pp=p[index]; double dr = rho_l-rho_g; double gr_cx_c=gra_c[index_4d(i,j,k,0)]; double gr_cy_c=gra_c[index_4d(i,j,k,1)]; double gr_cz_c=gra_c[index_4d(i,j,k,2)]; double gr_mx_c=gra_m[index_4d(i,j,k,0)]; double gr_my_c=gra_m[index_4d(i,j,k,1)]; double gr_mz_c=gra_m[index_4d(i,j,k,2)]; double gr_px_c=gradient_cen_x ( p,index ); double gr_py_c=gradient_cen_y ( p,index ); double gr_pz_c=gradient_cen_z ( p,index ); double gr_cx_m=gradient_mix_x ( c,index ); double gr_cy_m=gradient_mix_y ( c,index ); double gr_cz_m=gradient_mix_z ( c,index ); double gr_mx_m=gradient_mix_x ( m,index ); double gr_my_m=gradient_mix_y ( m,index ); double gr_mz_m=gradient_mix_z ( m,index ); double gr_px_m=gradient_mix_x ( p,index ); double gr_py_m=gradient_mix_y ( p,index ); double gr_pz_m=gradient_mix_z ( p,index ); double lap_mu =laplace_phi( m,index ); double udotu=uu*uu+vv*vv+ww*ww; for(int l=0;l<q;l++) { int index_l=index_4d(i,j,k,l); double ex=eex[l]; double ey=eey[l]; double ez=eez[l]; double wt=wwt[l]; int et=eet[l]; double edotu=ex*uu+ey*vv+ez*ww; double uugly=edotu*cs2_inv+edotu*edotu*0.5*cs2_inv*cs2_inv-udotu*0.5*cs2_inv; double gamma=wt*(1.0+uugly); double u_et=u[index+et]; double v_et=v[index+et]; double w_et=w[index+et]; double lap_mu_et=laplace_phi( m,index+et ); double udotu_et=u_et*u_et+v_et*v_et+w_et*w_et; double edotu_et=ex*u_et+ey*v_et+ez*w_et; double uugly_et=edotu_et*cs2_inv+edotu_et*edotu_et*0.5*cs2_inv*cs2_inv-udotu_et*0.5*cs2_inv; double gamma_et=wt*(1.0+uugly_et); /////////////////////////////////////////////////////// double geq_t=wt*(pp+rr*cs2*uugly);//geq double heq_t=ceq*gamma;//heq /////////////////////////////////////////////////////// double temp_cc = grad_phie_c( c,index,et ) - ( uu * gr_cx_c + vv * gr_cy_c + ww * gr_cz_c ); double temp_mc = grad_phie_c( m,index,et ) - ( uu * gr_mx_c + vv * gr_my_c + ww * gr_mz_c ); double temp_pc = grad_phie_c( p,index,et ) - ( uu * gr_px_c + vv * gr_py_c + ww * gr_pz_c ); double temp_cm = grad_phie_m( c,index,et ) - ( uu * gr_cx_m + vv * gr_cy_m + ww * gr_cz_m ); double temp_mm = grad_phie_m( m,index,et ) - ( uu * gr_mx_m + vv * gr_my_m + ww * gr_mz_m ); double temp_pm = grad_phie_m( p,index,et ) - ( uu * gr_px_m + vv * gr_py_m + ww * gr_pz_m ); double temp_z = ez*gravity-ww*gravity; /////////////////////////////////////////////////////// double temp_gc = cs2*wt*uugly*temp_cc*dr-(cc*temp_mc+rr*temp_z)*gamma+ez*rho_l*gravity*wt; double temp_hc = temp_cc-cc*rr_inv*cs2_inv*(temp_pc+cc*temp_mc+rr*temp_z); geq_t=geq_t-0.5*temp_gc;//geq_bar 
heq_t=heq_t-0.5*temp_hc*gamma;//heq_bar /////////////////////////////////////////////////////// double temp_gm = cs2*wt*uugly*temp_cm*dr-(cc*temp_mm+rr*temp_z)*gamma+ez*rho_l*gravity*wt; double temp_hm = temp_cm-cc*rr_inv*cs2_inv*(temp_pm+cc*temp_mm+rr*temp_z); temp_hm = 0.5*dt*mobi*( gamma*lap_mu + gamma_et*lap_mu_et )+temp_hm*gamma; ////////////////////////collision////////////////////////////// g[index_l] = g[index_l]*(1.0-1.0/(tt +0.5))+geq_t/(tt +0.5)+temp_gm; h[index_l] = h[index_l]*(1.0-1.0/(tau_h +0.5))+heq_t/(tau_h +0.5)+temp_hm; } } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // macro // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void macro_h_bc(double *h,double *h_next,double *c) { int i=threadIdx.x+2; int j= blockIdx.x+2; int zd=nz/kp; int kk[4]= {2,3,zd,zd+1}; for (int t=0;t<4;t++){ int k=kk[t]; int index=index_3d(i,j,k); double sum_c=0.0; for(int l=0;l<q;l++){ int index_l =index_4d(i,j,k,l); int et=eet[l]; sum_c=h[index_l-et]+sum_c; h_next[index_l]=h[index_l-et]; } c[index]=sum_c; } } __global__ void macro_h_bc_x(double *h,double *h_next,double *c) { int k=threadIdx.x+4; int j= blockIdx.x+2; int xd=nx/ip; int ii[4]= {2,3,xd,xd+1}; for (int t=0;t<4;t++){ int i=ii[t]; int index=index_3d(i,j,k); double sum_c=0.0; for(int l=0;l<q;l++){ int index_l =index_4d(i,j,k,l); int et=eet[l]; sum_c=h[index_l-et]+sum_c; h_next[index_l]=h[index_l-et]; } c[index]=sum_c; } } __global__ void macro_h_in(double *h,double *h_next,double *c) { int i=threadIdx.x+4; int j= blockIdx.x%ny+2; int k= blockIdx.x/ny+4; int index=index_3d(i,j,k); double sum_c=0.0; for(int l=0;l<q;l++){ int index_l =index_4d(i,j,k,l); int et=eet[l]; sum_c=h[index_l-et]+sum_c; h_next[index_l]=h[index_l-et]; } c[index]=sum_c; } __global__ void macro_g_bc(double *g,double *g_next,double *c,double *m,double *p,double *gra_c,double *gra_m,double *u,double *v,double *w) { int i=threadIdx.x+2; int j= blockIdx.x+2; int zd=nz/kp; int kk[4]= {2,3,zd,zd+1}; double dr=rho_l-rho_g; double cs2_inv=3.0; double cs2=1.0/cs2_inv; for (int t=0;t<4;t++){ int k=kk[t]; int index=index_3d(i,j,k); double cc=c[index]; double rr=cc*rho_l+((double)1.0-cc)*rho_g; double gr_rx_c=gra_c[index_4d(i,j,k,0)]*dr; double gr_ry_c=gra_c[index_4d(i,j,k,1)]*dr; double gr_rz_c=gra_c[index_4d(i,j,k,2)]*dr; double gr_mx_c=gra_m[index_4d(i,j,k,0)]; double gr_my_c=gra_m[index_4d(i,j,k,1)]; double gr_mz_c=gra_m[index_4d(i,j,k,2)]; double sum_u=0.0; double sum_v=0.0; double sum_w=0.0; double sum_p=0.0; for(int l=0;l<q;l++) { int index_l=index_4d(i,j,k,l); double ex=eex[l]; double ey=eey[l]; double ez=eez[l]; int et=eet[l]; double temp_g=g[index_l-et]; sum_u=ex*temp_g+sum_u; sum_v=ey*temp_g+sum_v; sum_w=ez*temp_g+sum_w; sum_p= temp_g+sum_p; g_next[index_l]=temp_g; } double uu=(sum_u*cs2_inv-0.5*dt*cc*gr_mx_c)/rr; double vv=(sum_v*cs2_inv-0.5*dt*cc*gr_my_c)/rr; double ww=(sum_w*cs2_inv-0.5*dt*cc*gr_mz_c)/rr; u[index]=uu; v[index]=vv; w[index]=ww; p[index]=sum_p+0.5*dt*(uu*gr_rx_c+vv*gr_ry_c+ww*gr_rz_c)*cs2; } } __global__ void macro_g_bc_x(double *g,double *g_next,double *c,double *m,double *p,double *gra_c,double *gra_m,double *u,double *v,double *w) { int k=threadIdx.x+4; int j= blockIdx.x+2; int xd=nx/ip; int ii[4]= {2,3,xd,xd+1}; double cs2_inv=3.0; double cs2=1.0/cs2_inv; double dr=rho_l-rho_g; for (int t=0;t<4;t++){ int i=ii[t]; int index=index_3d(i,j,k); double cc=c[index]; double 
rr=cc*rho_l+((double)1.0-cc)*rho_g; double gr_rx_c=gra_c[index_4d(i,j,k,0)]*dr; double gr_ry_c=gra_c[index_4d(i,j,k,1)]*dr; double gr_rz_c=gra_c[index_4d(i,j,k,2)]*dr; double gr_mx_c=gra_m[index_4d(i,j,k,0)]; double gr_my_c=gra_m[index_4d(i,j,k,1)]; double gr_mz_c=gra_m[index_4d(i,j,k,2)]; double sum_u=0.0; double sum_v=0.0; double sum_w=0.0; double sum_p=0.0; for(int l=0;l<q;l++) { int index_l=index_4d(i,j,k,l); double ex=eex[l]; double ey=eey[l]; double ez=eez[l]; int et=eet[l]; double temp_g=g[index_l-et]; sum_u=ex*temp_g+sum_u; sum_v=ey*temp_g+sum_v; sum_w=ez*temp_g+sum_w; sum_p= temp_g+sum_p; g_next[index_l]=temp_g; } double uu=(sum_u*cs2_inv-0.5*dt*cc*gr_mx_c)/rr; double vv=(sum_v*cs2_inv-0.5*dt*cc*gr_my_c)/rr; double ww=(sum_w*cs2_inv-0.5*dt*cc*gr_mz_c)/rr; u[index]=uu; v[index]=vv; w[index]=ww; p[index]=sum_p+0.5*dt*(uu*gr_rx_c+vv*gr_ry_c+ww*gr_rz_c)*cs2; } } __global__ void macro_g_in( double *g, double *g_next,double *c,double *m,double *p,double *gra_c,double *gra_m,double *u,double *v,double *w) { int i=threadIdx.x+4; int j= blockIdx.x%ny+2; int k= blockIdx.x/ny+4; int index=index_3d(i,j,k); double cs2_inv=3.0; double cs2=1.0/cs2_inv; double cc=c[index]; double rr=cc*rho_l+((double)1.0-cc)*rho_g; double dr=rho_l-rho_g; double gr_rx_c=gra_c[index_4d(i,j,k,0)]*dr; double gr_ry_c=gra_c[index_4d(i,j,k,1)]*dr; double gr_rz_c=gra_c[index_4d(i,j,k,2)]*dr; double gr_mx_c=gra_m[index_4d(i,j,k,0)]; double gr_my_c=gra_m[index_4d(i,j,k,1)]; double gr_mz_c=gra_m[index_4d(i,j,k,2)]; double sum_u=0.0; double sum_v=0.0; double sum_w=0.0; double sum_p=0.0; for(int l=0;l<q;l++){ int index_l =index_4d(i,j,k,l); double ex=eex[l]; double ey=eey[l]; double ez=eez[l]; int et=eet[l]; double temp_g=g[index_l-et]; sum_u=ex*temp_g+sum_u; sum_v=ey*temp_g+sum_v; sum_w=ez*temp_g+sum_w; sum_p= temp_g+sum_p; g_next[index_l]=temp_g; } double uu=(sum_u*cs2_inv-0.5*dt*cc*gr_mx_c)/rr; double vv=(sum_v*cs2_inv-0.5*dt*cc*gr_my_c)/rr; double ww=(sum_w*cs2_inv-0.5*dt*cc*gr_mz_c)/rr; u[index]=uu; v[index]=vv; w[index]=ww; p[index]=sum_p+0.5*dt*(uu*gr_rx_c+vv*gr_ry_c+ww*gr_rz_c)*cs2; } //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// // post // //////////////////////////////////////////////////////////////////////////////////////////////////////////////////// __global__ void p_real(double *c,double *p,double *a,double beta,double kappa,double *gra_c) { int i=threadIdx.x+2; int j= blockIdx.x%ny+2; int k= blockIdx.x/ny+2; int index=index_3d(i,j,k); double gr_cx_c=gra_c[index_4d(i,j,k,0)]; double gr_cy_c=gra_c[index_4d(i,j,k,1)]; double gr_cz_c=gra_c[index_4d(i,j,k,2)]; double la_c =laplace_phi(c,index ); double cc=c[index]; double pp=p[index]; double th,cu,e0; e0=beta*cc*cc*(cc-1)*(cc-1); th=cc*beta*(4*cc*cc*cc-6*cc*cc+2*cc)-e0; cu=-kappa*cc*la_c+0.5*kappa*(gr_cx_c*gr_cx_c+gr_cy_c*gr_cy_c+gr_cz_c*gr_cz_c); a[index]=pp+th+cu; } double maxvalue(double *phi, int* indexx) { double max=0.0; int i,j,k; for(i=0;i<nx;i++){ for(j=0;j<ny;j++){ for(k=0;k<nz;k++){ int index=nx*(k*ny+j)+i; if(max < phi[index]){ max=phi[index]; *indexx=index; }}}} return max; } void max_w(double *c,double *w,double *max) { *max=0.0; int i,j,k; for(i=0;i<nx;i++){ for(j=0;j<ny;j++){ for(k=0;k<nz;k++){ int index=nx*(k*ny+j)+i; if(*max < w[index]){ *max = w[index]; }}}} } double minvalue(double *phi, int* indexx) { double min=100.0; int i,j,k; for(i=0;i<nx;i++){ for(j=0;j<ny;j++){ for(k=0;k<nz;k++){ int index=nx*(k*ny+j)+i; if(min > phi[index]){ min=phi[index]; *indexx=index; }}}} return 
min; }
void Reynolds_Time(double w, double *Re, int step)
{
    Re[step/2-1]=2*radd*3/tau_l*w;
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
//                                                      main                                                       //
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char *argv[])
{
    int i,j,k,index;
    //declare the arrays that are domain-decomposed (split across the MPI ranks)
    double *c_d_h,*c_f_h,*c_fdo_h,*c_d,*c; // decomposed & final & transferred on host / original & transferred on device
    double *m_d_h,*m_f_h,*m_fdo_h,*m_d,*m;
    double *b_d_h,*b_f_h,*b_fdo_h,*b_d,*b; // wettability
    double *p_d_h,*p_f_h,*p_fdo_h,*p_d,*p;
    double *u_d_h,*u_f_h,*u_fdo_h,*u_d,*u;
    double *v_d_h,*v_f_h,*v_fdo_h,*v_d,*v;
    double *w_d_h,*w_f_h,*w_fdo_h,*w_d,*w;
    double *a_d_h,*a_f_h,*a_fdo_h,*a_d,*a; //total pressure
    double *xz_d_h,*xz_f_h,*xz_fdo_h,*xz_d;
    //declare the arrays that are not decomposed
    int *et_h;//lattice directions
    double *ex_h,*ey_h,*ez_h,*wt_h;
    double *h,*h_t;
    double *g,*g_t;
    //gradient matrix
    double *gra_c;
    double *gra_m;
    //declare the small buffers used for the boundary (halo) exchange
    double *t_c_h,*t_c;
    double *t_m_h,*t_m;
    double *t_b_h,*t_b;
    double *t_p_h,*t_p;
    double *t_u_h,*t_u;
    double *t_v_h,*t_v;
    double *t_w_h,*t_w;
    double *t_g_h,*t_g;
    double *t_h_h,*t_h;
    double *t_c_x_h,*t_c_x;
    double *t_m_x_h,*t_m_x;
    double *t_b_x_h,*t_b_x;
    double *t_p_x_h,*t_p_x;
    double *t_u_x_h,*t_u_x;
    double *t_v_x_h,*t_v_x;
    double *t_w_x_h,*t_w_x;
    double *t_g_x_h,*t_g_x;
    double *t_h_x_h,*t_h_x;
    double *lx,*lz;
    double *Re;
    ////mpi
    int nproc,myid;
    int l_nbr, b_nbr, r_nbr, t_nbr, my_coord[ndim], iroot, itag;
    int ipart[ndim],periods[ndim],sideways,updown,right,up,reorder;
    int n_f;
    MPI_Status istat[8];
    MPI_Comm comm;
    MPI_Init( &argc, &argv );
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    comm = MPI_COMM_WORLD;
    ipart[0]=ip; ipart[1]=kp;
    periods[0]=1; periods[1]=1;
    reorder=1;
    MPI_Cart_create(MPI_COMM_WORLD,ndim,ipart,periods,reorder,&comm);
    MPI_Comm_rank(comm,&myid);
    MPI_Cart_coords(comm,myid,ndim,my_coord);
    sideways=0; updown=1; right=1; up=1;
    MPI_Cart_shift(comm,sideways,right,&l_nbr,&r_nbr);
    MPI_Cart_shift(comm,updown ,up ,&b_nbr,&t_nbr);
    n_f=nx/ip*ny*nz/kp;
    if(myid==0){
        printf("===============================================================\n");
        printf("Checking devices...\n");
    }
    MPI_Barrier(MPI_COMM_WORLD);
    printf("NPROC,MYID,i,k=%d\t%d\t%d\t%d\t\n",nproc,myid,my_coord[0],my_coord[1]);
    MPI_Barrier(MPI_COMM_WORLD);
    cudaSetDevice(myid%DPN);
    ////memory allocation on cpu
    int size_final = nx*ny*nz;
    int size_dicom = (nx/ip+4)*(ny+4)*(nz/kp+4);
    int size_difun = (nx/ip+4)*(ny+4)*(nz/kp+4)*q;
    int size_allgr = (nx/ip+4)*(ny+4)*(nz/kp+4)*3;//(x+y+z)
    int tran_mac_1 = (nx/ip+4)*(ny+4)*4*1; //u,v,w
    int tran_mac_2 = (nx/ip+4)*(ny+4)*4*2; //c,m,b,p
    int tran_difun = (nx/ip+4)*(ny+4)*4*5;//5 transferred directions
    int tran_mac_1_x = (nz/kp+4)*(ny+4)*4*1; //u,v,w x face
    int tran_mac_2_x = (nz/kp+4)*(ny+4)*4*2; //c,m,b,p x face
    int tran_difun_x = (nz/kp+4)*(ny+4)*4*5;//5 transferred directions, x face
    cudaMallocHost((void**)&c_d_h ,sizeof(double)*size_dicom);
    cudaMallocHost((void**)&m_d_h ,sizeof(double)*size_dicom);
    cudaMallocHost((void**)&b_d_h ,sizeof(double)*size_dicom);
    cudaMallocHost((void**)&p_d_h ,sizeof(double)*size_dicom);
    cudaMallocHost((void**)&u_d_h ,sizeof(double)*size_dicom);
    cudaMallocHost((void**)&v_d_h ,sizeof(double)*size_dicom);
    cudaMallocHost((void**)&w_d_h ,sizeof(double)*size_dicom);
    cudaMallocHost((void**)&a_d_h ,sizeof(double)*size_dicom);
    cudaMallocHost((void**)&et_h ,sizeof(double)* q );
    cudaMallocHost((void**)&ex_h ,sizeof(double)* q );
    cudaMallocHost((void**)&ey_h ,sizeof(double)* q );
cudaMallocHost((void**)&ez_h ,sizeof(double)* q ); cudaMallocHost((void**)&wt_h ,sizeof(double)* q ); cudaMallocHost((void**)&t_c_h ,sizeof(double)* tran_mac_2 ); cudaMallocHost((void**)&t_m_h ,sizeof(double)* tran_mac_2 ); cudaMallocHost((void**)&t_b_h ,sizeof(double)* tran_mac_2 ); cudaMallocHost((void**)&t_p_h ,sizeof(double)* tran_mac_2 ); cudaMallocHost((void**)&t_u_h ,sizeof(double)* tran_mac_1 ); cudaMallocHost((void**)&t_v_h ,sizeof(double)* tran_mac_1 ); cudaMallocHost((void**)&t_w_h ,sizeof(double)* tran_mac_1 ); cudaMallocHost((void**)&t_g_h ,sizeof(double)* tran_difun ); cudaMallocHost((void**)&t_h_h ,sizeof(double)* tran_difun ); cudaMallocHost((void**)&t_c_x_h ,sizeof(double)* tran_mac_2_x ); cudaMallocHost((void**)&t_m_x_h ,sizeof(double)* tran_mac_2_x ); cudaMallocHost((void**)&t_b_x_h ,sizeof(double)* tran_mac_2_x ); cudaMallocHost((void**)&t_p_x_h ,sizeof(double)* tran_mac_2_x ); cudaMallocHost((void**)&t_u_x_h ,sizeof(double)* tran_mac_1_x ); cudaMallocHost((void**)&t_v_x_h ,sizeof(double)* tran_mac_1_x ); cudaMallocHost((void**)&t_w_x_h ,sizeof(double)* tran_mac_1_x ); cudaMallocHost((void**)&t_g_x_h ,sizeof(double)* tran_difun_x ); cudaMallocHost((void**)&t_h_x_h ,sizeof(double)* tran_difun_x ); cudaMallocHost((void**)&xz_d_h ,sizeof(double)*(nx/ip+4)*(nz/kp+4)); /////////////////////////////////////////////////////////////////////////////////////////// // zz // /////////////////////////////////////////////////////////////////////////////////////////// int step=0; double beta,zeta,mobi,kappa,phic,gravity; parameter (&beta,&zeta,&mobi,&kappa,&phic,&gravity,ex_h,ey_h,ez_h,wt_h,et_h); FILE *data_2d_t; FILE *data_3d_t; FILE *data_2d; FILE *data_3d; FILE *properties; FILE *final_2d; FILE *final_3d; if(myid == 0){ cudaMallocHost((void**)&c_f_h ,sizeof(double)*size_final); cudaMallocHost((void**)&m_f_h ,sizeof(double)*size_final); cudaMallocHost((void**)&b_f_h ,sizeof(double)*size_final); cudaMallocHost((void**)&p_f_h ,sizeof(double)*size_final); cudaMallocHost((void**)&u_f_h ,sizeof(double)*size_final); cudaMallocHost((void**)&v_f_h ,sizeof(double)*size_final); cudaMallocHost((void**)&w_f_h ,sizeof(double)*size_final); cudaMallocHost((void**)&a_f_h ,sizeof(double)*size_final); cudaMallocHost((void**)&xz_f_h ,sizeof(double)* nx*nz ); cudaMallocHost((void**)&c_fdo_h ,sizeof(double)*size_final); cudaMallocHost((void**)&m_fdo_h ,sizeof(double)*size_final); cudaMallocHost((void**)&b_fdo_h ,sizeof(double)*size_final); cudaMallocHost((void**)&p_fdo_h ,sizeof(double)*size_final); cudaMallocHost((void**)&u_fdo_h ,sizeof(double)*size_final); cudaMallocHost((void**)&v_fdo_h ,sizeof(double)*size_final); cudaMallocHost((void**)&w_fdo_h ,sizeof(double)*size_final); cudaMallocHost((void**)&a_fdo_h ,sizeof(double)*size_final); cudaMallocHost((void**)&xz_fdo_h ,sizeof(double)* nx*nz ); cudaMallocHost((void**)&lx ,sizeof(double)* stepall/2 ); cudaMallocHost((void**)&lz ,sizeof(double)* stepall/2 ); cudaMallocHost((void**)&Re ,sizeof(double)* stepall/2 ); initial_macro(c_f_h,m_f_h,b_f_h,p_f_h,u_f_h,v_f_h,w_f_h); array_2D_do(c_f_h,c_fdo_h); array_2D_do(m_f_h,m_fdo_h); array_2D_do(b_f_h,b_fdo_h); array_2D_do(p_f_h,p_fdo_h); array_2D_do(u_f_h,u_fdo_h); array_2D_do(v_f_h,v_fdo_h); array_2D_do(w_f_h,w_fdo_h); array_2D_do(a_f_h,a_fdo_h); //writing data properties = fopen("properties.txt","w"); if(condition==0){ double mo=gravity*(rho_l-rho_g)*pow(tau_l,4)*rho_l*rho_l/81.0/pow(sigma,3); printf("===============================================================\n"); fprintf( properties, "Three 
dimensional droplets - Bubble rising\n"); fprintf( properties, "Grid size nx=%d, ny=%d, nz=%d\n",nx,ny,nz); fprintf( properties, "Radius=%f, Thickness=%f\n",radd, thick); fprintf( properties, "Bo=%f\n",bo); fprintf( properties, "Mo=%f\n",mo); printf ("Bo=%f\n",bo); printf ("Mo=%f\n",mo); printf("Three dimensional droplets - One Bubble rising\n"); printf("===============================================================\n"); } else if(condition==1){ double mo=gravity*(rho_l-rho_g)*pow(tau_l,4)*rho_l*rho_l/81.0/pow(sigma,3); printf("===============================================================\n"); fprintf( properties, "Three dimensional droplets - Bubble rising\n"); fprintf( properties, "Grid size nx=%d, ny=%d, nz=%d\n",nx,ny,nz); fprintf( properties, "Radius=%f, Thickness=%f\n",radd, thick); fprintf( properties, "Bo=%f\n",bo); fprintf( properties, "Mo=%f\n",mo); printf ("Bo=%f\n",bo); printf ("Mo=%f\n",mo); printf("Three dimensional droplets - Two Bubble rising\n"); printf("===============================================================\n"); } printf("Initializing..."); fprintf( properties, "Tau_h =%f, Tau_g=%f, Tau_l=%f\n", tau_h,tau_g,tau_l); fprintf( properties, "rho_l =%f, rho_g=%f, sigma=%f\n", rho_l,rho_g,sigma); fclose(properties); data_2d = fopen("data_2d.dat","w"); fprintf( data_2d, "VARIABLES=\"X\",\"Z\",\"c\",\"u\",\"v\",\"w\",\"p\"\n"); fprintf( data_2d, "ZONE T=\"STEP=%d\" F=POINT\n",step); fprintf( data_2d, "I=%d, J=%d\n", nx,nz); j=ny/2; for(k=0;k<nz;k++){ for(i=0;i<nx;i++){ index=nx*(k*ny+j)+i; fprintf( data_2d, "%d\t%d\t%e\t%e\t%e\t%e\t%e\t\n", i,k,c_f_h[index],u_f_h[index],v_f_h[index],w_f_h[index],p_f_h[index]); }} fclose(data_2d); data_3d = fopen("data_3d.dat","w"); fprintf( data_3d, "VARIABLES=\"X\",\"Y\",\"Z\",\"c\"\n"); fprintf( data_3d, "ZONE T=\"STEP=%d\" F=POINT\n",step); fprintf( data_3d, "I=%d, J=%d, K=%d\n", nx,ny,nz); for(k=0;k<nz;k++){ for(j=0;j<ny;j++){ for(i=0;i<nx;i++){ index=(nx)*(k*(ny)+j)+i; fprintf( data_3d, "%d\t%d\t%d\t%e\t\n", i,j,k,c_f_h[index]); }}} fclose(data_3d); printf("done\n"); printf("===============================================================\n"); printf("Iterating...\n"); } MPI_Barrier(MPI_COMM_WORLD); //scatter iroot = 0; MPI_Scatter((void *)&c_fdo_h[0],n_f, MPI_DOUBLE,(void *)&c_d_h[0],n_f, MPI_DOUBLE,iroot,comm); MPI_Scatter((void *)&m_fdo_h[0],n_f, MPI_DOUBLE,(void *)&m_d_h[0],n_f, MPI_DOUBLE,iroot,comm); MPI_Scatter((void *)&b_fdo_h[0],n_f, MPI_DOUBLE,(void *)&b_d_h[0],n_f, MPI_DOUBLE,iroot,comm); MPI_Scatter((void *)&p_fdo_h[0],n_f, MPI_DOUBLE,(void *)&p_d_h[0],n_f, MPI_DOUBLE,iroot,comm); MPI_Scatter((void *)&u_fdo_h[0],n_f, MPI_DOUBLE,(void *)&u_d_h[0],n_f, MPI_DOUBLE,iroot,comm); MPI_Scatter((void *)&v_fdo_h[0],n_f, MPI_DOUBLE,(void *)&v_d_h[0],n_f, MPI_DOUBLE,iroot,comm); MPI_Scatter((void *)&w_fdo_h[0],n_f, MPI_DOUBLE,(void *)&w_d_h[0],n_f, MPI_DOUBLE,iroot,comm); MPI_Scatter((void *)&a_fdo_h[0],n_f, MPI_DOUBLE,(void *)&a_d_h[0],n_f, MPI_DOUBLE,iroot,comm); MPI_Barrier(MPI_COMM_WORLD); //memory allocation on gpu cudaMalloc((void**)&c_d ,sizeof(double)* size_dicom ); cudaMalloc((void**)&m_d ,sizeof(double)* size_dicom ); cudaMalloc((void**)&b_d ,sizeof(double)* size_dicom ); cudaMalloc((void**)&p_d ,sizeof(double)* size_dicom ); cudaMalloc((void**)&u_d ,sizeof(double)* size_dicom ); cudaMalloc((void**)&v_d ,sizeof(double)* size_dicom ); cudaMalloc((void**)&w_d ,sizeof(double)* size_dicom ); cudaMalloc((void**)&a_d ,sizeof(double)* size_dicom ); cudaMalloc((void**)&h ,sizeof(double)* size_difun ); cudaMalloc((void**)&g 
,sizeof(double)* size_difun ); cudaMalloc((void**)&h_t ,sizeof(double)* size_difun ); cudaMalloc((void**)&g_t ,sizeof(double)* size_difun ); cudaMalloc((void**)&t_c ,sizeof(double)* tran_mac_2 ); cudaMalloc((void**)&t_m ,sizeof(double)* tran_mac_2 ); cudaMalloc((void**)&t_b ,sizeof(double)* tran_mac_2 ); cudaMalloc((void**)&t_p ,sizeof(double)* tran_mac_2 ); cudaMalloc((void**)&t_u ,sizeof(double)* tran_mac_1 ); cudaMalloc((void**)&t_v ,sizeof(double)* tran_mac_1 ); cudaMalloc((void**)&t_w ,sizeof(double)* tran_mac_1 ); cudaMalloc((void**)&t_g ,sizeof(double)* tran_difun ); cudaMalloc((void**)&t_h ,sizeof(double)* tran_difun ); cudaMalloc((void**)&t_c_x ,sizeof(double)* tran_mac_2_x ); cudaMalloc((void**)&t_m_x ,sizeof(double)* tran_mac_2_x ); cudaMalloc((void**)&t_b_x ,sizeof(double)* tran_mac_2_x ); cudaMalloc((void**)&t_p_x ,sizeof(double)* tran_mac_2_x ); cudaMalloc((void**)&t_u_x ,sizeof(double)* tran_mac_1_x ); cudaMalloc((void**)&t_v_x ,sizeof(double)* tran_mac_1_x ); cudaMalloc((void**)&t_w_x ,sizeof(double)* tran_mac_1_x ); cudaMalloc((void**)&t_g_x ,sizeof(double)* tran_difun_x ); cudaMalloc((void**)&t_h_x ,sizeof(double)* tran_difun_x ); cudaMalloc((void**)&gra_c ,sizeof(double)* size_allgr ); cudaMalloc((void**)&gra_m ,sizeof(double)* size_allgr ); cudaMalloc((void**)&xz_d,sizeof(double)*(nx/ip+4)*(nz/kp+4)); MPI_Barrier(MPI_COMM_WORLD); //cpu to gpu cudaMemcpy(c_d, c_d_h, sizeof(double)* size_dicom , cudaMemcpyHostToDevice); cudaMemcpy(m_d, m_d_h, sizeof(double)* size_dicom , cudaMemcpyHostToDevice); cudaMemcpy(b_d, b_d_h, sizeof(double)* size_dicom , cudaMemcpyHostToDevice); cudaMemcpy(p_d, p_d_h, sizeof(double)* size_dicom , cudaMemcpyHostToDevice); cudaMemcpy(u_d, u_d_h, sizeof(double)* size_dicom , cudaMemcpyHostToDevice); cudaMemcpy(v_d, v_d_h, sizeof(double)* size_dicom , cudaMemcpyHostToDevice); cudaMemcpy(w_d, w_d_h, sizeof(double)* size_dicom , cudaMemcpyHostToDevice); cudaMemcpy(a_d, a_d_h, sizeof(double)* size_dicom , cudaMemcpyHostToDevice); cudaMemcpy(t_c, t_c_h, sizeof(double)* tran_mac_2 , cudaMemcpyHostToDevice); cudaMemcpy(t_m, t_m_h, sizeof(double)* tran_mac_2 , cudaMemcpyHostToDevice); cudaMemcpy(t_b, t_b_h, sizeof(double)* tran_mac_2 , cudaMemcpyHostToDevice); cudaMemcpy(t_p, t_p_h, sizeof(double)* tran_mac_2 , cudaMemcpyHostToDevice); cudaMemcpy(t_u, t_u_h, sizeof(double)* tran_mac_1 , cudaMemcpyHostToDevice); cudaMemcpy(t_v, t_v_h, sizeof(double)* tran_mac_1 , cudaMemcpyHostToDevice); cudaMemcpy(t_w, t_w_h, sizeof(double)* tran_mac_1 , cudaMemcpyHostToDevice); cudaMemcpy(t_g, t_g_h, sizeof(double)* tran_difun , cudaMemcpyHostToDevice); cudaMemcpy(t_h, t_h_h, sizeof(double)* tran_difun , cudaMemcpyHostToDevice); cudaMemcpy(t_c_x, t_c_x_h, sizeof(double)* tran_mac_2_x , cudaMemcpyHostToDevice); cudaMemcpy(t_m_x, t_m_x_h, sizeof(double)* tran_mac_2_x , cudaMemcpyHostToDevice); cudaMemcpy(t_b_x, t_b_x_h, sizeof(double)* tran_mac_2_x , cudaMemcpyHostToDevice); cudaMemcpy(t_p_x, t_p_x_h, sizeof(double)* tran_mac_2_x , cudaMemcpyHostToDevice); cudaMemcpy(t_u_x, t_u_x_h, sizeof(double)* tran_mac_1_x , cudaMemcpyHostToDevice); cudaMemcpy(t_v_x, t_v_x_h, sizeof(double)* tran_mac_1_x , cudaMemcpyHostToDevice); cudaMemcpy(t_w_x, t_w_x_h, sizeof(double)* tran_mac_1_x , cudaMemcpyHostToDevice); cudaMemcpy(t_g_x, t_g_x_h, sizeof(double)* tran_difun_x , cudaMemcpyHostToDevice); cudaMemcpy(t_h_x, t_h_x_h, sizeof(double)* tran_difun_x , cudaMemcpyHostToDevice); cudaMemcpyToSymbol ( eex , ex_h, sizeof(double)*q ); cudaMemcpyToSymbol ( eey , ey_h, sizeof(double)*q ); 
cudaMemcpyToSymbol ( eez , ez_h, sizeof(double)*q ); cudaMemcpyToSymbol ( wwt , wt_h, sizeof(double)*q ); cudaMemcpyToSymbol ( eet , et_h, sizeof(int )*q ); MPI_Barrier(MPI_COMM_WORLD); int xd=nx/ip; //x decomposition int zd=nz/kp; //z decomposition int grid_t0 =ny*zd; int block_t0 =xd; int grid_bc =ny; int block_t0_x =zd-2; int grid_in =ny*(zd-2); int grid_in2 =ny*(zd-4); cudaMalloc((void**)&c ,sizeof(double)* size_dicom ); cudaMalloc((void**)&m ,sizeof(double)* size_dicom ); cudaMalloc((void**)&b ,sizeof(double)* size_dicom ); cudaMalloc((void**)&p ,sizeof(double)* size_dicom ); cudaMalloc((void**)&u ,sizeof(double)* size_dicom ); cudaMalloc((void**)&v ,sizeof(double)* size_dicom ); cudaMalloc((void**)&w ,sizeof(double)* size_dicom ); cudaMalloc((void**)&a ,sizeof(double)* size_dicom ); array_do <<<grid_t0 , block_t0>>>( c_d,c ); array_do <<<grid_t0 , block_t0>>>( m_d,m ); array_do <<<grid_t0 , block_t0>>>( b_d,b ); array_do <<<grid_t0 , block_t0>>>( p_d,p ); array_do <<<grid_t0 , block_t0>>>( u_d,u ); array_do <<<grid_t0 , block_t0>>>( v_d,v ); array_do <<<grid_t0 , block_t0>>>( w_d,w ); array_do <<<grid_t0 , block_t0>>>( a_d,a ); MPI_Barrier(MPI_COMM_WORLD); /////////////////////////////////////////////////////////////////////////////////////////// int num_trans_m_2 =(xd+4)*(ny+4)*2; int num_trans_m_1 =(xd+4)*(ny+4)*1; int startb =(xd+4)*( 0 *(ny+4)+0)+0; int start =(xd+4)*( 2 *(ny+4)+0)+0; int end =(xd+4)*( 4 *(ny+4)+0)+0; int endb =(xd+4)*( 6 *(ny+4)+0)+0; int startb_1 =(xd+4)*( 0 *(ny+4)+0)+0; int start_1 =(xd+4)*( 1 *(ny+4)+0)+0; int end_1 =(xd+4)*( 2 *(ny+4)+0)+0; int endb_1 =(xd+4)*( 3 *(ny+4)+0)+0; int num_trans_d =(xd+4)*(ny+4)*5; int startb_d =((xd+4)*( 0 *(ny+4)+0)+0)*5; int start_d =((xd+4)*( 1 *(ny+4)+0)+0)*5; int end_d =((xd+4)*( 2 *(ny+4)+0)+0)*5; int endb_d =((xd+4)*( 3 *(ny+4)+0)+0)*5; int num_trans_m_2_x =(ny+4)*(zd+4)*2; int num_trans_m_1_x =(ny+4)*(zd+4)*1; int startb_x =(ny+4)*( 0 *(zd+4)+0)+0; int start_x =(ny+4)*( 2 *(zd+4)+0)+0; int end_x =(ny+4)*( 4 *(zd+4)+0)+0; int endb_x =(ny+4)*( 6 *(zd+4)+0)+0; int startb_1_x =(ny+4)*( 0 *(zd+4)+0)+0; int start_1_x =(ny+4)*( 1 *(zd+4)+0)+0; int end_1_x =(ny+4)*( 2 *(zd+4)+0)+0; int endb_1_x =(ny+4)*( 3 *(zd+4)+0)+0; int num_trans_d_x =(ny+4)*(zd+4)*5; int startb_d_x =((ny+4)*( 0 *(zd+4)+0)+0)*5; int start_d_x =((ny+4)*( 1 *(zd+4)+0)+0)*5; int end_d_x =((ny+4)*( 2 *(zd+4)+0)+0)*5; int endb_d_x =((ny+4)*( 3 *(zd+4)+0)+0)*5; /////////////////////////////////////////////////////////////////////////////////////////// /* checkk <<<grid_t2 , block_t2>>>( c_d,c ); cudaMemcpy(c_d_h,c_d,sizeof(double)*size_dicom,cudaMemcpyDeviceToHost); FILE *check; if(myid==1){ check = fopen("check.dat","w"); fprintf( check, "VARIABLES=\"X\",\"Z\",\"c\"\n"); fprintf( check, "ZONE T=\"gpu\" F=POINT\n"); fprintf( check, "I=%d, J=%d\n", nx+4,zd+4); j=ny/2; for(k=0;k<zd+4;k++){ for(i=0;i<nx+4;i++){ index_3d(i,j,k); fprintf( check, "%d\t%d\t%e\t\n", i,k,c_d_h[index]); }} fclose(check); } */ /////////////////////////////////////////////////////////////////////////////////////////// ////y boundary_ym <<< xd+4 , zd+4 >>>( c ); ////z boundary_zm2 <<< xd+4 , ny+4 >>>( c,t_c ); cudaMemcpy(t_c_h, t_c, sizeof(double)* tran_mac_2 , cudaMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=110; MPI_Sendrecv ((void *)&t_c_h[end ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, ( void *)&t_c_h[startb], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=120; MPI_Sendrecv ((void *)&t_c_h[start ], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, ( void *)&t_c_h[endb ], 
num_trans_m_2, MPI_DOUBLE, t_nbr, itag, comm, istat); cudaMemcpy(t_c, t_c_h, sizeof(double)* tran_mac_2 , cudaMemcpyHostToDevice); boundary_zm2_undo <<< xd+4 , ny+4 >>>( c,t_c ); ////x boundary_xm2 <<< ny+4 , zd+4 >>>( c,t_c_x ); cudaMemcpy(t_c_x_h, t_c_x, sizeof(double)* tran_mac_2_x , cudaMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=11; MPI_Sendrecv ((void *)&t_c_x_h[end_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_c_x_h[startb_x], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=12; MPI_Sendrecv ((void *)&t_c_x_h[start_x ], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_c_x_h[endb_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, comm, istat); cudaMemcpy(t_c_x, t_c_x_h, sizeof(double)* tran_mac_2_x , cudaMemcpyHostToDevice); boundary_xm2_undo <<< ny+4 , zd+4 >>>( c,t_c_x ); /////////////////////////////////////////////////////////////////////////////////////////// chemical <<<grid_t0, block_t0>>>( c,m,kappa,beta ); // chemical_b <<<grid_t0, block_t0>>>( c,m,b,kappa,beta,phic );//wettability /////////////////////////////////////////////////////////////////////////////////////////// ////y boundary_ym <<< xd+4 , zd+4 >>>( m ); ////z boundary_zm2<<< xd+4 , ny+4 >>>( m,t_m ); cudaMemcpy(t_m_h, t_m, sizeof(double)* tran_mac_2 , cudaMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=110; MPI_Sendrecv ((void *)&t_m_h[end ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, ( void *)&t_m_h[startb], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=120; MPI_Sendrecv ((void *)&t_m_h[start ], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, ( void *)&t_m_h[endb ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, comm, istat); cudaMemcpy(t_m, t_m_h, sizeof(double)* tran_mac_2 , cudaMemcpyHostToDevice); boundary_zm2_undo <<< xd+4 , ny+4 >>>( m,t_m ); ////x boundary_xm2<<< ny+4 , zd+4 >>>( m,t_m_x ); cudaMemcpy(t_m_x_h, t_m_x, sizeof(double)* tran_mac_2_x , cudaMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=11; MPI_Sendrecv ((void *)&t_m_x_h[end_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_m_x_h[startb_x], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=12; MPI_Sendrecv ((void *)&t_m_x_h[start_x ], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_m_x_h[endb_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, comm, istat); cudaMemcpy(t_m_x, t_m_x_h, sizeof(double)* tran_mac_2_x , cudaMemcpyHostToDevice); boundary_xm2_undo <<< ny+4 , zd+4 >>>( m,t_m_x ); /////////////////////////////////////////////////////////////////////////////////////////// ////y boundary_ym <<< xd+4 , zd+4 >>>( b ); ////z boundary_zm2 <<< xd+4 , ny+4 >>>( b,t_b ); cudaMemcpy(t_b_h, t_b, sizeof(double)* tran_mac_2 , cudaMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=130; MPI_Sendrecv ((void *)&t_b_h[end ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, ( void *)&t_b_h[startb], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=140; MPI_Sendrecv ((void *)&t_b_h[start ], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, ( void *)&t_b_h[endb ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, comm, istat); cudaMemcpy(t_b, t_b_h, sizeof(double)* tran_mac_2 , cudaMemcpyHostToDevice); boundary_zm2_undo <<< xd+4 , ny+4 >>>( b,t_b ); ////x boundary_xm2 <<< ny+4 , zd+4 >>>( b,t_b_x ); cudaMemcpy(t_b_x_h, t_b_x, sizeof(double)* tran_mac_2_x , cudaMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=13; MPI_Sendrecv ((void *)&t_b_x_h[end_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_b_x_h[startb_x], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=14; MPI_Sendrecv 
((void *)&t_b_x_h[start_x ], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_b_x_h[endb_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, comm, istat); cudaMemcpy(t_b_x, t_b_x_h, sizeof(double)* tran_mac_2_x , cudaMemcpyHostToDevice); boundary_xm2_undo <<< ny+4 , zd+4 >>>( b,t_b_x ); /////////////////////////////////////////////////////////////////////////////////////////// ////y boundary_ym <<< xd+4 , zd+4 >>>( p ); ////z boundary_zm2 <<< xd+4 , ny+4 >>>( p,t_p ); cudaMemcpy(t_p_h, t_p, sizeof(double)* tran_mac_2 , cudaMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=150; MPI_Sendrecv ((void *)&t_p_h[end ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, ( void *)&t_p_h[startb], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=160; MPI_Sendrecv ((void *)&t_p_h[start ], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, ( void *)&t_p_h[endb ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, comm, istat); cudaMemcpy(t_p, t_p_h, sizeof(double)* tran_mac_2 , cudaMemcpyHostToDevice); boundary_zm2_undo <<< xd+4 , ny+4 >>>( p,t_p ); ////x boundary_xm2 <<< ny+4 , zd+4 >>>( p,t_p_x ); cudaMemcpy(t_p_x_h, t_p_x, sizeof(double)* tran_mac_2_x , cudaMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=15; MPI_Sendrecv ((void *)&t_p_x_h[end_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_p_x_h[startb_x], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=16; MPI_Sendrecv ((void *)&t_p_x_h[start_x ], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_p_x_h[endb_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, comm, istat); cudaMemcpy(t_p_x, t_p_x_h, sizeof(double)* tran_mac_2_x , cudaMemcpyHostToDevice); boundary_xm2_undo <<< ny+4 , zd+4 >>>( p,t_p_x ); /////////////////////////////////////////////////////////////////////////////////////////// ////y boundary_ym <<< xd+4 , zd+4 >>>( u ); ////z boundary_zm1 <<< xd+4 , ny+4 >>>( u,t_u ); cudaMemcpy(t_u_h, t_u, sizeof(double)* tran_mac_1 , cudaMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=170; MPI_Sendrecv ((void *)&t_u_h[end_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, ( void *)&t_u_h[startb_1], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=180; MPI_Sendrecv ((void *)&t_u_h[start_1 ], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, ( void *)&t_u_h[endb_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, comm, istat); cudaMemcpy(t_u, t_u_h, sizeof(double)* tran_mac_1 , cudaMemcpyHostToDevice); boundary_zm1_undo <<< xd+4 , ny+4 >>>( u,t_u ); ////x boundary_xm1 <<< ny+4 , zd+4 >>>( u,t_u_x ); cudaMemcpy(t_u_x_h, t_u_x, sizeof(double)* tran_mac_1_x , cudaMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=17; MPI_Sendrecv ((void *)&t_u_x_h[end_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_u_x_h[startb_1_x], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=18; MPI_Sendrecv ((void *)&t_u_x_h[start_1_x ], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_u_x_h[endb_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, comm, istat); cudaMemcpy(t_u_x, t_u_x_h, sizeof(double)* tran_mac_1_x , cudaMemcpyHostToDevice); boundary_xm1_undo <<< ny+4 , zd+4 >>>( u,t_u_x ); /////////////////////////////////////////////////////////////////////////////////////////// ////y boundary_ym <<< xd+4 , zd+4 >>>( v ); ////z boundary_zm1 <<< xd+4 , ny+4 >>>( v,t_v ); cudaMemcpy(t_v_h, t_v, sizeof(double)* tran_mac_1 , cudaMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=190; MPI_Sendrecv ((void *)&t_v_h[end_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, ( void *)&t_v_h[startb_1], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, comm, 
istat); itag=200; MPI_Sendrecv ((void *)&t_v_h[start_1 ], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, ( void *)&t_v_h[endb_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, comm, istat); cudaMemcpy(t_v, t_v_h, sizeof(double)* tran_mac_1 , cudaMemcpyHostToDevice); boundary_zm1_undo <<< xd+4 , ny+4 >>>( v,t_v ); ////x boundary_xm1 <<< ny+4 , zd+4 >>>( v,t_v_x ); cudaMemcpy(t_v_x_h, t_v_x, sizeof(double)* tran_mac_1_x , cudaMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=19; MPI_Sendrecv ((void *)&t_v_x_h[end_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_v_x_h[startb_1_x], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=20; MPI_Sendrecv ((void *)&t_v_x_h[start_1_x ], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_v_x_h[endb_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, comm, istat); cudaMemcpy(t_v_x, t_v_x_h, sizeof(double)* tran_mac_1_x , cudaMemcpyHostToDevice); boundary_xm1_undo <<< ny+4 , zd+4 >>>( v,t_v_x ); /////////////////////////////////////////////////////////////////////////////////////////// ////y boundary_ym <<< xd+4 , zd+4 >>>( w ); ////z boundary_zm1 <<< xd+4 , ny+4 >>>( w,t_w ); cudaMemcpy(t_w_h, t_w, sizeof(double)* tran_mac_1 , cudaMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=210; MPI_Sendrecv ((void *)&t_w_h[end_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, ( void *)&t_w_h[startb_1], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=220; MPI_Sendrecv ((void *)&t_w_h[start_1 ], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, ( void *)&t_w_h[endb_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, comm, istat); cudaMemcpy(t_w, t_w_h, sizeof(double)* tran_mac_1 , cudaMemcpyHostToDevice); boundary_zm1_undo <<< xd+4 , ny+4 >>>( w,t_w ); ////x boundary_xm1 <<< ny+4 , zd+4 >>>( w,t_w_x ); cudaMemcpy(t_w_x_h, t_w_x, sizeof(double)* tran_mac_1_x , cudaMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); itag=21; MPI_Sendrecv ((void *)&t_w_x_h[end_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_w_x_h[startb_1_x], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=22; MPI_Sendrecv ((void *)&t_w_x_h[start_1_x ], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_w_x_h[endb_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, comm, istat); cudaMemcpy(t_w_x, t_w_x_h, sizeof(double)* tran_mac_1_x , cudaMemcpyHostToDevice); boundary_xm1_undo <<< ny+4 , zd+4 >>>( w,t_w_x ); MPI_Barrier(MPI_COMM_WORLD); /////////////////////////////////////////////////////////////////////////////////////////// gradient_cen <<<grid_t0, block_t0,0>>>(gra_c,c); gradient_cen <<<grid_t0, block_t0,0>>>(gra_m,m); cudaThreadSynchronize(); eq_collision <<<grid_t0, block_t0 >>>( g,h,c,m,p,gravity,gra_c,gra_m,u,v,w,mobi); cudaThreadSynchronize(); cudaStream_t stream0,stream1; int leastPriority; int greatestPriority; cudaDeviceGetStreamPriorityRange (&leastPriority,&greatestPriority); int priority=greatestPriority; cudaStreamCreateWithPriority(&stream0,0,priority); cudaStreamCreate(&stream1); //time cudaEvent_t gpu_start,gpu_start_temp,gpu_stop,gpu_stop_temp; cudaEventCreate(&gpu_start); cudaEventCreate(&gpu_stop); cudaEventCreate(&gpu_start_temp); cudaEventCreate(&gpu_stop_temp); cudaEventRecord(gpu_start_temp,0); cudaEventRecord(gpu_start,0); /////////////////////////////////////////////////////////////////////////////////////////// // sstart // /////////////////////////////////////////////////////////////////////////////////////////// for(step=1;step<=stepall;step++){ eq_collision_bc <<< grid_bc , block_t0 , 0, stream0 >>>( 
g,h,c,m,p,gravity,gra_c,gra_m,u,v,w,mobi ); eq_collision_bc_x <<< grid_bc , block_t0_x , 0, stream0 >>>( g,h,c,m,p,gravity,gra_c,gra_m,u,v,w,mobi ); boundary_yd_bc <<< xd , q , 0, stream0 >>>( g,h ); boundary_yd_bc_x <<< zd , q , 0, stream0 >>>( g,h ); ////z... boundary_zd <<< xd+2 , ny+2 , 0, stream0 >>>( g,t_g ); boundary_zd <<< xd+2 , ny+2 , 0, stream0 >>>( h,t_h ); eq_collision_in <<< grid_in , xd-2 , 0, stream1 >>>( g,h,c,m,p,gravity,gra_c,gra_m,u,v,w,mobi ); ////...z cudaMemcpyAsync(t_g_h, t_g, sizeof(double)*tran_difun , cudaMemcpyDeviceToHost,stream0); cudaMemcpyAsync(t_h_h, t_h, sizeof(double)*tran_difun , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=230; MPI_Sendrecv ((void *)&t_g_h[end_d ], num_trans_d, MPI_DOUBLE, t_nbr, itag, ( void *)&t_g_h[startb_d], num_trans_d, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=231; MPI_Sendrecv ((void *)&t_g_h[start_d ], num_trans_d, MPI_DOUBLE, b_nbr, itag, ( void *)&t_g_h[endb_d ], num_trans_d, MPI_DOUBLE, t_nbr, itag, comm, istat); itag=232; MPI_Sendrecv ((void *)&t_h_h[end_d ], num_trans_d, MPI_DOUBLE, t_nbr, itag, ( void *)&t_h_h[startb_d], num_trans_d, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=233; MPI_Sendrecv ((void *)&t_h_h[start_d ], num_trans_d, MPI_DOUBLE, b_nbr, itag, ( void *)&t_h_h[endb_d ], num_trans_d, MPI_DOUBLE, t_nbr, itag, comm, istat); cudaStreamSynchronize(stream0); cudaMemcpyAsync(t_g, t_g_h, sizeof(double)*tran_difun , cudaMemcpyHostToDevice,stream0); cudaMemcpyAsync(t_h, t_h_h, sizeof(double)*tran_difun , cudaMemcpyHostToDevice,stream0); boundary_zd_undo <<< xd+2 , ny+2 , 0, stream0 >>>( g,t_g ); boundary_zd_undo <<< xd+2 , ny+2 , 0, stream0 >>>( h,t_h ); ////x... boundary_xd <<< ny+2 , zd+2 , 0, stream0 >>>( g,t_g_x ); boundary_xd <<< ny+2 , zd+2 , 0, stream0 >>>( h,t_h_x ); boundary_yd_in <<< xd-2 , zd-2 , 0, stream1 >>>( g,h ); ////...x cudaMemcpyAsync(t_g_x_h, t_g_x, sizeof(double)*tran_difun_x , cudaMemcpyDeviceToHost,stream0); cudaMemcpyAsync(t_h_x_h, t_h_x, sizeof(double)*tran_difun_x , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=23; MPI_Sendrecv ((void *)&t_g_x_h[end_d_x ], num_trans_d_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_g_x_h[startb_d_x], num_trans_d_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=24; MPI_Sendrecv ((void *)&t_g_x_h[start_d_x ], num_trans_d_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_g_x_h[endb_d_x ], num_trans_d_x, MPI_DOUBLE, r_nbr, itag, comm, istat); itag=25; MPI_Sendrecv ((void *)&t_h_x_h[end_d_x ], num_trans_d_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_h_x_h[startb_d_x], num_trans_d_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=26; MPI_Sendrecv ((void *)&t_h_x_h[start_d_x ], num_trans_d_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_h_x_h[endb_d_x ], num_trans_d_x, MPI_DOUBLE, r_nbr, itag, comm, istat); cudaStreamSynchronize(stream0); cudaMemcpyAsync(t_g_x, t_g_x_h, sizeof(double)*tran_difun_x , cudaMemcpyHostToDevice,stream0); cudaMemcpyAsync(t_h_x, t_h_x_h, sizeof(double)*tran_difun_x , cudaMemcpyHostToDevice,stream0); boundary_xd_undo <<< ny+2 , zd+2 , 0, stream0 >>>( g,t_g_x ); boundary_xd_undo <<< ny+2 , zd+2 , 0, stream0 >>>( h,t_h_x ); /////////////////////////////////////////////////////////////////////////////////////////// cudaDeviceSynchronize(); macro_h_bc <<< grid_bc , block_t0 , 0, stream0 >>>( h,h_t,c ); macro_h_bc_x <<< grid_bc , zd-4 , 0, stream0 >>>( h,h_t,c ); boundary_ym_bc <<< 1 , xd , 0, stream0 >>>( c ); boundary_ym_bc_x <<< 1 , zd , 0, stream0 >>>( c ); ////z... 
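// Recurring halo-exchange idiom (here for the freshly streamed order parameter c):
// stream0 packs the ghost slabs into the small t_* buffer (boundary_z*/boundary_x*),
// copies it to pinned host memory, swaps it with the corresponding neighbour of the
// 2-D Cartesian communicator via MPI_Sendrecv, copies it back and unpacks it with the
// *_undo kernel.  The interior kernel (macro_h_in below) is issued on stream1 so that
// its work overlaps the communication.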
boundary_zm2 <<< xd+4 , ny+4 , 0, stream0 >>>( c,t_c ); macro_h_in <<< grid_in2 , xd-4 , 0, stream1 >>>( h,h_t,c ); ////...z cudaMemcpyAsync(t_c_h, t_c, sizeof(double)* tran_mac_2 , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=110; MPI_Sendrecv ((void *)&t_c_h[end ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, ( void *)&t_c_h[startb], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=120; MPI_Sendrecv ((void *)&t_c_h[start ], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, ( void *)&t_c_h[endb ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, comm, istat); cudaStreamSynchronize(stream0); cudaMemcpyAsync(t_c, t_c_h, sizeof(double)* tran_mac_2 , cudaMemcpyHostToDevice,stream0); boundary_zm2_undo <<< xd+4 , ny+4 , 0, stream0 >>>( c,t_c ); ////x... boundary_xm2 <<< ny+4 , zd+4 , 0, stream0 >>>( c,t_c_x ); boundary_ym_in <<< xd-4 , zd-4 , 0, stream1 >>>( c ); ////...x cudaMemcpyAsync(t_c_x_h, t_c_x, sizeof(double)* tran_mac_2_x , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=11; MPI_Sendrecv ((void *)&t_c_x_h[end_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_c_x_h[startb_x], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=12; MPI_Sendrecv ((void *)&t_c_x_h[start_x ], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_c_x_h[endb_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, comm, istat); cudaStreamSynchronize(stream0); cudaMemcpyAsync(t_c_x, t_c_x_h, sizeof(double)* tran_mac_2_x , cudaMemcpyHostToDevice,stream0); boundary_xm2_undo <<< ny+4 , zd+4 , 0, stream0 >>>( c,t_c_x ); /////////////////////////////////////////////////////////////////////////////////////////// cudaDeviceSynchronize(); chemical_bc <<< grid_bc , block_t0 , 0, stream0 >>>( c,m,kappa,beta ); chemical_bc_x <<< grid_bc , zd-4 , 0, stream0 >>>( c,m,kappa,beta ); boundary_ym_bc <<< 1 , xd , 0, stream0 >>>( m ); boundary_ym_bc_x <<< 1 , zd , 0, stream0 >>>( m ); ////z... boundary_zm2 <<< xd+4 , ny+4 , 0, stream0 >>>( m,t_m ); chemical_in <<< grid_in2 , xd-4 , 0, stream1 >>>( c,m,kappa,beta ); ////...z cudaMemcpyAsync(t_m_h, t_m, sizeof(double)* tran_mac_2 , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=110; MPI_Sendrecv ((void *)&t_m_h[end ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, ( void *)&t_m_h[startb], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=120; MPI_Sendrecv ((void *)&t_m_h[start ], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, ( void *)&t_m_h[endb ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, comm, istat); cudaStreamSynchronize(stream0); cudaMemcpyAsync(t_m, t_m_h, sizeof(double)* tran_mac_2 , cudaMemcpyHostToDevice,stream0); boundary_zm2_undo <<< xd+4 , ny+4 , 0, stream0 >>>( m,t_m ); ////x... 
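// Same pattern for the x-face halos of the updated chemical potential m.  While
// stream0 does the pack / exchange / unpack, stream1 fills the interior y-ghost
// layers of m and already computes the gradient of c (gradient_cen(gra_c,c)),
// which both the macroscopic g-update and the next collision step need.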
boundary_xm2 <<< ny+4 , zd+4 , 0, stream0 >>>( m,t_m_x ); boundary_ym_in <<< xd-4 , zd-4 , 0, stream1 >>>( m ); gradient_cen <<< grid_t0 , block_t0 , 0, stream1 >>>( gra_c,c ); ////...x cudaMemcpyAsync(t_m_x_h, t_m_x, sizeof(double)* tran_mac_2_x , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=11; MPI_Sendrecv ((void *)&t_m_x_h[end_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_m_x_h[startb_x], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=12; MPI_Sendrecv ((void *)&t_m_x_h[start_x ], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_m_x_h[endb_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, comm, istat); cudaStreamSynchronize(stream0); cudaMemcpyAsync(t_m_x, t_m_x_h, sizeof(double)* tran_mac_2_x , cudaMemcpyHostToDevice,stream0); boundary_xm2_undo <<< ny+4 , zd+4 , 0, stream0 >>>( m,t_m_x ); /////////////////////////////////////////////////////////////////////////////////////////// cudaDeviceSynchronize(); gradient_cen <<< grid_t0 , block_t0 >>>( gra_m,m ); macro_g_bc <<< grid_bc , block_t0 , 0, stream0 >>>( g,g_t,c,m,p,gra_c,gra_m,u,v,w ); macro_g_bc_x <<< grid_bc , zd-4 , 0, stream0 >>>( g,g_t,c,m,p,gra_c,gra_m,u,v,w ); ////y bc boundary_ym_bc <<< 1 , xd , 0, stream0 >>>( u ); boundary_ym_bc <<< 1 , xd , 0, stream0 >>>( v ); boundary_ym_bc <<< 1 , xd , 0, stream0 >>>( w ); boundary_ym_bc <<< 1 , xd , 0, stream0 >>>( p ); boundary_ym_bc_x <<< 1 , zd , 0, stream0 >>>( u ); boundary_ym_bc_x <<< 1 , zd , 0, stream0 >>>( v ); boundary_ym_bc_x <<< 1 , zd , 0, stream0 >>>( w ); boundary_ym_bc_x <<< 1 , zd , 0, stream0 >>>( p ); ////z... boundary_zm2 <<< xd+4 , ny+4 , 0, stream0 >>>( p,t_p ); boundary_zm1 <<< xd+4 , ny+4 , 0, stream0 >>>( u,t_u ); boundary_zm1 <<< xd+4 , ny+4 , 0, stream0 >>>( v,t_v ); boundary_zm1 <<< xd+4 , ny+4 , 0, stream0 >>>( w,t_w ); macro_g_in <<< grid_in2 , xd-4 , 0, stream1 >>>( g,g_t,c,m,p,gra_c,gra_m,u,v,w); ////...z cudaMemcpyAsync(t_p_h, t_p, sizeof(double)* tran_mac_2 , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=150; MPI_Sendrecv ((void *)&t_p_h[end ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, ( void *)&t_p_h[startb], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=160; MPI_Sendrecv ((void *)&t_p_h[start ], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, ( void *)&t_p_h[endb ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, comm, istat); cudaMemcpyAsync(t_u_h, t_u, sizeof(double)* tran_mac_1 , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=170; MPI_Sendrecv ((void *)&t_u_h[end_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, ( void *)&t_u_h[startb_1], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=180; MPI_Sendrecv ((void *)&t_u_h[start_1 ], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, ( void *)&t_u_h[endb_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, comm, istat); cudaMemcpyAsync(t_v_h, t_v, sizeof(double)* tran_mac_1 , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=190; MPI_Sendrecv ((void *)&t_v_h[end_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, ( void *)&t_v_h[startb_1], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=200; MPI_Sendrecv ((void *)&t_v_h[start_1 ], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, ( void *)&t_v_h[endb_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, comm, istat); cudaMemcpyAsync(t_w_h, t_w, sizeof(double)* tran_mac_1 , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=210; MPI_Sendrecv ((void *)&t_w_h[end_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, ( void *)&t_w_h[startb_1], num_trans_m_1, 
MPI_DOUBLE, b_nbr, itag, comm, istat); itag=220; MPI_Sendrecv ((void *)&t_w_h[start_1 ], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, ( void *)&t_w_h[endb_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, comm, istat); cudaStreamSynchronize(stream0); cudaMemcpyAsync(t_p, t_p_h, sizeof(double)* tran_mac_2 , cudaMemcpyHostToDevice,stream0); cudaMemcpyAsync(t_u, t_u_h, sizeof(double)* tran_mac_1 , cudaMemcpyHostToDevice,stream0); cudaMemcpyAsync(t_v, t_v_h, sizeof(double)* tran_mac_1 , cudaMemcpyHostToDevice,stream0); cudaMemcpyAsync(t_w, t_w_h, sizeof(double)* tran_mac_1 , cudaMemcpyHostToDevice,stream0); cudaStreamSynchronize(stream0); boundary_zm2_undo <<< xd+4 , ny+4 , 0, stream0 >>>( p,t_p ); boundary_zm1_undo <<< xd+4 , ny+4 , 0, stream0 >>>( u,t_u ); boundary_zm1_undo <<< xd+4 , ny+4 , 0, stream0 >>>( v,t_v ); boundary_zm1_undo <<< xd+4 , ny+4 , 0, stream0 >>>( w,t_w ); ////x... boundary_xm2 <<< ny+4 , zd+4 , 0, stream0 >>>( p,t_p_x ); boundary_xm1 <<< ny+4 , zd+4 , 0, stream0 >>>( u,t_u_x ); boundary_xm1 <<< ny+4 , zd+4 , 0, stream0 >>>( v,t_v_x ); boundary_xm1 <<< ny+4 , zd+4 , 0, stream0 >>>( w,t_w_x ); boundary_ym_in <<< xd-4 , zd-4 , 0, stream1 >>>( p ); boundary_ym_in <<< xd-4 , zd-4 , 0, stream1 >>>( u ); boundary_ym_in <<< xd-4 , zd-4 , 0, stream1 >>>( v ); boundary_ym_in <<< xd-4 , zd-4 , 0, stream1 >>>( w ); ////...x cudaMemcpyAsync(t_p_x_h, t_p_x, sizeof(double)* tran_mac_2_x , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=15; MPI_Sendrecv ((void *)&t_p_x_h[end_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_p_x_h[startb_x], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=16; MPI_Sendrecv ((void *)&t_p_x_h[start_x ], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_p_x_h[endb_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, comm, istat); cudaMemcpyAsync(t_u_x_h, t_u_x, sizeof(double)* tran_mac_1_x , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=17; MPI_Sendrecv ((void *)&t_u_x_h[end_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_u_x_h[startb_1_x], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=18; MPI_Sendrecv ((void *)&t_u_x_h[start_1_x ], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_u_x_h[endb_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, comm, istat); cudaMemcpyAsync(t_v_x_h, t_v_x, sizeof(double)* tran_mac_1_x , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=19; MPI_Sendrecv ((void *)&t_v_x_h[end_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_v_x_h[startb_1_x], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=20; MPI_Sendrecv ((void *)&t_v_x_h[start_1_x ], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_v_x_h[endb_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, comm, istat); cudaMemcpyAsync(t_w_x_h, t_w_x, sizeof(double)* tran_mac_1_x , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=21; MPI_Sendrecv ((void *)&t_w_x_h[end_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_w_x_h[startb_1_x], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=22; MPI_Sendrecv ((void *)&t_w_x_h[start_1_x ], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_w_x_h[endb_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, comm, istat); cudaStreamSynchronize(stream0); cudaMemcpyAsync(t_p_x, t_p_x_h, sizeof(double)* tran_mac_2_x , cudaMemcpyHostToDevice,stream0); cudaMemcpyAsync(t_u_x, t_u_x_h, sizeof(double)* tran_mac_1_x , cudaMemcpyHostToDevice,stream0); cudaMemcpyAsync(t_v_x, t_v_x_h, 
sizeof(double)* tran_mac_1_x , cudaMemcpyHostToDevice,stream0); cudaMemcpyAsync(t_w_x, t_w_x_h, sizeof(double)* tran_mac_1_x , cudaMemcpyHostToDevice,stream0); cudaStreamSynchronize(stream0); boundary_xm2_undo <<< ny+4 , zd+4 , 0, stream0 >>>( p,t_p_x ); boundary_xm1_undo <<< ny+4 , zd+4 , 0, stream0 >>>( u,t_u_x ); boundary_xm1_undo <<< ny+4 , zd+4 , 0, stream0 >>>( v,t_v_x ); boundary_xm1_undo <<< ny+4 , zd+4 , 0, stream0 >>>( w,t_w_x ); /////////////////////////////////////////////////////////////////////////////////////////// // nnext time step // /////////////////////////////////////////////////////////////////////////////////////////// step=step+1; cudaDeviceSynchronize(); eq_collision_bc <<< grid_bc , block_t0 , 0, stream0 >>>( g_t,h_t,c,m,p,gravity,gra_c,gra_m,u,v,w,mobi ); eq_collision_bc_x <<< grid_bc , block_t0_x , 0, stream0 >>>( g_t,h_t,c,m,p,gravity,gra_c,gra_m,u,v,w,mobi ); boundary_yd_bc <<< xd , q , 0, stream0 >>>( g_t,h_t ); boundary_yd_bc_x <<< zd , q , 0, stream0 >>>( g_t,h_t ); ////z... boundary_zd <<< xd+2 , ny+2 , 0, stream0 >>>( g_t,t_g ); boundary_zd <<< xd+2 , ny+2 , 0, stream0 >>>( h_t,t_h ); eq_collision_in <<< grid_in , xd-2 , 0, stream1 >>>( g_t,h_t,c,m,p,gravity,gra_c,gra_m,u,v,w,mobi ); ////...z cudaMemcpyAsync(t_g_h, t_g, sizeof(double)*tran_difun , cudaMemcpyDeviceToHost,stream0); cudaMemcpyAsync(t_h_h, t_h, sizeof(double)*tran_difun , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=230; MPI_Sendrecv ((void *)&t_g_h[end_d ], num_trans_d, MPI_DOUBLE, t_nbr, itag, ( void *)&t_g_h[startb_d], num_trans_d, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=231; MPI_Sendrecv ((void *)&t_g_h[start_d ], num_trans_d, MPI_DOUBLE, b_nbr, itag, ( void *)&t_g_h[endb_d ], num_trans_d, MPI_DOUBLE, t_nbr, itag, comm, istat); itag=232; MPI_Sendrecv ((void *)&t_h_h[end_d ], num_trans_d, MPI_DOUBLE, t_nbr, itag, ( void *)&t_h_h[startb_d], num_trans_d, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=233; MPI_Sendrecv ((void *)&t_h_h[start_d ], num_trans_d, MPI_DOUBLE, b_nbr, itag, ( void *)&t_h_h[endb_d ], num_trans_d, MPI_DOUBLE, t_nbr, itag, comm, istat); cudaStreamSynchronize(stream0); cudaMemcpyAsync(t_g, t_g_h, sizeof(double)*tran_difun , cudaMemcpyHostToDevice,stream0); cudaMemcpyAsync(t_h, t_h_h, sizeof(double)*tran_difun , cudaMemcpyHostToDevice,stream0); boundary_zd_undo <<< xd+2 , ny+2 , 0, stream0 >>>( g_t,t_g ); boundary_zd_undo <<< xd+2 , ny+2 , 0, stream0 >>>( h_t,t_h ); ////x... 
boundary_xd <<< ny+2 , zd+2 , 0, stream0 >>>( g_t,t_g_x ); boundary_xd <<< ny+2 , zd+2 , 0, stream0 >>>( h_t,t_h_x ); boundary_yd_in <<< xd-2 , zd-2 , 0, stream1 >>>( g_t,h_t ); ////...x cudaMemcpyAsync(t_g_x_h, t_g_x, sizeof(double)*tran_difun_x , cudaMemcpyDeviceToHost,stream0); cudaMemcpyAsync(t_h_x_h, t_h_x, sizeof(double)*tran_difun_x , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=23; MPI_Sendrecv ((void *)&t_g_x_h[end_d_x ], num_trans_d_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_g_x_h[startb_d_x], num_trans_d_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=24; MPI_Sendrecv ((void *)&t_g_x_h[start_d_x ], num_trans_d_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_g_x_h[endb_d_x ], num_trans_d_x, MPI_DOUBLE, r_nbr, itag, comm, istat); itag=25; MPI_Sendrecv ((void *)&t_h_x_h[end_d_x ], num_trans_d_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_h_x_h[startb_d_x], num_trans_d_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=26; MPI_Sendrecv ((void *)&t_h_x_h[start_d_x ], num_trans_d_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_h_x_h[endb_d_x ], num_trans_d_x, MPI_DOUBLE, r_nbr, itag, comm, istat); cudaStreamSynchronize(stream0); cudaMemcpyAsync(t_g_x, t_g_x_h, sizeof(double)*tran_difun_x , cudaMemcpyHostToDevice,stream0); cudaMemcpyAsync(t_h_x, t_h_x_h, sizeof(double)*tran_difun_x , cudaMemcpyHostToDevice,stream0); boundary_xd_undo <<< ny+2 , zd+2 , 0, stream0 >>>( g_t,t_g_x ); boundary_xd_undo <<< ny+2 , zd+2 , 0, stream0 >>>( h_t,t_h_x ); /////////////////////////////////////////////////////////////////////////////////////////// cudaDeviceSynchronize(); macro_h_bc <<< grid_bc , block_t0 , 0, stream0 >>>( h_t,h,c ); macro_h_bc_x <<< grid_bc , zd-4 , 0, stream0 >>>( h_t,h,c ); boundary_ym_bc <<< 1 , xd , 0, stream0 >>>( c ); boundary_ym_bc_x <<< 1 , zd , 0, stream0 >>>( c ); ////z... boundary_zm2 <<< xd+4 , ny+4 , 0, stream0 >>>( c,t_c ); macro_h_in <<< grid_in2 , xd-4 , 0, stream1 >>>( h_t,h,c ); ////...z cudaMemcpyAsync(t_c_h, t_c, sizeof(double)* tran_mac_2 , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=110; MPI_Sendrecv ((void *)&t_c_h[end ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, ( void *)&t_c_h[startb], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=120; MPI_Sendrecv ((void *)&t_c_h[start ], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, ( void *)&t_c_h[endb ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, comm, istat); cudaStreamSynchronize(stream0); cudaMemcpyAsync(t_c, t_c_h, sizeof(double)* tran_mac_2 , cudaMemcpyHostToDevice,stream0); boundary_zm2_undo <<< xd+4 , ny+4 , 0, stream0 >>>( c,t_c ); ////x... 
boundary_xm2 <<< ny+4 , zd+4 , 0, stream0 >>>( c,t_c_x ); boundary_ym_in <<< xd-4 , zd-4 , 0, stream1 >>>( c ); ////...x cudaMemcpyAsync(t_c_x_h, t_c_x, sizeof(double)* tran_mac_2_x , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=11; MPI_Sendrecv ((void *)&t_c_x_h[end_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_c_x_h[startb_x], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=12; MPI_Sendrecv ((void *)&t_c_x_h[start_x ], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_c_x_h[endb_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, comm, istat); cudaStreamSynchronize(stream0); cudaMemcpyAsync(t_c_x, t_c_x_h, sizeof(double)* tran_mac_2_x , cudaMemcpyHostToDevice,stream0); boundary_xm2_undo <<< ny+4 , zd+4 , 0, stream0 >>>( c,t_c_x ); /////////////////////////////////////////////////////////////////////////////////////////// cudaDeviceSynchronize(); chemical_bc <<< grid_bc , block_t0 , 0, stream0 >>>( c,m,kappa,beta ); chemical_bc_x <<< grid_bc , zd-4 , 0, stream0 >>>( c,m,kappa,beta ); boundary_ym_bc <<< 1 , xd , 0, stream0 >>>( m ); boundary_ym_bc_x <<< 1 , zd , 0, stream0 >>>( m ); ////z... boundary_zm2 <<< xd+4 , ny+4 , 0, stream0 >>>( m,t_m ); chemical_in <<< grid_in2 , xd-4 , 0, stream1 >>>( c,m,kappa,beta ); ////...z cudaMemcpyAsync(t_m_h, t_m, sizeof(double)* tran_mac_2 , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=110; MPI_Sendrecv ((void *)&t_m_h[end ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, ( void *)&t_m_h[startb], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=120; MPI_Sendrecv ((void *)&t_m_h[start ], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, ( void *)&t_m_h[endb ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, comm, istat); cudaStreamSynchronize(stream0); cudaMemcpyAsync(t_m, t_m_h, sizeof(double)* tran_mac_2 , cudaMemcpyHostToDevice,stream0); boundary_zm2_undo <<< xd+4 , ny+4 , 0, stream0 >>>( m,t_m ); ////x... boundary_xm2 <<< ny+4 , zd+4 , 0, stream0 >>>( m,t_m_x ); boundary_ym_in <<< xd-4 , zd-4 , 0, stream1 >>>( m ); gradient_cen <<< grid_t0 , block_t0 , 0, stream1 >>>( gra_c,c ); ////...x cudaMemcpyAsync(t_m_x_h, t_m_x, sizeof(double)* tran_mac_2_x , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=11; MPI_Sendrecv ((void *)&t_m_x_h[end_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_m_x_h[startb_x], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=12; MPI_Sendrecv ((void *)&t_m_x_h[start_x ], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_m_x_h[endb_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, comm, istat); cudaStreamSynchronize(stream0); cudaMemcpyAsync(t_m_x, t_m_x_h, sizeof(double)* tran_mac_2_x , cudaMemcpyHostToDevice,stream0); boundary_xm2_undo <<< ny+4 , zd+4 , 0, stream0 >>>( m,t_m_x ); /////////////////////////////////////////////////////////////////////////////////////////// cudaDeviceSynchronize(); gradient_cen <<< grid_t0 , block_t0 >>>( gra_m,m ); macro_g_bc <<< grid_bc , block_t0 , 0, stream0 >>>( g_t,g,c,m,p,gra_c,gra_m,u,v,w ); macro_g_bc_x <<< grid_bc , zd-4 , 0, stream0 >>>( g_t,g,c,m,p,gra_c,gra_m,u,v,w ); ////y bc boundary_ym_bc <<< 1 , xd , 0, stream0 >>>( u ); boundary_ym_bc <<< 1 , xd , 0, stream0 >>>( v ); boundary_ym_bc <<< 1 , xd , 0, stream0 >>>( w ); boundary_ym_bc <<< 1 , xd , 0, stream0 >>>( p ); boundary_ym_bc_x <<< 1 , zd , 0, stream0 >>>( u ); boundary_ym_bc_x <<< 1 , zd , 0, stream0 >>>( v ); boundary_ym_bc_x <<< 1 , zd , 0, stream0 >>>( w ); boundary_ym_bc_x <<< 1 , zd , 0, stream0 >>>( p ); ////z... 
boundary_zm2 <<< xd+4 , ny+4 , 0, stream0 >>>( p,t_p ); boundary_zm1 <<< xd+4 , ny+4 , 0, stream0 >>>( u,t_u ); boundary_zm1 <<< xd+4 , ny+4 , 0, stream0 >>>( v,t_v ); boundary_zm1 <<< xd+4 , ny+4 , 0, stream0 >>>( w,t_w ); macro_g_in <<< grid_in2 , xd-4 , 0, stream1 >>>( g_t,g,c,m,p,gra_c,gra_m,u,v,w); ////...z cudaMemcpyAsync(t_p_h, t_p, sizeof(double)* tran_mac_2 , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=150; MPI_Sendrecv ((void *)&t_p_h[end ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, ( void *)&t_p_h[startb], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=160; MPI_Sendrecv ((void *)&t_p_h[start ], num_trans_m_2, MPI_DOUBLE, b_nbr, itag, ( void *)&t_p_h[endb ], num_trans_m_2, MPI_DOUBLE, t_nbr, itag, comm, istat); cudaMemcpyAsync(t_u_h, t_u, sizeof(double)* tran_mac_1 , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=170; MPI_Sendrecv ((void *)&t_u_h[end_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, ( void *)&t_u_h[startb_1], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=180; MPI_Sendrecv ((void *)&t_u_h[start_1 ], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, ( void *)&t_u_h[endb_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, comm, istat); cudaMemcpyAsync(t_v_h, t_v, sizeof(double)* tran_mac_1 , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=190; MPI_Sendrecv ((void *)&t_v_h[end_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, ( void *)&t_v_h[startb_1], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=200; MPI_Sendrecv ((void *)&t_v_h[start_1 ], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, ( void *)&t_v_h[endb_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, comm, istat); cudaMemcpyAsync(t_w_h, t_w, sizeof(double)* tran_mac_1 , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=210; MPI_Sendrecv ((void *)&t_w_h[end_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, ( void *)&t_w_h[startb_1], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, comm, istat); itag=220; MPI_Sendrecv ((void *)&t_w_h[start_1 ], num_trans_m_1, MPI_DOUBLE, b_nbr, itag, ( void *)&t_w_h[endb_1 ], num_trans_m_1, MPI_DOUBLE, t_nbr, itag, comm, istat); cudaStreamSynchronize(stream0); cudaMemcpyAsync(t_p, t_p_h, sizeof(double)* tran_mac_2 , cudaMemcpyHostToDevice,stream0); cudaMemcpyAsync(t_u, t_u_h, sizeof(double)* tran_mac_1 , cudaMemcpyHostToDevice,stream0); cudaMemcpyAsync(t_v, t_v_h, sizeof(double)* tran_mac_1 , cudaMemcpyHostToDevice,stream0); cudaMemcpyAsync(t_w, t_w_h, sizeof(double)* tran_mac_1 , cudaMemcpyHostToDevice,stream0); cudaStreamSynchronize(stream0); boundary_zm2_undo <<< xd+4 , ny+4 , 0, stream0 >>>( p,t_p ); boundary_zm1_undo <<< xd+4 , ny+4 , 0, stream0 >>>( u,t_u ); boundary_zm1_undo <<< xd+4 , ny+4 , 0, stream0 >>>( v,t_v ); boundary_zm1_undo <<< xd+4 , ny+4 , 0, stream0 >>>( w,t_w ); ////x... 
boundary_xm2 <<< ny+4 , zd+4 , 0, stream0 >>>( p,t_p_x ); boundary_xm1 <<< ny+4 , zd+4 , 0, stream0 >>>( u,t_u_x ); boundary_xm1 <<< ny+4 , zd+4 , 0, stream0 >>>( v,t_v_x ); boundary_xm1 <<< ny+4 , zd+4 , 0, stream0 >>>( w,t_w_x ); boundary_ym_in <<< xd-4 , zd-4 , 0, stream1 >>>( p ); boundary_ym_in <<< xd-4 , zd-4 , 0, stream1 >>>( u ); boundary_ym_in <<< xd-4 , zd-4 , 0, stream1 >>>( v ); boundary_ym_in <<< xd-4 , zd-4 , 0, stream1 >>>( w ); ////...x cudaMemcpyAsync(t_p_x_h, t_p_x, sizeof(double)* tran_mac_2_x , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=15; MPI_Sendrecv ((void *)&t_p_x_h[end_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_p_x_h[startb_x], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=16; MPI_Sendrecv ((void *)&t_p_x_h[start_x ], num_trans_m_2_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_p_x_h[endb_x ], num_trans_m_2_x, MPI_DOUBLE, r_nbr, itag, comm, istat); cudaMemcpyAsync(t_u_x_h, t_u_x, sizeof(double)* tran_mac_1_x , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=17; MPI_Sendrecv ((void *)&t_u_x_h[end_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_u_x_h[startb_1_x], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=18; MPI_Sendrecv ((void *)&t_u_x_h[start_1_x ], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_u_x_h[endb_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, comm, istat); cudaMemcpyAsync(t_v_x_h, t_v_x, sizeof(double)* tran_mac_1_x , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=19; MPI_Sendrecv ((void *)&t_v_x_h[end_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_v_x_h[startb_1_x], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=20; MPI_Sendrecv ((void *)&t_v_x_h[start_1_x ], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_v_x_h[endb_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, comm, istat); cudaMemcpyAsync(t_w_x_h, t_w_x, sizeof(double)* tran_mac_1_x , cudaMemcpyDeviceToHost,stream0); cudaStreamSynchronize(stream0); itag=21; MPI_Sendrecv ((void *)&t_w_x_h[end_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, ( void *)&t_w_x_h[startb_1_x], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, comm, istat); itag=22; MPI_Sendrecv ((void *)&t_w_x_h[start_1_x ], num_trans_m_1_x, MPI_DOUBLE, l_nbr, itag, ( void *)&t_w_x_h[endb_1_x ], num_trans_m_1_x, MPI_DOUBLE, r_nbr, itag, comm, istat); cudaStreamSynchronize(stream0); cudaMemcpyAsync(t_p_x, t_p_x_h, sizeof(double)* tran_mac_2_x , cudaMemcpyHostToDevice,stream0); cudaMemcpyAsync(t_u_x, t_u_x_h, sizeof(double)* tran_mac_1_x , cudaMemcpyHostToDevice,stream0); cudaMemcpyAsync(t_v_x, t_v_x_h, sizeof(double)* tran_mac_1_x , cudaMemcpyHostToDevice,stream0); cudaMemcpyAsync(t_w_x, t_w_x_h, sizeof(double)* tran_mac_1_x , cudaMemcpyHostToDevice,stream0); cudaStreamSynchronize(stream0); boundary_xm2_undo <<< ny+4 , zd+4 , 0, stream0 >>>( p,t_p_x ); boundary_xm1_undo <<< ny+4 , zd+4 , 0, stream0 >>>( u,t_u_x ); boundary_xm1_undo <<< ny+4 , zd+4 , 0, stream0 >>>( v,t_v_x ); boundary_xm1_undo <<< ny+4 , zd+4 , 0, stream0 >>>( w,t_w_x ); if(condition == 0){ array_undo <<<grid_t0 , block_t0>>>( c_d,c ); array_undo <<<grid_t0 , block_t0>>>( w_d,w ); MPI_Barrier(MPI_COMM_WORLD); cudaMemcpy(c_d_h,c_d,sizeof(double)*size_dicom,cudaMemcpyDeviceToHost); cudaMemcpy(w_d_h,w_d,sizeof(double)*size_dicom,cudaMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); MPI_Gather((void *)&c_d_h[0], n_f, MPI_DOUBLE,(void *)&c_f_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&w_d_h[0], n_f, 
MPI_DOUBLE,(void *)&w_f_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Barrier(MPI_COMM_WORLD); if(myid==0){ double maxw; max_w(c_f_h,w_f_h,&maxw); Reynolds_Time( maxw, Re, step ); }} if(step%iprint ==0){ p_real <<<grid_t0 , block_t0>>>(c,p,a,beta,kappa,gra_c); array_undo <<<grid_t0 , block_t0>>>( c_d,c ); array_undo <<<grid_t0 , block_t0>>>( m_d,m ); array_undo <<<grid_t0 , block_t0>>>( b_d,b ); array_undo <<<grid_t0 , block_t0>>>( p_d,p ); array_undo <<<grid_t0 , block_t0>>>( u_d,u ); array_undo <<<grid_t0 , block_t0>>>( v_d,v ); array_undo <<<grid_t0 , block_t0>>>( w_d,w ); array_undo <<<grid_t0 , block_t0>>>( a_d,a ); MPI_Barrier(MPI_COMM_WORLD); cudaMemcpy(c_d_h,c_d,sizeof(double)*size_dicom,cudaMemcpyDeviceToHost); cudaMemcpy(m_d_h,m_d,sizeof(double)*size_dicom,cudaMemcpyDeviceToHost); cudaMemcpy(b_d_h,b_d,sizeof(double)*size_dicom,cudaMemcpyDeviceToHost); cudaMemcpy(p_d_h,p_d,sizeof(double)*size_dicom,cudaMemcpyDeviceToHost); cudaMemcpy(u_d_h,u_d,sizeof(double)*size_dicom,cudaMemcpyDeviceToHost); cudaMemcpy(v_d_h,v_d,sizeof(double)*size_dicom,cudaMemcpyDeviceToHost); cudaMemcpy(w_d_h,w_d,sizeof(double)*size_dicom,cudaMemcpyDeviceToHost); cudaMemcpy(a_d_h,a_d,sizeof(double)*size_dicom,cudaMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); MPI_Gather((void *)&c_d_h[0], n_f, MPI_DOUBLE,(void *)&c_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&m_d_h[0], n_f, MPI_DOUBLE,(void *)&m_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&b_d_h[0], n_f, MPI_DOUBLE,(void *)&b_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&p_d_h[0], n_f, MPI_DOUBLE,(void *)&p_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&u_d_h[0], n_f, MPI_DOUBLE,(void *)&u_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&v_d_h[0], n_f, MPI_DOUBLE,(void *)&v_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&w_d_h[0], n_f, MPI_DOUBLE,(void *)&w_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&a_d_h[0], n_f, MPI_DOUBLE,(void *)&a_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Barrier(MPI_COMM_WORLD); if(myid==0){ array_2D_undo(c_f_h,c_fdo_h); array_2D_undo(m_f_h,m_fdo_h); array_2D_undo(b_f_h,b_fdo_h); array_2D_undo(p_f_h,p_fdo_h); array_2D_undo(u_f_h,u_fdo_h); array_2D_undo(v_f_h,v_fdo_h); array_2D_undo(w_f_h,w_fdo_h); array_2D_undo(a_f_h,a_fdo_h); printf("step=%d\n",step); cudaEventRecord(gpu_stop_temp,0); cudaEventSynchronize(gpu_stop_temp); float cudatime_temp; cudaEventElapsedTime(&cudatime_temp,gpu_start_temp,gpu_stop_temp); cudatime_temp=cudatime_temp/1000.0;//unit sec int remain_time=(int)(cudatime_temp/iprint*(stepall-step)); printf("time remaining: %d hr,%d min,%d sec\n",(int)remain_time/3600,(int)(remain_time%3600)/60,(int)remain_time%60); int indexx; printf("c max=%lf\n",maxvalue(c_f_h,&indexx)); printf("c min=%lf\n",minvalue(c_f_h,&indexx)); printf("p max=%e\n" ,maxvalue(p_f_h,&indexx)); printf("u max=%e\n" ,maxvalue(u_f_h,&indexx)); printf("v max=%e\n" ,maxvalue(v_f_h,&indexx)); printf("w max=%e\n" ,maxvalue(w_f_h,&indexx)); data_2d = fopen("data_2d.dat","a"); fprintf( data_2d, "VARIABLES=\"X\",\"Z\",\"c\",\"u\",\"v\",\"w\",\"p\"\n"); fprintf( data_2d, "ZONE T=\"STEP=%d\" F=POINT\n",step); fprintf( data_2d, "I=%d, J=%d\n", nx,nz); j=ny/2; for(k=0;k<nz;k++){ for(i=0;i<nx;i++){ index=nx*(k*ny+j)+i; fprintf( data_2d, "%d\t%d\t%e\t%e\t%e\t%e\t%e\t\n", i,k,c_f_h[index],u_f_h[index],v_f_h[index],w_f_h[index],p_f_h[index]); }} fclose(data_2d); data_2d_t = fopen("data_2d_t.dat","w"); fprintf( data_2d_t, "VARIABLES=\"X\",\"Z\",\"c\",\"u\",\"v\",\"w\",\"p\"\n"); 
fprintf( data_2d_t, "ZONE T=\"STEP=%d\" F=POINT\n",step); fprintf( data_2d_t, "I=%d, J=%d\n", nx,nz); j=ny/2; for(k=0;k<nz;k++){ for(i=0;i<nx;i++){ index=nx*(k*ny+j)+i; fprintf( data_2d_t, "%d\t%d\t%e\t%e\t%e\t%e\t%e\t\n", i,k,c_f_h[index],u_f_h[index],v_f_h[index],w_f_h[index],p_f_h[index]); }} fclose(data_2d_t); if(step%idata_3d ==0){ data_3d = fopen("data_3d.dat","a"); fprintf( data_3d, "VARIABLES=\"X\",\"Y\",\"Z\",\"c\"\n"); fprintf( data_3d, "ZONE T=\"STEP=%d\" F=POINT\n",step); fprintf( data_3d, "I=%d, J=%d, K=%d\n", nx,ny,nz); for(k=0;k<nz;k++){ for(j=0;j<ny;j++){ for(i=0;i<nx;i++){ index=(nx)*(k*(ny)+j)+i; fprintf( data_3d, "%d\t%d\t%d\t%e\t\n", i,j,k,c_f_h[index]); }}} fclose(data_3d); data_3d_t = fopen("data_3d_t.dat","w"); fprintf( data_3d_t, "VARIABLES=\"X\",\"Y\",\"Z\",\"c\"\n"); fprintf( data_3d_t, "ZONE T=\"STEP=%d\" F=POINT\n",step); fprintf( data_3d_t, "I=%d, J=%d, K=%d\n", nx,ny,nz); for(k=0;k<nz;k++){ for(j=0;j<ny;j++){ for(i=0;i<nx;i++){ index=(nx)*(k*(ny)+j)+i; fprintf( data_3d_t, "%d\t%d\t%d\t%e\t\n", i,j,k,c_f_h[index]); }}} fclose(data_3d_t); } printf("===============================================================\n"); } cudaEventRecord(gpu_start_temp,0); } } /////////////////////////////////////////////////////////////////////////////////////////// // eend // /////////////////////////////////////////////////////////////////////////////////////////// MPI_Barrier(MPI_COMM_WORLD); cudaEventRecord(gpu_stop,0); cudaEventSynchronize(gpu_stop); float cudatime; if(myid==0){ printf("===============================================================\n"); printf("Iteration terminated!\n"); cudaEventElapsedTime(&cudatime,gpu_start,gpu_stop); printf("GPU total time = %f ms\n",cudatime); //unit = ms printf("mlups=%lf \n",(double)(nx*ny*nz)*stepall*pow(10.0,-6.0)/(cudatime/1000.0)); printf("===============================================================\n"); } cudaEventDestroy(gpu_start); cudaEventDestroy(gpu_stop); array_undo <<<grid_t0 , block_t0>>>( c_d,c ); array_undo <<<grid_t0 , block_t0>>>( m_d,m ); array_undo <<<grid_t0 , block_t0>>>( b_d,b ); array_undo <<<grid_t0 , block_t0>>>( p_d,p ); array_undo <<<grid_t0 , block_t0>>>( u_d,u ); array_undo <<<grid_t0 , block_t0>>>( v_d,v ); array_undo <<<grid_t0 , block_t0>>>( w_d,w ); array_undo <<<grid_t0 , block_t0>>>( a_d,a ); MPI_Barrier(MPI_COMM_WORLD); cudaMemcpy(c_d_h,c_d,sizeof(double)*size_dicom,cudaMemcpyDeviceToHost); cudaMemcpy(m_d_h,m_d,sizeof(double)*size_dicom,cudaMemcpyDeviceToHost); cudaMemcpy(b_d_h,b_d,sizeof(double)*size_dicom,cudaMemcpyDeviceToHost); cudaMemcpy(p_d_h,p_d,sizeof(double)*size_dicom,cudaMemcpyDeviceToHost); cudaMemcpy(u_d_h,u_d,sizeof(double)*size_dicom,cudaMemcpyDeviceToHost); cudaMemcpy(v_d_h,v_d,sizeof(double)*size_dicom,cudaMemcpyDeviceToHost); cudaMemcpy(w_d_h,w_d,sizeof(double)*size_dicom,cudaMemcpyDeviceToHost); cudaMemcpy(a_d_h,a_d,sizeof(double)*size_dicom,cudaMemcpyDeviceToHost); MPI_Barrier(MPI_COMM_WORLD); //////////////////////////////////////////////////////////////////////////////////////////////////////////////// MPI_Gather((void *)&c_d_h[0], n_f, MPI_DOUBLE,(void *)&c_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&m_d_h[0], n_f, MPI_DOUBLE,(void *)&m_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&b_d_h[0], n_f, MPI_DOUBLE,(void *)&b_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&p_d_h[0], n_f, MPI_DOUBLE,(void *)&p_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&u_d_h[0], n_f, MPI_DOUBLE,(void *)&u_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); 
MPI_Gather((void *)&v_d_h[0], n_f, MPI_DOUBLE,(void *)&v_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&w_d_h[0], n_f, MPI_DOUBLE,(void *)&w_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Gather((void *)&a_d_h[0], n_f, MPI_DOUBLE,(void *)&a_fdo_h[0], n_f, MPI_DOUBLE,iroot,comm); MPI_Barrier(MPI_COMM_WORLD); //////////////////////////////////////////////////////////////////////////////////////////////////////////////// if(myid==0){ array_2D_undo(c_f_h,c_fdo_h); array_2D_undo(m_f_h,m_fdo_h); array_2D_undo(b_f_h,b_fdo_h); array_2D_undo(p_f_h,p_fdo_h); array_2D_undo(u_f_h,u_fdo_h); array_2D_undo(v_f_h,v_fdo_h); array_2D_undo(w_f_h,w_fdo_h); array_2D_undo(a_f_h,a_fdo_h); final_2d = fopen("final_2d.dat","w"); fprintf( final_2d, "VARIABLES=\"X\",\"Z\",\"c\",\"u\",\"v\",\"w\",\"p\",\"p_real\"\n"); fprintf( final_2d, "ZONE T=\"STEP=%d\" F=POINT\n",step); fprintf( final_2d, "I=%d, J=%d\n", nx,nz); j=ny/2; for(k=0;k<nz;k++){ for(i=0;i<nx;i++){ index=nx*(k*ny+j)+i; fprintf( final_2d, "%d\t%d\t%e\t%e\t%e\t%e\t%e\t%e\t\n", i,k,c_f_h[index],u_f_h[index],v_f_h[index],w_f_h[index],p_f_h[index],a_f_h[index]); }} fclose(final_2d); final_3d = fopen("final_3d.dat","w"); fprintf( final_3d, "VARIABLES=\"X\",\"Y\",\"Z\",\"c\",\"u\",\"v\",\"w\",\"p\",\"p_real\"\n"); fprintf( final_3d, "ZONE T=\"STEP=%d\" F=POINT\n",step); fprintf( final_3d, "I=%d, J=%d, K=%d\n", nx,ny,nz); for(k=0;k<nz;k++){ for(j=0;j<ny;j++){ for(i=0;i<nx;i++){ index=(nx)*(k*(ny)+j)+i; fprintf( final_3d, "%d\t%d\t%d\t%e\t%e\t%e\t%e\t%e\t%e\t\n", i,j,k,c_f_h[index],u_f_h[index],v_f_h[index],w_f_h[index],p_f_h[index],a_f_h[index]); }}} fclose(final_3d); properties = fopen("properties.txt","a"); fprintf( properties,"MLUPS =%e\n",(double)(nx*ny*nz)*stepall*pow(10.0,-6.0)/(cudatime/1000.0)); if(condition == 0){ FILE *Reynolds; double T=sqrt(radd*2/gravity); Reynolds = fopen("Reynolds.dat","w"); fprintf( Reynolds, "VARIABLES=\"T\",\"Reynolds\"\n"); fprintf( Reynolds, "ZONE T=\"Reynolds\" F=POINT\n"); fprintf( Reynolds, "I=%d\n", stepall/2); for(i=0;i<stepall/2;i++){ fprintf( Reynolds, "%e\t%e\n",(double)2*(i+1)/T,Re[i]);} fclose ( Reynolds); } } // Free memory cudaFreeHost( c_d_h ); cudaFreeHost( m_d_h ); cudaFreeHost( b_d_h ); cudaFreeHost( p_d_h ); cudaFreeHost( u_d_h ); cudaFreeHost( v_d_h ); cudaFreeHost( w_d_h ); cudaFreeHost( a_d_h ); cudaFreeHost( et_h ); cudaFreeHost( ex_h ); cudaFreeHost( ey_h ); cudaFreeHost( ez_h ); cudaFreeHost( wt_h ); cudaFreeHost( t_c_h ); cudaFreeHost( t_m_h ); cudaFreeHost( t_b_h ); cudaFreeHost( t_p_h ); cudaFreeHost( t_u_h ); cudaFreeHost( t_v_h ); cudaFreeHost( t_w_h ); cudaFreeHost( t_g_h ); cudaFreeHost( t_h_h ); if(myid==0){ cudaFreeHost( c_f_h ); cudaFreeHost( m_f_h ); cudaFreeHost( b_f_h ); cudaFreeHost( p_f_h ); cudaFreeHost( u_f_h ); cudaFreeHost( v_f_h ); cudaFreeHost( w_f_h ); cudaFreeHost( a_f_h ); cudaFreeHost( xz_f_h ); cudaFreeHost( lx ); cudaFreeHost( lz ); } cudaFreeHost( xz_d_h ); cudaFree( xz_d ); cudaFree( c_d ); cudaFree( m_d ); cudaFree( b_d ); cudaFree( p_d ); cudaFree( u_d ); cudaFree( v_d ); cudaFree( w_d ); cudaFree( a_d ); cudaFree( h ); cudaFree( g ); cudaFree( h_t ); cudaFree( g_t ); cudaFree( gra_c ); cudaFree( gra_m ); cudaFree( t_c ); cudaFree( t_m ); cudaFree( t_b ); cudaFree( t_p ); cudaFree( t_u ); cudaFree( t_v ); cudaFree( t_w ); cudaFree( t_g ); cudaFree( t_h ); cudaFree( c ); cudaFree( m ); cudaFree( b ); cudaFree( p ); cudaFree( u ); cudaFree( v ); cudaFree( w ); cudaFree( a ); MPI_Finalize(); return 0; }
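// --------------------------------------------------------------------------
// Illustrative sketch (hedged; kernel and variable names below are hypothetical
// stand-ins, not the kernels of the file above). The simulation above repeats
// one halo-exchange pattern many times: a pack kernel copies a boundary plane
// into a transfer buffer, cudaMemcpyAsync stages it to a pinned host buffer,
// MPI_Sendrecv swaps it with the neighbouring rank, and an *_undo-style kernel
// writes the received plane back into the ghost layer. In isolation:
// --------------------------------------------------------------------------
#include <mpi.h>
#include <cuda_runtime.h>

__global__ void pack_halo(const double *plane, double *buf, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) buf[i] = plane[i];            // outgoing boundary plane -> transfer buffer
}

__global__ void unpack_halo(double *ghost, const double *buf, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) ghost[i] = buf[i];            // received plane -> ghost layer
}

// d_send points at the plane to send, d_ghost at the ghost layer to fill.
void exchange_halo(const double *d_send, double *d_ghost, int n,
                   int nbr_send, int nbr_recv, MPI_Comm comm, cudaStream_t s) {
    double *d_buf, *h_buf;
    cudaMalloc(&d_buf, 2 * n * sizeof(double));      // [0,n): outgoing, [n,2n): incoming
    cudaMallocHost(&h_buf, 2 * n * sizeof(double));  // pinned staging buffer for MPI

    pack_halo<<<(n + 255) / 256, 256, 0, s>>>(d_send, d_buf, n);
    cudaMemcpyAsync(h_buf, d_buf, n * sizeof(double), cudaMemcpyDeviceToHost, s);
    cudaStreamSynchronize(s);                        // buffer must be complete before MPI reads it

    MPI_Status st;
    MPI_Sendrecv(h_buf,     n, MPI_DOUBLE, nbr_send, 0,
                 h_buf + n, n, MPI_DOUBLE, nbr_recv, 0, comm, &st);

    cudaMemcpyAsync(d_buf + n, h_buf + n, n * sizeof(double), cudaMemcpyHostToDevice, s);
    unpack_halo<<<(n + 255) / 256, 256, 0, s>>>(d_ghost, d_buf + n, n);
    cudaStreamSynchronize(s);

    cudaFree(d_buf);
    cudaFreeHost(h_buf);
}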
5d2ef193b35bcad6c79d4e4caa13f8f8bb70be92.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <cfloat> #include <vector> #include <limits> #include "./eltwise_layer.hpp" #include "../util/math_functions.hpp" namespace caffe { __global__ void MaxForward(const int nthreads, const real_t* bottom_data_a, const real_t* bottom_data_b, const int blob_idx, real_t* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { real_t maxval = static_cast<real_t>(-FLT_MAX); if (bottom_data_a[index] > bottom_data_b[index]) { // only update for very first bottom_data blob (blob_idx == 0) if (blob_idx == 0) { maxval = bottom_data_a[index]; top_data[index] = maxval; } } else { maxval = bottom_data_b[index]; top_data[index] = maxval; } } } void EltwiseLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { const int count = top[0]->count(); real_t* top_data = top[0]->mutable_gpu_data(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), top_data); for (int i = 2; i < bottom.size(); ++i) { caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_SUM: caffe_gpu_set(count, static_cast<real_t>(0), top_data); // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? for (int i = 0; i < bottom.size(); ++i) { caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_MAX: // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxForward), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data); for (int i = 2; i < bottom.size(); ++i) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( MaxForward), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, top_data, bottom[i]->gpu_data(), i-1, top_data); } break; default: LOG(FATAL) << "Unknown elementwise operation."; } } } // namespace caffe
5d2ef193b35bcad6c79d4e4caa13f8f8bb70be92.cu
#include <cfloat> #include <vector> #include <limits> #include "./eltwise_layer.hpp" #include "../util/math_functions.hpp" namespace caffe { __global__ void MaxForward(const int nthreads, const real_t* bottom_data_a, const real_t* bottom_data_b, const int blob_idx, real_t* top_data) { CUDA_KERNEL_LOOP(index, nthreads) { real_t maxval = static_cast<real_t>(-FLT_MAX); if (bottom_data_a[index] > bottom_data_b[index]) { // only update for very first bottom_data blob (blob_idx == 0) if (blob_idx == 0) { maxval = bottom_data_a[index]; top_data[index] = maxval; } } else { maxval = bottom_data_b[index]; top_data[index] = maxval; } } } void EltwiseLayer::Forward_gpu(const vector<Blob*>& bottom, const vector<Blob*>& top) { const int count = top[0]->count(); real_t* top_data = top[0]->mutable_gpu_data(); switch (op_) { case EltwiseParameter_EltwiseOp_PROD: caffe_gpu_mul(count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), top_data); for (int i = 2; i < bottom.size(); ++i) { caffe_gpu_mul(count, top_data, bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_SUM: caffe_gpu_set(count, static_cast<real_t>(0), top_data); // TODO(shelhamer) does cuBLAS optimize to sum for coeff = 1? for (int i = 0; i < bottom.size(); ++i) { caffe_gpu_axpy(count, coeffs_[i], bottom[i]->gpu_data(), top_data); } break; case EltwiseParameter_EltwiseOp_MAX: // NOLINT_NEXT_LINE(whitespace/operators) MaxForward<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, bottom[0]->gpu_data(), bottom[1]->gpu_data(), 0, top_data); for (int i = 2; i < bottom.size(); ++i) { // NOLINT_NEXT_LINE(whitespace/operators) MaxForward<<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>( count, top_data, bottom[i]->gpu_data(), i-1, top_data); } break; default: LOG(FATAL) << "Unknown elementwise operation."; } } } // namespace caffe
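// --------------------------------------------------------------------------
// Illustrative sketch: CUDA_KERNEL_LOOP and CAFFE_GET_BLOCKS used above are
// Caffe macros (defined elsewhere in the project) that implement a standard
// grid-stride loop with a capped block count. A self-contained equivalent of
// the MAX branch, with hypothetical names, looks like this:
// --------------------------------------------------------------------------
#include <cuda_runtime.h>

__global__ void max_forward_sketch(int n, const float *a, const float *b, float *out) {
    // grid-stride loop: each thread handles i, i + stride, i + 2*stride, ...
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n;
         i += blockDim.x * gridDim.x) {
        out[i] = a[i] > b[i] ? a[i] : b[i];
    }
}

// Typical launch, in the spirit of CAFFE_GET_BLOCKS / CAFFE_CUDA_NUM_THREADS:
//   const int threads = 512;
//   const int blocks  = (n + threads - 1) / threads;
//   max_forward_sketch<<<blocks, threads>>>(n, d_a, d_b, d_out);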
7bcc56a01bb9fb9deb26720cebd9c27e2d4527ca.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include "hip/hip_runtime.h" #include "hip/hip_runtime_api.h" #include "device_launch_parameters.h" #include "hip/device_functions.h" #include <iostream> #include <stdio.h> #include <ctime> void random_longs(long* a, int size) //randomizing elements in both vectors { srand(time(0)); int i; for (i = 0; i < size; ++i) { a[i] = rand() % 100; } } __global__ void add(long* a, long* b, long* c, long N) { //core from ScalarMultiplication_example1 long baseIdx = threadIdx.x; long idx = baseIdx; while (idx < N) { c[idx] = a[idx] * b[idx]; idx += blockDim.x; } __syncthreads(); long step = N / 2; while (step != 0) { idx = baseIdx; while (idx < step) { c[idx] += c[idx + step]; idx += blockDim.x; } step /= 2; __syncthreads(); } } __global__ void add_1024(long* a, long* b, long* c, long N) { //more simple and probably faster core but works only with 1024 or less elements in vector in this example c[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x]; __syncthreads(); long step = N / 2; while (step != 0) { if (threadIdx.x < step) { c[threadIdx.x] += c[threadIdx.x + step]; } step /= 2; __syncthreads(); } } int main(void) { long N; clock_t start_t_gpu, start_t_cpu, end_t_gpu, end_t_cpu; long res_CPU; long* a, * b, * c, * d; // host copies of a, b, c long* d_a, * d_b, * d_c; // device copies of a, b, c int size; for (int i = 0; i < 3; i++) { if (i == 0) N = 512; if (i == 1) N = 1024; if (i == 2) N = 131072; size = N * sizeof(long); std::cout << "Vector size: " << N << std::endl; // Alloc space for device copies of a, b, c hipMalloc((void**)& d_a, size); hipMalloc((void**)& d_b, size); hipMalloc((void**)& d_c, size); // Alloc space for host copies of a, b, c and setup input values a = (long*)malloc(size); random_longs(a, N); b = (long*)malloc(size); random_longs(b, N); c = (long*)malloc(sizeof(long)); d = (long*)malloc(size); // Copy inputs to device hipMemcpy(d_a, a, size, hipMemcpyHostToDevice); hipMemcpy(d_b, b, size, hipMemcpyHostToDevice); // Launch add() kernel on GPU with N blocks if (N <= 1024) { start_t_gpu = clock(); for (int i = 0; i < 10000; i++) { add_1024 << <1, N >> > (d_a, d_b, d_c, N); } end_t_gpu = clock(); hipMemcpy(c, d_c, sizeof(long), hipMemcpyDeviceToHost); // Copy result back to host std::cout << "ScalarMultiplication on GPU (simple core): " << std::endl << "result: " << c[0] << std::endl; std::cout << "time: " << ((double)end_t_gpu - start_t_gpu)/ CLOCKS_PER_SEC << " seconds" << std::endl; } else { start_t_gpu = clock(); for (int i = 0; i < 10000; i++) { add << <1, 1024 >> > (d_a, d_b, d_c, N); } hipMemcpy(c, d_c, sizeof(long), hipMemcpyDeviceToHost); end_t_gpu = clock(); // Copy result back to host std::cout << "ScalarMultiplication on GPU (core from ScalarMultiplication_example1): " << std::endl << "result: " << c[0] << std::endl; std::cout << "time: " << ((double)end_t_gpu - start_t_gpu)/ CLOCKS_PER_SEC << " seconds" << std::endl; } start_t_cpu = clock(); for (int i = 0; i < 10000; i++) //cycle just to see average time { res_CPU = 0; for (long i = 0; i < N; ++i) { res_CPU += a[i] * b[i]; } } end_t_cpu = clock(); std::cout << "ScalarMultiplication on CPU: " << std::endl << "result: " << res_CPU << std::endl; std::cout << "time: " << ((double)end_t_cpu - start_t_cpu)/ CLOCKS_PER_SEC << " seconds" << std::endl << std::endl; // Cleanup free(a); free(b); free(c); hipFree(d_a); hipFree(d_b); hipFree(d_c); } return 0; }
7bcc56a01bb9fb9deb26720cebd9c27e2d4527ca.cu
#include <cuda.h> #include "cuda_runtime.h" #include "cuda_runtime_api.h" #include "device_launch_parameters.h" #include "device_functions.h" #include <iostream> #include <stdio.h> #include <ctime> void random_longs(long* a, int size) //randomizing elements in both vectors { srand(time(0)); int i; for (i = 0; i < size; ++i) { a[i] = rand() % 100; } } __global__ void add(long* a, long* b, long* c, long N) { //core from ScalarMultiplication_example1 long baseIdx = threadIdx.x; long idx = baseIdx; while (idx < N) { c[idx] = a[idx] * b[idx]; idx += blockDim.x; } __syncthreads(); long step = N / 2; while (step != 0) { idx = baseIdx; while (idx < step) { c[idx] += c[idx + step]; idx += blockDim.x; } step /= 2; __syncthreads(); } } __global__ void add_1024(long* a, long* b, long* c, long N) { //more simple and probably faster core but works only with 1024 or less elements in vector in this example c[threadIdx.x] = a[threadIdx.x] * b[threadIdx.x]; __syncthreads(); long step = N / 2; while (step != 0) { if (threadIdx.x < step) { c[threadIdx.x] += c[threadIdx.x + step]; } step /= 2; __syncthreads(); } } int main(void) { long N; clock_t start_t_gpu, start_t_cpu, end_t_gpu, end_t_cpu; long res_CPU; long* a, * b, * c, * d; // host copies of a, b, c long* d_a, * d_b, * d_c; // device copies of a, b, c int size; for (int i = 0; i < 3; i++) { if (i == 0) N = 512; if (i == 1) N = 1024; if (i == 2) N = 131072; size = N * sizeof(long); std::cout << "Vector size: " << N << std::endl; // Alloc space for device copies of a, b, c cudaMalloc((void**)& d_a, size); cudaMalloc((void**)& d_b, size); cudaMalloc((void**)& d_c, size); // Alloc space for host copies of a, b, c and setup input values a = (long*)malloc(size); random_longs(a, N); b = (long*)malloc(size); random_longs(b, N); c = (long*)malloc(sizeof(long)); d = (long*)malloc(size); // Copy inputs to device cudaMemcpy(d_a, a, size, cudaMemcpyHostToDevice); cudaMemcpy(d_b, b, size, cudaMemcpyHostToDevice); // Launch add() kernel on GPU with N blocks if (N <= 1024) { start_t_gpu = clock(); for (int i = 0; i < 10000; i++) { add_1024 << <1, N >> > (d_a, d_b, d_c, N); } end_t_gpu = clock(); cudaMemcpy(c, d_c, sizeof(long), cudaMemcpyDeviceToHost); // Copy result back to host std::cout << "ScalarMultiplication on GPU (simple core): " << std::endl << "result: " << c[0] << std::endl; std::cout << "time: " << ((double)end_t_gpu - start_t_gpu)/ CLOCKS_PER_SEC << " seconds" << std::endl; } else { start_t_gpu = clock(); for (int i = 0; i < 10000; i++) { add << <1, 1024 >> > (d_a, d_b, d_c, N); } cudaMemcpy(c, d_c, sizeof(long), cudaMemcpyDeviceToHost); end_t_gpu = clock(); // Copy result back to host std::cout << "ScalarMultiplication on GPU (core from ScalarMultiplication_example1): " << std::endl << "result: " << c[0] << std::endl; std::cout << "time: " << ((double)end_t_gpu - start_t_gpu)/ CLOCKS_PER_SEC << " seconds" << std::endl; } start_t_cpu = clock(); for (int i = 0; i < 10000; i++) //cycle just to see average time { res_CPU = 0; for (long i = 0; i < N; ++i) { res_CPU += a[i] * b[i]; } } end_t_cpu = clock(); std::cout << "ScalarMultiplication on CPU: " << std::endl << "result: " << res_CPU << std::endl; std::cout << "time: " << ((double)end_t_cpu - start_t_cpu)/ CLOCKS_PER_SEC << " seconds" << std::endl << std::endl; // Cleanup free(a); free(b); free(c); cudaFree(d_a); cudaFree(d_b); cudaFree(d_c); } return 0; }
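// --------------------------------------------------------------------------
// Illustrative sketch (hypothetical names): both kernels above reduce directly
// in global memory and rely on N being a power of two, with add_1024 further
// limited to a single block of at most 1024 threads. A common alternative
// accumulates per-thread partial sums with a grid-stride loop, reduces them in
// shared memory, and combines blocks with atomicAdd:
// --------------------------------------------------------------------------
#include <cuda_runtime.h>

__global__ void dot_sketch(const long *a, const long *b,
                           unsigned long long *result, long n) {
    __shared__ unsigned long long cache[256];        // one slot per thread of the block
    unsigned long long t = 0;
    for (long i = blockIdx.x * (long)blockDim.x + threadIdx.x; i < n;
         i += (long)blockDim.x * gridDim.x)
        t += (unsigned long long)(a[i] * b[i]);      // inputs here are small non-negative values
    cache[threadIdx.x] = t;
    __syncthreads();
    for (int s = blockDim.x / 2; s > 0; s >>= 1) {   // tree reduction inside the block
        if (threadIdx.x < s) cache[threadIdx.x] += cache[threadIdx.x + s];
        __syncthreads();
    }
    if (threadIdx.x == 0) atomicAdd(result, cache[0]);  // one atomic per block
}

// Usage sketch: cudaMemset(d_result, 0, sizeof(unsigned long long));
//               dot_sketch<<<64, 256>>>(d_a, d_b, d_result, N);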
4a01ea51e6c0a384e36fae080cc1c74b54b81eeb.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <hip/hip_runtime.h> __global__ void MatrixMut(int *A, int *B, int *C, int N) { __shared__ int As[256]; __shared__ int Bs[256]; int tx=threadIdx.x, ty=threadIdx.y; int row=blockDim.x*blockIdx.x+threadIdx.x; int col=blockDim.y*blockIdx.y+threadIdx.y; int temp=0; // 16*16 subblock every time, 64 times, for each time // each thread get 1 element, a block get 256 elements, then sysnchronize // loop 16 multiply and summation, complete 1/64. Sync for next subblock for(int k=0;k<64;k++) { As[tx*16+ty]=A[row*N+k*16+ty]; Bs[tx*16+ty]=B[col+(k*16+tx)*N]; __syncthreads(); for(int x=0;x<16;x++) temp+=As[tx*16+x]*Bs[x*16+ty]; __syncthreads(); } C[row*N+col]=temp; } int main() { const int dim=1<<10; const int size=dim*dim; int *A= (int *)malloc(size*sizeof(int)); int *B= (int *)malloc(size*sizeof(int)); int *C= (int *)malloc(size*sizeof(int)); for (int i = 0; i < size; ++i) { A[i]=1; B[i]=2; } int *d_A=NULL; int *d_B=NULL; int *d_C=NULL; hipMalloc((void**) &d_A,size*sizeof(int)); hipMalloc((void**) &d_B,size*sizeof(int)); hipMalloc((void**) &d_C,size*sizeof(int)); hipMemcpy(d_A,A,size*sizeof(int),hipMemcpyHostToDevice); hipMemcpy(d_B,B,size*sizeof(int),hipMemcpyHostToDevice); dim3 grid_size(64,64); dim3 block_size(16,16); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( MatrixMut), dim3(grid_size),dim3(block_size), 0, 0, d_A,d_B,d_C,dim); hipEventRecord(stop, 0); hipEventSynchronize(stop); float msecTotal = 0.0f; hipEventElapsedTime(&msecTotal, start, stop); printf("Eclapsed time is %f ms \n", msecTotal); hipMemcpy(C,d_C,size*sizeof(int),hipMemcpyDeviceToHost); for (int i = 0; i < size; ++i) { if(C[i]!=dim*2) { printf("Test Failed!\n"); exit(-1); } } printf("Test Passed \n"); free(A); free(B); free(C); hipFree(d_A); hipFree(d_B); hipFree(d_C); return 0; }
4a01ea51e6c0a384e36fae080cc1c74b54b81eeb.cu
#include <stdio.h> #include <cuda_runtime.h> __global__ void MatrixMut(int *A, int *B, int *C, int N) { __shared__ int As[256]; __shared__ int Bs[256]; int tx=threadIdx.x, ty=threadIdx.y; int row=blockDim.x*blockIdx.x+threadIdx.x; int col=blockDim.y*blockIdx.y+threadIdx.y; int temp=0; // 16*16 subblock every time, 64 times, for each time // each thread get 1 element, a block get 256 elements, then sysnchronize // loop 16 multiply and summation, complete 1/64. Sync for next subblock for(int k=0;k<64;k++) { As[tx*16+ty]=A[row*N+k*16+ty]; Bs[tx*16+ty]=B[col+(k*16+tx)*N]; __syncthreads(); for(int x=0;x<16;x++) temp+=As[tx*16+x]*Bs[x*16+ty]; __syncthreads(); } C[row*N+col]=temp; } int main() { const int dim=1<<10; const int size=dim*dim; int *A= (int *)malloc(size*sizeof(int)); int *B= (int *)malloc(size*sizeof(int)); int *C= (int *)malloc(size*sizeof(int)); for (int i = 0; i < size; ++i) { A[i]=1; B[i]=2; } int *d_A=NULL; int *d_B=NULL; int *d_C=NULL; cudaMalloc((void**) &d_A,size*sizeof(int)); cudaMalloc((void**) &d_B,size*sizeof(int)); cudaMalloc((void**) &d_C,size*sizeof(int)); cudaMemcpy(d_A,A,size*sizeof(int),cudaMemcpyHostToDevice); cudaMemcpy(d_B,B,size*sizeof(int),cudaMemcpyHostToDevice); dim3 grid_size(64,64); dim3 block_size(16,16); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); MatrixMut<<<grid_size,block_size>>>(d_A,d_B,d_C,dim); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float msecTotal = 0.0f; cudaEventElapsedTime(&msecTotal, start, stop); printf("Eclapsed time is %f ms \n", msecTotal); cudaMemcpy(C,d_C,size*sizeof(int),cudaMemcpyDeviceToHost); for (int i = 0; i < size; ++i) { if(C[i]!=dim*2) { printf("Test Failed!\n"); exit(-1); } } printf("Test Passed \n"); free(A); free(B); free(C); cudaFree(d_A); cudaFree(d_B); cudaFree(d_C); return 0; }
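// --------------------------------------------------------------------------
// Illustrative sketch (hypothetical names; assumes N is a multiple of TILE):
// the kernel above hardcodes 16x16 tiles and 64 tile steps for N = 1024. The
// same shared-memory tiling with the step count derived from N:
// --------------------------------------------------------------------------
#include <cuda_runtime.h>
#define TILE 16

__global__ void tiled_matmul(const int *A, const int *B, int *C, int N) {
    __shared__ int As[TILE][TILE];
    __shared__ int Bs[TILE][TILE];
    int row = blockIdx.x * TILE + threadIdx.x;
    int col = blockIdx.y * TILE + threadIdx.y;
    int acc = 0;
    for (int k = 0; k < N / TILE; ++k) {             // walk the tiles along the shared dimension
        As[threadIdx.x][threadIdx.y] = A[row * N + k * TILE + threadIdx.y];
        Bs[threadIdx.x][threadIdx.y] = B[(k * TILE + threadIdx.x) * N + col];
        __syncthreads();                             // whole tile loaded before it is used
        for (int t = 0; t < TILE; ++t)
            acc += As[threadIdx.x][t] * Bs[t][threadIdx.y];
        __syncthreads();                             // done with the tile before it is overwritten
    }
    C[row * N + col] = acc;
}

// Launch sketch: dim3 grid(N / TILE, N / TILE), block(TILE, TILE);
//                tiled_matmul<<<grid, block>>>(d_A, d_B, d_C, N);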
5987bffeb8a069192395c6389fc5a1cee96f7112.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdlib.h> #include <stdio.h> #define N 4 #define T 2 __global__ void sum_matrix(int**&d_mat_a, int**&dd_mat_a, int n, int m){ //int x = threadIdx.x + blockIdx.x*blockDim.x; //int y = threadIdx.y + blockIdx.y*blockDim.y; //int* temp; //temp = dd_mat_a[0]; //temp[2]=-1; d_mat_a[0][0] = -1; d_mat_a[0][1] = -2; d_mat_a[0][2] = -3; dd_mat_a[0][-1] = -4; //dd_mat_a[0][-2] = -5; //dd_mat_a[0][-3] = -6; } void create(int**&mat,int n){ mat = (int **)malloc(sizeof(int*)*n); int i; for(i=0;i<n;i++){ mat[i] = (int*)malloc(sizeof(int)*n); } } void create2(int** & mat,int n, int m){ mat = (int** )malloc(sizeof(int*)*n); mat[0] = (int* )malloc(sizeof(int)*n*m); int i; for(i=1;i<=n;i++){ mat[i] = (*mat+i*m); } } void fill(int** mat,int n){ int i,j; for(i=0;i<n;i++){ for(j=0;j<n;j++) mat[i][j] = rand()%10; } } void fill_zero(int** mat,int n, int value=0){ int i,j; for(i=0;i<n;i++) for(j=0;j<n;j++) mat[i][j] = value; } void print(int** mat,int n){ int i,j; for(i=0;i<n;i++){ for(j=0;j<n;j++) printf("%d",mat[i][j]); printf("\n"); } } /* void create_matrix(int**&mat, int**&h_mat, int**&d_mat, int n, int m){ int size_n=sizeof(int)*n; int size_m=sizeof(int)*m; h_mat = (int**)malloc(size_n); int i; for(i=0;i<n;i++){ printf(">>>>>\n"); hipMalloc((void**)& h_mat[i],size_n); hipMemcpy(h_mat[i],&mat[i][0],size_m,hipMemcpyHostToDevice); } hipMalloc((void*** )& d_mat,size_n); hipMemcpy(d_mat,h_mat,size_n,hipMemcpyHostToDevice); }*/ int main(){ int n = N; int m = N; int** h_mat_a; create2(h_mat_a,n,m); fill_zero(h_mat_a,n); print(h_mat_a,n); printf("//////////////////\n"); int ** d_mat_a; int ** dd_mat_a; int i; /////////////////////// d_mat_a = (int** )malloc(sizeof(int*)*n); hipMalloc((void** )& d_mat_a[0], n*sizeof(int)*m); hipMemcpy(d_mat_a[0],&h_mat_a[0][0], n*sizeof(int)*m,hipMemcpyHostToDevice); for(i=1;i<n;i++){ printf(">wwww>>>>\n"); d_mat_a[i] = &(d_mat_a[0][i*m]); //d_mat_a[i] = (*d_mat_a+i*m); } hipMalloc((void*** )& dd_mat_a,sizeof(int*)*n); hipMemcpy(dd_mat_a,d_mat_a,sizeof(int*)*n,hipMemcpyHostToDevice); /// ////////////////////////// dim3 grid(ceil(N/T),ceil(N/T),1); dim3 blockNum(T,T,1); hipLaunchKernelGGL(( sum_matrix), dim3(grid),dim3(blockNum), 0, 0, d_mat_a,dd_mat_a,n,m); hipMemcpy(h_mat_a[0],&d_mat_a[0][0],n*sizeof(int)*m,hipMemcpyDeviceToHost); printf(">copiado>>>>\n"); printf("///////CCCCCC///////////\n"); print(h_mat_a,n); return 0; }
5987bffeb8a069192395c6389fc5a1cee96f7112.cu
#include <stdlib.h> #include <stdio.h> #define N 4 #define T 2 __global__ void sum_matrix(int**&d_mat_a, int**&dd_mat_a, int n, int m){ //int x = threadIdx.x + blockIdx.x*blockDim.x; //int y = threadIdx.y + blockIdx.y*blockDim.y; //int* temp; //temp = dd_mat_a[0]; //temp[2]=-1; d_mat_a[0][0] = -1; d_mat_a[0][1] = -2; d_mat_a[0][2] = -3; dd_mat_a[0][-1] = -4; //dd_mat_a[0][-2] = -5; //dd_mat_a[0][-3] = -6; } void create(int**&mat,int n){ mat = (int **)malloc(sizeof(int*)*n); int i; for(i=0;i<n;i++){ mat[i] = (int*)malloc(sizeof(int)*n); } } void create2(int** & mat,int n, int m){ mat = (int** )malloc(sizeof(int*)*n); mat[0] = (int* )malloc(sizeof(int)*n*m); int i; for(i=1;i<=n;i++){ mat[i] = (*mat+i*m); } } void fill(int** mat,int n){ int i,j; for(i=0;i<n;i++){ for(j=0;j<n;j++) mat[i][j] = rand()%10; } } void fill_zero(int** mat,int n, int value=0){ int i,j; for(i=0;i<n;i++) for(j=0;j<n;j++) mat[i][j] = value; } void print(int** mat,int n){ int i,j; for(i=0;i<n;i++){ for(j=0;j<n;j++) printf("%d",mat[i][j]); printf("\n"); } } /* void create_matrix(int**&mat, int**&h_mat, int**&d_mat, int n, int m){ int size_n=sizeof(int)*n; int size_m=sizeof(int)*m; h_mat = (int**)malloc(size_n); int i; for(i=0;i<n;i++){ printf(">>>>>\n"); cudaMalloc((void**)& h_mat[i],size_n); cudaMemcpy(h_mat[i],&mat[i][0],size_m,cudaMemcpyHostToDevice); } cudaMalloc((void*** )& d_mat,size_n); cudaMemcpy(d_mat,h_mat,size_n,cudaMemcpyHostToDevice); }*/ int main(){ int n = N; int m = N; int** h_mat_a; create2(h_mat_a,n,m); fill_zero(h_mat_a,n); print(h_mat_a,n); printf("//////////////////\n"); int ** d_mat_a; int ** dd_mat_a; int i; /////////////////////// d_mat_a = (int** )malloc(sizeof(int*)*n); cudaMalloc((void** )& d_mat_a[0], n*sizeof(int)*m); cudaMemcpy(d_mat_a[0],&h_mat_a[0][0], n*sizeof(int)*m,cudaMemcpyHostToDevice); for(i=1;i<n;i++){ printf(">wwww>>>>\n"); d_mat_a[i] = &(d_mat_a[0][i*m]); //d_mat_a[i] = (*d_mat_a+i*m); } cudaMalloc((void*** )& dd_mat_a,sizeof(int*)*n); cudaMemcpy(dd_mat_a,d_mat_a,sizeof(int*)*n,cudaMemcpyHostToDevice); /// ////////////////////////// dim3 grid(ceil(N/T),ceil(N/T),1); dim3 blockNum(T,T,1); sum_matrix<<<grid,blockNum>>>(d_mat_a,dd_mat_a,n,m); cudaMemcpy(h_mat_a[0],&d_mat_a[0][0],n*sizeof(int)*m,cudaMemcpyDeviceToHost); printf(">copiado>>>>\n"); printf("///////CCCCCC///////////\n"); print(h_mat_a,n); return 0; }
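// --------------------------------------------------------------------------
// Illustrative sketch (hypothetical names): the example above builds a device
// matrix as one flat block plus a table of row pointers. Two details worth
// noting: create2's loop bound (i <= n) writes one pointer past the end of
// mat, and the host-allocated pointer table d_mat_a cannot be dereferenced
// inside a kernel -- only the table that was itself cudaMalloc'd (dd_mat_a)
// can. A minimal version of the pattern:
// --------------------------------------------------------------------------
#include <cstdlib>
#include <cuda_runtime.h>

__global__ void fill_row0(int **rows, int m) {
    int j = blockIdx.x * blockDim.x + threadIdx.x;
    if (j < m) rows[0][j] = -j;                      // rows lives in device memory, safe to deref here
}

int main() {
    const int n = 4, m = 4;
    int  *d_data;                                    // flat n*m block on the device
    int **h_rows, **d_rows;                          // row-pointer table: host staging + device copy
    cudaMalloc(&d_data, n * m * sizeof(int));
    h_rows = (int **)malloc(n * sizeof(int *));
    for (int i = 0; i < n; ++i)                      // i < n, not i <= n
        h_rows[i] = d_data + i * m;                  // device addresses computed on the host
    cudaMalloc(&d_rows, n * sizeof(int *));
    cudaMemcpy(d_rows, h_rows, n * sizeof(int *), cudaMemcpyHostToDevice);

    fill_row0<<<1, m>>>(d_rows, m);
    cudaDeviceSynchronize();

    cudaFree(d_data); cudaFree(d_rows); free(h_rows);
    return 0;
}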
c8ad0460d212af26c3a66e0ec20eae39cfaf8020.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Author : Hasindu Gamaarachchi CPA for 128/128 bit SPECK software implementation To derive right half key K2 */ #include <stdio.h> #include "helpers.cuh" #include "data.cuh" #include <stdint.h> //file name for all key-correlation pairs sorted in key order #define FILEALL "all.txt" //file name for all key-correlation pairs sorted using correlation coefficient #define FILEALLSORTED "allsorted.txt" //set 1 if your samples are hexadecimal separated by spaces //set 0 if your samples are hexadecimal with no spaces #define FORMAT 0 //set 0 if need to take fabs(), 1 if negative, 2 if positive #define CORRELATION_SIGN 0 //Change SAMPLES to the number of power traces #define SAMPLES 1000 //Change ALLWAVELEGTH to the number of sampling points you have in a single power trace #define ALLWAVELEN 100000 //Due to memory restrictions on GPU if SAMPLES is large cannot keep all the things at once in memory //In such case of a memory allocation failure reduce WAVELENGTH //But make sure that ALLWAVELENGTH is divisible by WAVELENGTH #define WAVELENGTH 2000 //define for 128/128 Speck #define KEYBYTES 16 #define KEYBYTESPART 4 #define KEYS 65536 //struct used for sorting correlation key pairs struct key_corr{ unsigned int key; double corr; }; __device__ uint16_t hammingweight(uint16_t H){ //byte H=M^R; // Count the number of set bits uint16_t dist=0; while(H){ dist++; H &= H - 1; } return dist; } __device__ uint16_t hamming(unsigned int *sample, unsigned int i,unsigned int n,unsigned int key) { //n is byteno i is the sample byte pt0[8]; copy2(pt0,&sample[i*KEYBYTES]); byte pt1[8]; copy2(pt1,&sample[i*KEYBYTES+8]); byte ans[8]; ROR(ans,pt1,8); copy(pt1,ans); _add(ans,pt1,pt0); copy(pt1,ans); uint16_t answer=(uint16_t)pt1[n*2]<<8|(uint16_t)pt1[n*2+1]; uint16_t inter; if(n<4){ inter= (uint16_t)(answer ^ key); } else{ inter = (uint16_t)(answer ^ key); } uint16_t dist = hammingweight(inter); return dist; } __global__ void maxCorelationkernel(double *corelation,double *wavestat,double *wavestat2,double *hammingstat){ int keyguess=blockDim.y*blockIdx.y+threadIdx.y; int keybyte=blockDim.x*blockIdx.x+threadIdx.x; if (keybyte<KEYBYTESPART && keyguess<KEYS ){ double sigmaH,sigmaH2,sigmaW=0,sigmaW2=0,sigmaWH=0; sigmaH=hammingstat[KEYBYTESPART*keyguess+keybyte]; sigmaH2=hammingstat[KEYS*KEYBYTESPART+KEYBYTESPART*keyguess+keybyte]; double temp_corelation=0;; double corelationmax=0;; unsigned int j; for(j=0;j<WAVELENGTH;j++){ sigmaWH=wavestat2[j*KEYS*KEYBYTESPART + keyguess*KEYBYTESPART + keybyte]; sigmaW=wavestat[j]; sigmaW2=wavestat[WAVELENGTH+j]; double numerator=SAMPLES*sigmaWH - sigmaW*sigmaH; double denominator=sqrt(SAMPLES*sigmaW2 - sigmaW*sigmaW)*sqrt(SAMPLES*sigmaH2 - sigmaH*sigmaH); if(CORRELATION_SIGN==0){ temp_corelation=fabs(numerator/denominator); } else if(CORRELATION_SIGN==1){ temp_corelation=-numerator/denominator; } else if(CORRELATION_SIGN==2){ temp_corelation=numerator/denominator; } else{ temp_corelation=fabs(numerator/denominator); } if(temp_corelation>corelationmax){ corelationmax=temp_corelation; } } if(corelationmax>corelation[keyguess*KEYBYTESPART+keybyte]){ corelation[keyguess*KEYBYTESPART+keybyte]=corelationmax; } } return; } __global__ void wavestatkernel(double *wavedata, double *wavestat,double *wavestat2,byte *hammingArray){ int keyguess=blockDim.y*blockIdx.y+threadIdx.y; int keybyte=blockDim.x*blockIdx.x+threadIdx.x; int wave=blockDim.z*blockIdx.z+threadIdx.z; if (keyguess<KEYS && keybyte<KEYBYTESPART && 
wave<WAVELENGTH ){ unsigned int i; double sigmaWH=0; for(i=0;i<SAMPLES;i++){ sigmaWH+=wavedata[i*WAVELENGTH+wave]*(double)hammingArray[i*KEYS*KEYBYTESPART + keyguess*KEYBYTESPART + keybyte]; } wavestat2[wave*KEYS*KEYBYTESPART + keyguess*KEYBYTESPART + keybyte ]=sigmaWH; } if (keyguess==0 && keybyte==0 && wave<WAVELENGTH ){ unsigned int i; double sigmaW=0,sigmaW2=0,W=0; for(i=0;i<SAMPLES;i++){ W=wavedata[i*WAVELENGTH+wave]; sigmaW+=W; sigmaW2+=W*W; } wavestat[wave]=sigmaW; wavestat[WAVELENGTH+wave]=sigmaW2; } return; } __global__ void hammingkernel(unsigned int *sample,byte *hammingArray,double *hammingstat){ int keyguess=blockDim.y*blockIdx.y+threadIdx.y; int keybyte=blockDim.x*blockIdx.x+threadIdx.x; if (keybyte<KEYBYTESPART && keyguess<KEYS ){ double sigmaH=0,sigmaH2=0; byte H; unsigned int i; for(i=0;i<SAMPLES;i++){ H=hamming(sample,i,keybyte,keyguess); hammingArray[i*KEYS*KEYBYTESPART + keyguess*KEYBYTESPART + keybyte]=H; sigmaH+=(double)H; sigmaH2+=(double)H*(double)H; } hammingstat[KEYBYTESPART*keyguess+keybyte]=sigmaH; hammingstat[KEYS*KEYBYTESPART+KEYBYTESPART*keyguess+keybyte]=sigmaH2; } return; } int main(int argc, char *argv[]){ unsigned int i,j; //check args if(argc!=3){ fprintf(stderr,"%s\n", "Not enough args. eg ./cpa wavedata.txt sample.txt"); exit(EXIT_FAILURE); } if(ALLWAVELEN%WAVELENGTH !=0){ fprintf(stderr,"Make sure that ALLWAVELEN is divisible by WAVELEN\n"); exit(1); } //get wave data double *wavedata=(double *)malloc(sizeof(double) * SAMPLES* WAVELENGTH); isMemoryFull(wavedata); //get sample texts unsigned int *sample=(unsigned int *)malloc(sizeof(unsigned int)*SAMPLES*KEYBYTES); isMemoryFull(sample); FILE *file=fopen(argv[2],"r"); isFileOK(file); if(FORMAT==1){ for(i=0; i<SAMPLES ;i++){ for(j=0; j<KEYBYTES; j++){ fscanf(file,"%x",&sample[i*KEYBYTES+j]); } } } else if(FORMAT==0){ char str[100]; for(i=0; i<SAMPLES ;i++){ fscanf(file,"%s",str); for(j=0; j<KEYBYTES; j++){ sscanf(&str[2*j],"%02X",&sample[i*KEYBYTES+j]); } } } else{ fprintf(stderr,"Unknown FORMAT for sample text\n"); exit(1); } fclose(file); //space for corelation double *corelation=(double *)malloc(sizeof(double) * KEYS * KEYBYTESPART); isMemoryFull(corelation); //Time hipEvent_t start,stop; float elapsedtime; hipEventCreate(&start); hipEventRecord(start,0); //cuda arrays and copying double *dev_wavedata; unsigned int *dev_sample; double *dev_corelation,*dev_wavestat,*dev_wavestat2,*dev_hammingstat; byte *dev_hammingArray; checkCudaError(hipMalloc((void**)&dev_wavedata, SAMPLES*WAVELENGTH*sizeof(double))); checkCudaError(hipMalloc((void**)&dev_sample, SAMPLES*KEYBYTES*sizeof(unsigned int))); checkCudaError(hipMalloc((void**)&dev_corelation, KEYS*KEYBYTESPART*sizeof(double))); checkCudaError(hipMalloc((void**)&dev_hammingArray, KEYS*KEYBYTESPART*SAMPLES*sizeof(byte))); checkCudaError(hipMalloc((void**)&dev_wavestat, 2*WAVELENGTH*sizeof(double))); checkCudaError(hipMalloc((void**)&dev_wavestat2, KEYS*KEYBYTESPART*WAVELENGTH*sizeof(double))); checkCudaError(hipMalloc((void**)&dev_hammingstat, 2*KEYS*KEYBYTESPART*sizeof(double))); checkCudaError(hipMemset(dev_corelation,0, KEYS*KEYBYTESPART*sizeof(double))); checkCudaError(hipMemcpy(dev_sample,sample, SAMPLES*KEYBYTES*sizeof(unsigned int),hipMemcpyHostToDevice)); dim3 grid(KEYBYTES/4,KEYS/64); dim3 block(4,64); //findhamming hipLaunchKernelGGL(( hammingkernel), dim3(grid),dim3(block), 0, 0, dev_sample,dev_hammingArray,dev_hammingstat); checkCudaError(hipGetLastError()); int loops=0; for(loops=0;loops<ALLWAVELEN/WAVELENGTH;loops++){ FILE 
*file=fopen(argv[1],"r"); isFileOK(file); for(i=0; i<SAMPLES ;i++){ unsigned int k=0; for(j=0; j<ALLWAVELEN; j++){ float dat; fscanf(file,"%f",&dat); if(j<WAVELENGTH*(loops+1) && j>=WAVELENGTH*loops){ wavedata[i*WAVELENGTH+k]=(double)dat; k++; } } } fclose(file); checkCudaError(hipMemcpy(dev_wavedata,wavedata,SAMPLES*WAVELENGTH*sizeof(double),hipMemcpyHostToDevice)); dim3 block3d(4,32,4); dim3 grid3d(KEYBYTESPART/4,KEYS/32,WAVELENGTH/4); //find wave stats hipLaunchKernelGGL(( wavestatkernel), dim3(grid3d),dim3(block3d), 0, 0, dev_wavedata,dev_wavestat,dev_wavestat2,dev_hammingArray); checkCudaError(hipGetLastError()); //deploy double hipLaunchKernelGGL(( maxCorelationkernel), dim3(grid),dim3(block), 0, 0, dev_corelation,dev_wavestat,dev_wavestat2,dev_hammingstat); checkCudaError(hipGetLastError()); //progress fprintf(stderr,"%d of %d completed\n",loops+1,ALLWAVELEN/WAVELENGTH); } //copy back checkCudaError(hipMemcpy(corelation,dev_corelation,KEYS*KEYBYTESPART*sizeof(double),hipMemcpyDeviceToHost)); checkCudaError(hipFree(dev_wavedata)); checkCudaError(hipFree(dev_sample)); checkCudaError(hipFree(dev_corelation)); checkCudaError(hipFree(dev_wavestat)); checkCudaError(hipFree(dev_wavestat2)); checkCudaError(hipFree(dev_hammingstat)); checkCudaError(hipFree(dev_hammingArray)); //Time hipEventCreate(&stop); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedtime,start,stop); fprintf(stderr,"Time spent for CUDA operation : %.10f\n",elapsedtime/(float)1000); //form struct array struct key_corr key_corrpairs[KEYS][KEYBYTESPART]; //print all information while putting to structs file=fopen(FILEALL,"w"); for (i=0;i<KEYS;i++){ for(j=0;j<KEYBYTESPART;j++){ key_corrpairs[i][j].key=i; key_corrpairs[i][j].corr=corelation[i*KEYBYTESPART+j]; fprintf(file,"%.4X : %f\t",i,corelation[i*KEYBYTESPART+j]); } fprintf(file,"\n"); } int k; //sort using insertion sort for (j=0;j<KEYBYTESPART;j++){ for (i=1;i<KEYS;i++){ double corr=key_corrpairs[i][j].corr; unsigned int key=key_corrpairs[i][j].key; for (k=(int)(i-1);k>=0 && corr>key_corrpairs[k][j].corr;k--){ key_corrpairs[k+1][j].corr=key_corrpairs[k][j].corr; key_corrpairs[k+1][j].key=key_corrpairs[k][j].key; } key_corrpairs[k+1][j].key=key; key_corrpairs[k+1][j].corr=corr; } } //print all in ascending order file=fopen(FILEALLSORTED,"w"); for (i=0;i<KEYS;i++){ for(j=0;j<KEYBYTESPART;j++){ fprintf(file,"%.4X : %f\t",key_corrpairs[i][j].key,key_corrpairs[i][j].corr); } fprintf(file,"\n"); } //print the best five to the stdout for (i=0;i<5;i++){ for(j=0;j<KEYBYTESPART;j++){ printf("%.4X\t\t",key_corrpairs[i][j].key); } printf("\n"); for(j=0;j<KEYBYTESPART;j++){ printf("%f\t",key_corrpairs[i][j].corr); } printf("\n\n"); } return 0; }
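// --------------------------------------------------------------------------
// Illustrative sketch (hypothetical helper, not part of the original): the
// kernels above accumulate the running sums sigmaW, sigmaW2, sigmaH, sigmaH2
// and sigmaWH, then evaluate the Pearson correlation
//   r = (S*sigmaWH - sigmaW*sigmaH)
//       / ( sqrt(S*sigmaW2 - sigmaW^2) * sqrt(S*sigmaH2 - sigmaH^2) )
// per key guess and sample point. The same formula on the host:
// --------------------------------------------------------------------------
#include <math.h>

static double pearson_from_sums(double sW, double sW2, double sH, double sH2,
                                double sWH, double nsamples) {
    double num = nsamples * sWH - sW * sH;
    double den = sqrt(nsamples * sW2 - sW * sW) * sqrt(nsamples * sH2 - sH * sH);
    return num / den;    // caller applies fabs() or a sign, as CORRELATION_SIGN selects above
}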
c8ad0460d212af26c3a66e0ec20eae39cfaf8020.cu
/* Author : Hasindu Gamaarachchi CPA for 128/128 bit SPECK software implementation To derive right half key K2 */ #include <stdio.h> #include "helpers.cuh" #include "data.cuh" #include <stdint.h> //file name for all key-correlation pairs sorted in key order #define FILEALL "all.txt" //file name for all key-correlation pairs sorted using correlation coefficient #define FILEALLSORTED "allsorted.txt" //set 1 if your samples are hexadecimal separated by spaces //set 0 if your samples are hexadecimal with no spaces #define FORMAT 0 //set 0 if need to take fabs(), 1 if negative, 2 if positive #define CORRELATION_SIGN 0 //Change SAMPLES to the number of power traces #define SAMPLES 1000 //Change ALLWAVELEGTH to the number of sampling points you have in a single power trace #define ALLWAVELEN 100000 //Due to memory restrictions on GPU if SAMPLES is large cannot keep all the things at once in memory //In such case of a memory allocation failure reduce WAVELENGTH //But make sure that ALLWAVELENGTH is divisible by WAVELENGTH #define WAVELENGTH 2000 //define for 128/128 Speck #define KEYBYTES 16 #define KEYBYTESPART 4 #define KEYS 65536 //struct used for sorting correlation key pairs struct key_corr{ unsigned int key; double corr; }; __device__ uint16_t hammingweight(uint16_t H){ //byte H=M^R; // Count the number of set bits uint16_t dist=0; while(H){ dist++; H &= H - 1; } return dist; } __device__ uint16_t hamming(unsigned int *sample, unsigned int i,unsigned int n,unsigned int key) { //n is byteno i is the sample byte pt0[8]; copy2(pt0,&sample[i*KEYBYTES]); byte pt1[8]; copy2(pt1,&sample[i*KEYBYTES+8]); byte ans[8]; ROR(ans,pt1,8); copy(pt1,ans); _add(ans,pt1,pt0); copy(pt1,ans); uint16_t answer=(uint16_t)pt1[n*2]<<8|(uint16_t)pt1[n*2+1]; uint16_t inter; if(n<4){ inter= (uint16_t)(answer ^ key); } else{ inter = (uint16_t)(answer ^ key); } uint16_t dist = hammingweight(inter); return dist; } __global__ void maxCorelationkernel(double *corelation,double *wavestat,double *wavestat2,double *hammingstat){ int keyguess=blockDim.y*blockIdx.y+threadIdx.y; int keybyte=blockDim.x*blockIdx.x+threadIdx.x; if (keybyte<KEYBYTESPART && keyguess<KEYS ){ double sigmaH,sigmaH2,sigmaW=0,sigmaW2=0,sigmaWH=0; sigmaH=hammingstat[KEYBYTESPART*keyguess+keybyte]; sigmaH2=hammingstat[KEYS*KEYBYTESPART+KEYBYTESPART*keyguess+keybyte]; double temp_corelation=0;; double corelationmax=0;; unsigned int j; for(j=0;j<WAVELENGTH;j++){ sigmaWH=wavestat2[j*KEYS*KEYBYTESPART + keyguess*KEYBYTESPART + keybyte]; sigmaW=wavestat[j]; sigmaW2=wavestat[WAVELENGTH+j]; double numerator=SAMPLES*sigmaWH - sigmaW*sigmaH; double denominator=sqrt(SAMPLES*sigmaW2 - sigmaW*sigmaW)*sqrt(SAMPLES*sigmaH2 - sigmaH*sigmaH); if(CORRELATION_SIGN==0){ temp_corelation=fabs(numerator/denominator); } else if(CORRELATION_SIGN==1){ temp_corelation=-numerator/denominator; } else if(CORRELATION_SIGN==2){ temp_corelation=numerator/denominator; } else{ temp_corelation=fabs(numerator/denominator); } if(temp_corelation>corelationmax){ corelationmax=temp_corelation; } } if(corelationmax>corelation[keyguess*KEYBYTESPART+keybyte]){ corelation[keyguess*KEYBYTESPART+keybyte]=corelationmax; } } return; } __global__ void wavestatkernel(double *wavedata, double *wavestat,double *wavestat2,byte *hammingArray){ int keyguess=blockDim.y*blockIdx.y+threadIdx.y; int keybyte=blockDim.x*blockIdx.x+threadIdx.x; int wave=blockDim.z*blockIdx.z+threadIdx.z; if (keyguess<KEYS && keybyte<KEYBYTESPART && wave<WAVELENGTH ){ unsigned int i; double sigmaWH=0; for(i=0;i<SAMPLES;i++){ 
sigmaWH+=wavedata[i*WAVELENGTH+wave]*(double)hammingArray[i*KEYS*KEYBYTESPART + keyguess*KEYBYTESPART + keybyte]; } wavestat2[wave*KEYS*KEYBYTESPART + keyguess*KEYBYTESPART + keybyte ]=sigmaWH; } if (keyguess==0 && keybyte==0 && wave<WAVELENGTH ){ unsigned int i; double sigmaW=0,sigmaW2=0,W=0; for(i=0;i<SAMPLES;i++){ W=wavedata[i*WAVELENGTH+wave]; sigmaW+=W; sigmaW2+=W*W; } wavestat[wave]=sigmaW; wavestat[WAVELENGTH+wave]=sigmaW2; } return; } __global__ void hammingkernel(unsigned int *sample,byte *hammingArray,double *hammingstat){ int keyguess=blockDim.y*blockIdx.y+threadIdx.y; int keybyte=blockDim.x*blockIdx.x+threadIdx.x; if (keybyte<KEYBYTESPART && keyguess<KEYS ){ double sigmaH=0,sigmaH2=0; byte H; unsigned int i; for(i=0;i<SAMPLES;i++){ H=hamming(sample,i,keybyte,keyguess); hammingArray[i*KEYS*KEYBYTESPART + keyguess*KEYBYTESPART + keybyte]=H; sigmaH+=(double)H; sigmaH2+=(double)H*(double)H; } hammingstat[KEYBYTESPART*keyguess+keybyte]=sigmaH; hammingstat[KEYS*KEYBYTESPART+KEYBYTESPART*keyguess+keybyte]=sigmaH2; } return; } int main(int argc, char *argv[]){ unsigned int i,j; //check args if(argc!=3){ fprintf(stderr,"%s\n", "Not enough args. eg ./cpa wavedata.txt sample.txt"); exit(EXIT_FAILURE); } if(ALLWAVELEN%WAVELENGTH !=0){ fprintf(stderr,"Make sure that ALLWAVELEN is divisible by WAVELEN\n"); exit(1); } //get wave data double *wavedata=(double *)malloc(sizeof(double) * SAMPLES* WAVELENGTH); isMemoryFull(wavedata); //get sample texts unsigned int *sample=(unsigned int *)malloc(sizeof(unsigned int)*SAMPLES*KEYBYTES); isMemoryFull(sample); FILE *file=fopen(argv[2],"r"); isFileOK(file); if(FORMAT==1){ for(i=0; i<SAMPLES ;i++){ for(j=0; j<KEYBYTES; j++){ fscanf(file,"%x",&sample[i*KEYBYTES+j]); } } } else if(FORMAT==0){ char str[100]; for(i=0; i<SAMPLES ;i++){ fscanf(file,"%s",str); for(j=0; j<KEYBYTES; j++){ sscanf(&str[2*j],"%02X",&sample[i*KEYBYTES+j]); } } } else{ fprintf(stderr,"Unknown FORMAT for sample text\n"); exit(1); } fclose(file); //space for corelation double *corelation=(double *)malloc(sizeof(double) * KEYS * KEYBYTESPART); isMemoryFull(corelation); //Time cudaEvent_t start,stop; float elapsedtime; cudaEventCreate(&start); cudaEventRecord(start,0); //cuda arrays and copying double *dev_wavedata; unsigned int *dev_sample; double *dev_corelation,*dev_wavestat,*dev_wavestat2,*dev_hammingstat; byte *dev_hammingArray; checkCudaError(cudaMalloc((void**)&dev_wavedata, SAMPLES*WAVELENGTH*sizeof(double))); checkCudaError(cudaMalloc((void**)&dev_sample, SAMPLES*KEYBYTES*sizeof(unsigned int))); checkCudaError(cudaMalloc((void**)&dev_corelation, KEYS*KEYBYTESPART*sizeof(double))); checkCudaError(cudaMalloc((void**)&dev_hammingArray, KEYS*KEYBYTESPART*SAMPLES*sizeof(byte))); checkCudaError(cudaMalloc((void**)&dev_wavestat, 2*WAVELENGTH*sizeof(double))); checkCudaError(cudaMalloc((void**)&dev_wavestat2, KEYS*KEYBYTESPART*WAVELENGTH*sizeof(double))); checkCudaError(cudaMalloc((void**)&dev_hammingstat, 2*KEYS*KEYBYTESPART*sizeof(double))); checkCudaError(cudaMemset(dev_corelation,0, KEYS*KEYBYTESPART*sizeof(double))); checkCudaError(cudaMemcpy(dev_sample,sample, SAMPLES*KEYBYTES*sizeof(unsigned int),cudaMemcpyHostToDevice)); dim3 grid(KEYBYTES/4,KEYS/64); dim3 block(4,64); //findhamming hammingkernel<<<grid,block>>>(dev_sample,dev_hammingArray,dev_hammingstat); checkCudaError(cudaGetLastError()); int loops=0; for(loops=0;loops<ALLWAVELEN/WAVELENGTH;loops++){ FILE *file=fopen(argv[1],"r"); isFileOK(file); for(i=0; i<SAMPLES ;i++){ unsigned int k=0; for(j=0; j<ALLWAVELEN; j++){ float 
dat; fscanf(file,"%f",&dat); if(j<WAVELENGTH*(loops+1) && j>=WAVELENGTH*loops){ wavedata[i*WAVELENGTH+k]=(double)dat; k++; } } } fclose(file); checkCudaError(cudaMemcpy(dev_wavedata,wavedata,SAMPLES*WAVELENGTH*sizeof(double),cudaMemcpyHostToDevice)); dim3 block3d(4,32,4); dim3 grid3d(KEYBYTESPART/4,KEYS/32,WAVELENGTH/4); //find wave stats wavestatkernel<<<grid3d,block3d>>>(dev_wavedata,dev_wavestat,dev_wavestat2,dev_hammingArray); checkCudaError(cudaGetLastError()); //deploy double maxCorelationkernel<<<grid,block>>>(dev_corelation,dev_wavestat,dev_wavestat2,dev_hammingstat); checkCudaError(cudaGetLastError()); //progress fprintf(stderr,"%d of %d completed\n",loops+1,ALLWAVELEN/WAVELENGTH); } //copy back checkCudaError(cudaMemcpy(corelation,dev_corelation,KEYS*KEYBYTESPART*sizeof(double),cudaMemcpyDeviceToHost)); checkCudaError(cudaFree(dev_wavedata)); checkCudaError(cudaFree(dev_sample)); checkCudaError(cudaFree(dev_corelation)); checkCudaError(cudaFree(dev_wavestat)); checkCudaError(cudaFree(dev_wavestat2)); checkCudaError(cudaFree(dev_hammingstat)); checkCudaError(cudaFree(dev_hammingArray)); //Time cudaEventCreate(&stop); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedtime,start,stop); fprintf(stderr,"Time spent for CUDA operation : %.10f\n",elapsedtime/(float)1000); //form struct array struct key_corr key_corrpairs[KEYS][KEYBYTESPART]; //print all information while putting to structs file=fopen(FILEALL,"w"); for (i=0;i<KEYS;i++){ for(j=0;j<KEYBYTESPART;j++){ key_corrpairs[i][j].key=i; key_corrpairs[i][j].corr=corelation[i*KEYBYTESPART+j]; fprintf(file,"%.4X : %f\t",i,corelation[i*KEYBYTESPART+j]); } fprintf(file,"\n"); } int k; //sort using insertion sort for (j=0;j<KEYBYTESPART;j++){ for (i=1;i<KEYS;i++){ double corr=key_corrpairs[i][j].corr; unsigned int key=key_corrpairs[i][j].key; for (k=(int)(i-1);k>=0 && corr>key_corrpairs[k][j].corr;k--){ key_corrpairs[k+1][j].corr=key_corrpairs[k][j].corr; key_corrpairs[k+1][j].key=key_corrpairs[k][j].key; } key_corrpairs[k+1][j].key=key; key_corrpairs[k+1][j].corr=corr; } } //print all in ascending order file=fopen(FILEALLSORTED,"w"); for (i=0;i<KEYS;i++){ for(j=0;j<KEYBYTESPART;j++){ fprintf(file,"%.4X : %f\t",key_corrpairs[i][j].key,key_corrpairs[i][j].corr); } fprintf(file,"\n"); } //print the best five to the stdout for (i=0;i<5;i++){ for(j=0;j<KEYBYTESPART;j++){ printf("%.4X\t\t",key_corrpairs[i][j].key); } printf("\n"); for(j=0;j<KEYBYTESPART;j++){ printf("%f\t",key_corrpairs[i][j].corr); } printf("\n\n"); } return 0; }
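// The device-side hammingweight() in this file relies on Kernighan's bit trick: H &= H - 1
// clears the lowest set bit, so the loop runs once per set bit. A host-side sketch of the
// same idea for sanity checking; check_hw is a hypothetical name and, as in the kernel, a
// 16-bit intermediate value is assumed.
#include <cstdint>

static int check_hw(uint16_t h)
{
    int dist = 0;
    while (h) {
        h = (uint16_t)(h & (h - 1));   // drop the lowest set bit
        ++dist;
    }
    return dist;
}

// e.g. check_hw(0x00FF) == 8, check_hw(0x8001) == 2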
63abb1f5ea0346a3255c2289d7ccc40aa8e118d0.hip
// !!! This is a file automatically generated by hipify!!!
#include "Grid.h"
#include <iostream>
#include <iomanip>
#include <ostream>
#include <sstream>
#include <fstream>

using std::setw;
using std::setprecision;
using std::endl;
using std::cout;

//construct the grid - set dimensions and allocate memory on host and device
Grid::Grid(int gx, int gy) : gx_(gx), gy_(gy)
{
    //resize and set ICs
    hGrid_.resize(gx_ * gy_);

    hipError_t err = hipMalloc(&dGrid_, gx_ * gy_ * sizeof(float));
    if (err != hipSuccess) {
        std::cerr << "Could not allocate memory for Grid!" << std::endl;
        exit(1);
    }
}

//host side memory will automatically be freed in the vector destructor;
//we must manually free the device memory
Grid::~Grid()
{
    hipFree(dGrid_);
}

//copy constructor, copy both the host contents and gpu contents
Grid::Grid(const Grid &other) :
    gx_(other.gx()), gy_(other.gy()), hGrid_(other.hGrid_)
{
    hipError_t err = hipMalloc(&dGrid_, gx_ * gy_ * sizeof(float));
    if (err != hipSuccess) {
        std::cerr << "Error allocating Grid!" << std::endl;
        exit(1);
    }
    hipMemcpy(dGrid_, other.dGrid_, gx_ * gy_ * sizeof(float), hipMemcpyDeviceToDevice);
}

//copy from host -> device
void Grid::toGPU()
{
    hipError_t err = hipMemcpy(dGrid_, &hGrid_[0], gx_ * gy_ * sizeof(float), hipMemcpyHostToDevice);
    if (err != hipSuccess) {
        std::cerr << "Error copying grid to GPU" << std::endl;
        exit(1);
    }
}

//copy from device -> host
void Grid::fromGPU()
{
    hipError_t err = hipMemcpy(&hGrid_[0], dGrid_, gx_ * gy_ * sizeof(float), hipMemcpyDeviceToHost);
    if (err != hipSuccess) {
        std::cerr << "Error copying grid from GPU" << std::endl;
        exit(1);
    }
}

//swap two grids by exchanging pointers (host and device pointers);
//std::vector does this under the hood with a specialized version of swap
void Grid::swap(Grid &a, Grid &b)
{
    std::swap(a.hGrid_, b.hGrid_);
    std::swap(a.dGrid_, b.dGrid_);
}

//save the host grid to a file for debugging / visualization
void Grid::saveStateToFile(const std::string &identifier)
{
    std::stringstream ss;
    ss << "grid" << "_" << identifier << ".txt";
    std::ofstream ofs(ss.str().c_str());
    ofs << *this << std::endl;
    ofs.close();
}

std::ostream& operator<<(std::ostream& os, const Grid &grid)
{
    os << setprecision(7);
    for (int y = 0; y < grid.gy(); ++y) {
        for (int x = 0; x < grid.gx(); x++) {
            os << setw(5) << x << " " << setw(5) << y << " "
               << setw(15) << grid.hGrid_[x + grid.gx() * y] << endl;
        }
        os << endl;
    }
    os << endl;
    return os;
}
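// A short usage sketch for the Grid class defined above. It is illustrative only and sticks
// to the members implemented in this translation unit (construction, copy construction, the
// host<->device copies, swap, and the file dump), so it makes no assumptions about anything
// else Grid.h may declare; grid_smoke_test is a hypothetical name.
#include "Grid.h"

int grid_smoke_test()
{
    Grid a(64, 64);                 // allocates the host vector and the device buffer
    Grid b(a);                      // copy constructor clones both host and device sides

    a.toGPU();                      // host -> device
    a.fromGPU();                    // device -> host
    a.swap(a, b);                   // exchange host and device storage of a and b

    b.saveStateToFile("smoke");     // writes grid_smoke.txt
    return 0;
}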
63abb1f5ea0346a3255c2289d7ccc40aa8e118d0.cu
#include "Grid.h"
#include <iostream>
#include <iomanip>
#include <ostream>
#include <sstream>
#include <fstream>

using std::setw;
using std::setprecision;
using std::endl;
using std::cout;

//construct the grid - set dimensions and allocate memory on host and device
Grid::Grid(int gx, int gy) : gx_(gx), gy_(gy)
{
    //resize and set ICs
    hGrid_.resize(gx_ * gy_);

    cudaError_t err = cudaMalloc(&dGrid_, gx_ * gy_ * sizeof(float));
    if (err != cudaSuccess) {
        std::cerr << "Could not allocate memory for Grid!" << std::endl;
        exit(1);
    }
}

//host side memory will automatically be freed in the vector destructor;
//we must manually free the device memory
Grid::~Grid()
{
    cudaFree(dGrid_);
}

//copy constructor, copy both the host contents and gpu contents
Grid::Grid(const Grid &other) :
    gx_(other.gx()), gy_(other.gy()), hGrid_(other.hGrid_)
{
    cudaError_t err = cudaMalloc(&dGrid_, gx_ * gy_ * sizeof(float));
    if (err != cudaSuccess) {
        std::cerr << "Error allocating Grid!" << std::endl;
        exit(1);
    }
    cudaMemcpy(dGrid_, other.dGrid_, gx_ * gy_ * sizeof(float), cudaMemcpyDeviceToDevice);
}

//copy from host -> device
void Grid::toGPU()
{
    cudaError_t err = cudaMemcpy(dGrid_, &hGrid_[0], gx_ * gy_ * sizeof(float), cudaMemcpyHostToDevice);
    if (err != cudaSuccess) {
        std::cerr << "Error copying grid to GPU" << std::endl;
        exit(1);
    }
}

//copy from device -> host
void Grid::fromGPU()
{
    cudaError_t err = cudaMemcpy(&hGrid_[0], dGrid_, gx_ * gy_ * sizeof(float), cudaMemcpyDeviceToHost);
    if (err != cudaSuccess) {
        std::cerr << "Error copying grid from GPU" << std::endl;
        exit(1);
    }
}

//swap two grids by exchanging pointers (host and device pointers);
//std::vector does this under the hood with a specialized version of swap
void Grid::swap(Grid &a, Grid &b)
{
    std::swap(a.hGrid_, b.hGrid_);
    std::swap(a.dGrid_, b.dGrid_);
}

//save the host grid to a file for debugging / visualization
void Grid::saveStateToFile(const std::string &identifier)
{
    std::stringstream ss;
    ss << "grid" << "_" << identifier << ".txt";
    std::ofstream ofs(ss.str().c_str());
    ofs << *this << std::endl;
    ofs.close();
}

std::ostream& operator<<(std::ostream& os, const Grid &grid)
{
    os << setprecision(7);
    for (int y = 0; y < grid.gy(); ++y) {
        for (int x = 0; x < grid.gx(); x++) {
            os << setw(5) << x << " " << setw(5) << y << " "
               << setw(15) << grid.hGrid_[x + grid.gx() * y] << endl;
        }
        os << endl;
    }
    os << endl;
    return os;
}
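// The error handling in Grid repeats the same pattern: call, compare against cudaSuccess,
// print, exit. A small illustrative wrapper that factors the pattern out; CUDA_CHECK is a
// hypothetical macro, not something Grid.h or the CUDA runtime provides.
#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

#define CUDA_CHECK(call)                                                     \
    do {                                                                     \
        cudaError_t err_ = (call);                                           \
        if (err_ != cudaSuccess) {                                           \
            std::fprintf(stderr, "CUDA error '%s' at %s:%d\n",               \
                         cudaGetErrorString(err_), __FILE__, __LINE__);      \
            std::exit(1);                                                    \
        }                                                                    \
    } while (0)

// e.g. CUDA_CHECK(cudaMalloc(&dGrid_, gx_ * gy_ * sizeof(float)));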
fa83ae2458678e156e452eceefde7363bf04a021.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // RUN: echo "GPU binary would be here" > %t // RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \ // RUN: -fcuda-include-gpubinary %t -o - \ // RUN: | FileCheck %s --check-prefixes=ALL,NORDC,CUDA,CUDANORDC // RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \ // RUN: -fcuda-include-gpubinary %t -o - -DNOGLOBALS \ // RUN: | FileCheck %s -check-prefixes=NOGLOBALS,CUDANOGLOBALS // RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \ // RUN: -fcuda-rdc -fcuda-include-gpubinary %t -o - \ // RUN: | FileCheck %s --check-prefixes=ALL,RDC,CUDA,CUDARDC // RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -o - \ // RUN: | FileCheck %s -check-prefix=NOGPUBIN // RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \ // RUN: -fcuda-include-gpubinary %t -o - -x hip\ // RUN: | FileCheck %s --check-prefixes=ALL,NORDC,HIP // RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \ // RUN: -fcuda-include-gpubinary %t -o - -DNOGLOBALS -x hip \ // RUN: | FileCheck %s -check-prefixes=NOGLOBALS,HIPNOGLOBALS // RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \ // RUN: -fcuda-rdc -fcuda-include-gpubinary %t -o - -x hip \ // RUN: | FileCheck %s --check-prefixes=ALL,RDC,HIP,HIPRDC // RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -o - -x hip\ // RUN: | FileCheck %s -check-prefix=NOGPUBIN #include "Inputs/cuda.h" #ifndef NOGLOBALS // ALL-DAG: @device_var = internal global i32 __device__ int device_var; // ALL-DAG: @constant_var = internal global i32 __constant__ int constant_var; // ALL-DAG: @shared_var = internal global i32 __shared__ int shared_var; // Make sure host globals don't get internalized... // ALL-DAG: @host_var = global i32 int host_var; // ... and that extern vars remain external. // ALL-DAG: @ext_host_var = external global i32 extern int ext_host_var; // Shadows for external device-side variables are *definitions* of // those variables. 
// ALL-DAG: @ext_device_var = internal global i32 extern __device__ int ext_device_var; // ALL-DAG: @ext_device_var = internal global i32 extern __constant__ int ext_constant_var; void use_pointers() { int *p; p = &device_var; p = &constant_var; p = &shared_var; p = &host_var; p = &ext_device_var; p = &ext_constant_var; p = &ext_host_var; } // Make sure that all parts of GPU code init/cleanup are there: // * constant unnamed string with the kernel name // ALL: private unnamed_addr constant{{.*}}kernelfunc{{.*}}\00" // * constant unnamed string with GPU binary // HIP: @[[FATBIN:__hip_fatbin]] = external constant i8, section ".hip_fatbin" // CUDA: @[[FATBIN:.*]] = private constant{{.*GPU binary would be here.*}}\00", // CUDANORDC-SAME: section ".nv_fatbin", align 8 // CUDARDC-SAME: section "__nv_relfatbin", align 8 // * constant struct that wraps GPU binary // ALL: @__[[PREFIX:cuda|hip]]_fatbin_wrapper = internal constant // ALL-SAME: { i32, i32, i8*, i8* } // CUDA-SAME: { i32 1180844977, i32 1, // HIP-SAME: { i32 1212764230, i32 1, // CUDA-SAME: i8* getelementptr inbounds ({{.*}}@[[FATBIN]], i64 0, i64 0), // HIP-SAME: i8* @[[FATBIN]], // ALL-SAME: i8* null } // CUDA-SAME: section ".nvFatBinSegment" // HIP-SAME: section ".hipFatBinSegment" // * variable to save GPU binary handle after initialization // NORDC: @__[[PREFIX]]_gpubin_handle = internal global i8** null // * constant unnamed string with NVModuleID // RDC: [[MODULE_ID_GLOBAL:@.*]] = private constant // CUDARDC-SAME: c"[[MODULE_ID:.+]]\00", section "__nv_module_id", align 32 // HIPRDC-SAME: c"[[MODULE_ID:.+]]\00", section "__hip_module_id", align 32 // * Make sure our constructor was added to global ctor list. // ALL: @llvm.global_ctors = appending global {{.*}}@__[[PREFIX]]_module_ctor // * Alias to global symbol containing the NVModuleID. // RDC: @__fatbinwrap[[MODULE_ID]] = alias { i32, i32, i8*, i8* } // RDC-SAME: { i32, i32, i8*, i8* }* @__[[PREFIX]]_fatbin_wrapper // Test that we build the correct number of calls to hipSetupArgument followed // by a call to hipLaunch. // ALL: define{{.*}}kernelfunc // ALL: call{{.*}}[[PREFIX]]SetupArgument // ALL: call{{.*}}[[PREFIX]]SetupArgument // ALL: call{{.*}}[[PREFIX]]SetupArgument // ALL: call{{.*}}[[PREFIX]]Launch __global__ void kernelfunc(int i, int j, int k) {} // Test that we've built correct kernel launch sequence. // ALL: define{{.*}}hostfunc // ALL: call{{.*}}[[PREFIX]]ConfigureCall // ALL: call{{.*}}kernelfunc void hostfunc(void) {hipLaunchKernelGGL(( kernelfunc), dim3(1), dim3(1), 0, 0, 1, 1, 1); } #endif // Test that we've built a function to register kernels and global vars. // ALL: define internal void @__[[PREFIX]]_register_globals // ALL: call{{.*}}[[PREFIX]]RegisterFunction(i8** %0, {{.*}}kernelfunc // ALL-DAG: call{{.*}}[[PREFIX]]RegisterVar(i8** %0, {{.*}}device_var{{.*}}i32 0, i32 4, i32 0, i32 0 // ALL-DAG: call{{.*}}[[PREFIX]]RegisterVar(i8** %0, {{.*}}constant_var{{.*}}i32 0, i32 4, i32 1, i32 0 // ALL-DAG: call{{.*}}[[PREFIX]]RegisterVar(i8** %0, {{.*}}ext_device_var{{.*}}i32 1, i32 4, i32 0, i32 0 // ALL-DAG: call{{.*}}[[PREFIX]]RegisterVar(i8** %0, {{.*}}ext_constant_var{{.*}}i32 1, i32 4, i32 1, i32 0 // ALL: ret void // Test that we've built a constructor. // ALL: define internal void @__[[PREFIX]]_module_ctor // In separate mode it calls __[[PREFIX]]RegisterFatBinary(&__[[PREFIX]]_fatbin_wrapper) // NORDC: call{{.*}}[[PREFIX]]RegisterFatBinary{{.*}}__[[PREFIX]]_fatbin_wrapper // .. 
stores return value in __[[PREFIX]]_gpubin_handle // NORDC-NEXT: store{{.*}}__[[PREFIX]]_gpubin_handle // .. and then calls __[[PREFIX]]_register_globals // NORDC-NEXT: call void @__[[PREFIX]]_register_globals // * In separate mode we also register a destructor. // NORDC-NEXT: call i32 @atexit(void (i8*)* @__[[PREFIX]]_module_dtor) // With relocatable device code we call __[[PREFIX]]RegisterLinkedBinary%NVModuleID% // RDC: call{{.*}}__[[PREFIX]]RegisterLinkedBinary[[MODULE_ID]]( // RDC-SAME: __[[PREFIX]]_register_globals, {{.*}}__[[PREFIX]]_fatbin_wrapper // RDC-SAME: [[MODULE_ID_GLOBAL]] // Test that we've created destructor. // NORDC: define internal void @__[[PREFIX]]_module_dtor // NORDC: load{{.*}}__[[PREFIX]]_gpubin_handle // NORDC-NEXT: call void @__[[PREFIX]]UnregisterFatBinary // There should be no __[[PREFIX]]_register_globals if we have no // device-side globals, but we still need to register GPU binary. // Skip GPU binary string first. // CUDANOGLOBALS: @{{.*}} = private constant{{.*}} // HIPNOGLOBALS: @{{.*}} = external constant{{.*}} // NOGLOBALS-NOT: define internal void @__{{.*}}_register_globals // NOGLOBALS: define internal void @__[[PREFIX:cuda|hip]]_module_ctor // NOGLOBALS: call{{.*}}[[PREFIX]]RegisterFatBinary{{.*}}__[[PREFIX]]_fatbin_wrapper // NOGLOBALS-NOT: call void @__[[PREFIX]]_register_globals // NOGLOBALS: define internal void @__[[PREFIX]]_module_dtor // NOGLOBALS: call void @__[[PREFIX]]UnregisterFatBinary // There should be no constructors/destructors if we have no GPU binary. // NOGPUBIN-NOT: define internal void @__[[PREFIX]]_register_globals // NOGPUBIN-NOT: define internal void @__[[PREFIX]]_module_ctor // NOGPUBIN-NOT: define internal void @__[[PREFIX]]_module_dtor
fa83ae2458678e156e452eceefde7363bf04a021.cu
// RUN: echo "GPU binary would be here" > %t // RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \ // RUN: -fcuda-include-gpubinary %t -o - \ // RUN: | FileCheck %s --check-prefixes=ALL,NORDC,CUDA,CUDANORDC // RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \ // RUN: -fcuda-include-gpubinary %t -o - -DNOGLOBALS \ // RUN: | FileCheck %s -check-prefixes=NOGLOBALS,CUDANOGLOBALS // RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \ // RUN: -fcuda-rdc -fcuda-include-gpubinary %t -o - \ // RUN: | FileCheck %s --check-prefixes=ALL,RDC,CUDA,CUDARDC // RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -o - \ // RUN: | FileCheck %s -check-prefix=NOGPUBIN // RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \ // RUN: -fcuda-include-gpubinary %t -o - -x hip\ // RUN: | FileCheck %s --check-prefixes=ALL,NORDC,HIP // RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \ // RUN: -fcuda-include-gpubinary %t -o - -DNOGLOBALS -x hip \ // RUN: | FileCheck %s -check-prefixes=NOGLOBALS,HIPNOGLOBALS // RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s \ // RUN: -fcuda-rdc -fcuda-include-gpubinary %t -o - -x hip \ // RUN: | FileCheck %s --check-prefixes=ALL,RDC,HIP,HIPRDC // RUN: %clang_cc1 -triple x86_64-linux-gnu -emit-llvm %s -o - -x hip\ // RUN: | FileCheck %s -check-prefix=NOGPUBIN #include "Inputs/cuda.h" #ifndef NOGLOBALS // ALL-DAG: @device_var = internal global i32 __device__ int device_var; // ALL-DAG: @constant_var = internal global i32 __constant__ int constant_var; // ALL-DAG: @shared_var = internal global i32 __shared__ int shared_var; // Make sure host globals don't get internalized... // ALL-DAG: @host_var = global i32 int host_var; // ... and that extern vars remain external. // ALL-DAG: @ext_host_var = external global i32 extern int ext_host_var; // Shadows for external device-side variables are *definitions* of // those variables. 
// ALL-DAG: @ext_device_var = internal global i32 extern __device__ int ext_device_var; // ALL-DAG: @ext_device_var = internal global i32 extern __constant__ int ext_constant_var; void use_pointers() { int *p; p = &device_var; p = &constant_var; p = &shared_var; p = &host_var; p = &ext_device_var; p = &ext_constant_var; p = &ext_host_var; } // Make sure that all parts of GPU code init/cleanup are there: // * constant unnamed string with the kernel name // ALL: private unnamed_addr constant{{.*}}kernelfunc{{.*}}\00" // * constant unnamed string with GPU binary // HIP: @[[FATBIN:__hip_fatbin]] = external constant i8, section ".hip_fatbin" // CUDA: @[[FATBIN:.*]] = private constant{{.*GPU binary would be here.*}}\00", // CUDANORDC-SAME: section ".nv_fatbin", align 8 // CUDARDC-SAME: section "__nv_relfatbin", align 8 // * constant struct that wraps GPU binary // ALL: @__[[PREFIX:cuda|hip]]_fatbin_wrapper = internal constant // ALL-SAME: { i32, i32, i8*, i8* } // CUDA-SAME: { i32 1180844977, i32 1, // HIP-SAME: { i32 1212764230, i32 1, // CUDA-SAME: i8* getelementptr inbounds ({{.*}}@[[FATBIN]], i64 0, i64 0), // HIP-SAME: i8* @[[FATBIN]], // ALL-SAME: i8* null } // CUDA-SAME: section ".nvFatBinSegment" // HIP-SAME: section ".hipFatBinSegment" // * variable to save GPU binary handle after initialization // NORDC: @__[[PREFIX]]_gpubin_handle = internal global i8** null // * constant unnamed string with NVModuleID // RDC: [[MODULE_ID_GLOBAL:@.*]] = private constant // CUDARDC-SAME: c"[[MODULE_ID:.+]]\00", section "__nv_module_id", align 32 // HIPRDC-SAME: c"[[MODULE_ID:.+]]\00", section "__hip_module_id", align 32 // * Make sure our constructor was added to global ctor list. // ALL: @llvm.global_ctors = appending global {{.*}}@__[[PREFIX]]_module_ctor // * Alias to global symbol containing the NVModuleID. // RDC: @__fatbinwrap[[MODULE_ID]] = alias { i32, i32, i8*, i8* } // RDC-SAME: { i32, i32, i8*, i8* }* @__[[PREFIX]]_fatbin_wrapper // Test that we build the correct number of calls to cudaSetupArgument followed // by a call to cudaLaunch. // ALL: define{{.*}}kernelfunc // ALL: call{{.*}}[[PREFIX]]SetupArgument // ALL: call{{.*}}[[PREFIX]]SetupArgument // ALL: call{{.*}}[[PREFIX]]SetupArgument // ALL: call{{.*}}[[PREFIX]]Launch __global__ void kernelfunc(int i, int j, int k) {} // Test that we've built correct kernel launch sequence. // ALL: define{{.*}}hostfunc // ALL: call{{.*}}[[PREFIX]]ConfigureCall // ALL: call{{.*}}kernelfunc void hostfunc(void) { kernelfunc<<<1, 1>>>(1, 1, 1); } #endif // Test that we've built a function to register kernels and global vars. // ALL: define internal void @__[[PREFIX]]_register_globals // ALL: call{{.*}}[[PREFIX]]RegisterFunction(i8** %0, {{.*}}kernelfunc // ALL-DAG: call{{.*}}[[PREFIX]]RegisterVar(i8** %0, {{.*}}device_var{{.*}}i32 0, i32 4, i32 0, i32 0 // ALL-DAG: call{{.*}}[[PREFIX]]RegisterVar(i8** %0, {{.*}}constant_var{{.*}}i32 0, i32 4, i32 1, i32 0 // ALL-DAG: call{{.*}}[[PREFIX]]RegisterVar(i8** %0, {{.*}}ext_device_var{{.*}}i32 1, i32 4, i32 0, i32 0 // ALL-DAG: call{{.*}}[[PREFIX]]RegisterVar(i8** %0, {{.*}}ext_constant_var{{.*}}i32 1, i32 4, i32 1, i32 0 // ALL: ret void // Test that we've built a constructor. // ALL: define internal void @__[[PREFIX]]_module_ctor // In separate mode it calls __[[PREFIX]]RegisterFatBinary(&__[[PREFIX]]_fatbin_wrapper) // NORDC: call{{.*}}[[PREFIX]]RegisterFatBinary{{.*}}__[[PREFIX]]_fatbin_wrapper // .. stores return value in __[[PREFIX]]_gpubin_handle // NORDC-NEXT: store{{.*}}__[[PREFIX]]_gpubin_handle // .. 
and then calls __[[PREFIX]]_register_globals // NORDC-NEXT: call void @__[[PREFIX]]_register_globals // * In separate mode we also register a destructor. // NORDC-NEXT: call i32 @atexit(void (i8*)* @__[[PREFIX]]_module_dtor) // With relocatable device code we call __[[PREFIX]]RegisterLinkedBinary%NVModuleID% // RDC: call{{.*}}__[[PREFIX]]RegisterLinkedBinary[[MODULE_ID]]( // RDC-SAME: __[[PREFIX]]_register_globals, {{.*}}__[[PREFIX]]_fatbin_wrapper // RDC-SAME: [[MODULE_ID_GLOBAL]] // Test that we've created destructor. // NORDC: define internal void @__[[PREFIX]]_module_dtor // NORDC: load{{.*}}__[[PREFIX]]_gpubin_handle // NORDC-NEXT: call void @__[[PREFIX]]UnregisterFatBinary // There should be no __[[PREFIX]]_register_globals if we have no // device-side globals, but we still need to register GPU binary. // Skip GPU binary string first. // CUDANOGLOBALS: @{{.*}} = private constant{{.*}} // HIPNOGLOBALS: @{{.*}} = external constant{{.*}} // NOGLOBALS-NOT: define internal void @__{{.*}}_register_globals // NOGLOBALS: define internal void @__[[PREFIX:cuda|hip]]_module_ctor // NOGLOBALS: call{{.*}}[[PREFIX]]RegisterFatBinary{{.*}}__[[PREFIX]]_fatbin_wrapper // NOGLOBALS-NOT: call void @__[[PREFIX]]_register_globals // NOGLOBALS: define internal void @__[[PREFIX]]_module_dtor // NOGLOBALS: call void @__[[PREFIX]]UnregisterFatBinary // There should be no constructors/destructors if we have no GPU binary. // NOGPUBIN-NOT: define internal void @__[[PREFIX]]_register_globals // NOGPUBIN-NOT: define internal void @__[[PREFIX]]_module_ctor // NOGPUBIN-NOT: define internal void @__[[PREFIX]]_module_dtor
3b7eedb195417cd411f0fcaadb1d9c16f92d1ee6.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include "definitions.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include "MatrixLib.cuh" #include "GPUgaussLib.cuh" //******************************************************************************************* //theta is: {N,bg} __global__ void kernel_MLERatio(const float *d_data, float PSFSigma, int sz, int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood, int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ float M[NV_RH1*NV_RH1], Diag[NV_RH1], Minv[NV_RH1*NV_RH1]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; float dudt[NV_RH1]; float d2udt2[NV_RH1]; float NR_Numerator[NV_RH1], NR_Denominator[NV_RH1]; float thetaH1[NV_RH1]; float thetaH0[NV_RH0]; //float maxjump[NV_RH1] = { 1e2f, 2e0f }; //float gamma[NV_RH1] = { 0.5f, 1.0f }; float Nmax; float logModel; float LR[Q_R]; //Prevent read/write past end of array if ((bx*BlockSize + tx) >= Nfits) return; memset(M, 0, NV_RH1*NV_RH1*sizeof(float)); memset(Minv, 0, NV_RH1*NV_RH1*sizeof(float)); //load data const float *s_data = d_data + (sz*sz*bx*BlockSize + sz*sz*tx); kernel_GaussFMaxMin2D(sz, PSFSigma, s_data, &Nmax, &thetaH1[1]); thetaH1[0] = max(0.1f, (Nmax - thetaH1[1]) * 4 * pi*PSFSigma*PSFSigma); for (kk = 0; kk<iterations; kk++) {//main iterative loop //initialize memset(NR_Numerator, 0, NV_RH1*sizeof(float)); memset(NR_Denominator, 0, NV_RH1*sizeof(float)); for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { PSFx = kernel_IntGauss1D(ii, (sz-1) / 2.0, PSFSigma); PSFy = kernel_IntGauss1D(jj, (sz-1) / 2.0, PSFSigma); model = thetaH1[1] + thetaH1[0] * PSFx*PSFy; data = s_data[sz*jj + ii]; //calculating derivatives dudt[0] = PSFx*PSFy; d2udt2[0] = 0.0f; dudt[1] = 1.0f; d2udt2[1] = 0.0f; cf = 0.0f; df = 0.0f; if (model>10e-3f) cf = data / model - 1; if (model>10e-3f) df = data / pow(model, 2); cf = min(cf, 10e4f); df = min(df, 10e4f); for (ll = 0; ll<NV_RH1; ll++){ NR_Numerator[ll] += dudt[ll] * cf; NR_Denominator[ll] += d2udt2[ll] * cf - pow(dudt[ll], 2)*df; } } // Any other constraints thetaH1[0] -= min(max(NR_Numerator[0] / NR_Denominator[0] / 2.0, -thetaH1[0]), thetaH1[0] / 2.0); thetaH1[0] = max(thetaH1[0], Nmax/2.0f); thetaH1[1] -= NR_Numerator[1] / NR_Denominator[1]; thetaH1[1] = max(thetaH1[1], 0.01f); } // ML estimate of background model thetaH0[0] = 0.0; for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { thetaH0[0] += s_data[sz*jj + ii]; } thetaH0[0] = thetaH0[0] / pow((float)sz, 2); // Calculating the CRLB and LogLikelihoodRatio Div = 0.0; for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { PSFx = kernel_IntGauss1D(ii, (sz - 1) / 2.0, PSFSigma); PSFy = kernel_IntGauss1D(jj, (sz - 1) / 2.0, PSFSigma); model = thetaH1[1] + thetaH1[0] * PSFx*PSFy; data = s_data[sz*jj + ii]; //calculating derivatives dudt[0] = PSFx*PSFy; dudt[1] = 1.0f; //Building the 
Fisher Information Matrix for (kk = 0; kk<NV_RH1; kk++)for (ll = kk; ll<NV_RH1; ll++){ M[kk*NV_RH1 + ll] += dudt[ll] * dudt[kk] / model; M[ll*NV_RH1 + kk] = M[kk*NV_RH1 + ll]; } //LogLikelyhood logModel = model / (thetaH0[0] + 1e-5); if (logModel>0 && data > 0) Div += 2 * (data*log(logModel + 1e-5) - model + thetaH0[0]); } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV_RH1); kernel_CalcLLRProp(Diag[0], thetaH1[0], Div, LR); //write to global arrays for (kk = 0; kk<NV_RH1; kk++) d_Parameters[kk + (NV_RH1 + NV_RH0)*(BlockSize*bx + tx)] = thetaH1[kk]; for (kk = 0; kk<NV_RH0; kk++) d_Parameters[(NV_RH1 + kk) + (NV_RH1 + NV_RH0)*(BlockSize*bx + tx)] = thetaH0[kk]; for (kk = 0; kk<Q_R; kk++) d_LogLikelihood[kk + Q_R * (BlockSize*bx + tx)] = LR[kk]; for (kk = 0; kk<NV_RH1; kk++) d_CRLBs[kk + NV_RH1*(BlockSize*bx + tx)] = Diag[kk]; return; } //******************************************************************************************* //theta is: {x,y,N,bg} __global__ void kernel_MLEFit(const float *d_data, float PSFSigma, int sz, int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood, int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ float M[NV_PH1*NV_PH1], Diag[NV_PH1], Minv[NV_PH1*NV_PH1]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; //int NV = NV_PH1; float dudt[NV_PH1]; float d2udt2[NV_PH1]; float NR_Numerator[NV_PH1], NR_Denominator[NV_PH1]; float thetaH1[NV_PH1]; float thetaH0[NV_PH0]; float maxjump[NV_PH1] = { 1e0f, 1e0f, 1e2f, 2e0f }; float gamma[NV_PH1] = { 1.0f, 1.0f, 0.5f, 1.0f }; float LR[Q_P]; float Nmax; float logModel; //Prevent read/write past end of array if ((bx*BlockSize + tx) >= Nfits) return; memset(M, 0, NV_PH1*NV_PH1*sizeof(float)); memset(Minv, 0, NV_PH1*NV_PH1*sizeof(float)); const float *s_data = d_data + (sz*sz*bx*BlockSize + sz*sz*tx); //initial values kernel_CenterofMass2D(sz, s_data, &thetaH1[0], &thetaH1[1], 0, 0); kernel_GaussFMaxMin2D(sz, PSFSigma, s_data, &Nmax, &thetaH1[3]); thetaH1[2] = max(0.1f, (Nmax - thetaH1[3]) * 2 * pi*PSFSigma*PSFSigma); for (kk = 0; kk<iterations; kk++) {//main iterative loop memset(NR_Numerator, 0, NV_PH1*sizeof(float)); memset(NR_Denominator, 0, NV_PH1*sizeof(float)); for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { PSFx = kernel_IntGauss1D(ii, thetaH1[0], PSFSigma); PSFy = kernel_IntGauss1D(jj, thetaH1[1], PSFSigma); model = thetaH1[3] + thetaH1[2] * PSFx*PSFy; data = s_data[sz*jj + ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, thetaH1[0], PSFSigma, thetaH1[2], PSFy, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGauss1D(jj, thetaH1[1], PSFSigma, thetaH1[2], PSFx, &dudt[1], &d2udt2[1]); dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf = 0.0f; df = 0.0f; if (model>10e-3f) cf = data / model - 1; if (model>10e-3f) df = data / pow(model, 2); cf = min(cf, 10e4f); df = min(df, 10e4f); for 
(ll = 0; ll<NV_PH1; ll++){ NR_Numerator[ll] += dudt[ll] * cf; NR_Denominator[ll] += d2udt2[ll] * cf - pow(dudt[ll], 2)*df; } } // The update if (kk<2) for (ll = 0; ll<NV_PH1; ll++) thetaH1[ll] -= gamma[ll] * min(max(NR_Numerator[ll] / NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); else for (ll = 0; ll<NV_PH1; ll++) thetaH1[ll] -= min(max(NR_Numerator[ll] / NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints thetaH1[2] = max(thetaH1[2], 1.0f); thetaH1[3] = max(thetaH1[3], 0.01f); } //Estimate background model thetaH0[0] = 0.0; for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { thetaH0[0] += s_data[sz*jj + ii]; } thetaH0[0] = thetaH0[0] / pow((float)sz, 2); // Calculating the CRLB and LogLikelihood Div = 0.0; for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { PSFx = kernel_IntGauss1D(ii, thetaH1[0], PSFSigma); PSFy = kernel_IntGauss1D(jj, thetaH1[1], PSFSigma); model = thetaH1[3] + max(thetaH1[2], thetaH1[3])*PSFx*PSFy; data = s_data[sz*jj + ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, thetaH1[0], PSFSigma, thetaH1[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGauss1D(jj, thetaH1[1], PSFSigma, thetaH1[2], PSFx, &dudt[1], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk = 0; kk<NV_PH1; kk++)for (ll = kk; ll<NV_PH1; ll++){ M[kk*NV_PH1 + ll] += dudt[ll] * dudt[kk] / model; M[ll*NV_PH1 + kk] = M[kk*NV_PH1 + ll]; } //LogLikelyhood logModel = model / (thetaH0[0] + 1e-5); if (logModel>0 && data > 0) Div += 2 * (data*log(logModel + 1e-5) - model + thetaH0[0]); } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV_PH1); kernel_CalcLLRProp(Diag[2], thetaH1[2], Div, LR); for (kk = 0; kk<NV_PH1; kk++) d_Parameters[kk + (NV_PH1 + NV_PH0)*(BlockSize*bx + tx)] = thetaH1[kk]; for (kk = 0; kk<NV_PH0; kk++) d_Parameters[(NV_PH1 + kk) + (NV_PH1 + NV_PH0)*(BlockSize*bx + tx)] = thetaH0[kk]; for (kk = 0; kk<Q_P; kk++) d_LogLikelihood[kk + Q_P * (BlockSize*bx + tx)] = LR[kk]; for (kk = 0; kk<NV_PH1; kk++) d_CRLBs[kk + NV_PH1*(BlockSize*bx + tx)] = Diag[kk]; return; } //******************************************************************************************* __global__ void kernel_MLEFitSigma(float *d_data, float PSFSigma, int sz, int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood, int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. 
* \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ float M[NV_PSH1*NV_PSH1], Diag[NV_PSH1], Minv[NV_PSH1*NV_PSH1]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; float dudt[NV_PSH1]; float d2udt2[NV_PSH1]; float NR_Numerator[NV_PSH1], NR_Denominator[NV_PSH1]; float thetaH1[NV_PSH1]; float thetaH0[NV_PSH0]; float maxjump[NV_PSH1] = { 1e0f, 1e0f, 1e2f, 2e0f, 5e-1f }; float gamma[NV_PSH1] = { 1.0f, 1.0f, 0.5f, 1.0f, 1.0f }; float Nmax; float logModel; float LR[Q_PS]; //Prevent read/write past end of array if ((bx*BlockSize + tx) >= Nfits) return; memset(M, 0, NV_PSH1*NV_PSH1*sizeof(float)); memset(Minv, 0, NV_PSH1*NV_PSH1*sizeof(float)); //load data const float *s_data = d_data + (sz*sz*bx*BlockSize + sz*sz*tx); //initial values kernel_CenterofMass2D(sz, s_data, &thetaH1[0], &thetaH1[1], 0, 0); kernel_GaussFMaxMin2D(sz, PSFSigma, s_data, &Nmax, &thetaH1[3]); thetaH1[2] = max(0.1f, (Nmax - thetaH1[3]) * 2 * pi*PSFSigma*PSFSigma); thetaH1[4] = PSFSigma; for (kk = 0; kk<iterations; kk++) {//main iterative loop //initialize memset(NR_Numerator, 0, NV_PSH1*sizeof(float)); memset(NR_Denominator, 0, NV_PSH1*sizeof(float)); for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { PSFx = kernel_IntGauss1D(ii, thetaH1[0], thetaH1[4]); PSFy = kernel_IntGauss1D(jj, thetaH1[1], thetaH1[4]); model = thetaH1[3] + thetaH1[2] * PSFx*PSFy; data = s_data[sz*jj + ii]; //calculating derivatives kernel_DerivativeIntGaussPSF1D(ii, thetaH1[0], thetaH1[4], thetaH1[2], PSFy, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGaussPSF1D(jj, thetaH1[1], thetaH1[4], thetaH1[2], PSFx, &dudt[1], &d2udt2[1]); kernel_DerivativeIntGaussPSF2DSigma(ii, jj, thetaH1[0], thetaH1[1], thetaH1[4], thetaH1[2], PSFx, PSFy, &dudt[4], &d2udt2[4]); dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf = 0.0f; df = 0.0f; if (model>10e-3f) cf = data / model - 1; if (model>10e-3f) df = data / pow(model, 2); cf = min(cf, 10e4f); df = min(df, 10e4f); for (ll = 0; ll<NV_PSH1; ll++){ NR_Numerator[ll] += dudt[ll] * cf; NR_Denominator[ll] += d2udt2[ll] * cf - pow(dudt[ll], 2)*df; } } // The update if (kk<5) for (ll = 0; ll<NV_PSH1; ll++) thetaH1[ll] -= gamma[ll] * min(max(NR_Numerator[ll] / NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); else for (ll = 0; ll<NV_PSH1; ll++) thetaH1[ll] -= min(max(NR_Numerator[ll] / NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints thetaH1[2] = max(thetaH1[2], 1.0f); thetaH1[3] = max(thetaH1[3], 0.01f); thetaH1[4] = max(thetaH1[4], 0.5f); thetaH1[4] = min(thetaH1[4], sz / 2.0f); } //Estimate background model thetaH0[0] = 0.0; for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { thetaH0[0] += s_data[sz*jj + ii]; } thetaH0[0] = thetaH0[0] / pow((float)sz, 2); // Calculating the CRLB and LogLikelihood Div = 0.0f; for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { PSFx = kernel_IntGauss1D(ii, thetaH1[0], PSFSigma); PSFy = kernel_IntGauss1D(jj, thetaH1[1], PSFSigma); model = thetaH1[3] + thetaH1[2] * PSFx*PSFy; data = 
s_data[sz*jj + ii]; //calculating derivatives kernel_DerivativeIntGaussPSF1D(ii, thetaH1[0], thetaH1[4], thetaH1[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGaussPSF1D(jj, thetaH1[1], thetaH1[4], thetaH1[2], PSFx, &dudt[1], NULL); kernel_DerivativeIntGaussPSF2DSigma(ii, jj, thetaH1[0], thetaH1[1], thetaH1[4], thetaH1[2], PSFx, PSFy, &dudt[4], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk = 0; kk<NV_PSH1; kk++)for (ll = kk; ll<NV_PSH1; ll++){ M[kk*NV_PSH1 + ll] += dudt[ll] * dudt[kk] / model; M[ll*NV_PSH1 + kk] = M[kk*NV_PSH1 + ll]; } //LogLikelyhood logModel = model / (thetaH0[0] + 1e-5); if (logModel>0 && data > 0) Div += 2 * (data*log(logModel + 1e-5) - model + thetaH0[0]); } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV_PSH1); kernel_CalcLLRProp(Diag[2], thetaH1[2], Div, LR); for (kk = 0; kk<NV_PSH1; kk++) d_Parameters[kk + (NV_PSH1 + NV_PSH0)*(BlockSize*bx + tx)] = thetaH1[kk]; for (kk = 0; kk<NV_PSH0; kk++) d_Parameters[(NV_PSH1 + kk) + (NV_PSH1 + NV_PSH0)*(BlockSize*bx + tx)] = thetaH0[kk]; for (kk = 0; kk<Q_PS; kk++) d_LogLikelihood[kk + Q_PS * (BlockSize*bx + tx)] = LR[kk]; for (kk = 0; kk<NV_PSH1; kk++) d_CRLBs[kk + NV_PSH1*(BlockSize*bx + tx)] = Diag[kk]; return; } __global__ void kernel_MLEFit3D(const float *d_data, float PSFSigmax, float PSFSigmaz, int szXY, int szZ, int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood, int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //Dynamic allocation is slow. Maybe this will change overtime but right now keep it static! 
float M[NV_PH13*NV_PH13], Diag[NV_PH13], Minv[NV_PH13*NV_PH13]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int hh, ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx, PSFz; float *dudt = new float[NV_PH13 + szZ - 1]; float *d2udt2 = new float[NV_PH13 + szZ - 1]; float NR_Numerator[NV_PH13], NR_Denominator[NV_PH13]; float *thetaH1 = new float[NV_PH13 + szZ - 1]; float *thetaH0 = new float[NV_PH03+szZ-1]; // last value is used for all variables bigger then NV_PH13 float maxjump[NV_PH13] = { 1e0f, 1e0f, 1e0f, 1e2f, 2e0f }; float gamma[NV_PH13] = { 1.0f, 1.0f, 1.0f, 0.5f, 1.0f }; float LR[Q_P3]; float Nmax; float logModel; //Prevent read/write past end of array if ((bx*BlockSize + tx) >= Nfits) return; memset(M, 0, NV_PH13*NV_PH13*sizeof(float)); memset(Minv, 0, NV_PH13*NV_PH13*sizeof(float)); const float *s_data = d_data + (szXY*szXY*szZ*bx*BlockSize + szXY*szXY*szZ*tx); ////initial values kernel_CenterofMass3D(szXY, szZ, s_data, &thetaH1[0], &thetaH1[1], &thetaH1[2], 0, 0, 0); kernel_GaussFMaxMin3D(szXY, szZ, PSFSigmax, PSFSigmaz, s_data, &Nmax, &thetaH1[4]); thetaH1[3] = max(0.1f, (Nmax - thetaH1[4]) * pow(sqrt(2 * pi),3) * PSFSigmax * PSFSigmax * PSFSigmaz); for (kk = 0; kk<iterations; kk++) {//main iterative loop memset(NR_Numerator, 0, NV_PH13*sizeof(float)); memset(NR_Denominator, 0, NV_PH13*sizeof(float)); for (hh = 0; hh < szZ; hh++) for (ii = 0; ii < szXY; ii++) for (jj = 0; jj < szXY; jj++) { PSFx = kernel_IntGauss1D(ii, thetaH1[0], PSFSigmax); PSFy = kernel_IntGauss1D(jj, thetaH1[1], PSFSigmax); PSFz = kernel_IntGauss1D(hh, thetaH1[2], PSFSigmaz); model = thetaH1[4+hh] + thetaH1[3] * PSFx*PSFy*PSFz; data = s_data[hh*szXY*szXY +szXY*ii+jj]; //calculating derivatives kernel_DerivativeIntGaussPSF1D(ii, thetaH1[0], PSFSigmax, thetaH1[3], PSFy, PSFz, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGaussPSF1D(jj, thetaH1[1], PSFSigmax, thetaH1[3], PSFx, PSFz, &dudt[1], &d2udt2[1]); kernel_DerivativeIntGaussPSF1D(hh, thetaH1[2], PSFSigmaz, thetaH1[3], PSFx, PSFy, &dudt[2], &d2udt2[2]); dudt[3] = PSFx*PSFy*PSFz; d2udt2[3] = 0.0f; dudt[4+hh] = 1.0f; d2udt2[4+hh] = 0.0f; cf = 0.0f; df = 0.0f; if (model>10e-3f) cf = data / model - 1; if (model>10e-3f) df = data / pow(model, 2); cf = min(cf, 10e4f); df = min(df, 10e4f); for (ll = 0; ll<NV_PH13; ll++){ NR_Numerator[ll] += dudt[ll] * cf; NR_Denominator[ll] += d2udt2[ll] * cf - pow(dudt[ll], 2)*df; } } //// The update if (kk<3) for (ll = 0; ll<NV_PH13 + szZ - 1; ll++) thetaH1[ll] -= gamma[min(NV_PH13, ll)] * min(max(NR_Numerator[ll] / NR_Denominator[ll], -maxjump[min(NV_PH13, ll)]), maxjump[min(NV_PH13, ll)]); else for (ll = 0; ll<NV_PH13 + szZ - 1; ll++) thetaH1[ll] -= min(max(NR_Numerator[ll] / NR_Denominator[ll], -maxjump[min(NV_PH13, ll)]), maxjump[min(NV_PH13, ll)]); // Any other constraints thetaH1[3] = max(thetaH1[3], 1.0f); for (hh = 4; hh<NV_PH13 + szZ - 1; hh++) thetaH1[hh] = max(thetaH1[hh], 0.1f); } //Estimate background model thetaH0[0] = 0.0; for (hh = 0; hh < szZ; hh++){ for (ii = 0; ii < szXY; ii++) for (jj = 0; jj < szXY; jj++) { //TODO add oo estimation!! 
thetaH0[hh] += s_data[hh*szXY*szXY + szXY*ii + jj]; } thetaH0[hh] = thetaH0[hh] / pow((float)szXY, 2); } // Calculating the CRLB and LogLikelihood Div = 0.0; for (hh = 0; hh < szZ; hh++) for (ii = 0; ii<szXY; ii++) for (jj = 0; jj<szXY; jj++) { PSFx = kernel_IntGauss1D(ii, thetaH1[0], PSFSigmax); PSFy = kernel_IntGauss1D(jj, thetaH1[1], PSFSigmax); PSFz = kernel_IntGauss1D(hh, thetaH1[2], PSFSigmaz); model = thetaH1[4+hh] + max(thetaH1[3], thetaH1[4+hh])*PSFx*PSFy*PSFz; data = s_data[hh*szXY*szXY +szXY*ii+jj]; //calculating derivatives kernel_DerivativeIntGaussPSF1D(ii, thetaH1[0], PSFSigmax, thetaH1[3], PSFy, PSFz, &dudt[0], NULL); kernel_DerivativeIntGaussPSF1D(jj, thetaH1[1], PSFSigmax, thetaH1[3], PSFx, PSFz, &dudt[1], NULL); kernel_DerivativeIntGaussPSF1D(hh, thetaH1[2], PSFSigmaz, thetaH1[3], PSFx, PSFy, &dudt[2], NULL); dudt[3] = PSFx*PSFy*PSFz; dudt[4] = 1.0f; //Building the Fisher Information Matrix for (kk = 0; kk<(NV_PH13 + szZ - 1); kk++)for (ll = kk; ll<(NV_PH13 + szZ - 1); ll++){ M[kk*(NV_PH13 + szZ - 1) + ll] += dudt[ll] * dudt[kk] / model; M[ll*(NV_PH13 + szZ - 1) + kk] = M[kk*(NV_PH13 + szZ - 1) + ll]; } //LogLikelyhood logModel = model / (thetaH0[hh] + 1e-5); if (logModel>0 && data > 0) Div += 2 * (data*log(logModel + 1e-5) - model + thetaH0[hh]); } //// Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, (NV_PH13 + szZ - 1)); kernel_CalcLLRProp(Diag[3], thetaH1[3], Div, LR); for (kk = 0; kk<(NV_PH13 + szZ - 1); kk++) d_Parameters[kk + ((NV_PH13 + szZ - 1) + (NV_PH03 + szZ - 1))*(BlockSize*bx + tx)] = thetaH1[kk]; for (kk = 0; kk<(NV_PH03 + szZ - 1); kk++) d_Parameters[((NV_PH13 + szZ - 1) + kk) + ((NV_PH13 + szZ - 1) + (NV_PH03 + szZ - 1))*(BlockSize*bx + tx)] = thetaH0[kk]; for (kk = 0; kk<Q_P3; kk++) d_LogLikelihood[kk + Q_P3 * (BlockSize*bx + tx)] = LR[kk]; for (kk = 0; kk<(NV_PH13 + szZ - 1); kk++) d_CRLBs[kk + (NV_PH13 + szZ - 1)*(BlockSize*bx + tx)] = Diag[kk]; return; } __global__ void kernel_MLEFit3DSigma(const float *d_data, float PSFSigmax, float PSFSigmaz, int szXY, int szZ, int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood, int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //Dynamic allocation is slow. Maybe this will change overtime but right now keep it static! 
float M[NV_PH15*NV_PH15], Diag[NV_PH15], Minv[NV_PH15*NV_PH15]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int hh, ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx, PSFz; float dudt[NV_PH15]; float d2udt2[NV_PH15]; float NR_Numerator[NV_PH15], NR_Denominator[NV_PH15]; float thetaH1[NV_PH15]; float thetaH0[NV_PH05]; float maxjump[NV_PH15] = { 1e0f, 1e0f, 1e0f, 1e2f, 2e0f, 5e-1f, 5e-1f }; float gamma[NV_PH15] = { 1.0f, 1.0f, 1.0f, 0.5f, 1.0f, 1.0f, 1.0f }; float LR[Q_P5]; float Nmax; float logModel; //Prevent read/write past end of array if ((bx*BlockSize + tx) >= Nfits) return; memset(M, 0, NV_PH15*NV_PH15*sizeof(float)); memset(Minv, 0, NV_PH15*NV_PH15*sizeof(float)); const float *s_data = d_data + (szXY*szXY*szZ*bx*BlockSize + szXY*szXY*szZ*tx); ////initial values kernel_CenterofMass3D(szXY, szZ, s_data, &thetaH1[0], &thetaH1[1], &thetaH1[2], 0, 0, 0); kernel_GaussFMaxMin3D(szXY, szZ, PSFSigmax, PSFSigmaz, s_data, &Nmax, &thetaH1[4]); thetaH1[5] = PSFSigmax; thetaH1[6] = PSFSigmaz; thetaH1[3] = max(0.1f, (Nmax - thetaH1[4]) * pow(sqrt(2 * pi), 3) * PSFSigmax * PSFSigmax * PSFSigmaz); for (kk = 0; kk<iterations; kk++) {//main iterative loop memset(NR_Numerator, 0, NV_PH15*sizeof(float)); memset(NR_Denominator, 0, NV_PH15*sizeof(float)); for (hh = 0; hh < szZ; hh++) for (ii = 0; ii < szXY; ii++) for (jj = 0; jj < szXY; jj++) { PSFx = kernel_IntGauss1D(ii, thetaH1[0], thetaH1[5]); PSFy = kernel_IntGauss1D(jj, thetaH1[1], thetaH1[5]); PSFz = kernel_IntGauss1D(hh, thetaH1[2], thetaH1[6]); model = thetaH1[4] + thetaH1[3] * PSFx*PSFy*PSFz; data = s_data[hh*szXY*szXY + szXY*ii + jj]; //calculating derivatives kernel_DerivativeIntGaussPSF1D(ii, thetaH1[0], thetaH1[5], thetaH1[3], PSFy, PSFz, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGaussPSF1D(jj, thetaH1[1], thetaH1[5], thetaH1[3], PSFx, PSFz, &dudt[1], &d2udt2[1]); kernel_DerivativeIntGaussPSF1D(hh, thetaH1[2], thetaH1[6], thetaH1[3], PSFx, PSFy, &dudt[2], &d2udt2[2]); dudt[3] = PSFx*PSFy*PSFz; d2udt2[3] = 0.0f; dudt[4] = 1.0f; d2udt2[4] = 0.0f; kernel_DerivativeIntGaussPSF3DSigma(ii, jj, hh, thetaH1[0], thetaH1[1], thetaH1[2], thetaH1[5], thetaH1[6], thetaH1[3], PSFx, PSFy, PSFz, &dudt[5], &dudt[6], &d2udt2[5], &d2udt2[6]); cf = 0.0f; df = 0.0f; if (model>10e-3f) cf = data / model - 1; if (model>10e-3f) df = data / pow(model, 2); cf = min(cf, 10e4f); df = min(df, 10e4f); for (ll = 0; ll<NV_PH15; ll++){ NR_Numerator[ll] += dudt[ll] * cf; NR_Denominator[ll] += d2udt2[ll] * cf - pow(dudt[ll], 2)*df; } } // The update if (kk<3) for (ll = 0; ll<NV_PH15; ll++) thetaH1[ll] -= gamma[ll] * min(max(NR_Numerator[ll] / NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); else for (ll = 0; ll<NV_PH15; ll++) thetaH1[ll] -= min(max(NR_Numerator[ll] / NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints thetaH1[3] = max(thetaH1[3], 1.0f); thetaH1[4] = max(thetaH1[4], 0.1f); thetaH1[5] = max(thetaH1[5], 0.5f); thetaH1[5] = min(thetaH1[5], szXY / 2.0f); thetaH1[6] = max(thetaH1[6], 0.5f); thetaH1[6] = min(thetaH1[6], szZ / 2.0f); } //Estimate background model thetaH0[0] = 0.0; for (hh = 0; hh < szZ; hh++) for (ii = 0; ii<szXY; ii++) for (jj = 0; jj<szXY; jj++) { //TODO add oo estimation!! 
thetaH0[0] += s_data[hh*szXY*szXY + szXY*ii + jj]; } thetaH0[0] = thetaH0[0] / pow((float)szXY, 2) / szZ; // Calculating the CRLB and LogLikelihood Div = 0.0; for (hh = 0; hh < szZ; hh++) for (ii = 0; ii<szXY; ii++) for (jj = 0; jj<szXY; jj++) { PSFx = kernel_IntGauss1D(ii, thetaH1[0], thetaH1[5]); PSFy = kernel_IntGauss1D(jj, thetaH1[1], thetaH1[5]); PSFz = kernel_IntGauss1D(hh, thetaH1[2], thetaH1[6]); model = thetaH1[4] + max(thetaH1[3], thetaH1[4])*PSFx*PSFy*PSFz; data = s_data[hh*szXY*szXY + szXY*ii + jj]; //calculating derivatives kernel_DerivativeIntGaussPSF1D(ii, thetaH1[0], thetaH1[5], thetaH1[3], PSFy, PSFz, &dudt[0], NULL); kernel_DerivativeIntGaussPSF1D(jj, thetaH1[1], thetaH1[5], thetaH1[3], PSFx, PSFz, &dudt[1], NULL); kernel_DerivativeIntGaussPSF1D(hh, thetaH1[2], thetaH1[6], thetaH1[3], PSFx, PSFy, &dudt[2], NULL); dudt[3] = PSFx*PSFy*PSFz; dudt[4] = 1.0f; kernel_DerivativeIntGaussPSF3DSigma(ii, jj, kk, thetaH1[0], thetaH1[1], thetaH1[2], thetaH1[5], thetaH1[6], thetaH1[3], PSFx, PSFy, PSFz, &dudt[5], &dudt[6], NULL, NULL); //Building the Fisher Information Matrix for (kk = 0; kk<NV_PH15; kk++)for (ll = kk; ll<NV_PH15; ll++){ M[kk*NV_PH15 + ll] += dudt[ll] * dudt[kk] / model; M[ll*NV_PH15 + kk] = M[kk*NV_PH15 + ll]; } //LogLikelyhood logModel = model / (thetaH0[0] + 1e-5); if (logModel>0 && data > 0) Div += 2 * (data*log(logModel + 1e-5) - model + thetaH0[0]); } //// Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV_PH15); kernel_CalcLLRProp(Diag[3], thetaH1[3], Div, LR); for (kk = 0; kk<NV_PH15; kk++) d_Parameters[kk + (NV_PH15 + NV_PH05)*(BlockSize*bx + tx)] = thetaH1[kk]; for (kk = 0; kk<NV_PH05; kk++) d_Parameters[(NV_PH15 + kk) + (NV_PH15 + NV_PH05)*(BlockSize*bx + tx)] = thetaH0[kk]; for (kk = 0; kk<Q_P5; kk++) d_LogLikelihood[kk + Q_P5 * (BlockSize*bx + tx)] = LR[kk]; for (kk = 0; kk < NV_PH15; kk++) d_CRLBs[kk + NV_PH15*(BlockSize*bx + tx)] = Diag[kk]; return; } //******************************************************************************************* __global__ void kernel_MLEFitSigmaXX(const float *d_data, float PSFSigma, int sz, int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood, int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. 
* \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //__shared__ float s_data[MEM]; float M[NV_PS2H1*NV_PS2H1], Diag[NV_PS2H1], Minv[NV_PS2H1*NV_PS2H1]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; float dudt[NV_PS2H1]; float d2udt2[NV_PS2H1]; float NR_Numerator[NV_PS2H1], NR_Denominator[NV_PS2H1]; float thetaH1[NV_PS2H1]; float thetaH0[NV_PS2H0]; float maxjump[NV_PS2H1] = { 1e0f, 1e0f, 1e2f, 2e0f, 1e-1f, 1e-1f }; float g[NV_PS2H1] = { 1.0f, 1.0f, 0.5f, 1.0f, 1.0f, 1.0f }; float Nmax; float logModel; float LR[Q_PS2]; //Prevent read/write past end of array if ((bx*BlockSize + tx) >= Nfits) return; memset(M, 0, NV_PSH1*NV_PSH1*sizeof(float)); memset(Minv, 0, NV_PSH1*NV_PSH1*sizeof(float)); //load data const float *s_data = d_data + (sz*sz*bx*BlockSize + sz*sz*tx); //initial values kernel_CenterofMass2D(sz, s_data, &thetaH1[0], &thetaH1[1], 0, 0); kernel_GaussFMaxMin2D(sz, PSFSigma, s_data, &Nmax, &thetaH1[3]); thetaH1[2] = max(0.0f, (Nmax - thetaH1[3]) * 2 * pi*PSFSigma*PSFSigma); thetaH1[4] = PSFSigma; thetaH1[5] = PSFSigma; for (kk = 0; kk<iterations; kk++) {//main iterative loop //initialize memset(NR_Numerator, 0, NV_PS2H1*sizeof(float)); memset(NR_Denominator, 0, NV_PS2H1*sizeof(float)); for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { PSFx = kernel_IntGauss1D(ii, thetaH1[0], thetaH1[4]); PSFy = kernel_IntGauss1D(jj, thetaH1[1], thetaH1[5]); model = thetaH1[3] + thetaH1[2] * PSFx*PSFy; data = s_data[sz*jj + ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, thetaH1[0], thetaH1[4], thetaH1[2], PSFy, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGauss1D(jj, thetaH1[1], thetaH1[5], thetaH1[2], PSFx, &dudt[1], &d2udt2[1]); kernel_DerivativeIntGauss1DSigma(ii, thetaH1[0], thetaH1[4], thetaH1[2], PSFy, &dudt[4], &d2udt2[4]); kernel_DerivativeIntGauss1DSigma(jj, thetaH1[1], thetaH1[5], thetaH1[2], PSFx, &dudt[5], &d2udt2[5]); dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf = 0.0f; df = 0.0f; if (model>10e-3f) cf = data / model - 1; if (model>10e-3f) df = data / pow(model, 2); cf = min(cf, 10e4f); df = min(df, 10e4f); for (ll = 0; ll<NV_PS2H1; ll++){ NR_Numerator[ll] += dudt[ll] * cf; NR_Denominator[ll] += d2udt2[ll] * cf - pow(dudt[ll], 2)*df; } } // The update for (ll = 0; ll<NV_PS2H1; ll++) thetaH1[ll] -= g[ll] * min(max(NR_Numerator[ll] / NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints thetaH1[2] = max(thetaH1[2], 1.0f); thetaH1[3] = max(thetaH1[3], 0.01f); thetaH1[4] = max(thetaH1[4], PSFSigma / 10.0f); thetaH1[5] = max(thetaH1[5], PSFSigma / 10.0f); } thetaH0[0] = 0.0; for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { thetaH0[0] += s_data[sz*jj + ii]; } thetaH0[0] = thetaH0[0] / pow((float)sz, 2); // Calculating the CRLB and LogLikelihood Div = 0.0f; for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { PSFx = kernel_IntGauss1D(ii, thetaH1[0], thetaH1[4]); PSFy = kernel_IntGauss1D(jj, thetaH1[1], thetaH1[5]); model = thetaH1[3] + thetaH1[2] * PSFx*PSFy; data 
= s_data[sz*jj + ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, thetaH1[0], thetaH1[4], thetaH1[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGauss1D(jj, thetaH1[1], thetaH1[5], thetaH1[2], PSFx, &dudt[1], NULL); kernel_DerivativeIntGauss1DSigma(ii, thetaH1[0], thetaH1[4], thetaH1[2], PSFy, &dudt[4], NULL); kernel_DerivativeIntGauss1DSigma(jj, thetaH1[1], thetaH1[5], thetaH1[2], PSFx, &dudt[5], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk = 0; kk<NV_PS2H1; kk++)for (ll = kk; ll<NV_PS2H1; ll++){ M[kk*NV_PS2H1 + ll] += dudt[ll] * dudt[kk] / model; M[ll*NV_PS2H1 + kk] = M[kk*NV_PS2H1 + ll]; } //LogLikelyhood logModel = model / (thetaH0[0] + 1e-5); if (logModel>0 && data > 0) Div += 2 * (data*log(logModel + 1e-5) - model + thetaH0[0]); } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV_PS2H1); kernel_CalcLLRProp(Diag[2], thetaH1[2], Div, LR); //write to global arrays //for (kk = 0; kk<NV_PS2H1; kk++) // d_Parameters[Nfits*kk + (NV_PSH1 + NV_PSH0)*BlockSize*bx + tx] = thetaH1[kk]; //for (kk = 0; kk<NV_PS2H0; kk++) // d_Parameters[Nfits*(NV_PS2H1 + kk) + BlockSize*bx + tx] = thetaH0[kk]; //for (kk = 0; kk<Q_PS2; kk++) // d_LogLikelihood[Nfits*kk + BlockSize*bx + tx] = LR[kk]; //for (kk = 0; kk<NV_PS2H1; kk++) // d_CRLBs[Nfits*kk + BlockSize*bx + tx] = Diag[kk]; for (kk = 0; kk<NV_PS2H1; kk++) d_Parameters[kk + (NV_PS2H1 + NV_PS2H0)*(BlockSize*bx + tx)] = thetaH1[kk]; for (kk = 0; kk<NV_PS2H0; kk++) d_Parameters[(NV_PS2H1 + kk) + (NV_PS2H1 + NV_PS2H0)*(BlockSize*bx + tx)] = thetaH0[kk]; for (kk = 0; kk<Q_PS2; kk++) d_LogLikelihood[kk + Q_PS2 * (BlockSize*bx + tx)] = LR[kk]; for (kk = 0; kk<NV_PS2H1; kk++) d_CRLBs[kk + NV_PS2H1*(BlockSize*bx + tx)] = Diag[kk]; return; }
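For reference, the quantities computed by the fitting kernel above can be written compactly as follows. This is simply a reading of the code: theta = {x, y, N, bg, sigma_x, sigma_y} is the parameter vector, mu_k the model value at pixel k, x_k the measured count, and g_i and maxjump_i the per-parameter step scale and clamp used in the update loop (small regularization constants such as the 1e-5 terms are omitted):

\[
N_i = \sum_k \frac{\partial \mu_k}{\partial \theta_i}\left(\frac{x_k}{\mu_k}-1\right),\qquad
D_i = \sum_k \frac{\partial^2 \mu_k}{\partial \theta_i^2}\left(\frac{x_k}{\mu_k}-1\right)-\left(\frac{\partial \mu_k}{\partial \theta_i}\right)^{2}\frac{x_k}{\mu_k^2},\qquad
\theta_i \leftarrow \theta_i - g_i\,\mathrm{clamp}\!\left(\frac{N_i}{D_i},\,\pm\mathrm{maxjump}_i\right)
\]
\[
I_{ij} = \sum_k \frac{1}{\mu_k}\,\frac{\partial \mu_k}{\partial \theta_i}\,\frac{\partial \mu_k}{\partial \theta_j},\qquad
\mathrm{CRLB}_i = \left[I^{-1}\right]_{ii},\qquad
\mathrm{Div} = 2\sum_k\left(x_k\,\ln\frac{\mu_k}{\mu_0}-\mu_k+\mu_0\right)
\]

Here mu_0 = thetaH0[0] is the flat-background H0 model estimated as the mean pixel value, and Div is passed to kernel_CalcLLRProp together with the photon-count estimate and its CRLB.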
3b7eedb195417cd411f0fcaadb1d9c16f92d1ee6.cu
#include <cuda_runtime.h> #include "definitions.h" #include <stdio.h> #include <stdlib.h> #include <string.h> #include "MatrixLib.cuh" #include "GPUgaussLib.cuh" //******************************************************************************************* //theta is: {N,bg} __global__ void kernel_MLERatio(const float *d_data, float PSFSigma, int sz, int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood, int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ float M[NV_RH1*NV_RH1], Diag[NV_RH1], Minv[NV_RH1*NV_RH1]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; float dudt[NV_RH1]; float d2udt2[NV_RH1]; float NR_Numerator[NV_RH1], NR_Denominator[NV_RH1]; float thetaH1[NV_RH1]; float thetaH0[NV_RH0]; //float maxjump[NV_RH1] = { 1e2f, 2e0f }; //float gamma[NV_RH1] = { 0.5f, 1.0f }; float Nmax; float logModel; float LR[Q_R]; //Prevent read/write past end of array if ((bx*BlockSize + tx) >= Nfits) return; memset(M, 0, NV_RH1*NV_RH1*sizeof(float)); memset(Minv, 0, NV_RH1*NV_RH1*sizeof(float)); //load data const float *s_data = d_data + (sz*sz*bx*BlockSize + sz*sz*tx); kernel_GaussFMaxMin2D(sz, PSFSigma, s_data, &Nmax, &thetaH1[1]); thetaH1[0] = max(0.1f, (Nmax - thetaH1[1]) * 4 * pi*PSFSigma*PSFSigma); for (kk = 0; kk<iterations; kk++) {//main iterative loop //initialize memset(NR_Numerator, 0, NV_RH1*sizeof(float)); memset(NR_Denominator, 0, NV_RH1*sizeof(float)); for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { PSFx = kernel_IntGauss1D(ii, (sz-1) / 2.0, PSFSigma); PSFy = kernel_IntGauss1D(jj, (sz-1) / 2.0, PSFSigma); model = thetaH1[1] + thetaH1[0] * PSFx*PSFy; data = s_data[sz*jj + ii]; //calculating derivatives dudt[0] = PSFx*PSFy; d2udt2[0] = 0.0f; dudt[1] = 1.0f; d2udt2[1] = 0.0f; cf = 0.0f; df = 0.0f; if (model>10e-3f) cf = data / model - 1; if (model>10e-3f) df = data / pow(model, 2); cf = min(cf, 10e4f); df = min(df, 10e4f); for (ll = 0; ll<NV_RH1; ll++){ NR_Numerator[ll] += dudt[ll] * cf; NR_Denominator[ll] += d2udt2[ll] * cf - pow(dudt[ll], 2)*df; } } // Any other constraints thetaH1[0] -= min(max(NR_Numerator[0] / NR_Denominator[0] / 2.0, -thetaH1[0]), thetaH1[0] / 2.0); thetaH1[0] = max(thetaH1[0], Nmax/2.0f); thetaH1[1] -= NR_Numerator[1] / NR_Denominator[1]; thetaH1[1] = max(thetaH1[1], 0.01f); } // ML estimate of background model thetaH0[0] = 0.0; for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { thetaH0[0] += s_data[sz*jj + ii]; } thetaH0[0] = thetaH0[0] / pow((float)sz, 2); // Calculating the CRLB and LogLikelihoodRatio Div = 0.0; for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { PSFx = kernel_IntGauss1D(ii, (sz - 1) / 2.0, PSFSigma); PSFy = kernel_IntGauss1D(jj, (sz - 1) / 2.0, PSFSigma); model = thetaH1[1] + thetaH1[0] * PSFx*PSFy; data = s_data[sz*jj + ii]; //calculating derivatives dudt[0] = PSFx*PSFy; dudt[1] = 1.0f; //Building the Fisher Information Matrix for (kk = 0; kk<NV_RH1; kk++)for (ll = 
kk; ll<NV_RH1; ll++){ M[kk*NV_RH1 + ll] += dudt[ll] * dudt[kk] / model; M[ll*NV_RH1 + kk] = M[kk*NV_RH1 + ll]; } //LogLikelyhood logModel = model / (thetaH0[0] + 1e-5); if (logModel>0 && data > 0) Div += 2 * (data*log(logModel + 1e-5) - model + thetaH0[0]); } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV_RH1); kernel_CalcLLRProp(Diag[0], thetaH1[0], Div, LR); //write to global arrays for (kk = 0; kk<NV_RH1; kk++) d_Parameters[kk + (NV_RH1 + NV_RH0)*(BlockSize*bx + tx)] = thetaH1[kk]; for (kk = 0; kk<NV_RH0; kk++) d_Parameters[(NV_RH1 + kk) + (NV_RH1 + NV_RH0)*(BlockSize*bx + tx)] = thetaH0[kk]; for (kk = 0; kk<Q_R; kk++) d_LogLikelihood[kk + Q_R * (BlockSize*bx + tx)] = LR[kk]; for (kk = 0; kk<NV_RH1; kk++) d_CRLBs[kk + NV_RH1*(BlockSize*bx + tx)] = Diag[kk]; return; } //******************************************************************************************* //theta is: {x,y,N,bg} __global__ void kernel_MLEFit(const float *d_data, float PSFSigma, int sz, int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood, int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ float M[NV_PH1*NV_PH1], Diag[NV_PH1], Minv[NV_PH1*NV_PH1]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; //int NV = NV_PH1; float dudt[NV_PH1]; float d2udt2[NV_PH1]; float NR_Numerator[NV_PH1], NR_Denominator[NV_PH1]; float thetaH1[NV_PH1]; float thetaH0[NV_PH0]; float maxjump[NV_PH1] = { 1e0f, 1e0f, 1e2f, 2e0f }; float gamma[NV_PH1] = { 1.0f, 1.0f, 0.5f, 1.0f }; float LR[Q_P]; float Nmax; float logModel; //Prevent read/write past end of array if ((bx*BlockSize + tx) >= Nfits) return; memset(M, 0, NV_PH1*NV_PH1*sizeof(float)); memset(Minv, 0, NV_PH1*NV_PH1*sizeof(float)); const float *s_data = d_data + (sz*sz*bx*BlockSize + sz*sz*tx); //initial values kernel_CenterofMass2D(sz, s_data, &thetaH1[0], &thetaH1[1], 0, 0); kernel_GaussFMaxMin2D(sz, PSFSigma, s_data, &Nmax, &thetaH1[3]); thetaH1[2] = max(0.1f, (Nmax - thetaH1[3]) * 2 * pi*PSFSigma*PSFSigma); for (kk = 0; kk<iterations; kk++) {//main iterative loop memset(NR_Numerator, 0, NV_PH1*sizeof(float)); memset(NR_Denominator, 0, NV_PH1*sizeof(float)); for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { PSFx = kernel_IntGauss1D(ii, thetaH1[0], PSFSigma); PSFy = kernel_IntGauss1D(jj, thetaH1[1], PSFSigma); model = thetaH1[3] + thetaH1[2] * PSFx*PSFy; data = s_data[sz*jj + ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, thetaH1[0], PSFSigma, thetaH1[2], PSFy, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGauss1D(jj, thetaH1[1], PSFSigma, thetaH1[2], PSFx, &dudt[1], &d2udt2[1]); dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf = 0.0f; df = 0.0f; if (model>10e-3f) cf = data / model - 1; if (model>10e-3f) df = data / pow(model, 2); cf = min(cf, 10e4f); df = min(df, 10e4f); for (ll = 0; ll<NV_PH1; ll++){ NR_Numerator[ll] += dudt[ll] * cf; 
NR_Denominator[ll] += d2udt2[ll] * cf - pow(dudt[ll], 2)*df; } } // The update if (kk<2) for (ll = 0; ll<NV_PH1; ll++) thetaH1[ll] -= gamma[ll] * min(max(NR_Numerator[ll] / NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); else for (ll = 0; ll<NV_PH1; ll++) thetaH1[ll] -= min(max(NR_Numerator[ll] / NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints thetaH1[2] = max(thetaH1[2], 1.0f); thetaH1[3] = max(thetaH1[3], 0.01f); } //Estimate background model thetaH0[0] = 0.0; for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { thetaH0[0] += s_data[sz*jj + ii]; } thetaH0[0] = thetaH0[0] / pow((float)sz, 2); // Calculating the CRLB and LogLikelihood Div = 0.0; for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { PSFx = kernel_IntGauss1D(ii, thetaH1[0], PSFSigma); PSFy = kernel_IntGauss1D(jj, thetaH1[1], PSFSigma); model = thetaH1[3] + max(thetaH1[2], thetaH1[3])*PSFx*PSFy; data = s_data[sz*jj + ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, thetaH1[0], PSFSigma, thetaH1[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGauss1D(jj, thetaH1[1], PSFSigma, thetaH1[2], PSFx, &dudt[1], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk = 0; kk<NV_PH1; kk++)for (ll = kk; ll<NV_PH1; ll++){ M[kk*NV_PH1 + ll] += dudt[ll] * dudt[kk] / model; M[ll*NV_PH1 + kk] = M[kk*NV_PH1 + ll]; } //LogLikelyhood logModel = model / (thetaH0[0] + 1e-5); if (logModel>0 && data > 0) Div += 2 * (data*log(logModel + 1e-5) - model + thetaH0[0]); } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV_PH1); kernel_CalcLLRProp(Diag[2], thetaH1[2], Div, LR); for (kk = 0; kk<NV_PH1; kk++) d_Parameters[kk + (NV_PH1 + NV_PH0)*(BlockSize*bx + tx)] = thetaH1[kk]; for (kk = 0; kk<NV_PH0; kk++) d_Parameters[(NV_PH1 + kk) + (NV_PH1 + NV_PH0)*(BlockSize*bx + tx)] = thetaH0[kk]; for (kk = 0; kk<Q_P; kk++) d_LogLikelihood[kk + Q_P * (BlockSize*bx + tx)] = LR[kk]; for (kk = 0; kk<NV_PH1; kk++) d_CRLBs[kk + NV_PH1*(BlockSize*bx + tx)] = Diag[kk]; return; } //******************************************************************************************* __global__ void kernel_MLEFitSigma(float *d_data, float PSFSigma, int sz, int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood, int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. 
* \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ float M[NV_PSH1*NV_PSH1], Diag[NV_PSH1], Minv[NV_PSH1*NV_PSH1]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; float dudt[NV_PSH1]; float d2udt2[NV_PSH1]; float NR_Numerator[NV_PSH1], NR_Denominator[NV_PSH1]; float thetaH1[NV_PSH1]; float thetaH0[NV_PSH0]; float maxjump[NV_PSH1] = { 1e0f, 1e0f, 1e2f, 2e0f, 5e-1f }; float gamma[NV_PSH1] = { 1.0f, 1.0f, 0.5f, 1.0f, 1.0f }; float Nmax; float logModel; float LR[Q_PS]; //Prevent read/write past end of array if ((bx*BlockSize + tx) >= Nfits) return; memset(M, 0, NV_PSH1*NV_PSH1*sizeof(float)); memset(Minv, 0, NV_PSH1*NV_PSH1*sizeof(float)); //load data const float *s_data = d_data + (sz*sz*bx*BlockSize + sz*sz*tx); //initial values kernel_CenterofMass2D(sz, s_data, &thetaH1[0], &thetaH1[1], 0, 0); kernel_GaussFMaxMin2D(sz, PSFSigma, s_data, &Nmax, &thetaH1[3]); thetaH1[2] = max(0.1f, (Nmax - thetaH1[3]) * 2 * pi*PSFSigma*PSFSigma); thetaH1[4] = PSFSigma; for (kk = 0; kk<iterations; kk++) {//main iterative loop //initialize memset(NR_Numerator, 0, NV_PSH1*sizeof(float)); memset(NR_Denominator, 0, NV_PSH1*sizeof(float)); for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { PSFx = kernel_IntGauss1D(ii, thetaH1[0], thetaH1[4]); PSFy = kernel_IntGauss1D(jj, thetaH1[1], thetaH1[4]); model = thetaH1[3] + thetaH1[2] * PSFx*PSFy; data = s_data[sz*jj + ii]; //calculating derivatives kernel_DerivativeIntGaussPSF1D(ii, thetaH1[0], thetaH1[4], thetaH1[2], PSFy, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGaussPSF1D(jj, thetaH1[1], thetaH1[4], thetaH1[2], PSFx, &dudt[1], &d2udt2[1]); kernel_DerivativeIntGaussPSF2DSigma(ii, jj, thetaH1[0], thetaH1[1], thetaH1[4], thetaH1[2], PSFx, PSFy, &dudt[4], &d2udt2[4]); dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf = 0.0f; df = 0.0f; if (model>10e-3f) cf = data / model - 1; if (model>10e-3f) df = data / pow(model, 2); cf = min(cf, 10e4f); df = min(df, 10e4f); for (ll = 0; ll<NV_PSH1; ll++){ NR_Numerator[ll] += dudt[ll] * cf; NR_Denominator[ll] += d2udt2[ll] * cf - pow(dudt[ll], 2)*df; } } // The update if (kk<5) for (ll = 0; ll<NV_PSH1; ll++) thetaH1[ll] -= gamma[ll] * min(max(NR_Numerator[ll] / NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); else for (ll = 0; ll<NV_PSH1; ll++) thetaH1[ll] -= min(max(NR_Numerator[ll] / NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints thetaH1[2] = max(thetaH1[2], 1.0f); thetaH1[3] = max(thetaH1[3], 0.01f); thetaH1[4] = max(thetaH1[4], 0.5f); thetaH1[4] = min(thetaH1[4], sz / 2.0f); } //Estimate background model thetaH0[0] = 0.0; for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { thetaH0[0] += s_data[sz*jj + ii]; } thetaH0[0] = thetaH0[0] / pow((float)sz, 2); // Calculating the CRLB and LogLikelihood Div = 0.0f; for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { PSFx = kernel_IntGauss1D(ii, thetaH1[0], PSFSigma); PSFy = kernel_IntGauss1D(jj, thetaH1[1], PSFSigma); model = thetaH1[3] + thetaH1[2] * PSFx*PSFy; data = 
s_data[sz*jj + ii]; //calculating derivatives kernel_DerivativeIntGaussPSF1D(ii, thetaH1[0], thetaH1[4], thetaH1[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGaussPSF1D(jj, thetaH1[1], thetaH1[4], thetaH1[2], PSFx, &dudt[1], NULL); kernel_DerivativeIntGaussPSF2DSigma(ii, jj, thetaH1[0], thetaH1[1], thetaH1[4], thetaH1[2], PSFx, PSFy, &dudt[4], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk = 0; kk<NV_PSH1; kk++)for (ll = kk; ll<NV_PSH1; ll++){ M[kk*NV_PSH1 + ll] += dudt[ll] * dudt[kk] / model; M[ll*NV_PSH1 + kk] = M[kk*NV_PSH1 + ll]; } //LogLikelyhood logModel = model / (thetaH0[0] + 1e-5); if (logModel>0 && data > 0) Div += 2 * (data*log(logModel + 1e-5) - model + thetaH0[0]); } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV_PSH1); kernel_CalcLLRProp(Diag[2], thetaH1[2], Div, LR); for (kk = 0; kk<NV_PSH1; kk++) d_Parameters[kk + (NV_PSH1 + NV_PSH0)*(BlockSize*bx + tx)] = thetaH1[kk]; for (kk = 0; kk<NV_PSH0; kk++) d_Parameters[(NV_PSH1 + kk) + (NV_PSH1 + NV_PSH0)*(BlockSize*bx + tx)] = thetaH0[kk]; for (kk = 0; kk<Q_PS; kk++) d_LogLikelihood[kk + Q_PS * (BlockSize*bx + tx)] = LR[kk]; for (kk = 0; kk<NV_PSH1; kk++) d_CRLBs[kk + NV_PSH1*(BlockSize*bx + tx)] = Diag[kk]; return; } __global__ void kernel_MLEFit3D(const float *d_data, float PSFSigmax, float PSFSigmaz, int szXY, int szZ, int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood, int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //Dynamic allocation is slow. Maybe this will change overtime but right now keep it static! 
float M[NV_PH13*NV_PH13], Diag[NV_PH13], Minv[NV_PH13*NV_PH13]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int hh, ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx, PSFz; float *dudt = new float[NV_PH13 + szZ - 1]; float *d2udt2 = new float[NV_PH13 + szZ - 1]; float NR_Numerator[NV_PH13], NR_Denominator[NV_PH13]; float *thetaH1 = new float[NV_PH13 + szZ - 1]; float *thetaH0 = new float[NV_PH03+szZ-1]; // last value is used for all variables bigger then NV_PH13 float maxjump[NV_PH13] = { 1e0f, 1e0f, 1e0f, 1e2f, 2e0f }; float gamma[NV_PH13] = { 1.0f, 1.0f, 1.0f, 0.5f, 1.0f }; float LR[Q_P3]; float Nmax; float logModel; //Prevent read/write past end of array if ((bx*BlockSize + tx) >= Nfits) return; memset(M, 0, NV_PH13*NV_PH13*sizeof(float)); memset(Minv, 0, NV_PH13*NV_PH13*sizeof(float)); const float *s_data = d_data + (szXY*szXY*szZ*bx*BlockSize + szXY*szXY*szZ*tx); ////initial values kernel_CenterofMass3D(szXY, szZ, s_data, &thetaH1[0], &thetaH1[1], &thetaH1[2], 0, 0, 0); kernel_GaussFMaxMin3D(szXY, szZ, PSFSigmax, PSFSigmaz, s_data, &Nmax, &thetaH1[4]); thetaH1[3] = max(0.1f, (Nmax - thetaH1[4]) * pow(sqrt(2 * pi),3) * PSFSigmax * PSFSigmax * PSFSigmaz); for (kk = 0; kk<iterations; kk++) {//main iterative loop memset(NR_Numerator, 0, NV_PH13*sizeof(float)); memset(NR_Denominator, 0, NV_PH13*sizeof(float)); for (hh = 0; hh < szZ; hh++) for (ii = 0; ii < szXY; ii++) for (jj = 0; jj < szXY; jj++) { PSFx = kernel_IntGauss1D(ii, thetaH1[0], PSFSigmax); PSFy = kernel_IntGauss1D(jj, thetaH1[1], PSFSigmax); PSFz = kernel_IntGauss1D(hh, thetaH1[2], PSFSigmaz); model = thetaH1[4+hh] + thetaH1[3] * PSFx*PSFy*PSFz; data = s_data[hh*szXY*szXY +szXY*ii+jj]; //calculating derivatives kernel_DerivativeIntGaussPSF1D(ii, thetaH1[0], PSFSigmax, thetaH1[3], PSFy, PSFz, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGaussPSF1D(jj, thetaH1[1], PSFSigmax, thetaH1[3], PSFx, PSFz, &dudt[1], &d2udt2[1]); kernel_DerivativeIntGaussPSF1D(hh, thetaH1[2], PSFSigmaz, thetaH1[3], PSFx, PSFy, &dudt[2], &d2udt2[2]); dudt[3] = PSFx*PSFy*PSFz; d2udt2[3] = 0.0f; dudt[4+hh] = 1.0f; d2udt2[4+hh] = 0.0f; cf = 0.0f; df = 0.0f; if (model>10e-3f) cf = data / model - 1; if (model>10e-3f) df = data / pow(model, 2); cf = min(cf, 10e4f); df = min(df, 10e4f); for (ll = 0; ll<NV_PH13; ll++){ NR_Numerator[ll] += dudt[ll] * cf; NR_Denominator[ll] += d2udt2[ll] * cf - pow(dudt[ll], 2)*df; } } //// The update if (kk<3) for (ll = 0; ll<NV_PH13 + szZ - 1; ll++) thetaH1[ll] -= gamma[min(NV_PH13, ll)] * min(max(NR_Numerator[ll] / NR_Denominator[ll], -maxjump[min(NV_PH13, ll)]), maxjump[min(NV_PH13, ll)]); else for (ll = 0; ll<NV_PH13 + szZ - 1; ll++) thetaH1[ll] -= min(max(NR_Numerator[ll] / NR_Denominator[ll], -maxjump[min(NV_PH13, ll)]), maxjump[min(NV_PH13, ll)]); // Any other constraints thetaH1[3] = max(thetaH1[3], 1.0f); for (hh = 4; hh<NV_PH13 + szZ - 1; hh++) thetaH1[hh] = max(thetaH1[hh], 0.1f); } //Estimate background model thetaH0[0] = 0.0; for (hh = 0; hh < szZ; hh++){ for (ii = 0; ii < szXY; ii++) for (jj = 0; jj < szXY; jj++) { //TODO add oo estimation!! 
thetaH0[hh] += s_data[hh*szXY*szXY + szXY*ii + jj]; } thetaH0[hh] = thetaH0[hh] / pow((float)szXY, 2); } // Calculating the CRLB and LogLikelihood Div = 0.0; for (hh = 0; hh < szZ; hh++) for (ii = 0; ii<szXY; ii++) for (jj = 0; jj<szXY; jj++) { PSFx = kernel_IntGauss1D(ii, thetaH1[0], PSFSigmax); PSFy = kernel_IntGauss1D(jj, thetaH1[1], PSFSigmax); PSFz = kernel_IntGauss1D(hh, thetaH1[2], PSFSigmaz); model = thetaH1[4+hh] + max(thetaH1[3], thetaH1[4+hh])*PSFx*PSFy*PSFz; data = s_data[hh*szXY*szXY +szXY*ii+jj]; //calculating derivatives kernel_DerivativeIntGaussPSF1D(ii, thetaH1[0], PSFSigmax, thetaH1[3], PSFy, PSFz, &dudt[0], NULL); kernel_DerivativeIntGaussPSF1D(jj, thetaH1[1], PSFSigmax, thetaH1[3], PSFx, PSFz, &dudt[1], NULL); kernel_DerivativeIntGaussPSF1D(hh, thetaH1[2], PSFSigmaz, thetaH1[3], PSFx, PSFy, &dudt[2], NULL); dudt[3] = PSFx*PSFy*PSFz; dudt[4] = 1.0f; //Building the Fisher Information Matrix for (kk = 0; kk<(NV_PH13 + szZ - 1); kk++)for (ll = kk; ll<(NV_PH13 + szZ - 1); ll++){ M[kk*(NV_PH13 + szZ - 1) + ll] += dudt[ll] * dudt[kk] / model; M[ll*(NV_PH13 + szZ - 1) + kk] = M[kk*(NV_PH13 + szZ - 1) + ll]; } //LogLikelyhood logModel = model / (thetaH0[hh] + 1e-5); if (logModel>0 && data > 0) Div += 2 * (data*log(logModel + 1e-5) - model + thetaH0[hh]); } //// Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, (NV_PH13 + szZ - 1)); kernel_CalcLLRProp(Diag[3], thetaH1[3], Div, LR); for (kk = 0; kk<(NV_PH13 + szZ - 1); kk++) d_Parameters[kk + ((NV_PH13 + szZ - 1) + (NV_PH03 + szZ - 1))*(BlockSize*bx + tx)] = thetaH1[kk]; for (kk = 0; kk<(NV_PH03 + szZ - 1); kk++) d_Parameters[((NV_PH13 + szZ - 1) + kk) + ((NV_PH13 + szZ - 1) + (NV_PH03 + szZ - 1))*(BlockSize*bx + tx)] = thetaH0[kk]; for (kk = 0; kk<Q_P3; kk++) d_LogLikelihood[kk + Q_P3 * (BlockSize*bx + tx)] = LR[kk]; for (kk = 0; kk<(NV_PH13 + szZ - 1); kk++) d_CRLBs[kk + (NV_PH13 + szZ - 1)*(BlockSize*bx + tx)] = Diag[kk]; return; } __global__ void kernel_MLEFit3DSigma(const float *d_data, float PSFSigmax, float PSFSigmaz, int szXY, int szZ, int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood, int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. * \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //Dynamic allocation is slow. Maybe this will change overtime but right now keep it static! 
float M[NV_PH15*NV_PH15], Diag[NV_PH15], Minv[NV_PH15*NV_PH15]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int hh, ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx, PSFz; float dudt[NV_PH15]; float d2udt2[NV_PH15]; float NR_Numerator[NV_PH15], NR_Denominator[NV_PH15]; float thetaH1[NV_PH15]; float thetaH0[NV_PH05]; float maxjump[NV_PH15] = { 1e0f, 1e0f, 1e0f, 1e2f, 2e0f, 5e-1f, 5e-1f }; float gamma[NV_PH15] = { 1.0f, 1.0f, 1.0f, 0.5f, 1.0f, 1.0f, 1.0f }; float LR[Q_P5]; float Nmax; float logModel; //Prevent read/write past end of array if ((bx*BlockSize + tx) >= Nfits) return; memset(M, 0, NV_PH15*NV_PH15*sizeof(float)); memset(Minv, 0, NV_PH15*NV_PH15*sizeof(float)); const float *s_data = d_data + (szXY*szXY*szZ*bx*BlockSize + szXY*szXY*szZ*tx); ////initial values kernel_CenterofMass3D(szXY, szZ, s_data, &thetaH1[0], &thetaH1[1], &thetaH1[2], 0, 0, 0); kernel_GaussFMaxMin3D(szXY, szZ, PSFSigmax, PSFSigmaz, s_data, &Nmax, &thetaH1[4]); thetaH1[5] = PSFSigmax; thetaH1[6] = PSFSigmaz; thetaH1[3] = max(0.1f, (Nmax - thetaH1[4]) * pow(sqrt(2 * pi), 3) * PSFSigmax * PSFSigmax * PSFSigmaz); for (kk = 0; kk<iterations; kk++) {//main iterative loop memset(NR_Numerator, 0, NV_PH15*sizeof(float)); memset(NR_Denominator, 0, NV_PH15*sizeof(float)); for (hh = 0; hh < szZ; hh++) for (ii = 0; ii < szXY; ii++) for (jj = 0; jj < szXY; jj++) { PSFx = kernel_IntGauss1D(ii, thetaH1[0], thetaH1[5]); PSFy = kernel_IntGauss1D(jj, thetaH1[1], thetaH1[5]); PSFz = kernel_IntGauss1D(hh, thetaH1[2], thetaH1[6]); model = thetaH1[4] + thetaH1[3] * PSFx*PSFy*PSFz; data = s_data[hh*szXY*szXY + szXY*ii + jj]; //calculating derivatives kernel_DerivativeIntGaussPSF1D(ii, thetaH1[0], thetaH1[5], thetaH1[3], PSFy, PSFz, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGaussPSF1D(jj, thetaH1[1], thetaH1[5], thetaH1[3], PSFx, PSFz, &dudt[1], &d2udt2[1]); kernel_DerivativeIntGaussPSF1D(hh, thetaH1[2], thetaH1[6], thetaH1[3], PSFx, PSFy, &dudt[2], &d2udt2[2]); dudt[3] = PSFx*PSFy*PSFz; d2udt2[3] = 0.0f; dudt[4] = 1.0f; d2udt2[4] = 0.0f; kernel_DerivativeIntGaussPSF3DSigma(ii, jj, hh, thetaH1[0], thetaH1[1], thetaH1[2], thetaH1[5], thetaH1[6], thetaH1[3], PSFx, PSFy, PSFz, &dudt[5], &dudt[6], &d2udt2[5], &d2udt2[6]); cf = 0.0f; df = 0.0f; if (model>10e-3f) cf = data / model - 1; if (model>10e-3f) df = data / pow(model, 2); cf = min(cf, 10e4f); df = min(df, 10e4f); for (ll = 0; ll<NV_PH15; ll++){ NR_Numerator[ll] += dudt[ll] * cf; NR_Denominator[ll] += d2udt2[ll] * cf - pow(dudt[ll], 2)*df; } } // The update if (kk<3) for (ll = 0; ll<NV_PH15; ll++) thetaH1[ll] -= gamma[ll] * min(max(NR_Numerator[ll] / NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); else for (ll = 0; ll<NV_PH15; ll++) thetaH1[ll] -= min(max(NR_Numerator[ll] / NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints thetaH1[3] = max(thetaH1[3], 1.0f); thetaH1[4] = max(thetaH1[4], 0.1f); thetaH1[5] = max(thetaH1[5], 0.5f); thetaH1[5] = min(thetaH1[5], szXY / 2.0f); thetaH1[6] = max(thetaH1[6], 0.5f); thetaH1[6] = min(thetaH1[6], szZ / 2.0f); } //Estimate background model thetaH0[0] = 0.0; for (hh = 0; hh < szZ; hh++) for (ii = 0; ii<szXY; ii++) for (jj = 0; jj<szXY; jj++) { //TODO add oo estimation!! 
thetaH0[0] += s_data[hh*szXY*szXY + szXY*ii + jj]; } thetaH0[0] = thetaH0[0] / pow((float)szXY, 2) / szZ; // Calculating the CRLB and LogLikelihood Div = 0.0; for (hh = 0; hh < szZ; hh++) for (ii = 0; ii<szXY; ii++) for (jj = 0; jj<szXY; jj++) { PSFx = kernel_IntGauss1D(ii, thetaH1[0], thetaH1[5]); PSFy = kernel_IntGauss1D(jj, thetaH1[1], thetaH1[5]); PSFz = kernel_IntGauss1D(hh, thetaH1[2], thetaH1[6]); model = thetaH1[4] + max(thetaH1[3], thetaH1[4])*PSFx*PSFy*PSFz; data = s_data[hh*szXY*szXY + szXY*ii + jj]; //calculating derivatives kernel_DerivativeIntGaussPSF1D(ii, thetaH1[0], thetaH1[5], thetaH1[3], PSFy, PSFz, &dudt[0], NULL); kernel_DerivativeIntGaussPSF1D(jj, thetaH1[1], thetaH1[5], thetaH1[3], PSFx, PSFz, &dudt[1], NULL); kernel_DerivativeIntGaussPSF1D(hh, thetaH1[2], thetaH1[6], thetaH1[3], PSFx, PSFy, &dudt[2], NULL); dudt[3] = PSFx*PSFy*PSFz; dudt[4] = 1.0f; kernel_DerivativeIntGaussPSF3DSigma(ii, jj, kk, thetaH1[0], thetaH1[1], thetaH1[2], thetaH1[5], thetaH1[6], thetaH1[3], PSFx, PSFy, PSFz, &dudt[5], &dudt[6], NULL, NULL); //Building the Fisher Information Matrix for (kk = 0; kk<NV_PH15; kk++)for (ll = kk; ll<NV_PH15; ll++){ M[kk*NV_PH15 + ll] += dudt[ll] * dudt[kk] / model; M[ll*NV_PH15 + kk] = M[kk*NV_PH15 + ll]; } //LogLikelyhood logModel = model / (thetaH0[0] + 1e-5); if (logModel>0 && data > 0) Div += 2 * (data*log(logModel + 1e-5) - model + thetaH0[0]); } //// Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV_PH15); kernel_CalcLLRProp(Diag[3], thetaH1[3], Div, LR); for (kk = 0; kk<NV_PH15; kk++) d_Parameters[kk + (NV_PH15 + NV_PH05)*(BlockSize*bx + tx)] = thetaH1[kk]; for (kk = 0; kk<NV_PH05; kk++) d_Parameters[(NV_PH15 + kk) + (NV_PH15 + NV_PH05)*(BlockSize*bx + tx)] = thetaH0[kk]; for (kk = 0; kk<Q_P5; kk++) d_LogLikelihood[kk + Q_P5 * (BlockSize*bx + tx)] = LR[kk]; for (kk = 0; kk < NV_PH15; kk++) d_CRLBs[kk + NV_PH15*(BlockSize*bx + tx)] = Diag[kk]; return; } //******************************************************************************************* __global__ void kernel_MLEFitSigmaXX(const float *d_data, float PSFSigma, int sz, int iterations, float *d_Parameters, float *d_CRLBs, float *d_LogLikelihood, int Nfits){ /*! * \brief basic MLE fitting kernel. No additional parameters are computed. 
* \param d_data array of subregions to fit copied to GPU * \param PSFSigma sigma of the point spread function * \param sz nxn size of the subregion to fit * \param iterations number of iterations for solution to converge * \param d_Parameters array of fitting parameters to return for each subregion * \param d_CRLBs array of Cramer-Rao lower bound estimates to return for each subregion * \param d_LogLikelihood array of loglikelihood estimates to return for each subregion * \param Nfits number of subregions to fit */ //__shared__ float s_data[MEM]; float M[NV_PS2H1*NV_PS2H1], Diag[NV_PS2H1], Minv[NV_PS2H1*NV_PS2H1]; int tx = threadIdx.x; int bx = blockIdx.x; int BlockSize = blockDim.x; int ii, jj, kk, ll; float model, cf, df, data; float Div; float PSFy, PSFx; float dudt[NV_PS2H1]; float d2udt2[NV_PS2H1]; float NR_Numerator[NV_PS2H1], NR_Denominator[NV_PS2H1]; float thetaH1[NV_PS2H1]; float thetaH0[NV_PS2H0]; float maxjump[NV_PS2H1] = { 1e0f, 1e0f, 1e2f, 2e0f, 1e-1f, 1e-1f }; float g[NV_PS2H1] = { 1.0f, 1.0f, 0.5f, 1.0f, 1.0f, 1.0f }; float Nmax; float logModel; float LR[Q_PS2]; //Prevent read/write past end of array if ((bx*BlockSize + tx) >= Nfits) return; memset(M, 0, NV_PSH1*NV_PSH1*sizeof(float)); memset(Minv, 0, NV_PSH1*NV_PSH1*sizeof(float)); //load data const float *s_data = d_data + (sz*sz*bx*BlockSize + sz*sz*tx); //initial values kernel_CenterofMass2D(sz, s_data, &thetaH1[0], &thetaH1[1], 0, 0); kernel_GaussFMaxMin2D(sz, PSFSigma, s_data, &Nmax, &thetaH1[3]); thetaH1[2] = max(0.0f, (Nmax - thetaH1[3]) * 2 * pi*PSFSigma*PSFSigma); thetaH1[4] = PSFSigma; thetaH1[5] = PSFSigma; for (kk = 0; kk<iterations; kk++) {//main iterative loop //initialize memset(NR_Numerator, 0, NV_PS2H1*sizeof(float)); memset(NR_Denominator, 0, NV_PS2H1*sizeof(float)); for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { PSFx = kernel_IntGauss1D(ii, thetaH1[0], thetaH1[4]); PSFy = kernel_IntGauss1D(jj, thetaH1[1], thetaH1[5]); model = thetaH1[3] + thetaH1[2] * PSFx*PSFy; data = s_data[sz*jj + ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, thetaH1[0], thetaH1[4], thetaH1[2], PSFy, &dudt[0], &d2udt2[0]); kernel_DerivativeIntGauss1D(jj, thetaH1[1], thetaH1[5], thetaH1[2], PSFx, &dudt[1], &d2udt2[1]); kernel_DerivativeIntGauss1DSigma(ii, thetaH1[0], thetaH1[4], thetaH1[2], PSFy, &dudt[4], &d2udt2[4]); kernel_DerivativeIntGauss1DSigma(jj, thetaH1[1], thetaH1[5], thetaH1[2], PSFx, &dudt[5], &d2udt2[5]); dudt[2] = PSFx*PSFy; d2udt2[2] = 0.0f; dudt[3] = 1.0f; d2udt2[3] = 0.0f; cf = 0.0f; df = 0.0f; if (model>10e-3f) cf = data / model - 1; if (model>10e-3f) df = data / pow(model, 2); cf = min(cf, 10e4f); df = min(df, 10e4f); for (ll = 0; ll<NV_PS2H1; ll++){ NR_Numerator[ll] += dudt[ll] * cf; NR_Denominator[ll] += d2udt2[ll] * cf - pow(dudt[ll], 2)*df; } } // The update for (ll = 0; ll<NV_PS2H1; ll++) thetaH1[ll] -= g[ll] * min(max(NR_Numerator[ll] / NR_Denominator[ll], -maxjump[ll]), maxjump[ll]); // Any other constraints thetaH1[2] = max(thetaH1[2], 1.0f); thetaH1[3] = max(thetaH1[3], 0.01f); thetaH1[4] = max(thetaH1[4], PSFSigma / 10.0f); thetaH1[5] = max(thetaH1[5], PSFSigma / 10.0f); } thetaH0[0] = 0.0; for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { thetaH0[0] += s_data[sz*jj + ii]; } thetaH0[0] = thetaH0[0] / pow((float)sz, 2); // Calculating the CRLB and LogLikelihood Div = 0.0f; for (ii = 0; ii<sz; ii++) for (jj = 0; jj<sz; jj++) { PSFx = kernel_IntGauss1D(ii, thetaH1[0], thetaH1[4]); PSFy = kernel_IntGauss1D(jj, thetaH1[1], thetaH1[5]); model = thetaH1[3] + thetaH1[2] * PSFx*PSFy; data 
= s_data[sz*jj + ii]; //calculating derivatives kernel_DerivativeIntGauss1D(ii, thetaH1[0], thetaH1[4], thetaH1[2], PSFy, &dudt[0], NULL); kernel_DerivativeIntGauss1D(jj, thetaH1[1], thetaH1[5], thetaH1[2], PSFx, &dudt[1], NULL); kernel_DerivativeIntGauss1DSigma(ii, thetaH1[0], thetaH1[4], thetaH1[2], PSFy, &dudt[4], NULL); kernel_DerivativeIntGauss1DSigma(jj, thetaH1[1], thetaH1[5], thetaH1[2], PSFx, &dudt[5], NULL); dudt[2] = PSFx*PSFy; dudt[3] = 1.0f; //Building the Fisher Information Matrix for (kk = 0; kk<NV_PS2H1; kk++)for (ll = kk; ll<NV_PS2H1; ll++){ M[kk*NV_PS2H1 + ll] += dudt[ll] * dudt[kk] / model; M[ll*NV_PS2H1 + kk] = M[kk*NV_PS2H1 + ll]; } //LogLikelyhood logModel = model / (thetaH0[0] + 1e-5); if (logModel>0 && data > 0) Div += 2 * (data*log(logModel + 1e-5) - model + thetaH0[0]); } // Matrix inverse (CRLB=F^-1) and output assigments kernel_MatInvN(M, Minv, Diag, NV_PS2H1); kernel_CalcLLRProp(Diag[2], thetaH1[2], Div, LR); //write to global arrays //for (kk = 0; kk<NV_PS2H1; kk++) // d_Parameters[Nfits*kk + (NV_PSH1 + NV_PSH0)*BlockSize*bx + tx] = thetaH1[kk]; //for (kk = 0; kk<NV_PS2H0; kk++) // d_Parameters[Nfits*(NV_PS2H1 + kk) + BlockSize*bx + tx] = thetaH0[kk]; //for (kk = 0; kk<Q_PS2; kk++) // d_LogLikelihood[Nfits*kk + BlockSize*bx + tx] = LR[kk]; //for (kk = 0; kk<NV_PS2H1; kk++) // d_CRLBs[Nfits*kk + BlockSize*bx + tx] = Diag[kk]; for (kk = 0; kk<NV_PS2H1; kk++) d_Parameters[kk + (NV_PS2H1 + NV_PS2H0)*(BlockSize*bx + tx)] = thetaH1[kk]; for (kk = 0; kk<NV_PS2H0; kk++) d_Parameters[(NV_PS2H1 + kk) + (NV_PS2H1 + NV_PS2H0)*(BlockSize*bx + tx)] = thetaH0[kk]; for (kk = 0; kk<Q_PS2; kk++) d_LogLikelihood[kk + Q_PS2 * (BlockSize*bx + tx)] = LR[kk]; for (kk = 0; kk<NV_PS2H1; kk++) d_CRLBs[kk + NV_PS2H1*(BlockSize*bx + tx)] = Diag[kk]; return; }
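A minimal host-side sketch of how a kernel such as kernel_MLEFit above might be driven. It assumes it is appended to the same translation unit, so the NV_PH1/NV_PH0/Q_P constants from definitions.h and the kernel itself are visible; the block size of 64 and the helper name fitSubregions are illustrative choices, not taken from the original project.

#include <vector>

// Hypothetical driver: copies Nfits subregions of sz*sz pixels to the GPU,
// runs kernel_MLEFit, and copies parameters, CRLBs and LLR values back.
void fitSubregions(const std::vector<float> &h_data, int sz, int Nfits,
                   float PSFSigma, int iterations,
                   std::vector<float> &h_params,
                   std::vector<float> &h_crlbs,
                   std::vector<float> &h_llr)
{
    float *d_data = 0, *d_params = 0, *d_crlbs = 0, *d_llr = 0;
    cudaMalloc((void**)&d_data,   sizeof(float) * sz * sz * Nfits);
    cudaMalloc((void**)&d_params, sizeof(float) * (NV_PH1 + NV_PH0) * Nfits);
    cudaMalloc((void**)&d_crlbs,  sizeof(float) * NV_PH1 * Nfits);
    cudaMalloc((void**)&d_llr,    sizeof(float) * Q_P * Nfits);
    cudaMemcpy(d_data, h_data.data(), sizeof(float) * sz * sz * Nfits, cudaMemcpyHostToDevice);

    const int blockSize = 64;                                   // assumed launch configuration
    const int gridSize  = (Nfits + blockSize - 1) / blockSize;  // one thread per subregion
    kernel_MLEFit<<<gridSize, blockSize>>>(d_data, PSFSigma, sz, iterations,
                                           d_params, d_crlbs, d_llr, Nfits);
    cudaDeviceSynchronize();

    h_params.resize((NV_PH1 + NV_PH0) * Nfits);
    h_crlbs.resize(NV_PH1 * Nfits);
    h_llr.resize(Q_P * Nfits);
    cudaMemcpy(h_params.data(), d_params, sizeof(float) * h_params.size(), cudaMemcpyDeviceToHost);
    cudaMemcpy(h_crlbs.data(),  d_crlbs,  sizeof(float) * h_crlbs.size(),  cudaMemcpyDeviceToHost);
    cudaMemcpy(h_llr.data(),    d_llr,    sizeof(float) * h_llr.size(),    cudaMemcpyDeviceToHost);

    cudaFree(d_data); cudaFree(d_params); cudaFree(d_crlbs); cudaFree(d_llr);
}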
35b87f4de8c4e41a34ed6fbc1f765baccc7e28b3.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#ifndef THREADS
#define THREADS 256
#endif
#ifndef BLOCKS
#define BLOCKS 64
#endif
#define TOTAL_THREADS (THREADS * BLOCKS)
#ifndef SIZE
#define SIZE 32
#endif
#ifndef BITS
#define BITS 768
#endif

#if BITS == 768
#define BASES 26 /* the table below lists 26 moduli */
/* __managed__ so the table is readable from both host and device code */
__managed__ int bases[BASES] = {1073741827, 1073741831, 1073741833, 1073741839, 1073741843,
                                1073741857, 1073741891, 1073741909, 1073741939, 1073741953,
                                1073741969, 1073741971, 1073741987, 1073741993, 1073742037,
                                1073742053, 1073742073, 1073742077, 1073742091, 1073742113,
                                1073742169, 1073742203, 1073742209, 1073742223, 1073742233,
                                1073743327};
#else
#error "Only 768 implemented"
#endif

#define INPUT_SIZE (BASES * SIZE * TOTAL_THREADS)
#define OUTPUT_SIZE (BASES * TOTAL_THREADS)
#define INPUT_STRIDE (SIZE * TOTAL_THREADS)
#define OUTPUT_STRIDE (TOTAL_THREADS)

/* each thread accumulates, per base, the product of the residues of its input elements */
__global__ void task(int *input, int *output, int items)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int ix;

    for (ix = tid; ix < OUTPUT_SIZE; ix += blockDim.x * gridDim.x)
        output[ix] = 1;
    for (ix = tid; ix < items; ix += blockDim.x * gridDim.x)
        /* output slot: thread id + base index * OUTPUT_STRIDE */
        output[tid + (ix - tid) / INPUT_STRIDE * OUTPUT_STRIDE] *=
            input[ix] % bases[ix / INPUT_STRIDE];
}

int main(int argc, char *argv[])
{
    dim3 numThreads(THREADS, 1), numBlocks(BLOCKS, 1);
    int i, device = 0;
    hipEvent_t start, stop;
    float msecTotal;
    int *gpuInput, *gpuOutput;
    int *cpuInput, *cpuOutput;
    hipError_t err;

    /* init events and calloc vectors (calloc to ensure padding with 0) */
    hipEventCreate(&start);
    hipEventCreate(&stop);
    cpuInput = (int *)calloc(INPUT_SIZE, sizeof(int));
    cpuOutput = (int *)calloc(OUTPUT_SIZE, sizeof(int));

    /* randomly fill up input vectors: use constant seed */
    srand48(42);
    for (i = 0; i < INPUT_SIZE; i++)
        cpuInput[i] = lrand48() % bases[i / INPUT_STRIDE];

    /* set up vectors on device */
    hipSetDevice(device);
    hipMalloc((void**)&gpuInput, INPUT_SIZE * sizeof(int));
    hipMalloc((void**)&gpuOutput, OUTPUT_SIZE * sizeof(int));

    /* copy vectors to device */
    err = hipMemcpy(gpuInput, cpuInput, INPUT_SIZE * sizeof(int), hipMemcpyHostToDevice);
    if (err != hipSuccess)
        printf("Copy input to device %s\n", hipGetErrorString(err));

    /* GPU computation */
    hipEventRecord(start, NULL);
    hipLaunchKernelGGL(task, dim3(numBlocks), dim3(numThreads), 0, 0, gpuInput, gpuOutput, INPUT_SIZE);
    err = hipGetLastError();
    if (err != hipSuccess)
        printf("cannot invoke task kernel! %s\n", hipGetErrorString(err));
    hipDeviceSynchronize();
    hipEventRecord(stop, NULL);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&msecTotal, start, stop);
    printf("gpu time: %.3f ms\n", msecTotal);
    return 0;
}
35b87f4de8c4e41a34ed6fbc1f765baccc7e28b3.cu
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#ifndef THREADS
#define THREADS 256
#endif
#ifndef BLOCKS
#define BLOCKS 64
#endif
#define TOTAL_THREADS (THREADS * BLOCKS)
#ifndef SIZE
#define SIZE 32
#endif
#ifndef BITS
#define BITS 768
#endif

#if BITS == 768
#define BASES 26 /* the table below lists 26 moduli */
/* __managed__ so the table is readable from both host and device code */
__managed__ int bases[BASES] = {1073741827, 1073741831, 1073741833, 1073741839, 1073741843,
                                1073741857, 1073741891, 1073741909, 1073741939, 1073741953,
                                1073741969, 1073741971, 1073741987, 1073741993, 1073742037,
                                1073742053, 1073742073, 1073742077, 1073742091, 1073742113,
                                1073742169, 1073742203, 1073742209, 1073742223, 1073742233,
                                1073743327};
#else
#error "Only 768 implemented"
#endif

#define INPUT_SIZE (BASES * SIZE * TOTAL_THREADS)
#define OUTPUT_SIZE (BASES * TOTAL_THREADS)
#define INPUT_STRIDE (SIZE * TOTAL_THREADS)
#define OUTPUT_STRIDE (TOTAL_THREADS)

/* each thread accumulates, per base, the product of the residues of its input elements */
__global__ void task(int *input, int *output, int items)
{
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    int ix;

    for (ix = tid; ix < OUTPUT_SIZE; ix += blockDim.x * gridDim.x)
        output[ix] = 1;
    for (ix = tid; ix < items; ix += blockDim.x * gridDim.x)
        /* output slot: thread id + base index * OUTPUT_STRIDE */
        output[tid + (ix - tid) / INPUT_STRIDE * OUTPUT_STRIDE] *=
            input[ix] % bases[ix / INPUT_STRIDE];
}

int main(int argc, char *argv[])
{
    dim3 numThreads(THREADS, 1), numBlocks(BLOCKS, 1);
    int i, device = 0;
    cudaEvent_t start, stop;
    float msecTotal;
    int *gpuInput, *gpuOutput;
    int *cpuInput, *cpuOutput;
    cudaError_t err;

    /* init events and calloc vectors (calloc to ensure padding with 0) */
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cpuInput = (int *)calloc(INPUT_SIZE, sizeof(int));
    cpuOutput = (int *)calloc(OUTPUT_SIZE, sizeof(int));

    /* randomly fill up input vectors: use constant seed */
    srand48(42);
    for (i = 0; i < INPUT_SIZE; i++)
        cpuInput[i] = lrand48() % bases[i / INPUT_STRIDE];

    /* set up vectors on device */
    cudaSetDevice(device);
    cudaMalloc((void**)&gpuInput, INPUT_SIZE * sizeof(int));
    cudaMalloc((void**)&gpuOutput, OUTPUT_SIZE * sizeof(int));

    /* copy vectors to device */
    err = cudaMemcpy(gpuInput, cpuInput, INPUT_SIZE * sizeof(int), cudaMemcpyHostToDevice);
    if (err != cudaSuccess)
        printf("Copy input to device %s\n", cudaGetErrorString(err));

    /* GPU computation */
    cudaEventRecord(start, NULL);
    task<<<numBlocks, numThreads>>>(gpuInput, gpuOutput, INPUT_SIZE);
    err = cudaGetLastError();
    if (err != cudaSuccess)
        printf("cannot invoke task kernel! %s\n", cudaGetErrorString(err));
    cudaDeviceSynchronize();
    cudaEventRecord(stop, NULL);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&msecTotal, start, stop);
    printf("gpu time: %.3f ms\n", msecTotal);
    return 0;
}
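One way to sanity-check the device result is to copy it back into cpuOutput and recompute the per-base products on the CPU. The helper below is an illustrative addition, not part of the original file: it assumes it is appended to the same translation unit (so the layout macros and bases table are visible) and that it is called after the kernel has finished, e.g. verify(cpuInput, cpuOutput, gpuOutput); before return 0. The integer arithmetic deliberately mirrors the kernel, including any wrap-around of the 32-bit products.

/* Hypothetical verification helper, assuming the INPUT_STRIDE/OUTPUT_STRIDE layout above. */
static void verify(const int *cpuInput, int *cpuOutput, const int *gpuOutput)
{
    cudaMemcpy(cpuOutput, gpuOutput, OUTPUT_SIZE * sizeof(int), cudaMemcpyDeviceToHost);
    int errors = 0;
    for (int b = 0; b < BASES; b++)
        for (int t = 0; t < TOTAL_THREADS; t++) {
            int prod = 1;
            for (int k = 0; k < SIZE; k++)
                prod *= cpuInput[b * INPUT_STRIDE + k * TOTAL_THREADS + t] % bases[b];
            if (prod != cpuOutput[t + b * OUTPUT_STRIDE])
                errors++;
        }
    printf("verification: %d mismatching products\n", errors);
}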
887f9582490ce4938e9263c956ac1b00f70010a7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Clamp each 32-bit input value into the 8-bit range [0, UINT8_MAX] without branching.
__global__ void opt_32to8Kernel(uint32_t *input, uint8_t* output, size_t length){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if ((size_t)idx < length) // guard: the grid may cover more threads than there are elements
        output[idx] = (uint8_t)((input[idx] < UINT8_MAX) * input[idx]) + (input[idx] >= UINT8_MAX) * UINT8_MAX;
    __syncthreads(); // no shared memory is used, so this barrier does not affect the result
}
887f9582490ce4938e9263c956ac1b00f70010a7.cu
#include "includes.h"

// Clamp each 32-bit input value into the 8-bit range [0, UINT8_MAX] without branching.
__global__ void opt_32to8Kernel(uint32_t *input, uint8_t* output, size_t length){
    int idx = blockDim.x * blockIdx.x + threadIdx.x;
    if ((size_t)idx < length) // guard: the grid may cover more threads than there are elements
        output[idx] = (uint8_t)((input[idx] < UINT8_MAX) * input[idx]) + (input[idx] >= UINT8_MAX) * UINT8_MAX;
    __syncthreads(); // no shared memory is used, so this barrier does not affect the result
}
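A minimal, hypothetical host-side sketch of launching opt_32to8Kernel above. The block size of 256 and the wrapper name clamp32to8 are illustrative choices, and the sketch assumes the kernel is visible in the same translation unit.

#include <cuda_runtime.h>
#include <stdint.h>

// Hypothetical wrapper: clamps n 32-bit values to 8-bit on the device.
void clamp32to8(const uint32_t *h_in, uint8_t *h_out, size_t n)
{
    uint32_t *d_in = 0;
    uint8_t  *d_out = 0;
    cudaMalloc((void**)&d_in,  n * sizeof(uint32_t));
    cudaMalloc((void**)&d_out, n * sizeof(uint8_t));
    cudaMemcpy(d_in, h_in, n * sizeof(uint32_t), cudaMemcpyHostToDevice);

    const int threads = 256;                              // assumed block size
    const int blocks  = (int)((n + threads - 1) / threads);
    opt_32to8Kernel<<<blocks, threads>>>(d_in, d_out, n);
    cudaDeviceSynchronize();

    cudaMemcpy(h_out, d_out, n * sizeof(uint8_t), cudaMemcpyDeviceToHost);
    cudaFree(d_in);
    cudaFree(d_out);
}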
a919051edfcc01942d361d4333380be617b9df0d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2016 Tommi M. Tykkl Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <stdio.h> #include <image2/Image2.h> #include <image2/ImagePyramid2.h> //#include <types.h> #include <hostUtils.h> #include <calib/calib.h> #include <rendering/VertexBuffer2.h> #include <math.h> #include <cwchar> #include <math.h> #include <assert.h> #include <helper_cuda.h> #include <tracker/basic_math.h> //#include <cpp_type_traits.h> using namespace std; namespace warputils { #include "expmCuda.h" #include "f2cCuda.h" #include "kernelUtils.h" #include "reduction_kernel.cu" #include "expmCuda.cu" #define SIZE 6 typedef doublereal CHOLMAT[SIZE][SIZE]; typedef doublereal CHOLVEC[SIZE]; #include "cholesky.cu" } using namespace warputils; texture<float, 2, hipReadModeElementType> texC; __global__ void collectPointsIntoImageKernel(int *iDataSrc, float *vDataSrc, float *Tsrc, int skipper, float *vDataDst, float *Tdst, int width, int height,float *calibDataDev, int stride) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int idxStride = iDataSrc[idx]*stride; float3 p3,r3; p3.x = vDataSrc[idxStride+0]; p3.y = vDataSrc[idxStride+1]; p3.z = vDataSrc[idxStride+2]; float TsrcInv[16],T[16]; invertRT4Cuda(Tsrc,TsrcInv); matrixMult4(Tdst,TsrcInv,T); matrixMultVec4(T, p3, r3); float2 p2,pp; p2.x = r3.x / r3.z; p2.y = r3.y / r3.z; float *K = &calibDataDev[KR_OFFSET]; pp.x = K[0]*p2.x+K[2]; pp.y = K[4]*p2.y+K[5]; unsigned int xi = (unsigned int)(pp.x); unsigned int yi = (unsigned int)(pp.y); if ((xi < width) && (yi < height)) { int offset = (xi + yi * width)*stride; vDataDst[offset+0] = r3.x; vDataDst[offset+1] = r3.y; vDataDst[offset+2] = r3.z; for (int i = 3; i < stride; i++) { vDataDst[offset+i] = vDataSrc[idxStride+i]; } } } __global__ void collectPointsKernel(int *iDataSrc, float *vDataSrc, float *Tsrc, int skipper, float *vDataDst, float *Tdst, int stride) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int srcIndex = idx * skipper; int idxStrideSrc = iDataSrc[srcIndex]*stride; int idxStrideDst = idx*stride; float3 p3,r3; p3.x = vDataSrc[idxStrideSrc+0]; p3.y = vDataSrc[idxStrideSrc+1]; p3.z = vDataSrc[idxStrideSrc+2]; float TsrcInv[16],T[16]; invertRT4Cuda(Tsrc,TsrcInv); matrixMult4(Tdst,TsrcInv,T); matrixMultVec4(T, p3, r3); vDataDst[idxStrideDst+0] = r3.x; vDataDst[idxStrideDst+1] = r3.y; vDataDst[idxStrideDst+2] = r3.z; vDataDst[idxStrideDst+3] = vDataSrc[idxStrideSrc+3]; vDataDst[idxStrideDst+4] = vDataSrc[idxStrideSrc+4]; vDataDst[idxStrideDst+5] = vDataSrc[idxStrideSrc+5]; vDataDst[idxStrideDst+6] = 0; vDataDst[idxStrideDst+7] = 0; vDataDst[idxStrideDst+8] = vDataSrc[idxStrideSrc+8]; vDataDst[idxStrideDst+9] = vDataSrc[idxStrideSrc+9]; vDataDst[idxStrideDst+10] = vDataSrc[idxStrideSrc+10]; vDataDst[idxStrideDst+11] = vDataSrc[idxStrideSrc+11]; vDataDst[idxStrideDst+12] = vDataSrc[idxStrideSrc+12]; vDataDst[idxStrideDst+13] = vDataSrc[idxStrideSrc+13]; vDataDst[idxStrideDst+14] = vDataSrc[idxStrideSrc+14];; vDataDst[idxStrideDst+15] = 
vDataSrc[idxStrideSrc+15]; vDataDst[idxStrideDst+16] = vDataSrc[idxStrideSrc+16]; vDataDst[idxStrideDst+17] = vDataSrc[idxStrideSrc+17]; vDataDst[idxStrideDst+18] = vDataSrc[idxStrideSrc+18]; vDataDst[idxStrideDst+19] = vDataSrc[idxStrideSrc+19]; vDataDst[idxStrideDst+20] = vDataSrc[idxStrideSrc+20]; } // no need to check screen bounds here (only opengl vertices) __global__ void warpPointsKernel(int *iData, float *vData, float *weightsDev, float *T, float *calibDataDev, float *scratchPtr, float *imgData1, float *imgData2, float *imgData3, int width, int srcStride, int targetStride, int rgbOffset) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int vindex = iData[idx]; int idxStride = vindex*srcStride; // indices to src and target vertices in the baseBuffer int dstIdx1 = (10000+idx)*targetStride; int dstIdx2 = (10000+320*240+idx)*targetStride; float3 p3,r3; p3.x = vData[idxStride+0]; p3.y = vData[idxStride+1]; p3.z = vData[idxStride+2]; matrixMultVec4(T, p3, r3); // project r3 into screenspace for obtaining pixel coordinates float *KR = &calibDataDev[KR_OFFSET]; float *kc = &calibDataDev[KcR_OFFSET]; float2 pu,p1_1,p2_1;//,p2_2,p2_3; pu.x = r3.x / r3.z; pu.y = r3.y / r3.z; distortPoint(pu,kc,KR,p2_1); // generate reference point also pu.x = p3.x/p3.z; pu.y = p3.y/p3.z; distortPoint(pu,kc,KR,p1_1); //// FETCH current depth, intensity1, intensity2, intensity3 for KEYFRAME update! /* // compute low-resolution coordinates float a = 0.5f; float b = -0.25f; p2_2.x = a*p2_1.x + b; p2_2.y = a*p2_1.y + b; p2_3.x = a*p2_2.x + b; p2_3.y = a*p2_2.y + b; float color1 = 0, color2 = 0, color3 = 0; int xdi = 0, ydi = 0; float fracX = 0.0f, fracY = 0.0f; xdi = (int)p2_1.x; ydi = (int)p2_1.y; fracX = p2_1.x - xdi; fracY = p2_1.y - ydi; bilinearInterpolation(xdi, ydi, fracX, fracY, width, imgData1, color1); // bilinearInterpolation(xdi, ydi, fracX, fracY, width, depthData1, depth1); // TODO // determine inv(T) at this point! (compute in every thread?) transpose + vector multiplication is piece of cake // reconstruct 3D point + intensity in all 3 layers // map it back to reference using inv(T) // IIR 3D point + intensity // effect -> grid is lost, but consistency maintained? 
xdi = (int)p2_2.x; ydi = (int)p2_2.y; fracX = p2_2.x - xdi; fracY = p2_2.y - ydi; bilinearInterpolation(xdi, ydi, fracX, fracY, width/2, imgData2, color2); xdi = (int)p2_3.x; ydi = (int)p2_3.y; fracX = p2_3.x - xdi; fracY = p2_3.y - ydi; bilinearInterpolation(xdi, ydi, fracX, fracY, width/4, imgData3, color3); */ float w = weightsDev[idx]; scratchPtr[dstIdx1+0] = p1_1.x;//vData[idxStride+6]; scratchPtr[dstIdx1+1] = p1_1.y;//vData[idxStride+7]; scratchPtr[dstIdx1+2] = 0.0f; scratchPtr[dstIdx1+rgbOffset+0] = 1.0f - w; scratchPtr[dstIdx1+rgbOffset+1] = w; scratchPtr[dstIdx1+rgbOffset+2] = 0.0f; // float maxDist = calibDataDev[MAXD_OFFSET]; if (w > 0) { scratchPtr[dstIdx2+0] = p2_1.x; scratchPtr[dstIdx2+1] = p2_1.y; } else { scratchPtr[dstIdx2+0] = -1000.0f; scratchPtr[dstIdx2+1] = -1000.0f; } scratchPtr[dstIdx2+2] = 0.0f; scratchPtr[dstIdx2+rgbOffset+0] = 1.0f - w; scratchPtr[dstIdx2+rgbOffset+1] = w; scratchPtr[dstIdx2+rgbOffset+2] = 0.0f; } __global__ void warpBaseKernel(float *vData, float *T, int emptyVertexSlot, int stride, int rgbOffset) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int idxStrideSrc = idx*stride; int idxStrideDst = (idx+6)*stride; float3 p3,r3; p3.x = vData[idxStrideSrc+0]; p3.y = vData[idxStrideSrc+1]; p3.z = vData[idxStrideSrc+2]; float Tinv[16]; invertRT4Cuda(T, Tinv); matrixMultVec4(Tinv, p3, r3); vData[idxStrideDst+0] = r3.x; // target point x vData[idxStrideDst+1] = r3.y; // target point y vData[idxStrideDst+2] = r3.z; // target point z // add new line segment as extra job :) // avoids yet another lock/unlock with additional vbuffer if (idx == 0) { int previousSlot = emptyVertexSlot - 1; if (previousSlot < 0) previousSlot = 0; float px = vData[previousSlot*stride+0]; float py = vData[previousSlot*stride+1]; float pz = vData[previousSlot*stride+2]; vData[emptyVertexSlot*stride+0] = px; vData[emptyVertexSlot*stride+1] = py; vData[emptyVertexSlot*stride+2] = pz; vData[emptyVertexSlot*stride+rgbOffset+0] = 1; vData[emptyVertexSlot*stride+rgbOffset+1] = 0; vData[emptyVertexSlot*stride+rgbOffset+2] = 0; vData[(emptyVertexSlot+1)*stride+0] = r3.x; vData[(emptyVertexSlot+1)*stride+1] = r3.y; vData[(emptyVertexSlot+1)*stride+2] = r3.z; vData[(emptyVertexSlot+1)*stride+rgbOffset+0] = 1; vData[(emptyVertexSlot+1)*stride+rgbOffset+1] = 0; vData[(emptyVertexSlot+1)*stride+rgbOffset+2] = 0; } } __global__ void interpolateResidualKernel2(int *iData, float *vData, float *T, float *calibDataDev, float a, float b, int refColorOffset, float *imgData, int width, int height, float *zCurrentDev, float *zWeightsDev, float *residual, int srcStride, int zwidth, int zheight) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int vindex = iData[idx]; int idxStride = vindex*srcStride; float *KR = &calibDataDev[KR_OFFSET]; float *kc = &calibDataDev[KcR_OFFSET]; float *TLR = &calibDataDev[TLR_OFFSET]; float3 p3,r3_ir,r3; p3.x = vData[idxStride+0]; p3.y = vData[idxStride+1]; p3.z = vData[idxStride+2]; matrixMultVec4(T, p3, r3_ir); // reference IR -> current IR matrixMultVec4(TLR, r3_ir, r3); // current IR -> current RGB float2 pu,p2; pu.x = r3.x / r3.z; pu.y = r3.y / r3.z; distortPoint(pu,kc,KR,p2); // resolution tweak: float2 p; p.x = a*p2.x + b; p.y = a*p2.y + b; float iResidual = 1.0f; // set max residual value for points outside fov int xdi = (int)p.x; int ydi = (int)p.y; float zWeight = 0.0f; if (xdi >= 0 && ydi >= 0 && xdi < width-1 && ydi < height-1) { float fx = p.x - xdi; float fy = p.y - ydi; float color = 0; bilinearInterpolation(xdi, ydi, fx, fy, width, imgData, color); iResidual = 
vData[idxStride+refColorOffset] - color; // residual range [-1,1] // fetch depth coordinate from vertex buffer (offset runs over IR image) float *KL = &calibDataDev[KL_OFFSET]; float *TRL = &calibDataDev[TRL_OFFSET]; float3 rl3,pl2; matrixMultVec4(TRL, r3, rl3); // current RGB -> current IR rl3.x /= rl3.z; rl3.y /= rl3.z; rl3.z = 1; // normalize matrixMultVec3(KL, rl3, pl2); // project to image space int xdi2 = (int)(pl2.x+0.5f); // nearest point sample in IR view int ydi2 = (int)(pl2.y+0.5f); if (xdi2 >= 0 && ydi2 >= 0 && xdi2 < zwidth && ydi2 < zheight) { int offset = xdi2 + ydi2*zwidth; float zcur = zCurrentDev[offset]; float zerr = zcur-r3.z; zerr *= zerr; if (zerr < 100*100) { zWeight = 1.0f-zerr/(100.0f*100.0f); } } } residual[idx] = iResidual; zWeightsDev[idx] = zWeight; } __global__ void interpolateResidualKernel(int *iData, float *vData, float *T, float *calibDataDev, float a, float b, int refColorOffset, float *imgData, int width, int height, float *vDataCur, int zwidth, int zheight, float *residual, float *zWeights, int srcStride, int dstStride) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int vindex = iData[idx]; int idxStride = vindex*srcStride; float *KR = &calibDataDev[KR_OFFSET]; float *kc = &calibDataDev[KcR_OFFSET]; float3 p3,r3; p3.x = vData[idxStride+0]; p3.y = vData[idxStride+1]; p3.z = vData[idxStride+2]; matrixMultVec4(T, p3, r3); // reference RGB -> current RGB float2 pu,p2; pu.x = r3.x / r3.z; pu.y = r3.y / r3.z; distortPoint(pu,kc,KR,p2); // resolution tweak: float2 p; p.x = a*p2.x + b; p.y = a*p2.y + b; float zWeight = 0.0f; float iResidual = 1.0f; // set max residual value for points outside fov int xdi = (int)p.x; int ydi = (int)p.y; if (xdi >= 0 && ydi >= 0 && xdi < width-1 && ydi < height-1) { float fx = p.x - xdi; float fy = p.y - ydi; float color = 0; bilinearInterpolation(xdi, ydi, fx, fy, width, imgData, color); iResidual = vData[idxStride+refColorOffset] - color; // residual range [-1,1] // fetch depth coordinate from vertex buffer (offset runs over IR image) float *KL = &calibDataDev[KL_OFFSET]; float *TRL = &calibDataDev[TRL_OFFSET]; float3 rl3,pl2; matrixMultVec4(TRL, r3, rl3); // current RGB -> current IR rl3.x /= rl3.z; rl3.y /= rl3.z; rl3.z = 1; // normalize matrixMultVec3(KL, rl3, pl2); // project to image space int xdi2 = (int)(pl2.x+0.5f); // nearest point sample in IR view int ydi2 = (int)(pl2.y+0.5f); if (xdi2 >= 0 && ydi2 >= 0 && xdi2 < zwidth && ydi2 < zheight) { int offset = xdi2 + ydi2*zwidth; float zcur = vDataCur[offset*dstStride+2]; float zerr = zcur-r3.z; zerr *= zerr; if (zerr < 300*300) { zWeight = 1.0f-zerr/(300.0f*300.0f); } } } residual[idx] = iResidual; zWeights[idx] = zWeight; } __global__ void filterDepthIIRKernel(int *iData, float *vData, float *T, float *calibDataDev, int width, int height, float *vDataCur, float *weightsDev, float weightThreshold, int stride) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int vindex = iData[idx]; int idxStride = vindex*stride; // do not average depth values if M-estimator weight is too small if (weightsDev[idx] < weightThreshold) return; float *KR = &calibDataDev[KR_OFFSET]; float *kc = &calibDataDev[KcR_OFFSET]; float3 p3,r3; p3.x = vData[idxStride+0]; p3.y = vData[idxStride+1]; p3.z = vData[idxStride+2]; matrixMultVec4(T, p3, r3); // reference RGB -> current RGB float2 pu,p; pu.x = r3.x / r3.z; pu.y = r3.y / r3.z; distortPoint(pu,kc,KR,p); int xdi = (int)p.x; int ydi = (int)p.y; if (xdi >= 0 && ydi >= 0 && xdi < width-1 && ydi < height-1) { // fetch depth coordinate from vertex buffer 
(offset runs over IR image) float *KL = &calibDataDev[KL_OFFSET]; float *TRL = &calibDataDev[TRL_OFFSET]; float3 rl3,pl2; matrixMultVec4(TRL, r3, rl3); // current RGB -> current IR rl3.x /= rl3.z; rl3.y /= rl3.z; rl3.z = 1; // normalize matrixMultVec3(KL, rl3, pl2); // project to image space int xdi2 = (int)(pl2.x+0.5f); // nearest point sample in IR view int ydi2 = (int)(pl2.y+0.5f); if (xdi2 >= 0 && ydi2 >= 0 && xdi2 < width && ydi2 < height) { int offset = xdi2 + ydi2*width; float3 pc,pr,ray; // pc is in RGB frame of the current view pc.x = vDataCur[offset*stride+0]; pc.y = vDataCur[offset*stride+1]; pc.z = vDataCur[offset*stride+2]; // generate mapping from current to reference float iT[16]; invertRT4Cuda(&T[0],&iT[0]); // map current point to reference matrixMultVec4(iT, pc, pr); // current RGB -> reference RGB // generate a ray from current origin towards p3 float len = sqrtf(p3.x*p3.x+p3.y*p3.y+p3.z*p3.z); ray.x = p3.x / len; ray.y = p3.y / len; ray.z = p3.z / len; // project pr to ray float rayProj = pr.x*ray.x+pr.y*ray.y+pr.z*ray.z; ray.x *= rayProj; ray.y *= rayProj; ray.z *= rayProj; // compute orthogonal displacement from ray /* float3 dp; dp.x = pr.x - ray.x; dp.y = pr.y - ray.y; dp.z = pr.z - ray.z;*/ // determine squared length //float rayDist2 = dp.x*dp.x + dp.y*dp.y + dp.z*dp.z; // compute weight based on ray distance float distWeight = 0.9f;//__expf(-xyDist/50.0f+1e-5f); vData[idxStride+2] = p3.z*0.9f*(1.0f-distWeight) + 0.1f*pr.z*distWeight; } } } __global__ void compressVertexBufferKernel(int *iDataSrc, float *vDataSrc, int *iDataDst, float *vDataDst, int srcStride, int dstStride, bool rgbVisualization) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int idxStrideSrc = iDataSrc[idx]*srcStride; // identity mapping iDataDst[idx] = idx; int idxStrideDst = idx*dstStride; if (srcStride == dstStride) { for (int i = 0; i < srcStride; i++) { vDataDst[idxStrideDst+i] = vDataSrc[idxStrideSrc+i]; } } else if (srcStride == VERTEXBUFFER_STRIDE && dstStride == COMPRESSED_STRIDE){ vDataDst[idxStrideDst+0] = vDataSrc[idxStrideSrc+0]; vDataDst[idxStrideDst+1] = vDataSrc[idxStrideSrc+1]; vDataDst[idxStrideDst+2] = vDataSrc[idxStrideSrc+2]; vDataDst[idxStrideDst+3] = vDataSrc[idxStrideSrc+3]; vDataDst[idxStrideDst+4] = vDataSrc[idxStrideSrc+4]; vDataDst[idxStrideDst+5] = vDataSrc[idxStrideSrc+5]; if (!rgbVisualization) { vDataDst[idxStrideDst+6] = vDataSrc[idxStrideSrc+14]; vDataDst[idxStrideDst+7] = vDataSrc[idxStrideSrc+17]; vDataDst[idxStrideDst+8] = vDataSrc[idxStrideSrc+20]; } else { vDataDst[idxStrideDst+6] = vDataSrc[idxStrideSrc+8]; vDataDst[idxStrideDst+7] = vDataSrc[idxStrideSrc+9]; vDataDst[idxStrideDst+8] = vDataSrc[idxStrideSrc+10]; } } } __global__ void precomputeJacobian4Kernel(int *iData, float *vData, float *calibDataDev, int vectorLength, float *jacobian1, float *jacobian2, float *jacobian3, float *jacobian4, int stride, float optScaleIn) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int vindex = iData[idx]; int idxStride = vindex*stride; float *K = &calibDataDev[KR_OFFSET]; float *T = &calibDataDev[TLR_OFFSET]; float *kc = &calibDataDev[KcR_OFFSET]; float3 p3,r3; float3 dp2,dd2,dpn2; dd2.z = 0; dpn2.z = 0; float3 dp3,dr3; p3.x = vData[idxStride+0]; p3.y = vData[idxStride+1]; p3.z = vData[idxStride+2]; matrixMultVec4(T,p3,r3); // input points can be scaled without altering perspective projection // because it is useful to have uniform magnitudes during optimization, unit scaling is supported r3.x *= optScaleIn; r3.y *= optScaleIn; r3.z *= optScaleIn; float gradX1 = 
vData[idxStride+11]; float gradY1 = vData[idxStride+12]; float gradX2 = vData[idxStride+15]; float gradY2 = vData[idxStride+16]; float gradX3 = vData[idxStride+18]; float gradY3 = vData[idxStride+19]; float gradX4 = vData[idxStride+21]; float gradY4 = vData[idxStride+22]; // A[0] = 0; A[1] = -x[2]; A[2] = x[1]; A[3] = x[3]; // A[4] = x[2];A[5] = 0; A[6] =-x[0]; A[7] = x[4]; // A[8] =-x[1];A[9] = x[0]; A[10] = 0; A[11] = x[5]; // A[12] = 0; A[13] = 0; A[14] = 0; A[15] = 0; float dN[6]; dN[0] = 1.0f/r3.z; dN[1] = 0; dN[2] = -r3.x/(r3.z*r3.z); dN[3] = 0; dN[4] = 1.0f/r3.z; dN[5] = -r3.y/(r3.z*r3.z); float x = r3.x/r3.z; float y = r3.y/r3.z; float x2 = x*x; float y2 = y*y; float x4 = x2*x2; float y4 = y2*y2; float r2 = x2+y2; float r4 = r2*r2; float dD[4]; dD[0] = 1 + kc[0]*(3*x2+y2) + kc[1]*(5*x4+6*x2*y2+y4) + kc[4]*r4*(7*x2+y2); dD[1] = kc[0]*2*x*y + kc[1]*4*x*y*r2 + kc[4]*6*x*y*r4; dD[2] = kc[0]*2*y*x + kc[1]*4*x*y*r2 + kc[4]*6*x*y*r4; dD[3] = 1 + kc[0]*(3*x2+y2) + kc[1]*(5*x4+6*x2*y2+y4) + kc[4]*r4*(7*y2+x2); // param1 dp3.x = 0.0f; dp3.y =-r3.z; dp3.z = r3.y; matrixRot4(T,dp3,dr3); // basetransform only influences rotation (after torque, w=0) dpn2.x = dN[0]*dr3.x + dN[1]*dr3.y + dN[2]*dr3.z; dpn2.y = dN[3]*dr3.x + dN[4]*dr3.y + dN[5]*dr3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*0 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*0 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*0 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; jacobian4[vectorLength*0 + idx] = (dp2.x/8.0f)*gradX4 + (dp2.y/8.0f)*gradY4; // param2 dp3.x = r3.z; dp3.y = 0.0f; dp3.z =-r3.x; matrixRot4(T,dp3,dr3); // basetransform only influences rotation (after torque, w=0) dpn2.x = dN[0]*dr3.x + dN[1]*dr3.y + dN[2]*dr3.z; dpn2.y = dN[3]*dr3.x + dN[4]*dr3.y + dN[5]*dr3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*1 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*1 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*1 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; jacobian4[vectorLength*1 + idx] = (dp2.x/8.0f)*gradX4 + (dp2.y/8.0f)*gradY4; // param3 dp3.x =-r3.y; dp3.y = r3.x; dp3.z = 0.0f; matrixRot4(T,dp3,dr3); // basetransform only influences rotation (after torque, w=0) dpn2.x = dN[0]*dr3.x + dN[1]*dr3.y + dN[2]*dr3.z; dpn2.y = dN[3]*dr3.x + dN[4]*dr3.y + dN[5]*dr3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*2 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*2 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*2 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; jacobian4[vectorLength*2 + idx] = (dp2.x/8.0f)*gradX4 + (dp2.y/8.0f)*gradY4; // param4 dp3.x = 1.0f; dp3.y = 0.0f; dp3.z = 0.0f; matrixRot4(T,dp3,dr3); // basetransform only influences rotation (after torque, w=0) dpn2.x = dN[0]*dr3.x + dN[1]*dr3.y + dN[2]*dr3.z; dpn2.y = dN[3]*dr3.x + dN[4]*dr3.y + dN[5]*dr3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*3 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*3 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*3 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; jacobian4[vectorLength*3 + idx] = (dp2.x/8.0f)*gradX4 + (dp2.y/8.0f)*gradY4; // param5 
dp3.x = 0.0f; dp3.y = 1.0f; dp3.z = 0.0f; matrixRot4(T,dp3,dr3); // basetransform only influences rotation (after torque, w=0) dpn2.x = dN[0]*dr3.x + dN[1]*dr3.y + dN[2]*dr3.z; dpn2.y = dN[3]*dr3.x + dN[4]*dr3.y + dN[5]*dr3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*4 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*4 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*4 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; jacobian4[vectorLength*4 + idx] = (dp2.x/8.0f)*gradX4 + (dp2.y/8.0f)*gradY4; // param6 dp3.x = 0.0f; dp3.y = 0.0f; dp3.z = 1.0f; matrixRot4(T,dp3,dr3); // basetransform only influences rotation (after torque, w=0) dpn2.x = dN[0]*dr3.x + dN[1]*dr3.y + dN[2]*dr3.z; dpn2.y = dN[3]*dr3.x + dN[4]*dr3.y + dN[5]*dr3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*5 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*5 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*5 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; jacobian4[vectorLength*5 + idx] = (dp2.x/8.0f)*gradX4 + (dp2.y/8.0f)*gradY4; } __global__ void precomputeJacobianKernel(int *iData, float *vData, float *calibDataDev, int vectorLength, float *jacobian1, float *jacobian2, float *jacobian3, int stride, float optScaleIn) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int vindex = iData[idx]; int idxStride = vindex*stride; float *K = &calibDataDev[KR_OFFSET]; float *kc = &calibDataDev[KcR_OFFSET]; float3 p3; float3 dp3,dp2,dd2,dpn2; dd2.z = 0; dpn2.z = 0; // input points can be scaled without altering perspective projection // because it is useful to have uniform magnitudes during optimization, unit scaling is supported p3.x = vData[idxStride+0]*optScaleIn; p3.y = vData[idxStride+1]*optScaleIn; p3.z = vData[idxStride+2]*optScaleIn; float gradX1 = vData[idxStride+11]; float gradY1 = vData[idxStride+12]; float gradX2 = vData[idxStride+15]; float gradY2 = vData[idxStride+16]; float gradX3 = vData[idxStride+18]; float gradY3 = vData[idxStride+19]; // A[0] = 0; A[1] = -x[2]; A[2] = x[1]; A[3] = x[3]; // A[4] = x[2];A[5] = 0; A[6] =-x[0]; A[7] = x[4]; // A[8] =-x[1];A[9] = x[0]; A[10] = 0; A[11] = x[5]; // A[12] = 0; A[13] = 0; A[14] = 0; A[15] = 0; float dN[6]; dN[0] = 1.0f/p3.z; dN[1] = 0; dN[2] = -p3.x/(p3.z*p3.z); dN[3] = 0; dN[4] = 1.0f/p3.z; dN[5] = -p3.y/(p3.z*p3.z); float x = p3.x/p3.z; float y = p3.y/p3.z; float x2 = x*x; float y2 = y*y; float x4 = x2*x2; float y4 = y2*y2; float r2 = x2+y2; float r4 = r2*r2; float dD[4]; dD[0] = 1 + kc[0]*(3*x2+y2) + kc[1]*(5*x4+6*x2*y2+y4) + kc[4]*r4*(7*x2+y2); dD[1] = kc[0]*2*x*y + kc[1]*4*x*y*r2 + kc[4]*6*x*y*r4; dD[2] = kc[0]*2*y*x + kc[1]*4*x*y*r2 + kc[4]*6*x*y*r4; dD[3] = 1 + kc[0]*(3*x2+y2) + kc[1]*(5*x4+6*x2*y2+y4) + kc[4]*r4*(7*y2+x2); // param1 dp3.x = 0.0f; dp3.y =-p3.z; dp3.z = p3.y; dpn2.x = dN[0]*dp3.x + dN[1]*dp3.y + dN[2]*dp3.z; dpn2.y = dN[3]*dp3.x + dN[4]*dp3.y + dN[5]*dp3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*0 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*0 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*0 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; // param2 dp3.x = p3.z; dp3.y = 0.0f; dp3.z =-p3.x; dpn2.x = dN[0]*dp3.x + dN[1]*dp3.y + dN[2]*dp3.z; dpn2.y = dN[3]*dp3.x + dN[4]*dp3.y + dN[5]*dp3.z; 
dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*1 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*1 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*1 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; // param3 dp3.x =-p3.y; dp3.y = p3.x; dp3.z = 0.0f; dpn2.x = dN[0]*dp3.x + dN[1]*dp3.y + dN[2]*dp3.z; dpn2.y = dN[3]*dp3.x + dN[4]*dp3.y + dN[5]*dp3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*2 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*2 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*2 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; // param4 dp3.x = 1.0f; dp3.y = 0.0f; dp3.z = 0.0f; dpn2.x = dN[0]*dp3.x + dN[1]*dp3.y + dN[2]*dp3.z; dpn2.y = dN[3]*dp3.x + dN[4]*dp3.y + dN[5]*dp3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*3 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*3 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*3 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; // param5 dp3.x = 0.0f; dp3.y = 1.0f; dp3.z = 0.0f; dpn2.x = dN[0]*dp3.x + dN[1]*dp3.y + dN[2]*dp3.z; dpn2.y = dN[3]*dp3.x + dN[4]*dp3.y + dN[5]*dp3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*4 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*4 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*4 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; // param6 dp3.x = 0.0f; dp3.y = 0.0f; dp3.z = 1.0f; dpn2.x = dN[0]*dp3.x + dN[1]*dp3.y + dN[2]*dp3.z; dpn2.y = dN[3]*dp3.x + dN[4]*dp3.y + dN[5]*dp3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*5 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*5 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*5 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; } __global__ void weightJacobianKernel(float *jacobian, float *weights, int vectorLength, float *weightedJacobian) { int idx = blockIdx.x*blockDim.x+threadIdx.x; float w = weights[idx]; weightedJacobian[vectorLength*0 + idx] = jacobian[vectorLength*0 + idx] * w; weightedJacobian[vectorLength*1 + idx] = jacobian[vectorLength*1 + idx] * w; weightedJacobian[vectorLength*2 + idx] = jacobian[vectorLength*2 + idx] * w; weightedJacobian[vectorLength*3 + idx] = jacobian[vectorLength*3 + idx] * w; weightedJacobian[vectorLength*4 + idx] = jacobian[vectorLength*4 + idx] * w; weightedJacobian[vectorLength*5 + idx] = jacobian[vectorLength*5 + idx] * w; } __global__ void elementwiseMultKernel(float *vecA, float *vecB, float *result) { int idx = blockIdx.x*blockDim.x+threadIdx.x; result[idx] = vecA[idx]*vecB[idx]; } __global__ void sumElemsKernel2(float *blockScratch, int nblocks, float *resultA, float *resultB) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx == 0) { float sumElems = 0.0f; for (int i = 0; i < nblocks; i++) { sumElems += blockScratch[i]; } resultA[0] = sumElems; if (resultB != NULL) resultB[0] = sumElems; } } __global__ void sumElemsKernel(float *blockScratch, int nblocks, float *resultA) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx == 0) { float sumElems = 0.0f; for (int i = 0; i < nblocks; i++) { sumElems += blockScratch[i]; 
} resultA[0] = sumElems; } } __global__ void matrixMult4Kernel(float *A, float *B, float *C) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx == 0) { matrixMult4(A,B,C); } } __global__ void matrixMult4NormalizedKernel(float *A, float *B, float *C) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx == 0) { matrixMult4(A,B,C); normalizeMat4(C); } } __global__ void invertMatrix4Kernel(float *A, float *iA, int N) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < N) { invertRT4Cuda(A+idx*16,iA+idx*16); } } __global__ void convertToAxisAngleKernel(float *A, float *posAxisAngle, int N) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < N) { float m[16]; invertRT4Cuda(A+idx*16,m); float q[4]; rot2QuaternionCuda(m,q); float axisAngle[4]; quaternion2AxisAngleCuda(q,axisAngle); posAxisAngle[idx*7+0] = m[3]; posAxisAngle[idx*7+1] = m[7]; posAxisAngle[idx*7+2] = m[11]; posAxisAngle[idx*7+3] = axisAngle[0]; posAxisAngle[idx*7+4] = axisAngle[1]; posAxisAngle[idx*7+5] = axisAngle[2]; posAxisAngle[idx*7+6] = axisAngle[3]; // also save pose matrices for debugging for (int i = 0; i < 16; i++) A[idx*16+i] = m[i]; } } __global__ void filterPoseKernel(float *posAxisAngle, float *weightsDev, int N, float *invT) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx != 0) return; float avg[7],weightSum = 1e-7f; for (int j = 0; j < 7; j++) { avg[j] = 0; } for (int i = 0; i < N; i++) { for (int j = 0; j < 7; j++) { avg[j] += posAxisAngle[i*7+j]*weightsDev[i]; } weightSum += weightsDev[i]; } for (int j = 0; j < 7; j++) avg[j] = avg[j]/weightSum; // normalize rotation axis // float len = sqrtf(avg[3]*avg[3]+avg[4]*avg[4]+avg[5]*avg[5]+1e-7f); // avg[3] /= len; // avg[4] /= len; // avg[5] /= len; float T[16]; axisAngle2RotCuda(&avg[3],T); T[3] = avg[0]; T[7] = avg[1]; T[11] = avg[2]; invertRT4Cuda(T,invT); /* float T[16]; axisAngle2RotCuda(&posAxisAngle[3],T); T[3] = posAxisAngle[0]; T[7] = posAxisAngle[1]; T[11] = posAxisAngle[2]; invertRT4Cuda(T,invT); */ /* float q[4],T[16]; quaternion2RotCuda(&posAxisAngle[3],T); T[3] = posAxisAngle[0]; T[7] = posAxisAngle[1]; T[11] = posAxisAngle[2]; invertRT4Cuda(T,invT);*/ } __device__ doublereal dotProduct6(doublereal *a, doublereal *b) { doublereal dot = 0; for (int i = 0; i < 6; i++) dot += a[i]*b[i]; return dot; } __device__ void matrixMultVec6(doublereal *A, doublereal *x, doublereal *r) { for (int i = 0; i < 6; i++) r[i] = (doublereal)0.0; for (int j = 0; j < 6; j++) { for (int k = 0; k < 6; k++) { r[j] += A[j*6+k]*x[k]; } } } __device__ void generateA(doublereal *x, doublereal *A) { A[0] = 0; A[1] = -x[2]; A[2] = x[1]; A[3] = x[3]; A[4] = x[2];A[5] = 0; A[6] =-x[0]; A[7] = x[4]; A[8] =-x[1];A[9] = x[0]; A[10] = 0; A[11] = x[5]; A[12] = 0; A[13] = 0; A[14] = 0; A[15] = 0; } // TODO: normalize N out from quantities! 
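/* Illustrative sketch (an assumption, not part of the original pipeline):
   generateA() packs the 6-dof twist x = (w0,w1,w2, v0,v1,v2) into the 4x4
   matrix A = [ [w]_x  v ; 0 0 0 0 ], which solveMotionCuda() later hands to
   expmCuda() to obtain the incremental rigid transform. The host-side mirror
   below reproduces the same element layout (in plain double instead of
   doublereal) and can be used to cross-check the device result in a unit
   test; it is never called from this file. */
static inline void generateAHost(const double *x, double *A) {
    A[0]  = 0.0;    A[1]  = -x[2];  A[2]  =  x[1];  A[3]  = x[3];
    A[4]  =  x[2];  A[5]  = 0.0;    A[6]  = -x[0];  A[7]  = x[4];
    A[8]  = -x[1];  A[9]  =  x[0];  A[10] = 0.0;    A[11] = x[5];
    A[12] = 0.0;    A[13] = 0.0;    A[14] = 0.0;    A[15] = 0.0;
}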
__global__ void linearFuseKernel(float *JtJDevExt, float *residual6DevExt, float weight1, float iN1, float *JtJDev, float *residual6Dev, float weight2, float iN2) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < 6) { residual6DevExt[idx] = iN1*weight1*residual6DevExt[idx]+iN2*weight2*residual6Dev[idx]; } JtJDevExt[idx] = iN1*weight1*JtJDevExt[idx]+iN2*weight2*JtJDev[idx]; } __global__ void conjugateGradientKernel(float *JtJDev, float *bb, doublereal tol, int maxSteps, doublereal *ADev) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx != 0) return; doublereal A[6*6]; doublereal x[6]; doublereal r[6]; doublereal b[6]; doublereal dir[6]; // copy matrix to local memory for speeding up access for (int i = 0; i < 36; i++) A[i] = (doublereal)JtJDev[i]; //for (int i = 0; i < 6; i++) A[i+i*6] += (doublereal)1e-8; for (int i = 0; i < 6; i++) { b[i] = (doublereal)bb[i]; x[i] = 0.0; r[i] = b[i]; dir[i] = b[i]; } int nSteps = 0; while (nSteps < maxSteps) { doublereal Adir[6]; matrixMultVec6(A,dir,Adir); //step length doublereal rr = dotProduct6(r,r); doublereal Adirdir = dotProduct6(Adir,dir); // compute abs(Adirdir), its numerically more stable than |Adirdir|: doublereal div = Adirdir; if (div < 0) div = -div; doublereal stepLength = 0.0; // prevent division by zero: if (div > tol) stepLength = rr/Adirdir; // update error: for (int i = 0; i < 6; i++) { r[i] -= stepLength*Adir[i]; } doublereal rr2 = dotProduct6(r,r); /* // early exit with previous x, (minimization step failed!) if (rr2 > rr) { generateA(x,ADev); return; } */ // update params: for (int i = 0; i < 6; i++) { x[i] += stepLength*dir[i];} // early exit, residual is below a threshold: if (sqrt(rr2) < tol) { generateA(x,ADev); return; } doublereal beta = rr2/rr; for (int i = 0; i < 6; i++) { dir[i] = r[i] + beta*dir[i]; } nSteps++; } generateA(x,ADev); } // only one block __global__ void choleskyKernel(float *JtJDev, float *bb, doublereal *ADev) { unsigned int idxI = threadIdx.x; unsigned int idxJ = threadIdx.y; __shared__ doublereal iA[SIZE][SIZE]; __shared__ doublereal B[SIZE]; // __shared__ float x[6]; bool firstThread = (idxI == 0 && idxJ == 0); // load data into local memory iA[idxJ][idxI] = (doublereal)JtJDev[idxJ*6+idxI]; if (idxJ == 0) B[idxI] = (doublereal)bb[idxI]; __syncthreads(); // single thread only: if (firstThread) { CHOLVEC P; // cholesky decomposition choldc1(6, iA,P); choldcsl2(6,iA,P); choleskyInverse(6,iA); } __syncthreads(); __shared__ doublereal x[6]; if (idxJ == 0) { x[idxI] = iA[idxI][0] * B[0] + iA[idxI][1] * B[1] + iA[idxI][2] * B[2] + iA[idxI][3] * B[3] + iA[idxI][4] * B[4] + iA[idxI][5] * B[5]; } __syncthreads(); // fill A(x) elements if (firstThread) generateA(x,ADev); } /* __global__ void dotProductKernel(float *vecA, float *vecB, int nblocks, float *blockScratch, float *result) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (threadIdx.x > 1023) return; if (idx >= nblocks*1024) return; __shared__ float sharedMem[1024]; sharedMem[threadIdx.x] = vecA[idx]*vecB[idx]; for(uint stride = 512; stride > 0; stride >>= 1) { __syncthreads(); if(threadIdx.x < stride) sharedMem[threadIdx.x] += sharedMem[threadIdx.x + stride]; } if (threadIdx.x == 0) blockScratch[blockIdx.x] = sharedMem[0]; __syncthreads(); // BUG here: blockSums might not be updated yet, cuda doesn't support block synchronization, only threads if(idx == 0) { float dotSum = 0.0f; for (int i = 0; i < nblocks; i++) dotSum += blockScratch[i]; result[0] = dotSum; } } */ extern "C" void warpBase(VertexBuffer2 *vbuffer,float *T) { if (vbuffer == 
NULL || T == NULL || vbuffer->devPtr == NULL) return; if (vbuffer->getVertexCount() >= 10014) { /*printf("linebuffer ran out!\n");*/ return; } float *vData = (float*)vbuffer->devPtr; int targetStride = vbuffer->getStride(); int rgbOffset = 0; if (targetStride == VERTEXBUFFER_STRIDE) { rgbOffset = 8; } else if (targetStride == BASEBUFFER_STRIDE) { rgbOffset = 3; } int freeVertex = vbuffer->getVertexCount(); dim3 cudaBlockSize(6,1,1); dim3 cudaGridSize(1,1,1); hipLaunchKernelGGL(( warpBaseKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,vbuffer->cudaStream, vData, T, freeVertex,targetStride,rgbOffset); // printf("new vertex amount : %d\n",vbuffer->getVertexCount()+2); vbuffer->setVertexAmount(vbuffer->getVertexCount()+2); checkCudaError("warpBase error"); } extern "C" void warpPoints(VertexBuffer2 *vbuffer, float *weightsDev, float *T, float *calibDataDev, VertexBuffer2 *baseBuf, ImagePyramid2 *grayPyramid) { if (vbuffer == NULL || T == NULL || vbuffer->devPtr == NULL || vbuffer->indexDevPtr == NULL || calibDataDev == NULL || weightsDev == NULL || baseBuf == NULL || baseBuf->devPtr == NULL || grayPyramid == NULL) return; float *imgData[3]; assert(grayPyramid->nLayers == 3); for (int i = 0; i < 3; i++) { imgData[i] = (float*)grayPyramid->getImageRef(i).devPtr; if (imgData[i] == NULL) { printf("warpPoints error: grayPyramid layer %d not locked! panik exit \n",i); return; } } int targetStride = baseBuf->getStride(); int rgbOffset = 0; if (targetStride == VERTEXBUFFER_STRIDE) { rgbOffset = 8; } else if (targetStride == BASEBUFFER_STRIDE) { rgbOffset = 3; } // enforce multiple of 1024 for element count -> max performance if (vbuffer->getElementsCount()%1024 != 0) { printf("warp points: vbuffer has wrong number of selected pixels!\n"); return; } float *vData = (float*)vbuffer->devPtr; int *iData = (int*)vbuffer->indexDevPtr; float *dstData = (float*)baseBuf->devPtr; dim3 cudaBlockSize(1024,1,1); dim3 cudaGridSize(vbuffer->getElementsCount()/cudaBlockSize.x,1,1); hipLaunchKernelGGL(( warpPointsKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,vbuffer->cudaStream, iData,vData,weightsDev,T,calibDataDev,dstData, imgData[0],imgData[1],imgData[2],grayPyramid->getImageRef(0).width,vbuffer->getStride(),targetStride,rgbOffset); checkCudaError("warpPoints error"); } extern "C" void interpolateResidual(VertexBuffer2 *vbuffer, VertexBuffer2 *vbufferCur, float *T, float *calibDataDev, ImagePyramid2 &grayPyramid, int layer, float *residual, float *zWeightsDev) { if (vbuffer == NULL || vbufferCur == NULL || T == NULL || vbuffer->devPtr == NULL || vbuffer->indexDevPtr == NULL || vbufferCur->devPtr == NULL || calibDataDev == NULL || residual == NULL || zWeightsDev == NULL) return; // enforce multiple of 1024 for element count -> max performance if (vbuffer->getElementsCount()%1024 != 0) { printf("interpolateResidual: vbuffer has wrong number of selected pixels!\n"); return; } float *imgData = (float*)grayPyramid.getImageRef(layer).devPtr; hipArray *cArray = (hipArray*)grayPyramid.getImageRef(layer).cArray; if (imgData == NULL/* || cArray == NULL*/) { printf("given image does not have data allocated!\n"); return; } /* // set texture parameters texC.addressMode[0] = hipAddressModeClamp; texC.addressMode[1] = hipAddressModeClamp; texC.filterMode = hipFilterModeLinear; texC.normalized = false; // bind the array to the texture hipBindTextureToArray(texC, cArray); */ int srcStride = vbuffer->getStride(); int dstStride = vbufferCur->getStride(); int width = grayPyramid.getImageRef(layer).width; int height = 
grayPyramid.getImageRef(layer).height; float *vData = (float*)vbuffer->devPtr; float *vDataCur = (float*)vbufferCur->devPtr; int *iData = (int*)vbuffer->indexDevPtr; int colorOffset = 0; if (srcStride == VERTEXBUFFER_STRIDE) { colorOffset = 14; if (layer == 1) { colorOffset = 17; } else if (layer == 2) { colorOffset = 20; } } else if (srcStride == COMPRESSED_STRIDE) { colorOffset = 6; if (layer == 1) { colorOffset = 7; } else if (layer == 2) { colorOffset = 8; } } int divisor = 1<<layer; float a = 1.0f/float(divisor); float b = 0.5f*(a-1.0f); dim3 cudaBlockSize(1024,1,1); dim3 cudaGridSize(vbuffer->getElementsCount()/cudaBlockSize.x,1,1); hipLaunchKernelGGL(( interpolateResidualKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,vbuffer->cudaStream, iData,vData,T, calibDataDev, a, b, colorOffset, imgData, width, height, vDataCur, grayPyramid.getImageRef(0).width, grayPyramid.getImageRef(0).height, residual, zWeightsDev, srcStride,dstStride); checkCudaError("interpolateResidual error"); } extern "C" void interpolateResidual2(VertexBuffer2 *vbuffer, float *T, float *calibDataDev, ImagePyramid2 &grayPyramid, int layer, float *zCurrentDev, float *zWeightsDev, float *residual, hipStream_t stream) { if (vbuffer == NULL || T == NULL || vbuffer->devPtr == NULL || vbuffer->indexDevPtr == NULL || calibDataDev == NULL || residual == NULL || zCurrentDev == NULL || zWeightsDev == NULL) return; // enforce multiple of 1024 for element count -> max performance if (vbuffer->getElementsCount()%1024 != 0) { printf("interpolateResidual: vbuffer has wrong number of selected pixels!\n"); return; } if (layer > grayPyramid.nLayers-1) { printf("interpolateResidual: invalid layer number!\n"); return; } float *imgData = (float*)grayPyramid.getImageRef(layer).devPtr; if (imgData == NULL) { printf("given image does not have data allocated!\n"); return; } int srcStride = vbuffer->getStride(); int width = grayPyramid.getImageRef(layer).width; int height = grayPyramid.getImageRef(layer).height; float *vData = (float*)vbuffer->devPtr; int *iData = (int*)vbuffer->indexDevPtr; int colorOffset = 0; if (srcStride == VERTEXBUFFER_STRIDE) { colorOffset = 14; if (layer == 1) { colorOffset = 17; } else if (layer == 2) { colorOffset = 20; } else if (layer == 3) { colorOffset = 23; } } else if (srcStride == COMPRESSED_STRIDE) { colorOffset = 6; if (layer == 1) { colorOffset = 7; } else if (layer == 2) { colorOffset = 8; } else { printf("compressed stride does not have 4th layer attributes!\n"); return; } } int divisor = 1<<layer; float a = 1.0f/float(divisor); float b = 0.5f*(a-1.0f); dim3 cudaBlockSize(1024,1,1); dim3 cudaGridSize(vbuffer->getElementsCount()/cudaBlockSize.x,1,1); //printf("%d %d\n",vbuffer->getElementsCount(),cudaBlockSize.x); hipLaunchKernelGGL(( interpolateResidualKernel2), dim3(cudaGridSize),dim3(cudaBlockSize),0,stream, iData,vData, T, calibDataDev, a, b, colorOffset, imgData, width, height, zCurrentDev, zWeightsDev, residual, srcStride, grayPyramid.getImageRef(0).width,grayPyramid.getImageRef(0).height); checkCudaError("interpolateResidual2 error"); } /* static int maxDist2Reso = 20*20; static float *expTableDev = NULL; extern "C" void initExpTable(int maxD2) { if (expTableDev == NULL) { maxDist2Reso = maxD2; hipMalloc((void **)&expTableDev, maxDist2Reso*sizeof(float)); float *expTable = new float[resolution]; for (int i = 0; i < resolution; i++) { expTable[i] = exp(-50.0f*float(i)/(resolution*50.0f)); } } } extern "C" void releaseCudaDotProduct() { if (blockSumDev != NULL) { hipFree(blockSumDev); blockSumDev = 
NULL; hipFree(ADev); ADev = NULL; } }*/ extern "C" void filterDepthIIR(VertexBuffer2 *vbuffer, VertexBuffer2 *vbufferCur, float *T, float *calibDataDev, float *weightsDev, int width, int height, float weightThreshold) { if (vbuffer == NULL || vbufferCur == NULL || T == NULL || vbuffer->devPtr == NULL || vbuffer->indexDevPtr == NULL || vbufferCur->devPtr == NULL || calibDataDev == NULL || weightsDev == NULL) return; // enforce multiple of 1024 for element count -> max performance if (vbuffer->getElementsCount()%1024 != 0) { printf("filterDepthIIR: vbuffer has wrong number of selected pixels!\n"); return; } // float *vData = (float*)vbuffer->devPtr; // float *vDataCur = (float*)vbufferCur->devPtr; // int *iData = (int*)vbuffer->indexDevPtr; dim3 cudaBlockSize(1024,1,1); dim3 cudaGridSize(vbuffer->getElementsCount()/cudaBlockSize.x,1,1); // filterDepthIIRKernel<<<cudaGridSize,cudaBlockSize,0,vbuffer->cudaStream>>>(iData,vData,T, calibDataDev, width, height, vDataCur, weightsDev, weightThreshold); checkCudaError("filterDepthIIR error"); //printf("executing iir depth filter\n"); fflush(stdin); fflush(stdout); } extern "C" void compressVertexBuffer(VertexBuffer2 *vbufferSrc, VertexBuffer2 *vbufferDst, bool rgbVisualization) { if (vbufferSrc == NULL || vbufferSrc->devPtr == NULL || vbufferSrc->indexDevPtr == NULL) return; if (vbufferDst == NULL || vbufferDst->devPtr == NULL || vbufferDst->indexDevPtr == NULL) return; // enforce multiple of 1024 for element count -> max performance if (vbufferSrc->getElementsCount()%1024 != 0) { printf("compressVertexBuffer: vbufferSrc has wrong number of selected pixels!\n"); return; } if (vbufferDst->getMaxVertexCount() < vbufferSrc->getElementsCount()) { printf("vbufferDst : %d, vbufferSrc: %d\n",vbufferDst->getElementsCount(),vbufferSrc->getElementsCount()); printf("compressVertexBuffer: vbufferDst max vertex size != vbufferSrc element size!\n"); fflush(stdin); fflush(stdout); return; } int srcStride = vbufferSrc->getStride(); int dstStride = vbufferDst->getStride(); vbufferDst->setElementsCount(vbufferSrc->getElementsCount()); float *vDataSrc = (float*)vbufferSrc->devPtr; int *iDataSrc = (int*)vbufferSrc->indexDevPtr; float *vDataDst = (float*)vbufferDst->devPtr; int *iDataDst = (int*)vbufferDst->indexDevPtr; dim3 cudaBlockSize(1024,1,1); dim3 cudaGridSize(vbufferDst->getElementsCount()/cudaBlockSize.x,1,1); hipLaunchKernelGGL(( compressVertexBufferKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,vbufferDst->cudaStream, iDataSrc,vDataSrc,iDataDst,vDataDst,srcStride,dstStride,rgbVisualization); char buf[512]; sprintf(buf,"compressVertexBufferKernel() execution failed, arguments: %d, %d, %d, elems: %d\n",srcStride,dstStride,int(rgbVisualization),vbufferDst->getElementsCount()); getLastCudaError(buf); // checkCudaError("compressVertexBuffer error"); } extern "C" void compressVertexBuffer2(int *indicesExt,float *verticesExt,int pixelSelectionAmount,int srcStride, VertexBuffer2 *vbufferDst) { if (verticesExt == NULL || indicesExt == NULL) return; if (vbufferDst == NULL || vbufferDst->devPtr == NULL || vbufferDst->indexDevPtr == NULL) return; // enforce multiple of 1024 for element count -> max performance if (pixelSelectionAmount % 1024 != 0) { printf("compressVertexBuffer2: wrong number of selected pixels!\n"); return; } int dstStride = vbufferDst->getStride(); if (vbufferDst->getMaxVertexCount() < pixelSelectionAmount) { printf("vbufferDst : %d, vbufferSrc: %d\n",vbufferDst->getElementsCount(),pixelSelectionAmount); printf("compressVertexBuffer2: vbufferDst max 
vertex size != vbufferSrc element size!\n"); fflush(stdin); fflush(stdout); return; } bool rgbVisualization = false; float *vDataSrc = verticesExt; int *iDataSrc = indicesExt; float *vDataDst = (float*)vbufferDst->devPtr; int *iDataDst = (int*)vbufferDst->indexDevPtr; dim3 cudaBlockSize(1024,1,1); dim3 cudaGridSize(pixelSelectionAmount/cudaBlockSize.x,1,1); hipLaunchKernelGGL(( compressVertexBufferKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,vbufferDst->cudaStream, iDataSrc,vDataSrc,iDataDst,vDataDst,srcStride,dstStride,rgbVisualization); vbufferDst->setElementsCount(pixelSelectionAmount); checkCudaError("compressVertexBuffer2 error"); } extern "C" void precomputeJacobian4Cuda(VertexBuffer2 *vbuffer, float *calibDataDev, float *jacobian1Dev, float *jacobian2Dev, float *jacobian3Dev, float *jacobian4Dev, float optScaleIn, hipStream_t stream) { if (vbuffer == NULL || vbuffer->devPtr == NULL || vbuffer->indexDevPtr == NULL || jacobian1Dev == NULL || jacobian2Dev == NULL || jacobian3Dev == NULL|| jacobian4Dev == NULL) return; // enforce multiple of 1024 for element count -> max performance if (vbuffer->getElementsCount()%1024 != 0) { printf("precomputeJacobian: vbuffer has wrong number of selected pixels!\n"); return; } int stride = vbuffer->getStride(); float *vData = (float*)vbuffer->devPtr; int *iData = (int*)vbuffer->indexDevPtr; dim3 cudaBlockSize(512,1,1); dim3 cudaGridSize(vbuffer->getElementsCount()/cudaBlockSize.x,1,1); hipLaunchKernelGGL(( precomputeJacobian4Kernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,stream, iData,vData,calibDataDev,vbuffer->getElementsCount(),jacobian1Dev,jacobian2Dev,jacobian3Dev,jacobian4Dev,stride, optScaleIn); checkCudaError("precomputeJacobian4Cuda error"); } extern "C" void precomputeJacobianCuda(VertexBuffer2 *vbuffer, float *calibDataDev, float *jacobian1Dev, float *jacobian2Dev, float *jacobian3Dev, float optScaleIn) { if (vbuffer == NULL || vbuffer->devPtr == NULL || vbuffer->indexDevPtr == NULL || jacobian1Dev == NULL || jacobian2Dev == NULL || jacobian3Dev == NULL) return; // enforce multiple of 1024 for element count -> max performance if (vbuffer->getElementsCount()%1024 != 0) { printf("precomputeJacobian: vbuffer has wrong number of selected pixels!\n"); return; } int stride = vbuffer->getStride(); float *vData = (float*)vbuffer->devPtr; int *iData = (int*)vbuffer->indexDevPtr; dim3 cudaBlockSize(512,1,1); dim3 cudaGridSize(vbuffer->getElementsCount()/cudaBlockSize.x,1,1); hipLaunchKernelGGL(( precomputeJacobianKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,vbuffer->cudaStream, iData,vData,calibDataDev,vbuffer->getElementsCount(),jacobian1Dev,jacobian2Dev,jacobian3Dev,stride, optScaleIn); checkCudaError("precomputeJacobianCuda error"); } /* extern "C" void precomputeJacobianUncompressedCuda(VertexBuffer2 *vbuffer, float *calibDataDev, float *jacobianDev1, float *jacobianDev2, float *jacobianDev3) { if (vbuffer == NULL || vbuffer->devPtr == NULL || vbuffer->indexDevPtr == NULL || jacobian1Dev == NULL || jacobian2Dev == NULL || jacobian3Dev == NULL) return; // enforce multiple of 1024 for element count -> max performance if (vbuffer->getElementsCount()%1024 != 0) { printf("precomputeJacobian: vbuffer has wrong number of selected pixels!\n"); return; } int stride = vbuffer->getStride(); float *vData = (float*)vbuffer->devPtr; int *iData = (int*)vbuffer->indexDevPtr; dim3 cudaBlockSize(512,1,1); dim3 cudaGridSize(vbuffer->getElementsCount()/cudaBlockSize.x,1,1); 
precomputeJacobianUncompressedKernel<<<cudaGridSize,cudaBlockSize,0,vbuffer->cudaStream>>>(iData,vData,calibDataDev,vbuffer->getElementsCount(),jacobian1Dev,jacobian2Dev,jacobian3Dev,stride); checkCudaError("precomputeJacobianCuda error"); }*/ extern "C" void weightJacobian(float *jacobianTDev, float *weights, int count, float *weightedJacobianTDev, hipStream_t stream) { if (jacobianTDev == NULL || count < 1024 || weightedJacobianTDev == NULL) return; // enforce multiple of 1024 for element count -> max performance if (count%1024 != 0) { printf("wrong count for weightJacobian\n"); return; } dim3 cudaBlockSize(1024,1,1); dim3 cudaGridSize(count/cudaBlockSize.x,1,1); hipLaunchKernelGGL(( weightJacobianKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,stream, jacobianTDev, weights, count, weightedJacobianTDev); checkCudaError("weightJacobian error"); } static float *blockSumDev = NULL; static doublereal *ADev = NULL; extern "C" void initCudaDotProduct() { if (blockSumDev == NULL) { hipMalloc((void **)&blockSumDev, 1024*sizeof(float)); hipMemset(blockSumDev,0,1024*sizeof(float)); hipMalloc((void **)&ADev, 16*sizeof(doublereal)); hipMemset(ADev, 0, 16*sizeof(doublereal)); } } extern "C" void releaseCudaDotProduct() { if (blockSumDev != NULL) { hipFree(blockSumDev); blockSumDev = NULL; hipFree(ADev); ADev = NULL; } } extern "C" void dotProductCuda(float *vecA, float *vecB, int count, float *resultA, float *resultB, hipStream_t stream) { if (vecA == NULL || vecB == NULL || resultA == NULL || count < 1024 || blockSumDev == NULL) { printf("invalid input to dotProductCuda!\n"); return; } // enforce multiple of 1024 for element count -> max performance if (count%1024 != 0) { printf("count has wrong number of pixels!\n"); fflush(stdout); return; } int nthreads = 256;//512; int nblocks = count/nthreads; reduceProducts<float>(count, nthreads, nblocks, 6, vecA, vecB, blockSumDev,stream); dim3 cudaBlockSize(1,1,1); dim3 cudaGridSize(1,1,1); if (resultB != NULL) { hipLaunchKernelGGL(( sumElemsKernel2), dim3(cudaGridSize),dim3(cudaBlockSize),0,stream, blockSumDev,nblocks,resultA,resultB); } else { hipLaunchKernelGGL(( sumElemsKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,stream, blockSumDev,nblocks,resultA); } checkCudaError("dotProductCuda error"); } extern "C" void JTresidualCuda(float *JT, float *residual, int count, float *result6, hipStream_t stream) { if (JT == NULL || residual == NULL || count < 1024 || result6 == NULL) { printf("invalid input to JTresidualCuda!\n"); return; } // enforce multiple of 1024 for element count -> max performance if (count%1024 != 0) { printf("count has wrong number of pixels!\n"); return; } dotProductCuda(JT+0*count, residual, count, result6+0,NULL,stream); dotProductCuda(JT+1*count, residual, count, result6+1,NULL,stream); dotProductCuda(JT+2*count, residual, count, result6+2,NULL,stream); dotProductCuda(JT+3*count, residual, count, result6+3,NULL,stream); dotProductCuda(JT+4*count, residual, count, result6+4,NULL,stream); dotProductCuda(JT+5*count, residual, count, result6+5,NULL,stream); } extern "C" void JTJCuda(float *JT,int count, float *JtJDev, hipStream_t stream) { if (JT == NULL || count < 1024 || JtJDev == NULL) { printf("invalid parameters to JTJCuda.\n"); return; } // enforce multiple of 1024 for element count -> max performance if (count%1024 != 0) { printf("count has wrong number of pixels!\n"); fflush(stdout); return; } for (int j = 0; j < 6; j++) { for (int i = j; i < 6; i++) { dotProductCuda(JT+j*count, JT+i*count, count, JtJDev+i+j*6, 
JtJDev+i*6+j,stream); } } } void dumpp(const char *str, const float *M, int rows, int cols) { printf("%s:\n",str); for (int j = 0; j < rows; j++) { for (int i = 0; i < cols; i++) printf("%e ",M[i+j*cols]); printf("\n"); } } extern "C" void linearFuseCuda(float *JtJDevExt,float *residual6DevExt, float weight1, int N1, float *JtJDev, float *residual6Dev, float weight2, int N2, hipStream_t stream) { if (JtJDevExt == NULL || JtJDev == NULL || residual6DevExt == NULL || residual6Dev == NULL) { printf("linearFuseCuda: invalid parameters.\n"); return; } double invN1 = 1.0/double(N1); double invN2 = 1.0/double(N2); dim3 cudaBlockSize(36,1,1); dim3 cudaGridSize(1,1,1); hipLaunchKernelGGL(( linearFuseKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,stream, JtJDevExt,residual6DevExt, weight1, float(invN1), JtJDev, residual6Dev, weight2, float(invN2)); checkCudaError("linearFuseCuda error"); } extern "C" void solveMotionCuda(float *JtJDev, float *b, float *TDev, float scaleOut, hipStream_t stream) { if (JtJDev == NULL || b == NULL || TDev == NULL || ADev == NULL) { printf("invalid parameters to solveMotionCuda.\n"); return; } /* float delay = 0.0f; float delays[4] = {0,0,0,0}; int N = 1; hipEvent_t start,stop; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start,0); */ /* float *JJ = new float[36]; hipMemcpyAsync(&JJ[0],JtJDev,sizeof(float)*36,hipMemcpyDeviceToHost,stream); hipStreamSynchronize(stream); dumpMatrix("JtJ",&JJ[0],6,6); delete[] JJ; */ /* for (int i = 0; i < N; i++) {*/ doublereal tol=1e-8; int maxSteps = 6; dim3 cudaBlockSize(1,1,1); dim3 cudaGridSize(1,1,1); hipLaunchKernelGGL(( conjugateGradientKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,stream, JtJDev,b,tol,maxSteps,ADev); // } /* doublereal *A = new doublereal[16]; hipMemcpyAsync(&A[0],ADev,sizeof(doublereal)*16,hipMemcpyDeviceToHost,stream); hipStreamSynchronize(stream); dumpMatrix("A",(double*)&A[0],4,4); delete[] A;*/ /* // TODO: load ADev into host memory and check whether the value scales differ greatly between translation and rotation params?
static float xmin[6] = {1e6,1e6,1e6,1e6,1e6,1e6}; static float xmax[6] = {0,0,0,0,0,0}; A[0] = 0; A[1] = -x[2]; A[2] = x[1]; A[3] = x[3]; A[4] = x[2];A[5] = 0; A[6] =-x[0]; A[7] = x[4]; A[8] =-x[1];A[9] = x[0]; A[10] = 0; A[11] = x[5]; A[12] = 0; A[13] = 0; A[14] = 0; A[15] = 0; float *A = new float[16]; hipMemcpy(A,ADev,sizeof(float)*16,hipMemcpyDeviceToHost); float angle = sqrt(A[6]*A[6]+A[2]*A[2]+A[1]*A[1]); if (angle > xmax[0]) xmax[0] = angle; if (angle > xmax[1]) xmax[1] = angle; if (angle > xmax[2]) xmax[2] = angle; if (fabs(A[3]) > xmax[3]) xmax[3] = fabs(A[3]); if (fabs(A[7]) > xmax[4]) xmax[4] = fabs(A[7]); if (fabs(A[11])> xmax[5]) xmax[5] = fabs(A[11]); dumpp("xmax",xmax,1,6); delete[] A; */ /* hipEventRecord(stop,0); hipDeviceSynchronize(); hipEventElapsedTime(&delay, start, stop); delays[0] += delay; hipEventRecord(start,0); dim3 cudaBlockSize(6,6,1); dim3 cudaGridSize(1,1,1); for (int i = 0; i < N; i++) { choleskyKernel<<<cudaGridSize,cudaBlockSize,0,0>>>(JtJDev,b,ADev); } hipEventRecord(stop,0); hipDeviceSynchronize(); hipEventElapsedTime(&delay, start, stop); delays[1] += delay; for (int i = 0; i < N; i++) {*/ expmCuda(ADev, TDev, scaleOut, stream); /*} hipEventRecord(stop,0); hipDeviceSynchronize(); hipEventElapsedTime(&delay, start, stop); delays[0] += delay; printf("expm: %fms\n",delays[0]/N); */ // printf("cgm: %fms, chol: %fms\n",delays[0]/N,delays[1]/N); //hipEventDestroy(start); //hipEventDestroy(stop); checkCudaError("solveMotion error"); } extern "C" void matrixMult4Cuda(float *A, float *B, float *C) { if (A == NULL || B == NULL || C == NULL) { printf("invalid arguments to matrixMult4Cuda\n"); return; } dim3 cudaBlockSize(1,1,1); int nblocks = 1; dim3 cudaGridSize(nblocks,1,1); hipLaunchKernelGGL(( matrixMult4Kernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,0, A,B,C); //hipDeviceSynchronize(); } extern "C" void matrixMult4NormalizedCuda(float *A, float *B, float *C) { if (A == NULL || B == NULL || C == NULL) { printf("invalid arguments to matrixMult4Cuda\n"); return; } dim3 cudaBlockSize(1,1,1); int nblocks = 1; dim3 cudaGridSize(nblocks,1,1); hipLaunchKernelGGL(( matrixMult4NormalizedKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,0, A,B,C); } extern "C" void invertPoseCuda(float *A, float *iA, int N, hipStream_t stream) { if (A == NULL || iA == NULL || N < 1 || N > 1024) { printf("invalid arguments to invertPoseCuda\n"); return; } dim3 cudaBlockSize(N,1,1); int nblocks = 1; dim3 cudaGridSize(nblocks,1,1); hipLaunchKernelGGL(( invertMatrix4Kernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,stream, A,iA,N); } extern "C" void convertMatrixToPosAxisAngleCuda(float *A, float *posAxisAngle, int N) { if (A == NULL || posAxisAngle == NULL || N < 1) { printf("invalid arguments to convertMatrixToPosAxisAngleCuda\n"); return; } dim3 cudaBlockSize(N,1,1); int nblocks = 1; dim3 cudaGridSize(nblocks,1,1); hipLaunchKernelGGL(( convertToAxisAngleKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,0, A,posAxisAngle,N); } extern "C" void filterPoseCuda(float *posAxisAngle, float *weightsDev, int N, float *T) { if (T == NULL || posAxisAngle == NULL || weightsDev == NULL || N < 1) { printf("invalid arguments to filterPoseCuda\n"); return; } dim3 cudaBlockSize(1,1,1); int nblocks = 1; dim3 cudaGridSize(nblocks,1,1); hipLaunchKernelGGL(( filterPoseKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,0, posAxisAngle,weightsDev,N,T); } extern "C" void collectPointsCuda(VertexBuffer2 *vbufferSrc, float *Tsrc, int collectedPoints256, VertexBuffer2 *vbufferDst, float *Tdst) { if (vbufferSrc 
== NULL || vbufferSrc->devPtr == NULL || vbufferSrc->indexDevPtr == NULL) { printf("invalid source vbuffer (collectPointsCuda)\n"); return; } if (vbufferDst == NULL || vbufferDst->devPtr == NULL || vbufferDst->indexDevPtr == NULL) { printf("invalid destination vbuffer (collectPointsCuda)\n"); return; } if (Tsrc == NULL || Tdst == NULL || collectedPoints256 < 1) { printf("invalid Tsrc, Tdst or collectedPoints (collectPointsCuda)\n"); return; } // enforce multiple of 1024 for element count -> max performance if (vbufferSrc->getElementsCount()%256 != 0 || vbufferSrc->getElementsCount() <= 0) { printf("collectPointsCuda: vbuffer has wrong number of selected points!\n"); return; } int stride = vbufferSrc->getStride(); float *vDataSrc = (float*)vbufferSrc->devPtr; int *iDataSrc = (int*)vbufferSrc->indexDevPtr; float *vDataDst = (float*)vbufferDst->devPtr; // int *iDataDst = (int*)vbufferDst->indexDevPtr; int collectedPoints = collectedPoints256*256; int existingPoints = vbufferDst->getElementsCount(); int skipper = vbufferSrc->getElementsCount()/collectedPoints; if (skipper < 1) skipper = 1; dim3 cudaBlockSize(256,1,1); dim3 cudaGridSize(collectedPoints/cudaBlockSize.x,1,1); hipLaunchKernelGGL(( collectPointsKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,vbufferSrc->cudaStream, iDataSrc,vDataSrc,Tsrc, skipper,&vDataDst[existingPoints*stride],Tdst, vbufferSrc->getStride()); vbufferDst->setElementsCount(existingPoints+collectedPoints); checkCudaError("collectPointsCuda error"); // printf("elem count: %d, collected: %d, skipper: %d\n",vbufferSrc->getElementsCount(),collectedPoints,skipper); //fflush(stdin); //fflush(stdout); } extern "C" void collectPointsCuda2(VertexBuffer2 *vbufferSrc, float *Tsrc, int collectedPoints256, float *vertexImageDev, float *Tdst) { if (vbufferSrc == NULL || vbufferSrc->devPtr == NULL || vbufferSrc->indexDevPtr == NULL) { printf("invalid source vbuffer (collectPointsCuda)\n"); return; } if (vertexImageDev == NULL) { printf("invalid destination vbuffer (collectPointsCuda)\n"); return; } if (Tsrc == NULL || Tdst == NULL || collectedPoints256 < 1) { printf("invalid Tsrc, Tdst or collectedPoints (collectPointsCuda)\n"); return; } // enforce multiple of 1024 for element count -> max performance if (vbufferSrc->getElementsCount()%256 != 0 || vbufferSrc->getElementsCount() <= 0) { printf("collectPointsCuda: vbuffer has wrong number of selected points!\n"); return; } float *vDataSrc = (float*)vbufferSrc->devPtr; int *iDataSrc = (int*)vbufferSrc->indexDevPtr; int collectedPoints = collectedPoints256*256; int skipper = vbufferSrc->getElementsCount()/collectedPoints; if (skipper < 1) skipper = 1; dim3 cudaBlockSize(256,1,1); dim3 cudaGridSize(collectedPoints/cudaBlockSize.x,1,1); hipLaunchKernelGGL(( collectPointsKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,vbufferSrc->cudaStream, iDataSrc,vDataSrc,Tsrc, skipper,vertexImageDev,Tdst, vbufferSrc->getStride()); checkCudaError("collectPointsCuda error"); // printf("elem count: %d, collected: %d, skipper: %d\n",vbufferSrc->getElementsCount(),collectedPoints,skipper); //fflush(stdin); //fflush(stdout); } extern "C" void setPointIntensityCuda(VertexBuffer2 *vbuffer, float *Tsrc,float *Tdst,ImagePyramid2 *grayPyramid) { if (vbuffer == NULL || vbuffer->devPtr == NULL || vbuffer->indexDevPtr == NULL) { printf("invalid source vbuffer (setPointIntensityCuda)\n"); return; } if (Tsrc == NULL || Tdst == NULL) { printf("invalid Tsrc or Tdst (setPointIntensityCuda)\n"); return; } // enforce multiple of 1024 for element count -> max 
performance if (vbuffer->getElementsCount()%1024 != 0 || vbuffer->getElementsCount() <= 0) { printf("setPointIntensityCuda: vbuffer has wrong number of selected points!\n"); return; } if (grayPyramid == NULL || grayPyramid->getImagePtr(0) == NULL || grayPyramid->getImagePtr(1) == NULL || grayPyramid->getImagePtr(2) == NULL) { printf("setPointIntensityCuda: graypyramid is invalid\n"); return; } float *imgData[3]; assert(grayPyramid->nLayers == 3); for (int i = 0; i < 3; i++) { imgData[i] = (float*)grayPyramid->getImageRef(i).devPtr; if (imgData[i] == NULL) { printf("setPointIntensityCuda error: grayPyramid layer %d not locked! panik exit \n",i); return; } if (grayPyramid->getImageRef(i).renderable) { printf("setPointIntensityCuda error %d: grayPyramid layer is set renderable for no reason!\n",i); } } // float *vDataSrc = (float*)vbuffer->devPtr; // int *iDataSrc = (int*)vbuffer->indexDevPtr; int numPoints = vbuffer->getElementsCount(); dim3 cudaBlockSize(1024,1,1); dim3 cudaGridSize(numPoints/cudaBlockSize.x,1,1); //collectPointsKernel<<<cudaGridSize,cudaBlockSize,0,vbufferSrc->cudaStream>>>(iDataSrc,vDataSrc,Tsrc, skipper,vertexImageDev,Tdst); checkCudaError("setPointIntensityCuda error"); } extern "C" void collectPointsIntoImageCuda(VertexBuffer2 *vbufferSrc, float *Tsrc, int collectedPoints256, float *vertexImageDev, float *Tdst, int width, int height, float *calibDataDev) { if (vbufferSrc == NULL || vbufferSrc->devPtr == NULL || vbufferSrc->indexDevPtr == NULL) { printf("invalid source vbuffer (collectPointsIntoImageCuda)\n"); return; } if (vertexImageDev == NULL) { printf("invalid destination vbuffer (collectPointsIntoImageCuda)\n"); return; } if (Tsrc == NULL || Tdst == NULL || collectedPoints256 < 1) { printf("invalid Tsrc, Tdst or collectedPoints (collectPointsIntoImageCuda)\n"); return; } // enforce multiple of 1024 for element count -> max performance if (vbufferSrc->getElementsCount()%256 != 0 || vbufferSrc->getElementsCount() <= 0) { printf("collectPointsIntoImageCuda: vbuffer has wrong number of selected points!\n"); return; } float *vDataSrc = (float*)vbufferSrc->devPtr; int *iDataSrc = (int*)vbufferSrc->indexDevPtr; int collectedPoints = collectedPoints256*256; int skipper = vbufferSrc->getElementsCount()/collectedPoints; if (skipper < 1) skipper = 1; dim3 cudaBlockSize(256,1,1); dim3 cudaGridSize(collectedPoints/cudaBlockSize.x,1,1); hipLaunchKernelGGL(( collectPointsIntoImageKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,vbufferSrc->cudaStream, iDataSrc,vDataSrc,Tsrc, skipper,vertexImageDev,Tdst,width,height,calibDataDev, vbufferSrc->getStride()); checkCudaError("collectPointsIntoImageCuda error"); } __global__ void vecProductKernel(float *vecA,float *vecB,float *result){ int idx = blockIdx.x*blockDim.x+threadIdx.x; result[idx] = vecA[idx]*vecB[idx]; } extern "C" void vectorProductCuda(float *vecA,float *vecB,int count,float *result, hipStream_t stream) { if (vecA == NULL || vecB == NULL || result == NULL || count < 1024) { printf("invalid input to vectorProductCuda!\n"); return; } // enforce multiple of 1024 for element count -> max performance if (count%1024 != 0) { printf("count has wrong number of pixels! 
(vectorProductCuda)\n"); return; } dim3 cudaBlockSize(1024,1,1); dim3 cudaGridSize(count/cudaBlockSize.x,1,1); hipLaunchKernelGGL(( vecProductKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,stream, vecA,vecB,result); checkCudaError("vectorProductCuda error"); } __global__ void listKernel(float *vData, int stride, float *selectedPoints) { int idx = blockIdx.x*blockDim.x+threadIdx.x; // make sure stride has matching number of elements stored here! int idxStride = idx*stride; selectedPoints[idx*2+0] = vData[idxStride+6];//r3.x;//p_1.x; selectedPoints[idx*2+1] = vData[idxStride+7];//r3.y;//p_1.y; } __global__ void listSelectedRefKernel(int *indexPointer, float *vData, int stride, float *selectedPoints, float *selectionColors) { int idx = blockIdx.x*blockDim.x+threadIdx.x; // make sure stride has matching number of elements stored here! int idxStride = indexPointer[idx]*stride; selectedPoints[idx*2+0] = vData[idxStride+6]; selectedPoints[idx*2+1] = vData[idxStride+7]; selectionColors[idx] = vData[idxStride+13]; } __global__ void listSelectedCurKernel(int *indexPointer, float *vData, float *calibDataDev, float *T, int stride, float *selectedPoints, float *selectionColors) { int idx = blockIdx.x*blockDim.x+threadIdx.x; // make sure stride has matching number of elements stored here! int idxStride = indexPointer[idx]*stride; float *kc = &calibDataDev[KcR_OFFSET]; float *KR = &calibDataDev[KR_OFFSET]; float3 p3,r3; p3.x = vData[idxStride+0]; p3.y = vData[idxStride+1]; p3.z = vData[idxStride+2]; matrixMultVec4(T, p3, r3); float2 p_1; float2 pu; pu.x = r3.x / r3.z; pu.y = r3.y / r3.z; distortPoint(pu,kc,KR,p_1); selectedPoints[idx*2+0] = p_1.x; selectedPoints[idx*2+1] = p_1.y; selectionColors[idx] = 0.5f;//vData[idxStride+13]; } extern "C" void listSelectedRefCuda(VertexBuffer2 *vbuffer, float *selectionPointsDev, float *selectionColorsDev) { if (vbuffer == NULL || vbuffer->devPtr == NULL || vbuffer->indexDevPtr == NULL || selectionColorsDev == NULL) { printf("listSelectedRefCuda: null pointer given!\n"); return; } // enforce multiple of 1024 for element count -> max performance if (vbuffer->getElementsCount()%512 != 0) { printf("listSelectedRefCuda: vbuffer has wrong number of selected pixels! 
(%d)\n",vbuffer->getElementsCount()); return; } int *indexPointer = (int*)vbuffer->indexDevPtr; float *vertexData = (float*)vbuffer->devPtr; int nElements = vbuffer->getElementsCount(); dim3 cudaBlockSize(512,1,1); dim3 cudaGridSize(nElements/cudaBlockSize.x,1,1); hipLaunchKernelGGL(( listSelectedRefKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,vbuffer->cudaStream, indexPointer,vertexData,vbuffer->getStride(),selectionPointsDev,selectionColorsDev); /* float *vertexData = (float*)vbuffer->devPtr; int nElements = vbuffer->getVertexCount(); dim3 cudaBlockSize(512,1,1); dim3 cudaGridSize(nElements/cudaBlockSize.x,1,1); listKernel<<<cudaGridSize,cudaBlockSize,0,vbuffer->cudaStream>>>(vertexData,vbuffer->getStride(),selectionPointsDev); */ checkCudaError("listSelectedRefCuda error"); } extern "C" void listSelectedCurCuda(VertexBuffer2 *vbuffer, float *calibDataDev, float *TrelDev, float *selectionPointsDev, float *selectionColorsDev, hipStream_t stream) { if (vbuffer == NULL || vbuffer->devPtr == NULL || vbuffer->indexDevPtr == NULL || selectionPointsDev == NULL || selectionColorsDev == NULL) { printf("listSelectedCurCuda: null pointer given!\n"); return; } // enforce multiple of 1024 for element count -> max performance if (vbuffer->getElementsCount()%512 != 0) { printf("listSelectedCurCuda: vbuffer has wrong number of selected pixels! (%d)\n",vbuffer->getElementsCount()); return; } int *indexPointer = (int*)vbuffer->indexDevPtr; float *vertexData = (float*)vbuffer->devPtr; int nElements = vbuffer->getElementsCount(); dim3 cudaBlockSize(512,1,1); dim3 cudaGridSize(nElements/cudaBlockSize.x,1,1); hipLaunchKernelGGL(( listSelectedCurKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,stream, indexPointer,vertexData,calibDataDev, TrelDev, vbuffer->getStride(),selectionPointsDev,selectionColorsDev); /* float *vertexData = (float*)vbuffer->devPtr; int nElements = vbuffer->getVertexCount(); dim3 cudaBlockSize(512,1,1); dim3 cudaGridSize(nElements/cudaBlockSize.x,1,1); listKernel<<<cudaGridSize,cudaBlockSize,0,vbuffer->cudaStream>>>(vertexData,vbuffer->getStride(),selectionPointsDev); */ checkCudaError("listSelectedCurCuda error"); } __global__ void xyz2DiffKernel(int *iData, float *vData, int vWidth, int vHeight, float *T, float *calibDataDev, float a, float b, int refColorOffset, float *imgData, int width, int height, int srcStride, float *diffImage) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int vindex = iData[idx]; int idxStride = vindex*srcStride; float *KR = &calibDataDev[KR_OFFSET]; float *kc = &calibDataDev[KcR_OFFSET]; float *TLR = &calibDataDev[TLR_OFFSET]; float3 p3,r3_ir,r3; p3.x = vData[idxStride+0]; p3.y = vData[idxStride+1]; p3.z = vData[idxStride+2]; matrixMultVec4(T, p3, r3_ir); // reference IR -> current IR matrixMultVec4(TLR, r3_ir, r3); // current IR -> current RGB float2 pu,p2; pu.x = r3.x / r3.z; pu.y = r3.y / r3.z; distortPoint(pu,kc,KR,p2); // resolution tweak: float2 p; p.x = a*p2.x + b; p.y = a*p2.y + b; float iResidual = 1.0f; // set max residual value for points outside fov int xdi = (int)p.x; int ydi = (int)p.y; if (xdi >= 0 && ydi >= 0 && xdi < width-1 && ydi < height-1) { float fx = p.x - xdi; float fy = p.y - ydi; float color = 0; bilinearInterpolation(xdi, ydi, fx, fy, width, imgData, color); iResidual = fabs(vData[idxStride+refColorOffset] - color); // residual range [-1,1] } int x = vindex % vWidth; int y = (vindex - x)/vWidth; diffImage[x+y*vWidth] = min(iResidual*25.0f,1); } extern "C" void xyz2DiffCuda(VertexBuffer2 *vbuffer, int vWidth, int vHeight, float 
*calibDataDev, float *TrelDev, float *diffImage, int width, int height, int layer, ImagePyramid2 *grayPyramidCur, hipStream_t stream) { if (vbuffer == NULL || vbuffer->devPtr == NULL || calibDataDev == NULL || vbuffer->indexDevPtr == NULL || TrelDev == NULL || diffImage == NULL || grayPyramidCur == NULL) { printf("xyz2DiffCuda: null pointer given!\n"); return; } // enforce multiple of 1024 for element count -> max performance if (vbuffer->getElementsCount()%512 != 0) { printf("xyz2DiffCuda: vbuffer has wrong number of selected pixels! (%d)\n",vbuffer->getElementsCount()); return; } float *imgData = (float*)grayPyramidCur->getImageRef(layer).devPtr; int imgWidth = grayPyramidCur->getImageRef(layer).width; int imgHeight = grayPyramidCur->getImageRef(layer).height; if (imgData == NULL) { printf("xyz2DiffCuda: given image does not have data allocated!\n"); return; } int srcStride = vbuffer->getStride(); int colorOffset = 0; if (srcStride == VERTEXBUFFER_STRIDE) { colorOffset = 14; if (layer == 1) { colorOffset = 17; } else if (layer == 2) { colorOffset = 20; } else if (layer == 3) { colorOffset = 23; } } else if (srcStride == COMPRESSED_STRIDE) { colorOffset = 6; if (layer == 1) { colorOffset = 7; } else if (layer == 2) { colorOffset = 8; } else { printf("compressed stride does not have 4th layer attributes!\n"); return; } } int divisor = 1<<layer; float a = 1.0f/float(divisor); float b = 0.5f*(a-1.0f); int *indexPointer = (int*)vbuffer->indexDevPtr; float *vertexData = (float*)vbuffer->devPtr; int nElements = vbuffer->getElementsCount(); dim3 cudaBlockSize(512,1,1); dim3 cudaGridSize(nElements/cudaBlockSize.x,1,1); hipMemsetAsync(diffImage,0,sizeof(float)*vWidth*vHeight,stream); hipLaunchKernelGGL(( xyz2DiffKernel), dim3(cudaGridSize),dim3(cudaBlockSize),0,stream, indexPointer,vertexData,vWidth,vHeight,TrelDev, calibDataDev, a, b, colorOffset, imgData, imgWidth, imgHeight, srcStride, diffImage); checkCudaError("xyz2DiffCuda error"); }
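/* --------------------------------------------------------------------------
   Illustrative sketch (an assumption, not part of the original code): one
   photometric Gauss-Newton step composed from the entry points defined
   above. All device buffers (Jacobian blocks of size 6*count, residual,
   weights, 6x6 JtJ, 6-vector b, 4x4 T) are assumed to be preallocated by
   the caller, the M-estimator weights are assumed to be computed elsewhere,
   and neutral scaling (1.0f) is assumed for optScaleIn/scaleOut. The helper
   is never called from this file.
   -------------------------------------------------------------------------- */
static void gaussNewtonStepSketch(VertexBuffer2 *refBuf, float *calibDataDev,
                                  ImagePyramid2 &grayPyramid, int layer,
                                  float *J1, float *J2, float *J3,
                                  float *weightsDev, float *weightedJ,
                                  float *zCurrentDev, float *zWeightsDev,
                                  float *residualDev, float *JtJDev,
                                  float *bDev, float *TDev, hipStream_t stream)
{
    int count = refBuf->getElementsCount();
    // 1) analytic Jacobians of the warp for pyramid layers 0..2 (per keyframe)
    precomputeJacobianCuda(refBuf, calibDataDev, J1, J2, J3, 1.0f);
    // 2) warp reference points with the current pose T, sample intensity
    //    residuals and depth-consistency weights on the chosen layer
    interpolateResidual2(refBuf, TDev, calibDataDev, grayPyramid, layer,
                         zCurrentDev, zWeightsDev, residualDev, stream);
    // 3) robust (M-estimator) row weighting of that layer's Jacobian block
    float *J = (layer == 0) ? J1 : (layer == 1) ? J2 : J3;
    weightJacobian(J, weightsDev, count, weightedJ, stream);
    // 4) build the 6x6 normal equations JtJ * x = Jt * r on the device
    JTJCuda(weightedJ, count, JtJDev, stream);
    JTresidualCuda(weightedJ, residualDev, count, bDev, stream);
    // 5) solve for the twist (on-device conjugate gradient) and map it to a
    //    rigid transform via the matrix exponential
    solveMotionCuda(JtJDev, bDev, TDev, 1.0f, stream);
}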
a919051edfcc01942d361d4333380be617b9df0d.cu
/* Copyright 2016 Tommi M. Tykkälä Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <stdio.h> #include <image2/Image2.h> #include <image2/ImagePyramid2.h> //#include <types.h> #include <hostUtils.h> #include <calib/calib.h> #include <rendering/VertexBuffer2.h> #include <math.h> #include <cwchar> #include <math.h> #include <assert.h> #include <helper_cuda.h> #include <tracker/basic_math.h> //#include <cpp_type_traits.h> using namespace std; namespace warputils { #include "expmCuda.h" #include "f2cCuda.h" #include "kernelUtils.h" #include "reduction_kernel.cu" #include "expmCuda.cu" #define SIZE 6 typedef doublereal CHOLMAT[SIZE][SIZE]; typedef doublereal CHOLVEC[SIZE]; #include "cholesky.cu" } using namespace warputils; texture<float, 2, cudaReadModeElementType> texC; __global__ void collectPointsIntoImageKernel(int *iDataSrc, float *vDataSrc, float *Tsrc, int skipper, float *vDataDst, float *Tdst, int width, int height,float *calibDataDev, int stride) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int idxStride = iDataSrc[idx]*stride; float3 p3,r3; p3.x = vDataSrc[idxStride+0]; p3.y = vDataSrc[idxStride+1]; p3.z = vDataSrc[idxStride+2]; float TsrcInv[16],T[16]; invertRT4Cuda(Tsrc,TsrcInv); matrixMult4(Tdst,TsrcInv,T); matrixMultVec4(T, p3, r3); float2 p2,pp; p2.x = r3.x / r3.z; p2.y = r3.y / r3.z; float *K = &calibDataDev[KR_OFFSET]; pp.x = K[0]*p2.x+K[2]; pp.y = K[4]*p2.y+K[5]; unsigned int xi = (unsigned int)(pp.x); unsigned int yi = (unsigned int)(pp.y); if ((xi < width) && (yi < height)) { int offset = (xi + yi * width)*stride; vDataDst[offset+0] = r3.x; vDataDst[offset+1] = r3.y; vDataDst[offset+2] = r3.z; for (int i = 3; i < stride; i++) { vDataDst[offset+i] = vDataSrc[idxStride+i]; } } } __global__ void collectPointsKernel(int *iDataSrc, float *vDataSrc, float *Tsrc, int skipper, float *vDataDst, float *Tdst, int stride) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int srcIndex = idx * skipper; int idxStrideSrc = iDataSrc[srcIndex]*stride; int idxStrideDst = idx*stride; float3 p3,r3; p3.x = vDataSrc[idxStrideSrc+0]; p3.y = vDataSrc[idxStrideSrc+1]; p3.z = vDataSrc[idxStrideSrc+2]; float TsrcInv[16],T[16]; invertRT4Cuda(Tsrc,TsrcInv); matrixMult4(Tdst,TsrcInv,T); matrixMultVec4(T, p3, r3); vDataDst[idxStrideDst+0] = r3.x; vDataDst[idxStrideDst+1] = r3.y; vDataDst[idxStrideDst+2] = r3.z; vDataDst[idxStrideDst+3] = vDataSrc[idxStrideSrc+3]; vDataDst[idxStrideDst+4] = vDataSrc[idxStrideSrc+4]; vDataDst[idxStrideDst+5] = vDataSrc[idxStrideSrc+5]; vDataDst[idxStrideDst+6] = 0; vDataDst[idxStrideDst+7] = 0; vDataDst[idxStrideDst+8] = vDataSrc[idxStrideSrc+8]; vDataDst[idxStrideDst+9] = vDataSrc[idxStrideSrc+9]; vDataDst[idxStrideDst+10] = vDataSrc[idxStrideSrc+10]; vDataDst[idxStrideDst+11] = vDataSrc[idxStrideSrc+11]; vDataDst[idxStrideDst+12] = vDataSrc[idxStrideSrc+12]; vDataDst[idxStrideDst+13] = vDataSrc[idxStrideSrc+13]; vDataDst[idxStrideDst+14] = vDataSrc[idxStrideSrc+14];; vDataDst[idxStrideDst+15] = vDataSrc[idxStrideSrc+15]; vDataDst[idxStrideDst+16] = vDataSrc[idxStrideSrc+16]; vDataDst[idxStrideDst+17] 
= vDataSrc[idxStrideSrc+17]; vDataDst[idxStrideDst+18] = vDataSrc[idxStrideSrc+18]; vDataDst[idxStrideDst+19] = vDataSrc[idxStrideSrc+19]; vDataDst[idxStrideDst+20] = vDataSrc[idxStrideSrc+20]; } // no need to check screen bounds here (only opengl vertices) __global__ void warpPointsKernel(int *iData, float *vData, float *weightsDev, float *T, float *calibDataDev, float *scratchPtr, float *imgData1, float *imgData2, float *imgData3, int width, int srcStride, int targetStride, int rgbOffset) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int vindex = iData[idx]; int idxStride = vindex*srcStride; // indices to src and target vertices in the baseBuffer int dstIdx1 = (10000+idx)*targetStride; int dstIdx2 = (10000+320*240+idx)*targetStride; float3 p3,r3; p3.x = vData[idxStride+0]; p3.y = vData[idxStride+1]; p3.z = vData[idxStride+2]; matrixMultVec4(T, p3, r3); // project r3 into screenspace for obtaining pixel coordinates float *KR = &calibDataDev[KR_OFFSET]; float *kc = &calibDataDev[KcR_OFFSET]; float2 pu,p1_1,p2_1;//,p2_2,p2_3; pu.x = r3.x / r3.z; pu.y = r3.y / r3.z; distortPoint(pu,kc,KR,p2_1); // generate reference point also pu.x = p3.x/p3.z; pu.y = p3.y/p3.z; distortPoint(pu,kc,KR,p1_1); //// FETCH current depth, intensity1, intensity2, intensity3 for KEYFRAME update! /* // compute low-resolution coordinates float a = 0.5f; float b = -0.25f; p2_2.x = a*p2_1.x + b; p2_2.y = a*p2_1.y + b; p2_3.x = a*p2_2.x + b; p2_3.y = a*p2_2.y + b; float color1 = 0, color2 = 0, color3 = 0; int xdi = 0, ydi = 0; float fracX = 0.0f, fracY = 0.0f; xdi = (int)p2_1.x; ydi = (int)p2_1.y; fracX = p2_1.x - xdi; fracY = p2_1.y - ydi; bilinearInterpolation(xdi, ydi, fracX, fracY, width, imgData1, color1); // bilinearInterpolation(xdi, ydi, fracX, fracY, width, depthData1, depth1); // TODO // determine inv(T) at this point! (compute in every thread?) transpose + vector multiplication is piece of cake // reconstruct 3D point + intensity in all 3 layers // map it back to reference using inv(T) // IIR 3D point + intensity // effect -> grid is lost, but consistency maintained? 
xdi = (int)p2_2.x; ydi = (int)p2_2.y; fracX = p2_2.x - xdi; fracY = p2_2.y - ydi; bilinearInterpolation(xdi, ydi, fracX, fracY, width/2, imgData2, color2); xdi = (int)p2_3.x; ydi = (int)p2_3.y; fracX = p2_3.x - xdi; fracY = p2_3.y - ydi; bilinearInterpolation(xdi, ydi, fracX, fracY, width/4, imgData3, color3); */ float w = weightsDev[idx]; scratchPtr[dstIdx1+0] = p1_1.x;//vData[idxStride+6]; scratchPtr[dstIdx1+1] = p1_1.y;//vData[idxStride+7]; scratchPtr[dstIdx1+2] = 0.0f; scratchPtr[dstIdx1+rgbOffset+0] = 1.0f - w; scratchPtr[dstIdx1+rgbOffset+1] = w; scratchPtr[dstIdx1+rgbOffset+2] = 0.0f; // float maxDist = calibDataDev[MAXD_OFFSET]; if (w > 0) { scratchPtr[dstIdx2+0] = p2_1.x; scratchPtr[dstIdx2+1] = p2_1.y; } else { scratchPtr[dstIdx2+0] = -1000.0f; scratchPtr[dstIdx2+1] = -1000.0f; } scratchPtr[dstIdx2+2] = 0.0f; scratchPtr[dstIdx2+rgbOffset+0] = 1.0f - w; scratchPtr[dstIdx2+rgbOffset+1] = w; scratchPtr[dstIdx2+rgbOffset+2] = 0.0f; } __global__ void warpBaseKernel(float *vData, float *T, int emptyVertexSlot, int stride, int rgbOffset) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int idxStrideSrc = idx*stride; int idxStrideDst = (idx+6)*stride; float3 p3,r3; p3.x = vData[idxStrideSrc+0]; p3.y = vData[idxStrideSrc+1]; p3.z = vData[idxStrideSrc+2]; float Tinv[16]; invertRT4Cuda(T, Tinv); matrixMultVec4(Tinv, p3, r3); vData[idxStrideDst+0] = r3.x; // target point x vData[idxStrideDst+1] = r3.y; // target point y vData[idxStrideDst+2] = r3.z; // target point z // add new line segment as extra job :) // avoids yet another lock/unlock with additional vbuffer if (idx == 0) { int previousSlot = emptyVertexSlot - 1; if (previousSlot < 0) previousSlot = 0; float px = vData[previousSlot*stride+0]; float py = vData[previousSlot*stride+1]; float pz = vData[previousSlot*stride+2]; vData[emptyVertexSlot*stride+0] = px; vData[emptyVertexSlot*stride+1] = py; vData[emptyVertexSlot*stride+2] = pz; vData[emptyVertexSlot*stride+rgbOffset+0] = 1; vData[emptyVertexSlot*stride+rgbOffset+1] = 0; vData[emptyVertexSlot*stride+rgbOffset+2] = 0; vData[(emptyVertexSlot+1)*stride+0] = r3.x; vData[(emptyVertexSlot+1)*stride+1] = r3.y; vData[(emptyVertexSlot+1)*stride+2] = r3.z; vData[(emptyVertexSlot+1)*stride+rgbOffset+0] = 1; vData[(emptyVertexSlot+1)*stride+rgbOffset+1] = 0; vData[(emptyVertexSlot+1)*stride+rgbOffset+2] = 0; } } __global__ void interpolateResidualKernel2(int *iData, float *vData, float *T, float *calibDataDev, float a, float b, int refColorOffset, float *imgData, int width, int height, float *zCurrentDev, float *zWeightsDev, float *residual, int srcStride, int zwidth, int zheight) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int vindex = iData[idx]; int idxStride = vindex*srcStride; float *KR = &calibDataDev[KR_OFFSET]; float *kc = &calibDataDev[KcR_OFFSET]; float *TLR = &calibDataDev[TLR_OFFSET]; float3 p3,r3_ir,r3; p3.x = vData[idxStride+0]; p3.y = vData[idxStride+1]; p3.z = vData[idxStride+2]; matrixMultVec4(T, p3, r3_ir); // reference IR -> current IR matrixMultVec4(TLR, r3_ir, r3); // current IR -> current RGB float2 pu,p2; pu.x = r3.x / r3.z; pu.y = r3.y / r3.z; distortPoint(pu,kc,KR,p2); // resolution tweak: float2 p; p.x = a*p2.x + b; p.y = a*p2.y + b; float iResidual = 1.0f; // set max residual value for points outside fov int xdi = (int)p.x; int ydi = (int)p.y; float zWeight = 0.0f; if (xdi >= 0 && ydi >= 0 && xdi < width-1 && ydi < height-1) { float fx = p.x - xdi; float fy = p.y - ydi; float color = 0; bilinearInterpolation(xdi, ydi, fx, fy, width, imgData, color); iResidual = 
vData[idxStride+refColorOffset] - color; // residual range [-1,1] // fetch depth coordinate from vertex buffer (offset runs over IR image) float *KL = &calibDataDev[KL_OFFSET]; float *TRL = &calibDataDev[TRL_OFFSET]; float3 rl3,pl2; matrixMultVec4(TRL, r3, rl3); // current RGB -> current IR rl3.x /= rl3.z; rl3.y /= rl3.z; rl3.z = 1; // normalize matrixMultVec3(KL, rl3, pl2); // project to image space int xdi2 = (int)(pl2.x+0.5f); // nearest point sample in IR view int ydi2 = (int)(pl2.y+0.5f); if (xdi2 >= 0 && ydi2 >= 0 && xdi2 < zwidth && ydi2 < zheight) { int offset = xdi2 + ydi2*zwidth; float zcur = zCurrentDev[offset]; float zerr = zcur-r3.z; zerr *= zerr; if (zerr < 100*100) { zWeight = 1.0f-zerr/(100.0f*100.0f); } } } residual[idx] = iResidual; zWeightsDev[idx] = zWeight; } __global__ void interpolateResidualKernel(int *iData, float *vData, float *T, float *calibDataDev, float a, float b, int refColorOffset, float *imgData, int width, int height, float *vDataCur, int zwidth, int zheight, float *residual, float *zWeights, int srcStride, int dstStride) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int vindex = iData[idx]; int idxStride = vindex*srcStride; float *KR = &calibDataDev[KR_OFFSET]; float *kc = &calibDataDev[KcR_OFFSET]; float3 p3,r3; p3.x = vData[idxStride+0]; p3.y = vData[idxStride+1]; p3.z = vData[idxStride+2]; matrixMultVec4(T, p3, r3); // reference RGB -> current RGB float2 pu,p2; pu.x = r3.x / r3.z; pu.y = r3.y / r3.z; distortPoint(pu,kc,KR,p2); // resolution tweak: float2 p; p.x = a*p2.x + b; p.y = a*p2.y + b; float zWeight = 0.0f; float iResidual = 1.0f; // set max residual value for points outside fov int xdi = (int)p.x; int ydi = (int)p.y; if (xdi >= 0 && ydi >= 0 && xdi < width-1 && ydi < height-1) { float fx = p.x - xdi; float fy = p.y - ydi; float color = 0; bilinearInterpolation(xdi, ydi, fx, fy, width, imgData, color); iResidual = vData[idxStride+refColorOffset] - color; // residual range [-1,1] // fetch depth coordinate from vertex buffer (offset runs over IR image) float *KL = &calibDataDev[KL_OFFSET]; float *TRL = &calibDataDev[TRL_OFFSET]; float3 rl3,pl2; matrixMultVec4(TRL, r3, rl3); // current RGB -> current IR rl3.x /= rl3.z; rl3.y /= rl3.z; rl3.z = 1; // normalize matrixMultVec3(KL, rl3, pl2); // project to image space int xdi2 = (int)(pl2.x+0.5f); // nearest point sample in IR view int ydi2 = (int)(pl2.y+0.5f); if (xdi2 >= 0 && ydi2 >= 0 && xdi2 < zwidth && ydi2 < zheight) { int offset = xdi2 + ydi2*zwidth; float zcur = vDataCur[offset*dstStride+2]; float zerr = zcur-r3.z; zerr *= zerr; if (zerr < 300*300) { zWeight = 1.0f-zerr/(300.0f*300.0f); } } } residual[idx] = iResidual; zWeights[idx] = zWeight; } __global__ void filterDepthIIRKernel(int *iData, float *vData, float *T, float *calibDataDev, int width, int height, float *vDataCur, float *weightsDev, float weightThreshold, int stride) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int vindex = iData[idx]; int idxStride = vindex*stride; // do not average depth values if M-estimator weight is too small if (weightsDev[idx] < weightThreshold) return; float *KR = &calibDataDev[KR_OFFSET]; float *kc = &calibDataDev[KcR_OFFSET]; float3 p3,r3; p3.x = vData[idxStride+0]; p3.y = vData[idxStride+1]; p3.z = vData[idxStride+2]; matrixMultVec4(T, p3, r3); // reference RGB -> current RGB float2 pu,p; pu.x = r3.x / r3.z; pu.y = r3.y / r3.z; distortPoint(pu,kc,KR,p); int xdi = (int)p.x; int ydi = (int)p.y; if (xdi >= 0 && ydi >= 0 && xdi < width-1 && ydi < height-1) { // fetch depth coordinate from vertex buffer 
(offset runs over IR image) float *KL = &calibDataDev[KL_OFFSET]; float *TRL = &calibDataDev[TRL_OFFSET]; float3 rl3,pl2; matrixMultVec4(TRL, r3, rl3); // current RGB -> current IR rl3.x /= rl3.z; rl3.y /= rl3.z; rl3.z = 1; // normalize matrixMultVec3(KL, rl3, pl2); // project to image space int xdi2 = (int)(pl2.x+0.5f); // nearest point sample in IR view int ydi2 = (int)(pl2.y+0.5f); if (xdi2 >= 0 && ydi2 >= 0 && xdi2 < width && ydi2 < height) { int offset = xdi2 + ydi2*width; float3 pc,pr,ray; // pc is in RGB frame of the current view pc.x = vDataCur[offset*stride+0]; pc.y = vDataCur[offset*stride+1]; pc.z = vDataCur[offset*stride+2]; // generate mapping from current to reference float iT[16]; invertRT4Cuda(&T[0],&iT[0]); // map current point to reference matrixMultVec4(iT, pc, pr); // current RGB -> reference RGB // generate a ray from current origin towards p3 float len = sqrtf(p3.x*p3.x+p3.y*p3.y+p3.z*p3.z); ray.x = p3.x / len; ray.y = p3.y / len; ray.z = p3.z / len; // project pr to ray float rayProj = pr.x*ray.x+pr.y*ray.y+pr.z*ray.z; ray.x *= rayProj; ray.y *= rayProj; ray.z *= rayProj; // compute orthogonal displacement from ray /* float3 dp; dp.x = pr.x - ray.x; dp.y = pr.y - ray.y; dp.z = pr.z - ray.z;*/ // determine squared length //float rayDist2 = dp.x*dp.x + dp.y*dp.y + dp.z*dp.z; // compute weight based on ray distance float distWeight = 0.9f;//__expf(-xyDist/50.0f+1e-5f); vData[idxStride+2] = p3.z*0.9f*(1.0f-distWeight) + 0.1f*pr.z*distWeight; } } } __global__ void compressVertexBufferKernel(int *iDataSrc, float *vDataSrc, int *iDataDst, float *vDataDst, int srcStride, int dstStride, bool rgbVisualization) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int idxStrideSrc = iDataSrc[idx]*srcStride; // identity mapping iDataDst[idx] = idx; int idxStrideDst = idx*dstStride; if (srcStride == dstStride) { for (int i = 0; i < srcStride; i++) { vDataDst[idxStrideDst+i] = vDataSrc[idxStrideSrc+i]; } } else if (srcStride == VERTEXBUFFER_STRIDE && dstStride == COMPRESSED_STRIDE){ vDataDst[idxStrideDst+0] = vDataSrc[idxStrideSrc+0]; vDataDst[idxStrideDst+1] = vDataSrc[idxStrideSrc+1]; vDataDst[idxStrideDst+2] = vDataSrc[idxStrideSrc+2]; vDataDst[idxStrideDst+3] = vDataSrc[idxStrideSrc+3]; vDataDst[idxStrideDst+4] = vDataSrc[idxStrideSrc+4]; vDataDst[idxStrideDst+5] = vDataSrc[idxStrideSrc+5]; if (!rgbVisualization) { vDataDst[idxStrideDst+6] = vDataSrc[idxStrideSrc+14]; vDataDst[idxStrideDst+7] = vDataSrc[idxStrideSrc+17]; vDataDst[idxStrideDst+8] = vDataSrc[idxStrideSrc+20]; } else { vDataDst[idxStrideDst+6] = vDataSrc[idxStrideSrc+8]; vDataDst[idxStrideDst+7] = vDataSrc[idxStrideSrc+9]; vDataDst[idxStrideDst+8] = vDataSrc[idxStrideSrc+10]; } } } __global__ void precomputeJacobian4Kernel(int *iData, float *vData, float *calibDataDev, int vectorLength, float *jacobian1, float *jacobian2, float *jacobian3, float *jacobian4, int stride, float optScaleIn) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int vindex = iData[idx]; int idxStride = vindex*stride; float *K = &calibDataDev[KR_OFFSET]; float *T = &calibDataDev[TLR_OFFSET]; float *kc = &calibDataDev[KcR_OFFSET]; float3 p3,r3; float3 dp2,dd2,dpn2; dd2.z = 0; dpn2.z = 0; float3 dp3,dr3; p3.x = vData[idxStride+0]; p3.y = vData[idxStride+1]; p3.z = vData[idxStride+2]; matrixMultVec4(T,p3,r3); // input points can be scaled without altering perspective projection // because it is useful to have uniform magnitudes during optimization, unit scaling is supported r3.x *= optScaleIn; r3.y *= optScaleIn; r3.z *= optScaleIn; float gradX1 = 
vData[idxStride+11]; float gradY1 = vData[idxStride+12]; float gradX2 = vData[idxStride+15]; float gradY2 = vData[idxStride+16]; float gradX3 = vData[idxStride+18]; float gradY3 = vData[idxStride+19]; float gradX4 = vData[idxStride+21]; float gradY4 = vData[idxStride+22]; // A[0] = 0; A[1] = -x[2]; A[2] = x[1]; A[3] = x[3]; // A[4] = x[2];A[5] = 0; A[6] =-x[0]; A[7] = x[4]; // A[8] =-x[1];A[9] = x[0]; A[10] = 0; A[11] = x[5]; // A[12] = 0; A[13] = 0; A[14] = 0; A[15] = 0; float dN[6]; dN[0] = 1.0f/r3.z; dN[1] = 0; dN[2] = -r3.x/(r3.z*r3.z); dN[3] = 0; dN[4] = 1.0f/r3.z; dN[5] = -r3.y/(r3.z*r3.z); float x = r3.x/r3.z; float y = r3.y/r3.z; float x2 = x*x; float y2 = y*y; float x4 = x2*x2; float y4 = y2*y2; float r2 = x2+y2; float r4 = r2*r2; float dD[4]; dD[0] = 1 + kc[0]*(3*x2+y2) + kc[1]*(5*x4+6*x2*y2+y4) + kc[4]*r4*(7*x2+y2); dD[1] = kc[0]*2*x*y + kc[1]*4*x*y*r2 + kc[4]*6*x*y*r4; dD[2] = kc[0]*2*y*x + kc[1]*4*x*y*r2 + kc[4]*6*x*y*r4; dD[3] = 1 + kc[0]*(3*x2+y2) + kc[1]*(5*x4+6*x2*y2+y4) + kc[4]*r4*(7*y2+x2); // param1 dp3.x = 0.0f; dp3.y =-r3.z; dp3.z = r3.y; matrixRot4(T,dp3,dr3); // basetransform only influences rotation (after torque, w=0) dpn2.x = dN[0]*dr3.x + dN[1]*dr3.y + dN[2]*dr3.z; dpn2.y = dN[3]*dr3.x + dN[4]*dr3.y + dN[5]*dr3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*0 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*0 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*0 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; jacobian4[vectorLength*0 + idx] = (dp2.x/8.0f)*gradX4 + (dp2.y/8.0f)*gradY4; // param2 dp3.x = r3.z; dp3.y = 0.0f; dp3.z =-r3.x; matrixRot4(T,dp3,dr3); // basetransform only influences rotation (after torque, w=0) dpn2.x = dN[0]*dr3.x + dN[1]*dr3.y + dN[2]*dr3.z; dpn2.y = dN[3]*dr3.x + dN[4]*dr3.y + dN[5]*dr3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*1 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*1 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*1 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; jacobian4[vectorLength*1 + idx] = (dp2.x/8.0f)*gradX4 + (dp2.y/8.0f)*gradY4; // param3 dp3.x =-r3.y; dp3.y = r3.x; dp3.z = 0.0f; matrixRot4(T,dp3,dr3); // basetransform only influences rotation (after torque, w=0) dpn2.x = dN[0]*dr3.x + dN[1]*dr3.y + dN[2]*dr3.z; dpn2.y = dN[3]*dr3.x + dN[4]*dr3.y + dN[5]*dr3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*2 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*2 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*2 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; jacobian4[vectorLength*2 + idx] = (dp2.x/8.0f)*gradX4 + (dp2.y/8.0f)*gradY4; // param4 dp3.x = 1.0f; dp3.y = 0.0f; dp3.z = 0.0f; matrixRot4(T,dp3,dr3); // basetransform only influences rotation (after torque, w=0) dpn2.x = dN[0]*dr3.x + dN[1]*dr3.y + dN[2]*dr3.z; dpn2.y = dN[3]*dr3.x + dN[4]*dr3.y + dN[5]*dr3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*3 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*3 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*3 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; jacobian4[vectorLength*3 + idx] = (dp2.x/8.0f)*gradX4 + (dp2.y/8.0f)*gradY4; // param5 
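    // params 5 and 6 complete the translational part of the twist (unit steps along y and z).
    // As with every column of the Jacobian, the 3-D derivative is pushed through the
    // normalization Jacobian dN, the distortion Jacobian dD and the intrinsics K, then dotted
    // with the per-layer image gradients; the /2, /4, /8 factors account for the halved pixel
    // scale on the coarser pyramid levels.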
dp3.x = 0.0f; dp3.y = 1.0f; dp3.z = 0.0f; matrixRot4(T,dp3,dr3); // basetransform only influences rotation (after torque, w=0) dpn2.x = dN[0]*dr3.x + dN[1]*dr3.y + dN[2]*dr3.z; dpn2.y = dN[3]*dr3.x + dN[4]*dr3.y + dN[5]*dr3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*4 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*4 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*4 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; jacobian4[vectorLength*4 + idx] = (dp2.x/8.0f)*gradX4 + (dp2.y/8.0f)*gradY4; // param6 dp3.x = 0.0f; dp3.y = 0.0f; dp3.z = 1.0f; matrixRot4(T,dp3,dr3); // basetransform only influences rotation (after torque, w=0) dpn2.x = dN[0]*dr3.x + dN[1]*dr3.y + dN[2]*dr3.z; dpn2.y = dN[3]*dr3.x + dN[4]*dr3.y + dN[5]*dr3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*5 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*5 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*5 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; jacobian4[vectorLength*5 + idx] = (dp2.x/8.0f)*gradX4 + (dp2.y/8.0f)*gradY4; } __global__ void precomputeJacobianKernel(int *iData, float *vData, float *calibDataDev, int vectorLength, float *jacobian1, float *jacobian2, float *jacobian3, int stride, float optScaleIn) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int vindex = iData[idx]; int idxStride = vindex*stride; float *K = &calibDataDev[KR_OFFSET]; float *kc = &calibDataDev[KcR_OFFSET]; float3 p3; float3 dp3,dp2,dd2,dpn2; dd2.z = 0; dpn2.z = 0; // input points can be scaled without altering perspective projection // because it is useful to have uniform magnitudes during optimization, unit scaling is supported p3.x = vData[idxStride+0]*optScaleIn; p3.y = vData[idxStride+1]*optScaleIn; p3.z = vData[idxStride+2]*optScaleIn; float gradX1 = vData[idxStride+11]; float gradY1 = vData[idxStride+12]; float gradX2 = vData[idxStride+15]; float gradY2 = vData[idxStride+16]; float gradX3 = vData[idxStride+18]; float gradY3 = vData[idxStride+19]; // A[0] = 0; A[1] = -x[2]; A[2] = x[1]; A[3] = x[3]; // A[4] = x[2];A[5] = 0; A[6] =-x[0]; A[7] = x[4]; // A[8] =-x[1];A[9] = x[0]; A[10] = 0; A[11] = x[5]; // A[12] = 0; A[13] = 0; A[14] = 0; A[15] = 0; float dN[6]; dN[0] = 1.0f/p3.z; dN[1] = 0; dN[2] = -p3.x/(p3.z*p3.z); dN[3] = 0; dN[4] = 1.0f/p3.z; dN[5] = -p3.y/(p3.z*p3.z); float x = p3.x/p3.z; float y = p3.y/p3.z; float x2 = x*x; float y2 = y*y; float x4 = x2*x2; float y4 = y2*y2; float r2 = x2+y2; float r4 = r2*r2; float dD[4]; dD[0] = 1 + kc[0]*(3*x2+y2) + kc[1]*(5*x4+6*x2*y2+y4) + kc[4]*r4*(7*x2+y2); dD[1] = kc[0]*2*x*y + kc[1]*4*x*y*r2 + kc[4]*6*x*y*r4; dD[2] = kc[0]*2*y*x + kc[1]*4*x*y*r2 + kc[4]*6*x*y*r4; dD[3] = 1 + kc[0]*(3*x2+y2) + kc[1]*(5*x4+6*x2*y2+y4) + kc[4]*r4*(7*y2+x2); // param1 dp3.x = 0.0f; dp3.y =-p3.z; dp3.z = p3.y; dpn2.x = dN[0]*dp3.x + dN[1]*dp3.y + dN[2]*dp3.z; dpn2.y = dN[3]*dp3.x + dN[4]*dp3.y + dN[5]*dp3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*0 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*0 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*0 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; // param2 dp3.x = p3.z; dp3.y = 0.0f; dp3.z =-p3.x; dpn2.x = dN[0]*dp3.x + dN[1]*dp3.y + dN[2]*dp3.z; dpn2.y = dN[3]*dp3.x + dN[4]*dp3.y + dN[5]*dp3.z; 
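    // chain rule: dpn2 is the derivative on the normalized image plane; the 2x2 distortion
    // Jacobian dD maps it through the radial distortion model, and matrixMultVec3 with K
    // converts the result to pixel units before it is dotted with the image gradients.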
dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*1 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*1 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*1 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; // param3 dp3.x =-p3.y; dp3.y = p3.x; dp3.z = 0.0f; dpn2.x = dN[0]*dp3.x + dN[1]*dp3.y + dN[2]*dp3.z; dpn2.y = dN[3]*dp3.x + dN[4]*dp3.y + dN[5]*dp3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*2 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*2 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*2 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; // param4 dp3.x = 1.0f; dp3.y = 0.0f; dp3.z = 0.0f; dpn2.x = dN[0]*dp3.x + dN[1]*dp3.y + dN[2]*dp3.z; dpn2.y = dN[3]*dp3.x + dN[4]*dp3.y + dN[5]*dp3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*3 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*3 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*3 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; // param5 dp3.x = 0.0f; dp3.y = 1.0f; dp3.z = 0.0f; dpn2.x = dN[0]*dp3.x + dN[1]*dp3.y + dN[2]*dp3.z; dpn2.y = dN[3]*dp3.x + dN[4]*dp3.y + dN[5]*dp3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*4 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*4 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*4 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; // param6 dp3.x = 0.0f; dp3.y = 0.0f; dp3.z = 1.0f; dpn2.x = dN[0]*dp3.x + dN[1]*dp3.y + dN[2]*dp3.z; dpn2.y = dN[3]*dp3.x + dN[4]*dp3.y + dN[5]*dp3.z; dd2.x = dD[0]*dpn2.x+dD[1]*dpn2.y; dd2.y = dD[2]*dpn2.x+dD[3]*dpn2.y; matrixMultVec3(K, dd2, dp2); jacobian1[vectorLength*5 + idx] = dp2.x*gradX1 + dp2.y*gradY1; jacobian2[vectorLength*5 + idx] = (dp2.x/2.0f)*gradX2 + (dp2.y/2.0f)*gradY2; jacobian3[vectorLength*5 + idx] = (dp2.x/4.0f)*gradX3 + (dp2.y/4.0f)*gradY3; } __global__ void weightJacobianKernel(float *jacobian, float *weights, int vectorLength, float *weightedJacobian) { int idx = blockIdx.x*blockDim.x+threadIdx.x; float w = weights[idx]; weightedJacobian[vectorLength*0 + idx] = jacobian[vectorLength*0 + idx] * w; weightedJacobian[vectorLength*1 + idx] = jacobian[vectorLength*1 + idx] * w; weightedJacobian[vectorLength*2 + idx] = jacobian[vectorLength*2 + idx] * w; weightedJacobian[vectorLength*3 + idx] = jacobian[vectorLength*3 + idx] * w; weightedJacobian[vectorLength*4 + idx] = jacobian[vectorLength*4 + idx] * w; weightedJacobian[vectorLength*5 + idx] = jacobian[vectorLength*5 + idx] * w; } __global__ void elementwiseMultKernel(float *vecA, float *vecB, float *result) { int idx = blockIdx.x*blockDim.x+threadIdx.x; result[idx] = vecA[idx]*vecB[idx]; } __global__ void sumElemsKernel2(float *blockScratch, int nblocks, float *resultA, float *resultB) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx == 0) { float sumElems = 0.0f; for (int i = 0; i < nblocks; i++) { sumElems += blockScratch[i]; } resultA[0] = sumElems; if (resultB != NULL) resultB[0] = sumElems; } } __global__ void sumElemsKernel(float *blockScratch, int nblocks, float *resultA) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx == 0) { float sumElems = 0.0f; for (int i = 0; i < nblocks; i++) { sumElems += blockScratch[i]; 
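            // blockScratch holds one partial sum per reduction block; nblocks stays small
            // (count/nthreads), so a serial pass in a single thread is an acceptable final step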
        }
        resultA[0] = sumElems;
    }
}

__global__ void matrixMult4Kernel(float *A, float *B, float *C) {
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if (idx == 0) {
        matrixMult4(A,B,C);
    }
}

__global__ void matrixMult4NormalizedKernel(float *A, float *B, float *C) {
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if (idx == 0) {
        matrixMult4(A,B,C);
        normalizeMat4(C);
    }
}

__global__ void invertMatrix4Kernel(float *A, float *iA, int N) {
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if (idx < N) {
        invertRT4Cuda(A+idx*16,iA+idx*16);
    }
}

__global__ void convertToAxisAngleKernel(float *A, float *posAxisAngle, int N) {
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if (idx < N) {
        float m[16];
        invertRT4Cuda(A+idx*16,m);
        float q[4];
        rot2QuaternionCuda(m,q);
        float axisAngle[4];
        quaternion2AxisAngleCuda(q,axisAngle);
        posAxisAngle[idx*7+0] = m[3];
        posAxisAngle[idx*7+1] = m[7];
        posAxisAngle[idx*7+2] = m[11];
        posAxisAngle[idx*7+3] = axisAngle[0];
        posAxisAngle[idx*7+4] = axisAngle[1];
        posAxisAngle[idx*7+5] = axisAngle[2];
        posAxisAngle[idx*7+6] = axisAngle[3];
        // also save pose matrices for debugging
        for (int i = 0; i < 16; i++) A[idx*16+i] = m[i];
    }
}

__global__ void filterPoseKernel(float *posAxisAngle, float *weightsDev, int N, float *invT) {
    int idx = blockIdx.x*blockDim.x+threadIdx.x;
    if (idx != 0) return;
    float avg[7],weightSum = 1e-7f;
    for (int j = 0; j < 7; j++) { avg[j] = 0; }
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < 7; j++) {
            avg[j] += posAxisAngle[i*7+j]*weightsDev[i];
        }
        weightSum += weightsDev[i];
    }
    for (int j = 0; j < 7; j++) avg[j] = avg[j]/weightSum;
    // normalize rotation axis
    // float len = sqrtf(avg[3]*avg[3]+avg[4]*avg[4]+avg[5]*avg[5]+1e-7f);
    // avg[3] /= len;
    // avg[4] /= len;
    // avg[5] /= len;
    float T[16];
    axisAngle2RotCuda(&avg[3],T);
    T[3] = avg[0]; T[7] = avg[1]; T[11] = avg[2];
    invertRT4Cuda(T,invT);
    /*
    float T[16];
    axisAngle2RotCuda(&posAxisAngle[3],T);
    T[3] = posAxisAngle[0]; T[7] = posAxisAngle[1]; T[11] = posAxisAngle[2];
    invertRT4Cuda(T,invT);
    */
    /*
    float q[4],T[16];
    quaternion2RotCuda(&posAxisAngle[3],T);
    T[3] = posAxisAngle[0]; T[7] = posAxisAngle[1]; T[11] = posAxisAngle[2];
    invertRT4Cuda(T,invT);
    */
}

__device__ doublereal dotProduct6(doublereal *a, doublereal *b) {
    doublereal dot = 0;
    for (int i = 0; i < 6; i++) dot += a[i]*b[i];
    return dot;
}

__device__ void matrixMultVec6(doublereal *A, doublereal *x, doublereal *r) {
    for (int i = 0; i < 6; i++) r[i] = (doublereal)0.0;
    for (int j = 0; j < 6; j++) {
        for (int k = 0; k < 6; k++) {
            r[j] += A[j*6+k]*x[k];
        }
    }
}

__device__ void generateA(doublereal *x, doublereal *A) {
    A[0]  = 0;     A[1]  = -x[2]; A[2]  = x[1];  A[3]  = x[3];
    A[4]  = x[2];  A[5]  = 0;     A[6]  = -x[0]; A[7]  = x[4];
    A[8]  = -x[1]; A[9]  = x[0];  A[10] = 0;     A[11] = x[5];
    A[12] = 0;     A[13] = 0;     A[14] = 0;     A[15] = 0;
}

// TODO: normalize N out from quantities!
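/* A minimal host-side reference sketch (added for illustration; the helper name
   solveCG6Host is hypothetical and not part of the original pipeline) of the same 6x6
   conjugate-gradient recurrence used by conjugateGradientKernel below. It can be used to
   cross-check a JtJ * x = b solve on the CPU, assuming JtJ is the symmetric 6x6 matrix
   produced by JTJCuda(). */
static void solveCG6Host(const double *JtJ, const double *b, double tol, int maxSteps, double *x)
{
    double r[6], dir[6];
    for (int i = 0; i < 6; i++) { x[i] = 0.0; r[i] = b[i]; dir[i] = b[i]; }
    for (int step = 0; step < maxSteps; step++) {
        // Adir = JtJ * dir
        double Adir[6] = {0, 0, 0, 0, 0, 0};
        for (int j = 0; j < 6; j++)
            for (int k = 0; k < 6; k++) Adir[j] += JtJ[j*6+k]*dir[k];
        // step length alpha = (r.r)/(dir.A.dir), guarded against division by zero
        double rr = 0, dAd = 0;
        for (int i = 0; i < 6; i++) { rr += r[i]*r[i]; dAd += Adir[i]*dir[i]; }
        double alpha = (fabs(dAd) > tol) ? rr/dAd : 0.0;
        // update solution and residual
        double rr2 = 0;
        for (int i = 0; i < 6; i++) {
            x[i] += alpha*dir[i];
            r[i] -= alpha*Adir[i];
            rr2  += r[i]*r[i];
        }
        // early exit when the residual drops below the tolerance
        if (sqrt(rr2) < tol) return;
        // new conjugate search direction
        double beta = rr2/rr;
        for (int i = 0; i < 6; i++) dir[i] = r[i] + beta*dir[i];
    }
}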
__global__ void linearFuseKernel(float *JtJDevExt, float *residual6DevExt, float weight1, float iN1, float *JtJDev, float *residual6Dev, float weight2, float iN2) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx < 6) { residual6DevExt[idx] = iN1*weight1*residual6DevExt[idx]+iN2*weight2*residual6Dev[idx]; } JtJDevExt[idx] = iN1*weight1*JtJDevExt[idx]+iN2*weight2*JtJDev[idx]; } __global__ void conjugateGradientKernel(float *JtJDev, float *bb, doublereal tol, int maxSteps, doublereal *ADev) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (idx != 0) return; doublereal A[6*6]; doublereal x[6]; doublereal r[6]; doublereal b[6]; doublereal dir[6]; // copy matrix to local memory for speeding up access for (int i = 0; i < 36; i++) A[i] = (doublereal)JtJDev[i]; //for (int i = 0; i < 6; i++) A[i+i*6] += (doublereal)1e-8; for (int i = 0; i < 6; i++) { b[i] = (doublereal)bb[i]; x[i] = 0.0; r[i] = b[i]; dir[i] = b[i]; } int nSteps = 0; while (nSteps < maxSteps) { doublereal Adir[6]; matrixMultVec6(A,dir,Adir); //step length doublereal rr = dotProduct6(r,r); doublereal Adirdir = dotProduct6(Adir,dir); // compute abs(Adirdir), its numerically more stable than |Adirdir|: doublereal div = Adirdir; if (div < 0) div = -div; doublereal stepLength = 0.0; // prevent division by zero: if (div > tol) stepLength = rr/Adirdir; // update error: for (int i = 0; i < 6; i++) { r[i] -= stepLength*Adir[i]; } doublereal rr2 = dotProduct6(r,r); /* // early exit with previous x, (minimization step failed!) if (rr2 > rr) { generateA(x,ADev); return; } */ // update params: for (int i = 0; i < 6; i++) { x[i] += stepLength*dir[i];} // early exit, residual is below a threshold: if (sqrt(rr2) < tol) { generateA(x,ADev); return; } doublereal beta = rr2/rr; for (int i = 0; i < 6; i++) { dir[i] = r[i] + beta*dir[i]; } nSteps++; } generateA(x,ADev); } // only one block __global__ void choleskyKernel(float *JtJDev, float *bb, doublereal *ADev) { unsigned int idxI = threadIdx.x; unsigned int idxJ = threadIdx.y; __shared__ doublereal iA[SIZE][SIZE]; __shared__ doublereal B[SIZE]; // __shared__ float x[6]; bool firstThread = (idxI == 0 && idxJ == 0); // load data into local memory iA[idxJ][idxI] = (doublereal)JtJDev[idxJ*6+idxI]; if (idxJ == 0) B[idxI] = (doublereal)bb[idxI]; __syncthreads(); // single thread only: if (firstThread) { CHOLVEC P; // cholesky decomposition choldc1(6, iA,P); choldcsl2(6,iA,P); choleskyInverse(6,iA); } __syncthreads(); __shared__ doublereal x[6]; if (idxJ == 0) { x[idxI] = iA[idxI][0] * B[0] + iA[idxI][1] * B[1] + iA[idxI][2] * B[2] + iA[idxI][3] * B[3] + iA[idxI][4] * B[4] + iA[idxI][5] * B[5]; } __syncthreads(); // fill A(x) elements if (firstThread) generateA(x,ADev); } /* __global__ void dotProductKernel(float *vecA, float *vecB, int nblocks, float *blockScratch, float *result) { int idx = blockIdx.x*blockDim.x+threadIdx.x; if (threadIdx.x > 1023) return; if (idx >= nblocks*1024) return; __shared__ float sharedMem[1024]; sharedMem[threadIdx.x] = vecA[idx]*vecB[idx]; for(uint stride = 512; stride > 0; stride >>= 1) { __syncthreads(); if(threadIdx.x < stride) sharedMem[threadIdx.x] += sharedMem[threadIdx.x + stride]; } if (threadIdx.x == 0) blockScratch[blockIdx.x] = sharedMem[0]; __syncthreads(); // BUG here: blockSums might not be updated yet, cuda doesn't support block synchronization, only threads if(idx == 0) { float dotSum = 0.0f; for (int i = 0; i < nblocks; i++) dotSum += blockScratch[i]; result[0] = dotSum; } } */ extern "C" void warpBase(VertexBuffer2 *vbuffer,float *T) { if (vbuffer == 
NULL || T == NULL || vbuffer->devPtr == NULL) return; if (vbuffer->getVertexCount() >= 10014) { /*printf("linebuffer ran out!\n");*/ return; } float *vData = (float*)vbuffer->devPtr; int targetStride = vbuffer->getStride(); int rgbOffset = 0; if (targetStride == VERTEXBUFFER_STRIDE) { rgbOffset = 8; } else if (targetStride == BASEBUFFER_STRIDE) { rgbOffset = 3; } int freeVertex = vbuffer->getVertexCount(); dim3 cudaBlockSize(6,1,1); dim3 cudaGridSize(1,1,1); warpBaseKernel<<<cudaGridSize,cudaBlockSize,0,vbuffer->cudaStream>>>(vData, T, freeVertex,targetStride,rgbOffset); // printf("new vertex amount : %d\n",vbuffer->getVertexCount()+2); vbuffer->setVertexAmount(vbuffer->getVertexCount()+2); checkCudaError("warpBase error"); } extern "C" void warpPoints(VertexBuffer2 *vbuffer, float *weightsDev, float *T, float *calibDataDev, VertexBuffer2 *baseBuf, ImagePyramid2 *grayPyramid) { if (vbuffer == NULL || T == NULL || vbuffer->devPtr == NULL || vbuffer->indexDevPtr == NULL || calibDataDev == NULL || weightsDev == NULL || baseBuf == NULL || baseBuf->devPtr == NULL || grayPyramid == NULL) return; float *imgData[3]; assert(grayPyramid->nLayers == 3); for (int i = 0; i < 3; i++) { imgData[i] = (float*)grayPyramid->getImageRef(i).devPtr; if (imgData[i] == NULL) { printf("warpPoints error: grayPyramid layer %d not locked! panik exit \n",i); return; } } int targetStride = baseBuf->getStride(); int rgbOffset = 0; if (targetStride == VERTEXBUFFER_STRIDE) { rgbOffset = 8; } else if (targetStride == BASEBUFFER_STRIDE) { rgbOffset = 3; } // enforce multiple of 1024 for element count -> max performance if (vbuffer->getElementsCount()%1024 != 0) { printf("warp points: vbuffer has wrong number of selected pixels!\n"); return; } float *vData = (float*)vbuffer->devPtr; int *iData = (int*)vbuffer->indexDevPtr; float *dstData = (float*)baseBuf->devPtr; dim3 cudaBlockSize(1024,1,1); dim3 cudaGridSize(vbuffer->getElementsCount()/cudaBlockSize.x,1,1); warpPointsKernel<<<cudaGridSize,cudaBlockSize,0,vbuffer->cudaStream>>>(iData,vData,weightsDev,T,calibDataDev,dstData, imgData[0],imgData[1],imgData[2],grayPyramid->getImageRef(0).width,vbuffer->getStride(),targetStride,rgbOffset); checkCudaError("warpPoints error"); } extern "C" void interpolateResidual(VertexBuffer2 *vbuffer, VertexBuffer2 *vbufferCur, float *T, float *calibDataDev, ImagePyramid2 &grayPyramid, int layer, float *residual, float *zWeightsDev) { if (vbuffer == NULL || vbufferCur == NULL || T == NULL || vbuffer->devPtr == NULL || vbuffer->indexDevPtr == NULL || vbufferCur->devPtr == NULL || calibDataDev == NULL || residual == NULL || zWeightsDev == NULL) return; // enforce multiple of 1024 for element count -> max performance if (vbuffer->getElementsCount()%1024 != 0) { printf("interpolateResidual: vbuffer has wrong number of selected pixels!\n"); return; } float *imgData = (float*)grayPyramid.getImageRef(layer).devPtr; cudaArray *cArray = (cudaArray*)grayPyramid.getImageRef(layer).cArray; if (imgData == NULL/* || cArray == NULL*/) { printf("given image does not have data allocated!\n"); return; } /* // set texture parameters texC.addressMode[0] = cudaAddressModeClamp; texC.addressMode[1] = cudaAddressModeClamp; texC.filterMode = cudaFilterModeLinear; texC.normalized = false; // bind the array to the texture cudaBindTextureToArray(texC, cArray); */ int srcStride = vbuffer->getStride(); int dstStride = vbufferCur->getStride(); int width = grayPyramid.getImageRef(layer).width; int height = grayPyramid.getImageRef(layer).height; float *vData = 
(float*)vbuffer->devPtr; float *vDataCur = (float*)vbufferCur->devPtr; int *iData = (int*)vbuffer->indexDevPtr; int colorOffset = 0; if (srcStride == VERTEXBUFFER_STRIDE) { colorOffset = 14; if (layer == 1) { colorOffset = 17; } else if (layer == 2) { colorOffset = 20; } } else if (srcStride == COMPRESSED_STRIDE) { colorOffset = 6; if (layer == 1) { colorOffset = 7; } else if (layer == 2) { colorOffset = 8; } } int divisor = 1<<layer; float a = 1.0f/float(divisor); float b = 0.5f*(a-1.0f); dim3 cudaBlockSize(1024,1,1); dim3 cudaGridSize(vbuffer->getElementsCount()/cudaBlockSize.x,1,1); interpolateResidualKernel<<<cudaGridSize,cudaBlockSize,0,vbuffer->cudaStream>>>(iData,vData,T, calibDataDev, a, b, colorOffset, imgData, width, height, vDataCur, grayPyramid.getImageRef(0).width, grayPyramid.getImageRef(0).height, residual, zWeightsDev, srcStride,dstStride); checkCudaError("interpolateResidual error"); } extern "C" void interpolateResidual2(VertexBuffer2 *vbuffer, float *T, float *calibDataDev, ImagePyramid2 &grayPyramid, int layer, float *zCurrentDev, float *zWeightsDev, float *residual, cudaStream_t stream) { if (vbuffer == NULL || T == NULL || vbuffer->devPtr == NULL || vbuffer->indexDevPtr == NULL || calibDataDev == NULL || residual == NULL || zCurrentDev == NULL || zWeightsDev == NULL) return; // enforce multiple of 1024 for element count -> max performance if (vbuffer->getElementsCount()%1024 != 0) { printf("interpolateResidual: vbuffer has wrong number of selected pixels!\n"); return; } if (layer > grayPyramid.nLayers-1) { printf("interpolateResidual: invalid layer number!\n"); return; } float *imgData = (float*)grayPyramid.getImageRef(layer).devPtr; if (imgData == NULL) { printf("given image does not have data allocated!\n"); return; } int srcStride = vbuffer->getStride(); int width = grayPyramid.getImageRef(layer).width; int height = grayPyramid.getImageRef(layer).height; float *vData = (float*)vbuffer->devPtr; int *iData = (int*)vbuffer->indexDevPtr; int colorOffset = 0; if (srcStride == VERTEXBUFFER_STRIDE) { colorOffset = 14; if (layer == 1) { colorOffset = 17; } else if (layer == 2) { colorOffset = 20; } else if (layer == 3) { colorOffset = 23; } } else if (srcStride == COMPRESSED_STRIDE) { colorOffset = 6; if (layer == 1) { colorOffset = 7; } else if (layer == 2) { colorOffset = 8; } else { printf("compressed stride does not have 4th layer attributes!\n"); return; } } int divisor = 1<<layer; float a = 1.0f/float(divisor); float b = 0.5f*(a-1.0f); dim3 cudaBlockSize(1024,1,1); dim3 cudaGridSize(vbuffer->getElementsCount()/cudaBlockSize.x,1,1); //printf("%d %d\n",vbuffer->getElementsCount(),cudaBlockSize.x); interpolateResidualKernel2<<<cudaGridSize,cudaBlockSize,0,stream>>>(iData,vData, T, calibDataDev, a, b, colorOffset, imgData, width, height, zCurrentDev, zWeightsDev, residual, srcStride, grayPyramid.getImageRef(0).width,grayPyramid.getImageRef(0).height); checkCudaError("interpolateResidual2 error"); } /* static int maxDist2Reso = 20*20; static float *expTableDev = NULL; extern "C" void initExpTable(int maxD2) { if (expTableDev == NULL) { maxDist2Reso = maxD2; cudaMalloc((void **)&expTableDev, maxDist2Reso*sizeof(float)); float *expTable = new float[resolution]; for (int i = 0; i < resolution; i++) { expTable[i] = exp(-50.0f*float(i)/(resolution*50.0f)); } } } extern "C" void releaseCudaDotProduct() { if (blockSumDev != NULL) { cudaFree(blockSumDev); blockSumDev = NULL; cudaFree(ADev); ADev = NULL; } }*/ extern "C" void filterDepthIIR(VertexBuffer2 *vbuffer, VertexBuffer2 
*vbufferCur, float *T, float *calibDataDev, float *weightsDev, int width, int height, float weightThreshold) { if (vbuffer == NULL || vbufferCur == NULL || T == NULL || vbuffer->devPtr == NULL || vbuffer->indexDevPtr == NULL || vbufferCur->devPtr == NULL || calibDataDev == NULL || weightsDev == NULL) return; // enforce multiple of 1024 for element count -> max performance if (vbuffer->getElementsCount()%1024 != 0) { printf("filterDepthIIR: vbuffer has wrong number of selected pixels!\n"); return; } // float *vData = (float*)vbuffer->devPtr; // float *vDataCur = (float*)vbufferCur->devPtr; // int *iData = (int*)vbuffer->indexDevPtr; dim3 cudaBlockSize(1024,1,1); dim3 cudaGridSize(vbuffer->getElementsCount()/cudaBlockSize.x,1,1); // filterDepthIIRKernel<<<cudaGridSize,cudaBlockSize,0,vbuffer->cudaStream>>>(iData,vData,T, calibDataDev, width, height, vDataCur, weightsDev, weightThreshold); checkCudaError("filterDepthIIR error"); //printf("executing iir depth filter\n"); fflush(stdin); fflush(stdout); } extern "C" void compressVertexBuffer(VertexBuffer2 *vbufferSrc, VertexBuffer2 *vbufferDst, bool rgbVisualization) { if (vbufferSrc == NULL || vbufferSrc->devPtr == NULL || vbufferSrc->indexDevPtr == NULL) return; if (vbufferDst == NULL || vbufferDst->devPtr == NULL || vbufferDst->indexDevPtr == NULL) return; // enforce multiple of 1024 for element count -> max performance if (vbufferSrc->getElementsCount()%1024 != 0) { printf("compressVertexBuffer: vbufferSrc has wrong number of selected pixels!\n"); return; } if (vbufferDst->getMaxVertexCount() < vbufferSrc->getElementsCount()) { printf("vbufferDst : %d, vbufferSrc: %d\n",vbufferDst->getElementsCount(),vbufferSrc->getElementsCount()); printf("compressVertexBuffer: vbufferDst max vertex size != vbufferSrc element size!\n"); fflush(stdin); fflush(stdout); return; } int srcStride = vbufferSrc->getStride(); int dstStride = vbufferDst->getStride(); vbufferDst->setElementsCount(vbufferSrc->getElementsCount()); float *vDataSrc = (float*)vbufferSrc->devPtr; int *iDataSrc = (int*)vbufferSrc->indexDevPtr; float *vDataDst = (float*)vbufferDst->devPtr; int *iDataDst = (int*)vbufferDst->indexDevPtr; dim3 cudaBlockSize(1024,1,1); dim3 cudaGridSize(vbufferDst->getElementsCount()/cudaBlockSize.x,1,1); compressVertexBufferKernel<<<cudaGridSize,cudaBlockSize,0,vbufferDst->cudaStream>>>(iDataSrc,vDataSrc,iDataDst,vDataDst,srcStride,dstStride,rgbVisualization); char buf[512]; sprintf(buf,"compressVertexBufferKernel() execution failed, arguments: %d, %d, %d, elems: %d\n",srcStride,dstStride,int(rgbVisualization),vbufferDst->getElementsCount()); getLastCudaError(buf); // checkCudaError("compressVertexBuffer error"); } extern "C" void compressVertexBuffer2(int *indicesExt,float *verticesExt,int pixelSelectionAmount,int srcStride, VertexBuffer2 *vbufferDst) { if (verticesExt == NULL || indicesExt == NULL) return; if (vbufferDst == NULL || vbufferDst->devPtr == NULL || vbufferDst->indexDevPtr == NULL) return; // enforce multiple of 1024 for element count -> max performance if (pixelSelectionAmount % 1024 != 0) { printf("compressVertexBuffer2: wrong number of selected pixels!\n"); return; } int dstStride = vbufferDst->getStride(); if (vbufferDst->getMaxVertexCount() < pixelSelectionAmount) { printf("vbufferDst : %d, vbufferSrc: %d\n",vbufferDst->getElementsCount(),pixelSelectionAmount); printf("compressVertexBuffer2: vbufferDst max vertex size != vbufferSrc element size!\n"); fflush(stdin); fflush(stdout); return; } bool rgbVisualization = false; float *vDataSrc = 
verticesExt; int *iDataSrc = indicesExt; float *vDataDst = (float*)vbufferDst->devPtr; int *iDataDst = (int*)vbufferDst->indexDevPtr; dim3 cudaBlockSize(1024,1,1); dim3 cudaGridSize(pixelSelectionAmount/cudaBlockSize.x,1,1); compressVertexBufferKernel<<<cudaGridSize,cudaBlockSize,0,vbufferDst->cudaStream>>>(iDataSrc,vDataSrc,iDataDst,vDataDst,srcStride,dstStride,rgbVisualization); vbufferDst->setElementsCount(pixelSelectionAmount); checkCudaError("compressVertexBuffer2 error"); } extern "C" void precomputeJacobian4Cuda(VertexBuffer2 *vbuffer, float *calibDataDev, float *jacobian1Dev, float *jacobian2Dev, float *jacobian3Dev, float *jacobian4Dev, float optScaleIn, cudaStream_t stream) { if (vbuffer == NULL || vbuffer->devPtr == NULL || vbuffer->indexDevPtr == NULL || jacobian1Dev == NULL || jacobian2Dev == NULL || jacobian3Dev == NULL|| jacobian4Dev == NULL) return; // enforce multiple of 1024 for element count -> max performance if (vbuffer->getElementsCount()%1024 != 0) { printf("precomputeJacobian: vbuffer has wrong number of selected pixels!\n"); return; } int stride = vbuffer->getStride(); float *vData = (float*)vbuffer->devPtr; int *iData = (int*)vbuffer->indexDevPtr; dim3 cudaBlockSize(512,1,1); dim3 cudaGridSize(vbuffer->getElementsCount()/cudaBlockSize.x,1,1); precomputeJacobian4Kernel<<<cudaGridSize,cudaBlockSize,0,stream>>>(iData,vData,calibDataDev,vbuffer->getElementsCount(),jacobian1Dev,jacobian2Dev,jacobian3Dev,jacobian4Dev,stride, optScaleIn); checkCudaError("precomputeJacobian4Cuda error"); } extern "C" void precomputeJacobianCuda(VertexBuffer2 *vbuffer, float *calibDataDev, float *jacobian1Dev, float *jacobian2Dev, float *jacobian3Dev, float optScaleIn) { if (vbuffer == NULL || vbuffer->devPtr == NULL || vbuffer->indexDevPtr == NULL || jacobian1Dev == NULL || jacobian2Dev == NULL || jacobian3Dev == NULL) return; // enforce multiple of 1024 for element count -> max performance if (vbuffer->getElementsCount()%1024 != 0) { printf("precomputeJacobian: vbuffer has wrong number of selected pixels!\n"); return; } int stride = vbuffer->getStride(); float *vData = (float*)vbuffer->devPtr; int *iData = (int*)vbuffer->indexDevPtr; dim3 cudaBlockSize(512,1,1); dim3 cudaGridSize(vbuffer->getElementsCount()/cudaBlockSize.x,1,1); precomputeJacobianKernel<<<cudaGridSize,cudaBlockSize,0,vbuffer->cudaStream>>>(iData,vData,calibDataDev,vbuffer->getElementsCount(),jacobian1Dev,jacobian2Dev,jacobian3Dev,stride, optScaleIn); checkCudaError("precomputeJacobianCuda error"); } /* extern "C" void precomputeJacobianUncompressedCuda(VertexBuffer2 *vbuffer, float *calibDataDev, float *jacobianDev1, float *jacobianDev2, float *jacobianDev3) { if (vbuffer == NULL || vbuffer->devPtr == NULL || vbuffer->indexDevPtr == NULL || jacobian1Dev == NULL || jacobian2Dev == NULL || jacobian3Dev == NULL) return; // enforce multiple of 1024 for element count -> max performance if (vbuffer->getElementsCount()%1024 != 0) { printf("precomputeJacobian: vbuffer has wrong number of selected pixels!\n"); return; } int stride = vbuffer->getStride(); float *vData = (float*)vbuffer->devPtr; int *iData = (int*)vbuffer->indexDevPtr; dim3 cudaBlockSize(512,1,1); dim3 cudaGridSize(vbuffer->getElementsCount()/cudaBlockSize.x,1,1); precomputeJacobianUncompressedKernel<<<cudaGridSize,cudaBlockSize,0,vbuffer->cudaStream>>>(iData,vData,calibDataDev,vbuffer->getElementsCount(),jacobian1Dev,jacobian2Dev,jacobian3Dev,stride); checkCudaError("precomputeJacobianCuda error"); }*/ extern "C" void weightJacobian(float *jacobianTDev, float 
*weights, int count, float *weightedJacobianTDev, cudaStream_t stream) { if (jacobianTDev == NULL || count < 1024 || weightedJacobianTDev == NULL) return; // enforce multiple of 1024 for element count -> max performance if (count%1024 != 0) { printf("wrong count for weightJacobian\n"); return; } dim3 cudaBlockSize(1024,1,1); dim3 cudaGridSize(count/cudaBlockSize.x,1,1); weightJacobianKernel<<<cudaGridSize,cudaBlockSize,0,stream>>>(jacobianTDev, weights, count, weightedJacobianTDev); checkCudaError("weightJacobian error"); } static float *blockSumDev = NULL; static doublereal *ADev = NULL; extern "C" void initCudaDotProduct() { if (blockSumDev == NULL) { cudaMalloc((void **)&blockSumDev, 1024*sizeof(float)); cudaMemset(blockSumDev,0,1024*sizeof(float)); cudaMalloc((void **)&ADev, 16*sizeof(doublereal)); cudaMemset(ADev, 0, 16*sizeof(doublereal)); } } extern "C" void releaseCudaDotProduct() { if (blockSumDev != NULL) { cudaFree(blockSumDev); blockSumDev = NULL; cudaFree(ADev); ADev = NULL; } } extern "C" void dotProductCuda(float *vecA, float *vecB, int count, float *resultA, float *resultB, cudaStream_t stream) { if (vecA == NULL || vecB == NULL || resultA == NULL || count < 1024 || blockSumDev == NULL) { printf("invalid input to dotProductCuda!\n"); return; } // enforce multiple of 1024 for element count -> max performance if (count%1024 != 0) { printf("count has wrong number of pixels!\n"); fflush(stdout); return; } int nthreads = 256;//512; int nblocks = count/nthreads; reduceProducts<float>(count, nthreads, nblocks, 6, vecA, vecB, blockSumDev,stream); dim3 cudaBlockSize(1,1,1); dim3 cudaGridSize(1,1,1); if (resultB != NULL) { sumElemsKernel2<<<cudaGridSize,cudaBlockSize,0,stream>>>(blockSumDev,nblocks,resultA,resultB); } else { sumElemsKernel<<<cudaGridSize,cudaBlockSize,0,stream>>>(blockSumDev,nblocks,resultA); } checkCudaError("dotProductCuda error"); } extern "C" void JTresidualCuda(float *JT, float *residual, int count, float *result6, cudaStream_t stream) { if (JT == NULL || residual == NULL || count < 1024 || result6 == NULL) { printf("invalid input to JTresidualCuda!\n"); return; } // enforce multiple of 1024 for element count -> max performance if (count%1024 != 0) { printf("count has wrong number of pixels!\n"); return; } dotProductCuda(JT+0*count, residual, count, result6+0,NULL,stream); dotProductCuda(JT+1*count, residual, count, result6+1,NULL,stream); dotProductCuda(JT+2*count, residual, count, result6+2,NULL,stream); dotProductCuda(JT+3*count, residual, count, result6+3,NULL,stream); dotProductCuda(JT+4*count, residual, count, result6+4,NULL,stream); dotProductCuda(JT+5*count, residual, count, result6+5,NULL,stream); } extern "C" void JTJCuda(float *JT,int count, float *JtJDev, cudaStream_t stream) { if (JT == NULL || count < 1024 || JtJDev == NULL) { printf("invalid parameters to JTJCuda.\n"); return; } // enforce multiple of 1024 for element count -> max performance if (count%1024 != 0) { printf("count has wrong number of pixels!\n"); fflush(stdout); return; } for (int j = 0; j < 6; j++) { for (int i = j; i < 6; i++) { dotProductCuda(JT+j*count, JT+i*count, count, JtJDev+i+j*6, JtJDev+i*6+j,stream); } } } void dumpp(const char *str, const float *M, int rows, int cols) { printf("%s:\n",str); for (int j = 0; j < rows; j++) { for (int i = 0; i < cols; i++) printf("%e ",M[i+j*cols]); printf("\n"); } } extern "C" void linearFuseCuda(float *JtJDevExt,float *residual6DevExt, float weight1, int N1, float *JtJDev, float *residual6Dev, float weight2, int N2, cudaStream_t stream) { 
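    // Fuses two normal-equation systems (6x6 JtJ and 6x1 Jt*r) into the *Ext buffers as a
    // weighted average. Each side is first scaled by the inverse of its own residual count
    // (1/N1, 1/N2) so that weight1 and weight2 remain comparable regardless of how many
    // points each system accumulated; the single-block launch below uses 36 threads, all of
    // them for the matrix entries and the first 6 also for the residual vector.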
if (JtJDevExt == NULL || JtJDev == NULL || residual6DevExt == NULL || residual6Dev == NULL) { printf("linearFuseCuda: invalid parameters.\n"); return; } double invN1 = 1.0/double(N1); double invN2 = 1.0/double(N2); dim3 cudaBlockSize(36,1,1); dim3 cudaGridSize(1,1,1); linearFuseKernel<<<cudaGridSize,cudaBlockSize,0,stream>>>(JtJDevExt,residual6DevExt, weight1, float(invN1), JtJDev, residual6Dev, weight2, float(invN2)); checkCudaError("linearFuseCuda error"); } extern "C" void solveMotionCuda(float *JtJDev, float *b, float *TDev, float scaleOut, cudaStream_t stream) { if (JtJDev == NULL || b == NULL || TDev == NULL || ADev == NULL) { printf("invalid parameters to solveMotionCuda.\n"); return; } /* float delay = 0.0f; float delays[4] = {0,0,0,0}; int N = 1; cudaEvent_t start,stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start,0); */ /* float *JJ = new float[36]; cudaMemcpyAsync(&JJ[0],JtJDev,sizeof(float)*36,cudaMemcpyDeviceToHost,stream); cudaStreamSynchronize(stream); dumpMatrix("JtJ",&JJ[0],6,6); delete[] JJ; */ /* for (int i = 0; i < N; i++) {*/ doublereal tol=1e-8; int maxSteps = 6; dim3 cudaBlockSize(1,1,1); dim3 cudaGridSize(1,1,1); conjugateGradientKernel<<<cudaGridSize,cudaBlockSize,0,stream>>>(JtJDev,b,tol,maxSteps,ADev); // } /* doublereal *A = new doublereal[16]; cudaMemcpyAsync(&A[0],ADev,sizeof(doublereal)*16,cudaMemcpyDeviceToHost,stream); cudaStreamSynchronize(stream); dumpMatrix("A",(double*)&A[0],4,4); delete[] A;*/ /* // TODO: load ADev päämuistiin ja tarkastele onko suuria eroja arovjen skaaloissa trans vs rot params? static float xmin[6] = {1e6,1e6,1e6,1e6,1e6,1e6}; static float xmax[6] = {0,0,0,0,0,0}; A[0] = 0; A[1] = -x[2]; A[2] = x[1]; A[3] = x[3]; A[4] = x[2];A[5] = 0; A[6] =-x[0]; A[7] = x[4]; A[8] =-x[1];A[9] = x[0]; A[10] = 0; A[11] = x[5]; A[12] = 0; A[13] = 0; A[14] = 0; A[15] = 0; float *A = new float[16]; cudaMemcpy(A,ADev,sizeof(float)*16,cudaMemcpyDeviceToHost); float angle = sqrt(A[6]*A[6]+A[2]*A[2]+A[1]*A[1]); if (angle > xmax[0]) xmax[0] = angle; if (angle > xmax[1]) xmax[1] = angle; if (angle > xmax[2]) xmax[2] = angle; if (fabs(A[3]) > xmax[3]) xmax[3] = fabs(A[3]); if (fabs(A[7]) > xmax[4]) xmax[4] = fabs(A[7]); if (fabs(A[11])> xmax[5]) xmax[5] = fabs(A[11]); dumpp("xmax",xmax,1,6); delete[] A; */ /* cudaEventRecord(stop,0); cudaThreadSynchronize(); cudaEventElapsedTime(&delay, start, stop); delays[0] += delay; cudaEventRecord(start,0); dim3 cudaBlockSize(6,6,1); dim3 cudaGridSize(1,1,1); for (int i = 0; i < N; i++) { choleskyKernel<<<cudaGridSize,cudaBlockSize,0,0>>>(JtJDev,b,ADev); } cudaEventRecord(stop,0); cudaThreadSynchronize(); cudaEventElapsedTime(&delay, start, stop); delays[1] += delay; for (int i = 0; i < N; i++) {*/ expmCuda(ADev, TDev, scaleOut, stream); /*} cudaEventRecord(stop,0); cudaThreadSynchronize(); cudaEventElapsedTime(&delay, start, stop); delays[0] += delay; printf("expm: %fms\n",delays[0]/N); */ // printf("cgm: %fms, chol: %fms\n",delays[0]/N,delays[1]/N); //cudaEventDestroy(start); //cudaEventDestroy(stop); checkCudaError("solveMotion error"); } extern "C" void matrixMult4Cuda(float *A, float *B, float *C) { if (A == NULL || B == NULL || C == NULL) { printf("invalid arguments to matrixMult4Cuda\n"); return; } dim3 cudaBlockSize(1,1,1); int nblocks = 1; dim3 cudaGridSize(nblocks,1,1); matrixMult4Kernel<<<cudaGridSize,cudaBlockSize,0,0>>>(A,B,C); //cudaThreadSynchronize(); } extern "C" void matrixMult4NormalizedCuda(float *A, float *B, float *C) { if (A == NULL || B == NULL || C == NULL) { 
printf("invalid arguments to matrixMult4Cuda\n"); return; } dim3 cudaBlockSize(1,1,1); int nblocks = 1; dim3 cudaGridSize(nblocks,1,1); matrixMult4NormalizedKernel<<<cudaGridSize,cudaBlockSize,0,0>>>(A,B,C); } extern "C" void invertPoseCuda(float *A, float *iA, int N, cudaStream_t stream) { if (A == NULL || iA == NULL || N < 1 || N > 1024) { printf("invalid arguments to invertPoseCuda\n"); return; } dim3 cudaBlockSize(N,1,1); int nblocks = 1; dim3 cudaGridSize(nblocks,1,1); invertMatrix4Kernel<<<cudaGridSize,cudaBlockSize,0,stream>>>(A,iA,N); } extern "C" void convertMatrixToPosAxisAngleCuda(float *A, float *posAxisAngle, int N) { if (A == NULL || posAxisAngle == NULL || N < 1) { printf("invalid arguments to convertMatrixToPosAxisAngleCuda\n"); return; } dim3 cudaBlockSize(N,1,1); int nblocks = 1; dim3 cudaGridSize(nblocks,1,1); convertToAxisAngleKernel<<<cudaGridSize,cudaBlockSize,0,0>>>(A,posAxisAngle,N); } extern "C" void filterPoseCuda(float *posAxisAngle, float *weightsDev, int N, float *T) { if (T == NULL || posAxisAngle == NULL || weightsDev == NULL || N < 1) { printf("invalid arguments to filterPoseCuda\n"); return; } dim3 cudaBlockSize(1,1,1); int nblocks = 1; dim3 cudaGridSize(nblocks,1,1); filterPoseKernel<<<cudaGridSize,cudaBlockSize,0,0>>>(posAxisAngle,weightsDev,N,T); } extern "C" void collectPointsCuda(VertexBuffer2 *vbufferSrc, float *Tsrc, int collectedPoints256, VertexBuffer2 *vbufferDst, float *Tdst) { if (vbufferSrc == NULL || vbufferSrc->devPtr == NULL || vbufferSrc->indexDevPtr == NULL) { printf("invalid source vbuffer (collectPointsCuda)\n"); return; } if (vbufferDst == NULL || vbufferDst->devPtr == NULL || vbufferDst->indexDevPtr == NULL) { printf("invalid destination vbuffer (collectPointsCuda)\n"); return; } if (Tsrc == NULL || Tdst == NULL || collectedPoints256 < 1) { printf("invalid Tsrc, Tdst or collectedPoints (collectPointsCuda)\n"); return; } // enforce multiple of 1024 for element count -> max performance if (vbufferSrc->getElementsCount()%256 != 0 || vbufferSrc->getElementsCount() <= 0) { printf("collectPointsCuda: vbuffer has wrong number of selected points!\n"); return; } int stride = vbufferSrc->getStride(); float *vDataSrc = (float*)vbufferSrc->devPtr; int *iDataSrc = (int*)vbufferSrc->indexDevPtr; float *vDataDst = (float*)vbufferDst->devPtr; // int *iDataDst = (int*)vbufferDst->indexDevPtr; int collectedPoints = collectedPoints256*256; int existingPoints = vbufferDst->getElementsCount(); int skipper = vbufferSrc->getElementsCount()/collectedPoints; if (skipper < 1) skipper = 1; dim3 cudaBlockSize(256,1,1); dim3 cudaGridSize(collectedPoints/cudaBlockSize.x,1,1); collectPointsKernel<<<cudaGridSize,cudaBlockSize,0,vbufferSrc->cudaStream>>>(iDataSrc,vDataSrc,Tsrc, skipper,&vDataDst[existingPoints*stride],Tdst, vbufferSrc->getStride()); vbufferDst->setElementsCount(existingPoints+collectedPoints); checkCudaError("collectPointsCuda error"); // printf("elem count: %d, collected: %d, skipper: %d\n",vbufferSrc->getElementsCount(),collectedPoints,skipper); //fflush(stdin); //fflush(stdout); } extern "C" void collectPointsCuda2(VertexBuffer2 *vbufferSrc, float *Tsrc, int collectedPoints256, float *vertexImageDev, float *Tdst) { if (vbufferSrc == NULL || vbufferSrc->devPtr == NULL || vbufferSrc->indexDevPtr == NULL) { printf("invalid source vbuffer (collectPointsCuda)\n"); return; } if (vertexImageDev == NULL) { printf("invalid destination vbuffer (collectPointsCuda)\n"); return; } if (Tsrc == NULL || Tdst == NULL || collectedPoints256 < 1) { printf("invalid 
Tsrc, Tdst or collectedPoints (collectPointsCuda)\n"); return; } // enforce multiple of 1024 for element count -> max performance if (vbufferSrc->getElementsCount()%256 != 0 || vbufferSrc->getElementsCount() <= 0) { printf("collectPointsCuda: vbuffer has wrong number of selected points!\n"); return; } float *vDataSrc = (float*)vbufferSrc->devPtr; int *iDataSrc = (int*)vbufferSrc->indexDevPtr; int collectedPoints = collectedPoints256*256; int skipper = vbufferSrc->getElementsCount()/collectedPoints; if (skipper < 1) skipper = 1; dim3 cudaBlockSize(256,1,1); dim3 cudaGridSize(collectedPoints/cudaBlockSize.x,1,1); collectPointsKernel<<<cudaGridSize,cudaBlockSize,0,vbufferSrc->cudaStream>>>(iDataSrc,vDataSrc,Tsrc, skipper,vertexImageDev,Tdst, vbufferSrc->getStride()); checkCudaError("collectPointsCuda error"); // printf("elem count: %d, collected: %d, skipper: %d\n",vbufferSrc->getElementsCount(),collectedPoints,skipper); //fflush(stdin); //fflush(stdout); } extern "C" void setPointIntensityCuda(VertexBuffer2 *vbuffer, float *Tsrc,float *Tdst,ImagePyramid2 *grayPyramid) { if (vbuffer == NULL || vbuffer->devPtr == NULL || vbuffer->indexDevPtr == NULL) { printf("invalid source vbuffer (setPointIntensityCuda)\n"); return; } if (Tsrc == NULL || Tdst == NULL) { printf("invalid Tsrc or Tdst (setPointIntensityCuda)\n"); return; } // enforce multiple of 1024 for element count -> max performance if (vbuffer->getElementsCount()%1024 != 0 || vbuffer->getElementsCount() <= 0) { printf("setPointIntensityCuda: vbuffer has wrong number of selected points!\n"); return; } if (grayPyramid == NULL || grayPyramid->getImagePtr(0) == NULL || grayPyramid->getImagePtr(1) == NULL || grayPyramid->getImagePtr(2) == NULL) { printf("setPointIntensityCuda: graypyramid is invalid\n"); return; } float *imgData[3]; assert(grayPyramid->nLayers == 3); for (int i = 0; i < 3; i++) { imgData[i] = (float*)grayPyramid->getImageRef(i).devPtr; if (imgData[i] == NULL) { printf("setPointIntensityCuda error: grayPyramid layer %d not locked! 
panik exit \n",i); return; } if (grayPyramid->getImageRef(i).renderable) { printf("setPointIntensityCuda error %d: grayPyramid layer is set renderable for no reason!\n",i); } } // float *vDataSrc = (float*)vbuffer->devPtr; // int *iDataSrc = (int*)vbuffer->indexDevPtr; int numPoints = vbuffer->getElementsCount(); dim3 cudaBlockSize(1024,1,1); dim3 cudaGridSize(numPoints/cudaBlockSize.x,1,1); //collectPointsKernel<<<cudaGridSize,cudaBlockSize,0,vbufferSrc->cudaStream>>>(iDataSrc,vDataSrc,Tsrc, skipper,vertexImageDev,Tdst); checkCudaError("setPointIntensityCuda error"); } extern "C" void collectPointsIntoImageCuda(VertexBuffer2 *vbufferSrc, float *Tsrc, int collectedPoints256, float *vertexImageDev, float *Tdst, int width, int height, float *calibDataDev) { if (vbufferSrc == NULL || vbufferSrc->devPtr == NULL || vbufferSrc->indexDevPtr == NULL) { printf("invalid source vbuffer (collectPointsIntoImageCuda)\n"); return; } if (vertexImageDev == NULL) { printf("invalid destination vbuffer (collectPointsIntoImageCuda)\n"); return; } if (Tsrc == NULL || Tdst == NULL || collectedPoints256 < 1) { printf("invalid Tsrc, Tdst or collectedPoints (collectPointsIntoImageCuda)\n"); return; } // enforce multiple of 1024 for element count -> max performance if (vbufferSrc->getElementsCount()%256 != 0 || vbufferSrc->getElementsCount() <= 0) { printf("collectPointsIntoImageCuda: vbuffer has wrong number of selected points!\n"); return; } float *vDataSrc = (float*)vbufferSrc->devPtr; int *iDataSrc = (int*)vbufferSrc->indexDevPtr; int collectedPoints = collectedPoints256*256; int skipper = vbufferSrc->getElementsCount()/collectedPoints; if (skipper < 1) skipper = 1; dim3 cudaBlockSize(256,1,1); dim3 cudaGridSize(collectedPoints/cudaBlockSize.x,1,1); collectPointsIntoImageKernel<<<cudaGridSize,cudaBlockSize,0,vbufferSrc->cudaStream>>>(iDataSrc,vDataSrc,Tsrc, skipper,vertexImageDev,Tdst,width,height,calibDataDev, vbufferSrc->getStride()); checkCudaError("collectPointsIntoImageCuda error"); } __global__ void vecProductKernel(float *vecA,float *vecB,float *result){ int idx = blockIdx.x*blockDim.x+threadIdx.x; result[idx] = vecA[idx]*vecB[idx]; } extern "C" void vectorProductCuda(float *vecA,float *vecB,int count,float *result, cudaStream_t stream) { if (vecA == NULL || vecB == NULL || result == NULL || count < 1024) { printf("invalid input to vectorProductCuda!\n"); return; } // enforce multiple of 1024 for element count -> max performance if (count%1024 != 0) { printf("count has wrong number of pixels! (vectorProductCuda)\n"); return; } dim3 cudaBlockSize(1024,1,1); dim3 cudaGridSize(count/cudaBlockSize.x,1,1); vecProductKernel<<<cudaGridSize,cudaBlockSize,0,stream>>>(vecA,vecB,result); checkCudaError("vectorProductCuda error"); } __global__ void listKernel(float *vData, int stride, float *selectedPoints) { int idx = blockIdx.x*blockDim.x+threadIdx.x; // make sure stride has matching number of elements stored here! int idxStride = idx*stride; selectedPoints[idx*2+0] = vData[idxStride+6];//r3.x;//p_1.x; selectedPoints[idx*2+1] = vData[idxStride+7];//r3.y;//p_1.y; } __global__ void listSelectedRefKernel(int *indexPointer, float *vData, int stride, float *selectedPoints, float *selectionColors) { int idx = blockIdx.x*blockDim.x+threadIdx.x; // make sure stride has matching number of elements stored here! 
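    // each selected vertex contributes one 2-D point (the screen-space coordinates cached at
    // offsets 6/7 of the vertex record) plus one scalar tag used to colour the overlay;
    // listSelectedCurKernel below instead re-projects the 3-D point through T and the
    // distortion model to obtain the current-view coordinates.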
int idxStride = indexPointer[idx]*stride; selectedPoints[idx*2+0] = vData[idxStride+6]; selectedPoints[idx*2+1] = vData[idxStride+7]; selectionColors[idx] = vData[idxStride+13]; } __global__ void listSelectedCurKernel(int *indexPointer, float *vData, float *calibDataDev, float *T, int stride, float *selectedPoints, float *selectionColors) { int idx = blockIdx.x*blockDim.x+threadIdx.x; // make sure stride has matching number of elements stored here! int idxStride = indexPointer[idx]*stride; float *kc = &calibDataDev[KcR_OFFSET]; float *KR = &calibDataDev[KR_OFFSET]; float3 p3,r3; p3.x = vData[idxStride+0]; p3.y = vData[idxStride+1]; p3.z = vData[idxStride+2]; matrixMultVec4(T, p3, r3); float2 p_1; float2 pu; pu.x = r3.x / r3.z; pu.y = r3.y / r3.z; distortPoint(pu,kc,KR,p_1); selectedPoints[idx*2+0] = p_1.x; selectedPoints[idx*2+1] = p_1.y; selectionColors[idx] = 0.5f;//vData[idxStride+13]; } extern "C" void listSelectedRefCuda(VertexBuffer2 *vbuffer, float *selectionPointsDev, float *selectionColorsDev) { if (vbuffer == NULL || vbuffer->devPtr == NULL || vbuffer->indexDevPtr == NULL || selectionColorsDev == NULL) { printf("listSelectedRefCuda: null pointer given!\n"); return; } // enforce multiple of 1024 for element count -> max performance if (vbuffer->getElementsCount()%512 != 0) { printf("listSelectedRefCuda: vbuffer has wrong number of selected pixels! (%d)\n",vbuffer->getElementsCount()); return; } int *indexPointer = (int*)vbuffer->indexDevPtr; float *vertexData = (float*)vbuffer->devPtr; int nElements = vbuffer->getElementsCount(); dim3 cudaBlockSize(512,1,1); dim3 cudaGridSize(nElements/cudaBlockSize.x,1,1); listSelectedRefKernel<<<cudaGridSize,cudaBlockSize,0,vbuffer->cudaStream>>>(indexPointer,vertexData,vbuffer->getStride(),selectionPointsDev,selectionColorsDev); /* float *vertexData = (float*)vbuffer->devPtr; int nElements = vbuffer->getVertexCount(); dim3 cudaBlockSize(512,1,1); dim3 cudaGridSize(nElements/cudaBlockSize.x,1,1); listKernel<<<cudaGridSize,cudaBlockSize,0,vbuffer->cudaStream>>>(vertexData,vbuffer->getStride(),selectionPointsDev); */ checkCudaError("listSelectedRefCuda error"); } extern "C" void listSelectedCurCuda(VertexBuffer2 *vbuffer, float *calibDataDev, float *TrelDev, float *selectionPointsDev, float *selectionColorsDev, cudaStream_t stream) { if (vbuffer == NULL || vbuffer->devPtr == NULL || vbuffer->indexDevPtr == NULL || selectionPointsDev == NULL || selectionColorsDev == NULL) { printf("listSelectedCurCuda: null pointer given!\n"); return; } // enforce multiple of 1024 for element count -> max performance if (vbuffer->getElementsCount()%512 != 0) { printf("listSelectedCurCuda: vbuffer has wrong number of selected pixels! 
(%d)\n",vbuffer->getElementsCount()); return; } int *indexPointer = (int*)vbuffer->indexDevPtr; float *vertexData = (float*)vbuffer->devPtr; int nElements = vbuffer->getElementsCount(); dim3 cudaBlockSize(512,1,1); dim3 cudaGridSize(nElements/cudaBlockSize.x,1,1); listSelectedCurKernel<<<cudaGridSize,cudaBlockSize,0,stream>>>(indexPointer,vertexData,calibDataDev, TrelDev, vbuffer->getStride(),selectionPointsDev,selectionColorsDev); /* float *vertexData = (float*)vbuffer->devPtr; int nElements = vbuffer->getVertexCount(); dim3 cudaBlockSize(512,1,1); dim3 cudaGridSize(nElements/cudaBlockSize.x,1,1); listKernel<<<cudaGridSize,cudaBlockSize,0,vbuffer->cudaStream>>>(vertexData,vbuffer->getStride(),selectionPointsDev); */ checkCudaError("listSelectedCurCuda error"); } __global__ void xyz2DiffKernel(int *iData, float *vData, int vWidth, int vHeight, float *T, float *calibDataDev, float a, float b, int refColorOffset, float *imgData, int width, int height, int srcStride, float *diffImage) { int idx = blockIdx.x*blockDim.x+threadIdx.x; int vindex = iData[idx]; int idxStride = vindex*srcStride; float *KR = &calibDataDev[KR_OFFSET]; float *kc = &calibDataDev[KcR_OFFSET]; float *TLR = &calibDataDev[TLR_OFFSET]; float3 p3,r3_ir,r3; p3.x = vData[idxStride+0]; p3.y = vData[idxStride+1]; p3.z = vData[idxStride+2]; matrixMultVec4(T, p3, r3_ir); // reference IR -> current IR matrixMultVec4(TLR, r3_ir, r3); // current IR -> current RGB float2 pu,p2; pu.x = r3.x / r3.z; pu.y = r3.y / r3.z; distortPoint(pu,kc,KR,p2); // resolution tweak: float2 p; p.x = a*p2.x + b; p.y = a*p2.y + b; float iResidual = 1.0f; // set max residual value for points outside fov int xdi = (int)p.x; int ydi = (int)p.y; if (xdi >= 0 && ydi >= 0 && xdi < width-1 && ydi < height-1) { float fx = p.x - xdi; float fy = p.y - ydi; float color = 0; bilinearInterpolation(xdi, ydi, fx, fy, width, imgData, color); iResidual = fabs(vData[idxStride+refColorOffset] - color); // residual range [-1,1] } int x = vindex % vWidth; int y = (vindex - x)/vWidth; diffImage[x+y*vWidth] = min(iResidual*25.0f,1); } extern "C" void xyz2DiffCuda(VertexBuffer2 *vbuffer, int vWidth, int vHeight, float *calibDataDev, float *TrelDev, float *diffImage, int width, int height, int layer, ImagePyramid2 *grayPyramidCur, cudaStream_t stream) { if (vbuffer == NULL || vbuffer->devPtr == NULL || calibDataDev == NULL || vbuffer->indexDevPtr == NULL || TrelDev == NULL || diffImage == NULL || grayPyramidCur == NULL) { printf("xyz2DiffCuda: null pointer given!\n"); return; } // enforce multiple of 1024 for element count -> max performance if (vbuffer->getElementsCount()%512 != 0) { printf("xyz2DiffCuda: vbuffer has wrong number of selected pixels! 
(%d)\n",vbuffer->getElementsCount()); return; } float *imgData = (float*)grayPyramidCur->getImageRef(layer).devPtr; int imgWidth = grayPyramidCur->getImageRef(layer).width; int imgHeight = grayPyramidCur->getImageRef(layer).height; if (imgData == NULL) { printf("xyz2DiffCuda: given image does not have data allocated!\n"); return; } int srcStride = vbuffer->getStride(); int colorOffset = 0; if (srcStride == VERTEXBUFFER_STRIDE) { colorOffset = 14; if (layer == 1) { colorOffset = 17; } else if (layer == 2) { colorOffset = 20; } else if (layer == 3) { colorOffset = 23; } } else if (srcStride == COMPRESSED_STRIDE) { colorOffset = 6; if (layer == 1) { colorOffset = 7; } else if (layer == 2) { colorOffset = 8; } else { printf("compressed stride does not have 4th layer attributes!\n"); return; } } int divisor = 1<<layer; float a = 1.0f/float(divisor); float b = 0.5f*(a-1.0f); int *indexPointer = (int*)vbuffer->indexDevPtr; float *vertexData = (float*)vbuffer->devPtr; int nElements = vbuffer->getElementsCount(); dim3 cudaBlockSize(512,1,1); dim3 cudaGridSize(nElements/cudaBlockSize.x,1,1); cudaMemsetAsync(diffImage,0,sizeof(float)*vWidth*vHeight,stream); xyz2DiffKernel<<<cudaGridSize,cudaBlockSize,0,stream>>>(indexPointer,vertexData,vWidth,vHeight,TrelDev, calibDataDev, a, b, colorOffset, imgData, imgWidth, imgHeight, srcStride, diffImage); checkCudaError("xyz2DiffCuda error"); }
f60c7e147074e550b32c9f92ac9b77f8c750ca63.hip
// !!! This is a file automatically generated by hipify!!!
/**
 * \file dnn/src/cuda/batched_matrix_mul/naive.cu
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include <hip/hip_runtime.h>
#include "src/cuda/matrix_mul/naive.cuh"
#include "src/cuda/utils.cuh"

namespace {

template <typename AType, typename BType, typename CType, typename CompType>
__global__ void do_exec(
        const AType* A, const BType* B, CType* C, size_t Batch, size_t M, size_t N,
        size_t K, size_t LDA, size_t LDB, size_t LDC, bool transA, bool transB) {
    for (int bid = blockIdx.x; bid < Batch; bid += gridDim.x) {
        const AType* A_r = A + (transA ? bid * K * LDA : bid * M * LDA);
        const BType* B_r = B + (transB ? bid * N * LDB : bid * K * LDB);
        CType* C_r = C + bid * M * LDC;
        for (size_t m = 0; m < M; ++m) {
            size_t n = threadIdx.x;
            for (; n < N; n += blockDim.x) {
                CompType res = static_cast<CompType>(0);
                for (size_t k = 0; k < K; ++k) {
                    AType av = transA ? A_r[k * LDA + m] : A_r[m * LDA + k];
                    BType bv = transB ? B_r[n * LDB + k] : B_r[k * LDB + n];
                    res += av * bv;
                }
                C_r[m * LDC + n] = res;
            }
        }
    }
}

}  // namespace

namespace megdnn {
namespace cuda {

template <typename AType, typename BType, typename CType, typename CompType>
void exec_bgemm_naive(
        const AType* A, const BType* B, CType* C, size_t Batch, size_t M, size_t N,
        size_t K, size_t LDA, size_t LDB, size_t LDC, bool transA, bool transB,
        hipStream_t stream) {
    hipLaunchKernelGGL(( do_exec<AType, BType, CType, CompType>), dim3(Batch), dim3(128), 0, stream,
            A, B, C, Batch, M, N, K, LDA, LDB, LDC, transA, transB);
}

#define INST(in_ct, out_ct, comp_ct)                                             \
    template void exec_bgemm_naive<                                              \
            typename in_ct, typename in_ct, typename out_ct, typename comp_ct>(  \
            const in_ct* A, const in_ct* B, out_ct* C, size_t Batch, size_t M,   \
            size_t N, size_t K, size_t LDA, size_t LDB, size_t LDC, bool transA, \
            bool transB, hipStream_t stream);

INST(megdnn::dt_float32, megdnn::dt_float32, megdnn::dt_float32)
INST(megdnn::dt_float16, megdnn::dt_float16, megdnn::dt_float16)
INST(megdnn::dt_float16, megdnn::dt_float16, megdnn::dt_float32)
#undef cb
#undef INST

}  // namespace cuda
}  // namespace megdnn

// vim: syntax=cpp.doxygen
f60c7e147074e550b32c9f92ac9b77f8c750ca63.cu
/**
 * \file dnn/src/cuda/batched_matrix_mul/naive.cu
 * MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
 *
 * Copyright (c) 2014-2021 Megvii Inc. All rights reserved.
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 */
#include <cuda.h>
#include "src/cuda/matrix_mul/naive.cuh"
#include "src/cuda/utils.cuh"

namespace {

template <typename AType, typename BType, typename CType, typename CompType>
__global__ void do_exec(
        const AType* A, const BType* B, CType* C, size_t Batch, size_t M, size_t N,
        size_t K, size_t LDA, size_t LDB, size_t LDC, bool transA, bool transB) {
    for (int bid = blockIdx.x; bid < Batch; bid += gridDim.x) {
        const AType* A_r = A + (transA ? bid * K * LDA : bid * M * LDA);
        const BType* B_r = B + (transB ? bid * N * LDB : bid * K * LDB);
        CType* C_r = C + bid * M * LDC;
        for (size_t m = 0; m < M; ++m) {
            size_t n = threadIdx.x;
            for (; n < N; n += blockDim.x) {
                CompType res = static_cast<CompType>(0);
                for (size_t k = 0; k < K; ++k) {
                    AType av = transA ? A_r[k * LDA + m] : A_r[m * LDA + k];
                    BType bv = transB ? B_r[n * LDB + k] : B_r[k * LDB + n];
                    res += av * bv;
                }
                C_r[m * LDC + n] = res;
            }
        }
    }
}

}  // namespace

namespace megdnn {
namespace cuda {

template <typename AType, typename BType, typename CType, typename CompType>
void exec_bgemm_naive(
        const AType* A, const BType* B, CType* C, size_t Batch, size_t M, size_t N,
        size_t K, size_t LDA, size_t LDB, size_t LDC, bool transA, bool transB,
        cudaStream_t stream) {
    do_exec<AType, BType, CType, CompType><<<Batch, 128, 0, stream>>>(
            A, B, C, Batch, M, N, K, LDA, LDB, LDC, transA, transB);
}

#define INST(in_ct, out_ct, comp_ct)                                             \
    template void exec_bgemm_naive<                                              \
            typename in_ct, typename in_ct, typename out_ct, typename comp_ct>(  \
            const in_ct* A, const in_ct* B, out_ct* C, size_t Batch, size_t M,   \
            size_t N, size_t K, size_t LDA, size_t LDB, size_t LDC, bool transA, \
            bool transB, cudaStream_t stream);

INST(megdnn::dt_float32, megdnn::dt_float32, megdnn::dt_float32)
INST(megdnn::dt_float16, megdnn::dt_float16, megdnn::dt_float16)
INST(megdnn::dt_float16, megdnn::dt_float16, megdnn::dt_float32)
#undef cb
#undef INST

}  // namespace cuda
}  // namespace megdnn

// vim: syntax=cpp.doxygen
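As a usage illustration for the naive batched GEMM above, the sketch below shows one plausible way to call it for float32 data. It assumes the included naive.cuh header declares exec_bgemm_naive and that megdnn::dt_float32 is MegDNN's plain float alias; the matrix sizes, buffer names, and leading dimensions are invented for the example and follow the kernel's row-major indexing (A_r[m * LDA + k], and so on). This is a sketch, not MegEngine's own driver code.

#include <cuda_runtime.h>
#include "src/cuda/matrix_mul/naive.cuh"  // assumed to declare exec_bgemm_naive

// Hypothetical driver: C[b] (8x8) = A[b] (8x16) * B[b] (16x8) for 4 batches,
// no transposition, tight row-major leading dimensions.
void example_bgemm_naive(const float* d_A, const float* d_B, float* d_C, cudaStream_t stream) {
    const size_t Batch = 4, M = 8, N = 8, K = 16;
    const size_t LDA = K, LDB = N, LDC = N;
    megdnn::cuda::exec_bgemm_naive<
            megdnn::dt_float32, megdnn::dt_float32, megdnn::dt_float32, megdnn::dt_float32>(
            d_A, d_B, d_C, Batch, M, N, K, LDA, LDB, LDC,
            /*transA=*/false, /*transB=*/false, stream);
}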
2866139b3505994d1a33ab942bb254f5b197e325.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void calculation( char *a, char *b, int *c, int constant, int vector_size )
{
    int tid = (blockIdx.x*blockDim.x) + threadIdx.x;    // this thread handles the data at its thread id
    if (tid < vector_size){
        // Read in inputs
        char prev_a = a[tid>0?tid-1:(vector_size-1)];
        char curr_a = a[tid];
        char post_a = a[tid<(vector_size-1)?tid+1:0];
        char curr_b = b[tid];

        // Do computation
        int output_c = (prev_a-post_a)*curr_b + curr_a*constant;

        // Write result
        c[tid] = output_c;
    }
}
2866139b3505994d1a33ab942bb254f5b197e325.cu
#include "includes.h" __global__ void calculation( char *a, char *b, int *c, int constant, int vector_size ) { int tid = (blockIdx.x*blockDim.x) + threadIdx.x; // this thread handles the data at its thread id if (tid < vector_size){ // Read in inputs char prev_a = a[tid>0?tid-1:(vector_size-1)]; char curr_a = a[tid]; char post_a = a[tid<(vector_size-1)?tid+1:0]; char curr_b = b[tid]; // Do computation int output_c = (prev_a-post_a)*curr_b + curr_a*constant; // Write result c[tid] = output_c; } }
d9916908c267133471ce51fed932f8fb2f173253.hip
// !!! This is a file automatically generated by hipify!!! #include "stdio.h" #include "stdlib.h" #include "math.h" #include "time.h" #include "string.h" #include "hip/hip_runtime.h" #include "hipfft.h" #include "Myfunctions.h" using namespace std; #define BLOCK_WIDTH 16 #define BLOCK_HEIGHT 16 //#define WITH_SHARED_MEMORY 0 #define pi 3.1415926 struct Multistream { hipStream_t stream,stream_back; }; __global__ void cuda_kernel_wavenumber ( int ntx, int nty, int ntz, float dx, float dy, float dz, float *kx, float *kz, float *ky, float *k, hipfftComplex *kvx_x, hipfftComplex *kvx_z, hipfftComplex *kvx_y, hipfftComplex *kvz_x, hipfftComplex *kvz_z, hipfftComplex *kvz_y, hipfftComplex *kvy_x, hipfftComplex *kvy_z, hipfftComplex *kvy_y ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; int ipt=iz*nty*ntx+ix*nty+iy; int iptt=iy*ntz*ntx+iz*ntx+ix; float dkx,dky,dkz; dkz=1.0/ntz/dz; dky=1.0/nty/dy; dkx=1.0/ntx/dx; float tmpx,tmpy,tmpz; tmpx=2*pi*dkx; tmpy=2*pi*dky; tmpz=2*pi*dkz; if(ix>=0 && ix<ntx && iy>=0 && iy< nty && iz>=0 && iz<ntz/2+1) kz[iz]=2*pi/ntz/dz*iz; if(ix>=0 && ix<ntx && iy>=0 && iy< nty && iz>=ntz/2+1 && iz<ntz) kz[iz]=2*pi/ntz/dz*(ntz-iz); if(iz>=0 && iz<ntz && iy>=0 && iy< nty && ix>=0 && ix<ntx/2+1) kx[ix]=2*pi/ntx/dx*ix; if(iz>=0 && iz<ntz && iy>=0 && iy< nty && ix>=ntx/2+1 && ix<ntx) kx[ix]=2*pi/ntx/dx*(ntx-ix); if(iz>=0 && iz<ntz && ix>=0 && ix<ntx && iy>=0 && iy< nty/2+1) ky[iy]=2*pi/nty/dy*iy; if(iz>=0 && iz<ntz && ix>=0 && ix<ntx && iy>=nty/2+1 && iy<nty) ky[iy]=2*pi/nty/dy*(nty-iy); if(iz>=0 && iz<ntz && iy>=0 && iy< nty && ix>=0 && ix<ntx) { k[ip]=pow(kx[ix],2)+pow(kz[iz],2)+pow(ky[iy],2); } k[0]=1e-10; if(ix>=0 && ix<ntx && iy>=0 && iy< nty && iz>=0 && iz<ntz/2+1) { kvz_z[ip].x=-tmpz*iz*sin(iz*pi/ntz); kvz_z[ip].y=tmpz*iz*cos(iz*pi/ntz); kvx_z[ip].x=tmpz*iz*sin(iz*pi/ntz); kvx_z[ip].y=tmpz*iz*cos(iz*pi/ntz); kvy_z[ip].x=tmpz*iz*sin(iz*pi/ntz); kvy_z[ip].y=tmpz*iz*cos(iz*pi/ntz); } if(ix>=0 && ix<ntx && iy>=0 && iy< nty && iz>=ntz/2+1 && iz<ntz) { kvz_z[ip].x=-tmpz*(ntz-iz)*sin((ntz-iz)*pi/ntz); kvz_z[ip].y=-tmpz*(ntz-iz)*cos((ntz-iz)*pi/ntz); kvx_z[ip].x=tmpz*(ntz-iz)*sin((ntz-iz)*pi/ntz); kvx_z[ip].y=-tmpz*(ntz-iz)*cos((ntz-iz)*pi/ntz); kvy_z[ip].x=tmpz*(ntz-iz)*sin((ntz-iz)*pi/ntz); kvy_z[ip].y=-tmpz*(ntz-iz)*cos((ntz-iz)*pi/ntz); } if(iz>=0 && iz<ntz && iy>=0 && iy< nty && ix>=0 && ix<ntx/2+1) { kvx_x[iptt].x=-tmpx*ix*sin(ix*pi/ntx); kvx_x[iptt].y=tmpx*ix*cos(ix*pi/ntx); kvz_x[iptt].x=tmpx*ix*sin(ix*pi/ntx); kvz_x[iptt].y=tmpx*ix*cos(ix*pi/ntx); kvy_x[iptt].x=tmpx*ix*sin(ix*pi/ntx); kvy_x[iptt].y=tmpx*ix*cos(ix*pi/ntx); } if(iz>=0 && iz<ntz && iy>=0 && iy< nty && ix>=ntx/2+1 && ix<ntx) { kvx_x[iptt].x=-tmpx*(ntx-ix)*sin((ntx-ix)*pi/ntx); kvx_x[iptt].y=-tmpx*(ntx-ix)*cos((ntx-ix)*pi/ntx); kvz_x[iptt].x=tmpx*(ntx-ix)*sin((ntx-ix)*pi/ntx); kvz_x[iptt].y=-tmpx*(ntx-ix)*cos((ntx-ix)*pi/ntx); kvy_x[iptt].x=tmpx*(ntx-ix)*sin((ntx-ix)*pi/ntx); kvy_x[iptt].y=-tmpx*(ntx-ix)*cos((ntx-ix)*pi/ntx); } if(iz>=0 && iz<ntz && ix>=0 && ix<ntx && iy>=0 && iy< nty/2+1) { kvy_y[ipt].x=-tmpy*iy*sin(iy*pi/nty); kvy_y[ipt].y=tmpy*iy*cos(iy*pi/nty); kvz_y[ipt].x=tmpy*iy*sin(iy*pi/nty); kvz_y[ipt].y=tmpy*iy*cos(iy*pi/nty); kvx_y[ipt].x=tmpy*iy*sin(iy*pi/nty); kvx_y[ipt].y=tmpy*iy*cos(iy*pi/nty); } if(iz>=0 && iz<ntz && ix>=0 && ix<ntx && iy>=nty/2+1 && iy<nty) { kvy_y[ipt].x=-tmpy*(nty-iy)*sin((nty-iy)*pi/nty); 
kvy_y[ipt].y=-tmpy*(nty-iy)*cos((nty-iy)*pi/nty); kvz_y[ipt].x=tmpy*(nty-iy)*sin((nty-iy)*pi/nty); kvz_y[ipt].y=-tmpy*(nty-iy)*cos((nty-iy)*pi/nty); kvx_y[ipt].x=tmpy*(nty-iy)*sin((nty-iy)*pi/nty); kvx_y[ipt].y=-tmpy*(nty-iy)*cos((nty-iy)*pi/nty); } __syncthreads(); } __global__ void cuda_kernel_viscoacoustic_parameters ( int ntx, int nty, int ntz, float dx, float dy, float dz, float dt, float w0, float *velp, float *vels, float *rho, float *k, float *gama_p, float *gama_s, float *Ap1, float *Ap2, float *Ap3, float *tao_p1, float *tao_p2, float *eta_p1, float *eta_p2, float *eta_p3, float *tao_s1, float *tao_s2, float *eta_s1, float *eta_s2, float *eta_s3 ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; float sinc2nd; float vel; if(iz>=0 && iz<ntz && iy>=0 && iy< nty && ix>=0 && ix<ntx) { // sinc2nd =1.0; //pow(sin(velp_max*powf(k[ip],0.5)*dt/2)/(velp_max*powf(k[ip],0.5)*dt/2),2); sinc2nd =1.0; //pow(sin(vel*powf(k[ip],0.5)*dt/2)/(vel*powf(k[ip],0.5)*dt/2),2); ////////////////////////////// Ap1[ip]=sinc2nd*powf(k[ip],-0.5); Ap2[ip]=sinc2nd; Ap3[ip]=sinc2nd*powf(k[ip],0.5); ///////////////////////////// ////////////////////////////////// tao_p1[ip]=rho[ip]*pow(velp[ip]*cos(gama_p[ip]*pi/2.0),1)*gama_p[ip]*pi; tao_s1[ip]=rho[ip]*pow(vels[ip]*cos(gama_s[ip]*pi/2.0),1)*gama_s[ip]*pi; tao_p2[ip]=rho[ip]*pow(velp[ip]*cos(gama_p[ip]*pi/2.0),2)*pow(gama_p[ip],2)*pi/w0; tao_s2[ip]=rho[ip]*pow(vels[ip]*cos(gama_s[ip]*pi/2.0),2)*pow(gama_s[ip],2)*pi/w0; eta_p1[ip]=-rho[ip]*pow(velp[ip]*cos(gama_p[ip]*pi/2.0),1)*gama_p[ip]*w0; eta_s1[ip]=-rho[ip]*pow(vels[ip]*cos(gama_s[ip]*pi/2.0),1)*gama_s[ip]*w0; eta_p2[ip]=rho[ip]*pow(velp[ip]*cos(gama_p[ip]*pi/2.0),2); eta_s2[ip]=rho[ip]*pow(vels[ip]*cos(gama_s[ip]*pi/2.0),2); eta_p3[ip]=rho[ip]*pow(velp[ip]*cos(gama_p[ip]*pi/2.0),3)*gama_p[ip]/w0; eta_s3[ip]=rho[ip]*pow(vels[ip]*cos(gama_s[ip]*pi/2.0),3)*gama_s[ip]/w0; ///////////////////////////////// } __syncthreads(); } __global__ void cuda_kernel_pml_parameters ( int ntx, int nty, int ntz, int pml, float dx, float dy, float dz, float dt, float f0, float velp_max, float *gammax, float *alphax, float *Omegax, float *a_x, float *b_x, float *gammay, float *alphay, float *Omegay, float *a_y, float *b_y, float *gammaz, float *alphaz, float *Omegaz, float *a_z, float *b_z ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int n1=2; int n2=1; int n3=2; float R=1e-2; velp_max=5000; // float gamma_max = 1.0; float alpha_max = pi*f0; float Omegax_max = (1+n1+n2)*velp_max*log(1.0/R)/((n1+n2-1)*pml*dx); float Omegay_max = (1+n1+n2)*velp_max*log(1.0/R)/((n1+n2-1)*pml*dy); float Omegaz_max = (1+n1+n2)*velp_max*log(1.0/R)/((n1+n2-1)*pml*dz); if(ix>=0&&ix<=pml-1) { gammax[ix] = 1.0;// + (gamma_max-1)*powf(1.0*ix/(pml-1),n1); alphax[ix] = alpha_max*powf(1.0*ix/(pml-1),n3); Omegax[ix] = Omegax_max*powf(1.0*(pml-1-ix)/pml,n1+n2); gammax[ntx-1-ix] = gammax[ix]; alphax[ntx-1-ix] = alphax[ix]; Omegax[ntx-1-ix] = Omegax[ix]; } if(ix>=pml&&ix<=ntx-1-pml) { gammax[ix] = 1.0; alphax[ix] = alpha_max; Omegax[ix] = 0.0; } if(iy>=0&&iy<=pml-1) { gammay[iy] = 1.0;// + (gamma_max-1)*powf(1.0*ix/(pml-1),n1); alphay[iy] = alpha_max*powf(1.0*iy/(pml-1),n3); Omegay[iy] = Omegay_max*powf(1.0*(pml-1-iy)/pml,n1+n2); gammay[nty-1-iy] = gammay[iy]; 
alphay[nty-1-iy] = alphay[iy]; Omegay[nty-1-iy] = Omegay[iy]; } if(iy>=pml&&iy<=nty-1-pml) { gammay[iy] = 1.0; alphay[iy] = alpha_max; Omegay[iy] = 0.0; } if(iz>=0&&iz<=pml-1) { gammaz[iz] = 1.0;// + (gamma_max-1)*gamma_max*powf(1.0*iz/(pml-1),n1); alphaz[iz] = alpha_max*powf(1.0*iz/(pml-1),n3); Omegaz[iz] = Omegaz_max*powf(1.0*(pml-1-iz)/pml,n1+n2); gammaz[ntz-1-iz] = gammaz[iz]; alphaz[ntz-1-iz] = alphaz[iz]; Omegaz[ntz-1-iz] = Omegaz[iz]; } if(iz>=pml&&iz<=ntz-1-pml) { gammaz[iz] = 1.0; alphaz[iz] = alpha_max; Omegaz[iz] = 0.0; } if(ix>=0&&ix<=ntx-1) { a_x[ix] = alphax[ix] + Omegax[ix]/gammax[ix]; b_x[ix] = Omegax[ix]/powf(gammax[ix],2.0); } if(iy>=0&&iy<=nty-1) { a_y[iy] = alphay[iy] + Omegay[iy]/gammay[iy]; b_y[iy] = Omegay[iy]/powf(gammay[iy],2.0); } if(iz>=0&&iz<=ntz-1) { a_z[iz] = alphaz[iz] + Omegaz[iz]/gammaz[iz]; b_z[iz] = Omegaz[iz]/powf(gammaz[iz],2.0); } __syncthreads(); } __global__ void cuda_kernel_initialization ( int ntx, int nty, int ntz, float *vx, float *vy, float *vz, float *pxx, float *pyy, float *pzz, float *pxy, float *pyz, float *pxz, float *phi_vx_xx, float *phi_vz_zx, float *phi_vy_yx, float *phi_vx_xy, float *phi_vz_zy, float *phi_vy_yy, float *phi_vx_xz, float *phi_vz_zz, float *phi_vy_yz, float *phi_vx_z, float *phi_vz_x, float *phi_vx_y, float *phi_vy_x, float *phi_vy_z, float *phi_vz_y, float *phi_pxx_x, float *phi_pxy_y, float *phi_pxz_z, float *phi_pxy_x, float *phi_pyy_y, float *phi_pyz_z, float *phi_pxz_x, float *phi_pyz_y, float *phi_pzz_z, hipfftComplex *dvx, hipfftComplex *dvy, hipfftComplex *dvz, hipfftComplex *partx1, hipfftComplex *partz1, hipfftComplex *party1, hipfftComplex *partx2, hipfftComplex *partz2, hipfftComplex *party2, hipfftComplex *partx3, hipfftComplex *partz3, hipfftComplex *party3, hipfftComplex *partvx_x1, hipfftComplex *partvx_x2, hipfftComplex *partvx_x3, hipfftComplex *partvx_x4, hipfftComplex *partvx_x5, hipfftComplex *partvz_z1, hipfftComplex *partvz_z2, hipfftComplex *partvz_z3, hipfftComplex *partvz_z4, hipfftComplex *partvz_z5, hipfftComplex *partvy_y1, hipfftComplex *partvy_y2, hipfftComplex *partvy_y3, hipfftComplex *partvy_y4, hipfftComplex *partvy_y5, hipfftComplex *partvx_y1, hipfftComplex *partvx_y2, hipfftComplex *partvx_y3, hipfftComplex *partvx_y4, hipfftComplex *partvx_y5, hipfftComplex *partvy_x1, hipfftComplex *partvy_x2, hipfftComplex *partvy_x3, hipfftComplex *partvy_x4, hipfftComplex *partvy_x5, hipfftComplex *partvy_z1, hipfftComplex *partvy_z2, hipfftComplex *partvy_z3, hipfftComplex *partvy_z4, hipfftComplex *partvy_z5, hipfftComplex *partvz_y1, hipfftComplex *partvz_y2, hipfftComplex *partvz_y3, hipfftComplex *partvz_y4, hipfftComplex *partvz_y5, hipfftComplex *partvx_z1, hipfftComplex *partvx_z2, hipfftComplex *partvx_z3, hipfftComplex *partvx_z4, hipfftComplex *partvx_z5, hipfftComplex *partvz_x1, hipfftComplex *partvz_x2, hipfftComplex *partvz_x3, hipfftComplex *partvz_x4, hipfftComplex *partvz_x5 ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; if(iz>=0 && iz<ntz && iy>=0 && iy< nty && ix>=0 && ix<ntx) { vx[ip]=0.0;vy[ip]=0.0;vz[ip]=0.0; pxx[ip]=0.0; pyy[ip]=0.0; pzz[ip]=0.0; pxy[ip]=0.0; pyz[ip]=0.0; pxz[ip]=0.0; phi_vx_xx[ip]=0.0; phi_vz_zx[ip]=0.0; phi_vy_yx[ip]=0.0; phi_vx_xy[ip]=0.0; phi_vz_zy[ip]=0.0; phi_vy_yy[ip]=0.0; phi_vx_xz[ip]=0.0; phi_vz_zz[ip]=0.0; phi_vy_yz[ip]=0.0; phi_vx_z[ip]=0.0; phi_vz_x[ip]=0.0; phi_vx_y[ip]=0.0; 
phi_vy_x[ip]=0.0; phi_vy_z[ip]=0.0; phi_vz_y[ip]=0.0; phi_pxx_x[ip]=0.0; phi_pxy_y[ip]=0.0; phi_pxz_z[ip]=0.0; phi_pxy_x[ip]=0.0; phi_pyy_y[ip]=0.0; phi_pyz_z[ip]=0.0; phi_pxz_x[ip]=0.0; phi_pyz_y[ip]=0.0; phi_pzz_z[ip]=0.0; partx1[ip].x=0.0; partx1[ip].y=0.0; party1[ip].x=0.0; party1[ip].y=0.0; partz1[ip].x=0.0; partz1[ip].y=0.0; partx2[ip].x=0.0; partx2[ip].y=0.0; party2[ip].x=0.0; party2[ip].y=0.0; partz2[ip].x=0.0; partz2[ip].y=0.0; partx3[ip].x=0.0; partx3[ip].y=0.0; party3[ip].x=0.0; party3[ip].y=0.0; partz3[ip].x=0.0; partz3[ip].y=0.0; partvx_x1[ip].x=0.0; partvx_x1[ip].y=0.0; partvz_z1[ip].x=0.0; partvz_z1[ip].y=0.0; partvy_y1[ip].x=0.0; partvy_y1[ip].y=0.0; partvx_x2[ip].x=0.0; partvx_x2[ip].y=0.0; partvz_z2[ip].x=0.0; partvz_z2[ip].y=0.0; partvy_y2[ip].x=0.0; partvy_y2[ip].y=0.0; partvx_x3[ip].x=0.0; partvx_x3[ip].y=0.0; partvz_z3[ip].x=0.0; partvz_z3[ip].y=0.0; partvy_y3[ip].x=0.0; partvy_y3[ip].y=0.0; partvx_x4[ip].x=0.0; partvx_x4[ip].y=0.0; partvz_z4[ip].x=0.0; partvz_z4[ip].y=0.0; partvy_y4[ip].x=0.0; partvy_y4[ip].y=0.0; partvx_x5[ip].x=0.0; partvx_x5[ip].y=0.0; partvz_z5[ip].x=0.0; partvz_z5[ip].y=0.0; partvy_y5[ip].x=0.0; partvy_y5[ip].y=0.0; partvx_y1[ip].x=0.0; partvx_y1[ip].y=0.0; partvy_x1[ip].x=0.0; partvy_x1[ip].y=0.0; partvx_y2[ip].x=0.0; partvx_y2[ip].y=0.0; partvy_x2[ip].x=0.0; partvy_x2[ip].y=0.0; partvx_y3[ip].x=0.0; partvx_y3[ip].y=0.0; partvy_x3[ip].x=0.0; partvy_x3[ip].y=0.0; partvx_y4[ip].x=0.0; partvx_y4[ip].y=0.0; partvy_x4[ip].x=0.0; partvy_x4[ip].y=0.0; partvx_y5[ip].x=0.0; partvx_y5[ip].y=0.0; partvy_x5[ip].x=0.0; partvy_x5[ip].y=0.0; partvy_z1[ip].x=0.0; partvy_z1[ip].y=0.0; partvz_y1[ip].x=0.0; partvz_y1[ip].y=0.0; partvy_z2[ip].x=0.0; partvy_z2[ip].y=0.0; partvz_y2[ip].x=0.0; partvz_y2[ip].y=0.0; partvy_z3[ip].x=0.0; partvy_z3[ip].y=0.0; partvz_y3[ip].x=0.0; partvz_y3[ip].y=0.0; partvy_z4[ip].x=0.0; partvy_z4[ip].y=0.0; partvz_y4[ip].x=0.0; partvz_y4[ip].y=0.0; partvy_z5[ip].x=0.0; partvy_z5[ip].y=0.0; partvz_y5[ip].x=0.0; partvz_y5[ip].y=0.0; partvz_x1[ip].x=0.0; partvz_x1[ip].y=0.0; partvx_z1[ip].x=0.0; partvx_z1[ip].y=0.0; partvz_x2[ip].x=0.0; partvz_x2[ip].y=0.0; partvx_z2[ip].x=0.0; partvx_z2[ip].y=0.0; partvz_x3[ip].x=0.0; partvz_x3[ip].y=0.0; partvx_z3[ip].x=0.0; partvx_z3[ip].y=0.0; partvz_x4[ip].x=0.0; partvz_x4[ip].y=0.0; partvx_z4[ip].x=0.0; partvx_z4[ip].y=0.0; partvz_x5[ip].x=0.0; partvz_x5[ip].y=0.0; partvx_z5[ip].x=0.0; partvx_z5[ip].y=0.0; dvx[ip].x=0.0; dvx[ip].y=0.0; dvy[ip].x=0.0; dvy[ip].y=0.0; dvz[ip].x=0.0; dvz[ip].y=0.0; } __syncthreads(); } __global__ void cuda_kernel_p_real_to_complex ( int ntx, int nty, int ntz, float *real_pxx, float *real_pyy, float *real_pzz, float *real_pxy, float *real_pxz, float *real_pyz, hipfftComplex *in_pxx, hipfftComplex *in_pyy, hipfftComplex *in_pzz, hipfftComplex *in_pxy, hipfftComplex *in_pxz, hipfftComplex *in_pyz ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; if(iz>=0 && iz<ntz && iy>=0 && iy< nty && ix>=0 && ix<ntx) { in_pxx[ip].x=real_pxx[ip]; in_pxx[ip].y=0.0; in_pyy[ip].x=real_pyy[ip]; in_pyy[ip].y=0.0; in_pzz[ip].x=real_pzz[ip]; in_pzz[ip].y=0.0; in_pxy[ip].x=real_pxy[ip]; in_pxy[ip].y=0.0; in_pyz[ip].x=real_pyz[ip]; in_pyz[ip].y=0.0; in_pxz[ip].x=real_pxz[ip]; in_pxz[ip].y=0.0; } __syncthreads(); } __global__ void cuda_kernel_vxvyvz_real_to_complex ( int ntx, int nty, int ntz, float *real_x, float *real_y, float 
*real_z, hipfftComplex *inx, hipfftComplex *iny, hipfftComplex *inz ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; if(iz>=0 && iz<ntz && iy>=0 && iy< nty && ix>=0 && ix<ntx) { inx[ip].x=real_x[ip]; inx[ip].y=0.0; iny[ip].x=real_y[ip]; iny[ip].y=0.0; inz[ip].x=real_z[ip]; inz[ip].y=0.0; } __syncthreads(); } __global__ void cuda_kernel_operate_k_pxxpyypzz ( int ntx, int nty, int ntz, float dt, hipfftComplex *outx, hipfftComplex *outy, hipfftComplex *outz, hipfftComplex *dvx, hipfftComplex *dvy, hipfftComplex *dvz, hipfftComplex *inx, hipfftComplex *iny, hipfftComplex *inz, hipfftComplex *k_x, hipfftComplex *k_y, hipfftComplex *k_z, float *k2, int AorB ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; int ipt=iz*nty*ntx+ix*nty+iy; int iptt=iy*ntz*ntx+iz*ntx+ix; hipfftComplex tmpx, tmpy, tmpz; if(iz>=0 && iz<ntz && ix>=0 && ix<ntx && iy>=0 && iy< nty) { if(AorB==0) { inx[ip].x=k2[ip]*(k_x[iptt].x*outx[ip].x - k_x[iptt].y*outx[ip].y); inx[ip].y=k2[ip]*(k_x[iptt].x*outx[ip].y + k_x[iptt].y*outx[ip].x); iny[ip].x=k2[ip]*(k_y[ipt].x*outy[ip].x - k_y[ipt].y*outy[ip].y); iny[ip].y=k2[ip]*(k_y[ipt].x*outy[ip].y + k_y[ipt].y*outy[ip].x); inz[ip].x=k2[ip]*(k_z[ip].x*outz[ip].x - k_z[ip].y*outz[ip].y); inz[ip].y=k2[ip]*(k_z[ip].x*outz[ip].y + k_z[ip].y*outz[ip].x); } if(AorB==1) { tmpx.x=(outx[ip].x-dvx[ip].x)/dt; tmpx.y=(outx[ip].y-dvx[ip].y)/dt; tmpy.x=(outy[ip].x-dvy[ip].x)/dt; tmpy.y=(outy[ip].y-dvy[ip].y)/dt; tmpz.x=(outz[ip].x-dvz[ip].x)/dt; tmpz.y=(outz[ip].y-dvz[ip].y)/dt; inx[ip].x=k2[ip]*(k_x[iptt].x*tmpx.x - k_x[iptt].y*tmpx.y); inx[ip].y=k2[ip]*(k_x[iptt].x*tmpx.y + k_x[iptt].y*tmpx.x); iny[ip].x=k2[ip]*(k_y[ipt].x*tmpy.x - k_y[ipt].y*tmpy.y); iny[ip].y=k2[ip]*(k_y[ipt].x*tmpy.y + k_y[ipt].y*tmpy.x); inz[ip].x=k2[ip]*(k_z[ip].x*tmpz.x - k_z[ip].y*tmpz.y); inz[ip].y=k2[ip]*(k_z[ip].x*tmpz.y + k_z[ip].y*tmpz.x); } } __syncthreads(); } __global__ void cuda_kernel_operate_k_pxz ( int ntx, int nty, int ntz, float dt, hipfftComplex *outx, hipfftComplex *outz, hipfftComplex *dvx, hipfftComplex *dvz, hipfftComplex *inx, hipfftComplex *inz, hipfftComplex *k_x, hipfftComplex *k_z, float *k2, int AorB ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; int ipt=iz*nty*ntx+ix*nty+iy; int iptt=iy*ntz*ntx+iz*ntx+ix; hipfftComplex tmpx, tmpy, tmpz; if(iz>=0 && iz<ntz && ix>=0 && ix<ntx && iy>=0 && iy< nty) { if(AorB==0) { inx[ip].x=k2[ip]*(k_x[ip].x*outx[ip].x - k_x[ip].y*outx[ip].y); inx[ip].y=k2[ip]*(k_x[ip].x*outx[ip].y + k_x[ip].y*outx[ip].x); inz[ip].x=k2[ip]*(k_z[iptt].x*outz[ip].x - k_z[iptt].y*outz[ip].y); inz[ip].y=k2[ip]*(k_z[iptt].x*outz[ip].y + k_z[iptt].y*outz[ip].x); } if(AorB==1) { tmpx.x=(outx[ip].x-dvx[ip].x)/dt; tmpx.y=(outx[ip].y-dvx[ip].y)/dt; tmpz.x=(outz[ip].x-dvz[ip].x)/dt; tmpz.y=(outz[ip].y-dvz[ip].y)/dt; inx[ip].x=k2[ip]*(k_x[ip].x*tmpx.x - k_x[ip].y*tmpx.y); inx[ip].y=k2[ip]*(k_x[ip].x*tmpx.y + k_x[ip].y*tmpx.x); inz[ip].x=k2[ip]*(k_z[iptt].x*tmpz.x - k_z[iptt].y*tmpz.y); inz[ip].y=k2[ip]*(k_z[iptt].x*tmpz.y + k_z[iptt].y*tmpz.x); } } __syncthreads(); } __global__ void 
cuda_kernel_operate_k_pxy ( int ntx, int nty, int ntz, float dt, hipfftComplex *outx, hipfftComplex *outy, hipfftComplex *dvx, hipfftComplex *dvy, hipfftComplex *inx, hipfftComplex *iny, hipfftComplex *k_x, hipfftComplex *k_y, float *k2, int AorB ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; int ipt=iz*nty*ntx+ix*nty+iy; int iptt=iy*ntz*ntx+iz*ntx+ix; hipfftComplex tmpx, tmpy, tmpz; if(iz>=0 && iz<ntz && ix>=0 && ix<ntx && iy>=0 && iy< nty) { if(AorB==0) { inx[ip].x=k2[ip]*(k_x[ipt].x*outx[ip].x - k_x[ipt].y*outx[ip].y); inx[ip].y=k2[ip]*(k_x[ipt].x*outx[ip].y + k_x[ipt].y*outx[ip].x); iny[ip].x=k2[ip]*(k_y[iptt].x*outy[ip].x - k_y[iptt].y*outy[ip].y); iny[ip].y=k2[ip]*(k_y[iptt].x*outy[ip].y + k_y[iptt].y*outy[ip].x); } if(AorB==1) { tmpx.x=(outx[ip].x-dvx[ip].x)/dt; tmpx.y=(outx[ip].y-dvx[ip].y)/dt; tmpy.x=(outy[ip].x-dvy[ip].x)/dt; tmpy.y=(outy[ip].y-dvy[ip].y)/dt; inx[ip].x=k2[ip]*(k_x[ipt].x*tmpx.x - k_x[ipt].y*tmpx.y); inx[ip].y=k2[ip]*(k_x[ipt].x*tmpx.y + k_x[ipt].y*tmpx.x); iny[ip].x=k2[ip]*(k_y[iptt].x*tmpy.x - k_y[iptt].y*tmpy.y); iny[ip].y=k2[ip]*(k_y[iptt].x*tmpy.y + k_y[iptt].y*tmpy.x); } } __syncthreads(); } __global__ void cuda_kernel_operate_k_pyz ( int ntx, int nty, int ntz, float dt, hipfftComplex *outy, hipfftComplex *outz, hipfftComplex *dvy, hipfftComplex *dvz, hipfftComplex *iny, hipfftComplex *inz, hipfftComplex *k_y, hipfftComplex *k_z, float *k2, int AorB ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; int ipt=iz*nty*ntx+ix*nty+iy; int iptt=iy*ntz*ntx+iz*ntx+ix; hipfftComplex tmpx, tmpy, tmpz; if(iz>=0 && iz<ntz && ix>=0 && ix<ntx && iy>=0 && iy< nty) { if(AorB==0) { iny[ip].x=k2[ip]*(k_y[ip].x*outy[ip].x - k_y[ip].y*outy[ip].y); iny[ip].y=k2[ip]*(k_y[ip].x*outy[ip].y + k_y[ip].y*outy[ip].x); inz[ip].x=k2[ip]*(k_z[ipt].x*outz[ip].x - k_z[ipt].y*outz[ip].y); inz[ip].y=k2[ip]*(k_z[ipt].x*outz[ip].y + k_z[ipt].y*outz[ip].x); } if(AorB==1) { tmpy.x=(outy[ip].x-dvy[ip].x)/dt; tmpy.y=(outy[ip].y-dvy[ip].y)/dt; tmpz.x=(outz[ip].x-dvz[ip].x)/dt; tmpz.y=(outz[ip].y-dvz[ip].y)/dt; iny[ip].x=k2[ip]*(k_y[ip].x*tmpy.x - k_y[ip].y*tmpy.y); iny[ip].y=k2[ip]*(k_y[ip].x*tmpy.y + k_y[ip].y*tmpy.x); inz[ip].x=k2[ip]*(k_z[ipt].x*tmpz.x - k_z[ipt].y*tmpz.y); inz[ip].y=k2[ip]*(k_z[ipt].x*tmpz.y + k_z[ipt].y*tmpz.x); } } __syncthreads(); } __global__ void cuda_kernel_operate_k_v ( int ntx, int nty, int ntz, float dt, hipfftComplex *outx, hipfftComplex *outy, hipfftComplex *outz, hipfftComplex *inx, hipfftComplex *iny, hipfftComplex *inz, hipfftComplex *k_x, hipfftComplex *k_y, hipfftComplex *k_z ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; int ipt=iz*nty*ntx+ix*nty+iy; int iptt=iy*ntz*ntx+iz*ntx+ix; hipfftComplex tmpx, tmpy, tmpz; if(iz>=0 && iz<ntz && ix>=0 && ix<ntx && iy>=0 && iy< nty) { inx[ip].x=k_x[iptt].x*outx[ip].x - k_x[iptt].y*outx[ip].y; inx[ip].y=k_x[iptt].x*outx[ip].y + k_x[iptt].y*outx[ip].x; iny[ip].x=k_y[ipt].x*outy[ip].x - k_y[ipt].y*outy[ip].y; iny[ip].y=k_y[ipt].x*outy[ip].y + k_y[ipt].y*outy[ip].x; inz[ip].x=k_z[ip].x*outz[ip].x - k_z[ip].y*outz[ip].y; 
inz[ip].y=k_z[ip].x*outz[ip].y + k_z[ip].y*outz[ip].x; } __syncthreads(); } __global__ void cuda_kernel_forward_IO ( int ntx, int nty, int ntz, int ntp, int pml, int nt, int it, float dx, float dy, float dz, float dt, int s_ix, int s_iy, int s_iz, float *rik, float *record, float *record2, float *record3, int *r_ix, int *r_iy, int r_iz, int rnmax, int rnx_max, int rny_max, int dr, int r_n, float *pxx, float *pyy, float *pzz, float *vx, float *vy, float *vz ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; int ip11=(ix+15)*area+(iy+15)*ntz+iz; int ir; //============Add source==============// if(iz==s_iz+10 && ix==s_ix && iy==s_iy) { vx[ip]+=rik[it]; //pyy[ip]+=rik[it]; //pzz[ip]+=rik[it]; } //===============seismic record=================// if(ix>=0&&ix<rnx_max && iy>=0&&iy<rny_max&& iz==r_iz) { /* ir=ix*rny_max+iy; // record[it*rnmax+(r_iy[ir]-r_iy[0])/dr*rnx_max+(r_ix[ir]-r_ix[0])/dr]=p2[iz*area+r_iy[ir]*ntx+r_ix[ir]]; record[ir*nt+it]=vx[r_ix[ir]*area+r_iy[ir]*ntz+iz];*/ ir=ix*rny_max+iy; // record[it*rnmax+(r_iy[ir]-r_iy[0])/dr*rnx_max+(r_ix[ir]-r_ix[0])/dr]=p2[iz*area+r_iy[ir]*ntx+r_ix[ir]]; record[it*rny_max*rnx_max+ir]=vx[r_ix[ir]*area+r_iy[ir]*ntz+iz]; record2[it*rny_max*rnx_max+ir]=vy[r_ix[ir]*area+r_iy[ir]*ntz+iz]; record3[it*rny_max*rnx_max+ir]=vz[r_ix[ir]*area+r_iy[ir]*ntz+iz]; } __syncthreads(); } __global__ void cuda_kernel_calculate_p ( int ntx, int nty, int ntz, int ntp, float dt, float *pxx, float *pyy, float *pzz, float *pxy, float *pxz, float *pyz, float *tao_p1, float *tao_s1, float *tao_p2, float *tao_s2, float *eta_p1, float *eta_s1, float *eta_p2, float *eta_s2, float *eta_p3, float *eta_s3, float *gammax, float *a_x, float *b_x, float *gammay, float *a_y, float *b_y, float *gammaz, float *a_z, float *b_z, float *phi_vx_xx, float *phi_vz_zx, float *phi_vy_yx, float *phi_vx_xy, float *phi_vz_zy, float *phi_vy_yy, float *phi_vx_xz, float *phi_vz_zz, float *phi_vy_yz, float *phi_vx_z, float *phi_vz_x, float *phi_vx_y, float *phi_vy_x, float *phi_vy_z, float *phi_vz_y, hipfftComplex *partvx_x1, hipfftComplex *partvx_x2, hipfftComplex *partvx_x3, hipfftComplex *partvx_x4, hipfftComplex *partvx_x5, hipfftComplex *partvz_z1, hipfftComplex *partvz_z2, hipfftComplex *partvz_z3, hipfftComplex *partvz_z4, hipfftComplex *partvz_z5, hipfftComplex *partvy_y1, hipfftComplex *partvy_y2, hipfftComplex *partvy_y3, hipfftComplex *partvy_y4, hipfftComplex *partvy_y5, hipfftComplex *partvx_z1, hipfftComplex *partvx_z2, hipfftComplex *partvx_z3, hipfftComplex *partvx_z4, hipfftComplex *partvx_z5, hipfftComplex *partvz_x1, hipfftComplex *partvz_x2, hipfftComplex *partvz_x3, hipfftComplex *partvz_x4, hipfftComplex *partvz_x5, hipfftComplex *partvx_y1, hipfftComplex *partvx_y2, hipfftComplex *partvx_y3, hipfftComplex *partvx_y4, hipfftComplex *partvx_y5, hipfftComplex *partvy_x1, hipfftComplex *partvy_x2, hipfftComplex *partvy_x3, hipfftComplex *partvy_x4, hipfftComplex *partvy_x5, hipfftComplex *partvy_z1, hipfftComplex *partvy_z2, hipfftComplex *partvy_z3, hipfftComplex *partvy_z4, hipfftComplex *partvy_z5, hipfftComplex *partvz_y1, hipfftComplex *partvz_y2, hipfftComplex *partvz_y3, hipfftComplex *partvz_y4, hipfftComplex *partvz_y5 ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int 
ip=ix*area+iy*ntz+iz; float alpha=1.0; float w, s, t11, t12, t13; float sign_of_tao; if(iz>=0 && iz<ntz-1 && ix>=0 && ix<ntx-1 && iy>=0 && iy<nty-1) { phi_vx_xx[ip] = phi_vx_xx[ip] + dt*(-a_x[ix]*phi_vx_xx[ip] - b_x[ix]*( eta_p1[ip]*partvx_x1[ip].x/ntp+eta_p2[ip]*partvx_x2[ip].x/ntp+eta_p3[ip]*partvx_x3[ip].x/ntp+tao_p1[ip]*partvx_x4[ip].x/ntp+tao_p2[ip]*partvx_x5[ip].x/ntp)); phi_vz_zx[ip] = phi_vz_zx[ip] + dt*(-a_z[iz]*phi_vz_zx[ip] - b_z[iz]*( (eta_p1[ip]-2*eta_s1[ip])*partvz_z1[ip].x/ntp + (eta_p2[ip]-2*eta_s2[ip])*partvz_z2[ip].x/ntp + (eta_p3[ip]-2*eta_s3[ip])*partvz_z3[ip].x/ntp +(tao_p1[ip]-2*tao_s1[ip])*partvz_z4[ip].x/ntp + (tao_p2[ip]-2*tao_s2[ip])*partvz_z5[ip].x/ntp)); phi_vy_yx[ip] = phi_vy_yx[ip] + dt*(-a_y[iy]*phi_vy_yx[ip] - b_y[iy]*( (eta_p1[ip]-2*eta_s1[ip])*partvy_y1[ip].x/ntp + (eta_p2[ip]-2*eta_s2[ip])*partvy_y2[ip].x/ntp + (eta_p3[ip]-2*eta_s3[ip])*partvy_y3[ip].x/ntp +(tao_p1[ip]-2*tao_s1[ip])*partvy_y4[ip].x/ntp + (tao_p2[ip]-2*tao_s2[ip])*partvy_y5[ip].x/ntp)); phi_vz_zz[ip] = phi_vz_zz[ip] + dt*(-a_z[iz]*phi_vz_zz[ip] - b_z[iz]*( eta_p1[ip]*partvz_z1[ip].x/ntp+eta_p2[ip]*partvz_z2[ip].x/ntp+eta_p3[ip]*partvz_z3[ip].x/ntp+tao_p1[ip]*partvz_z4[ip].x/ntp+tao_p2[ip]*partvz_z5[ip].x/ntp)); phi_vx_xz[ip] = phi_vx_xz[ip] + dt*(-a_x[ix]*phi_vx_xz[ip] - b_x[ix]*( (eta_p1[ip]-2*eta_s1[ip])*partvx_x1[ip].x/ntp + (eta_p2[ip]-2*eta_s2[ip])*partvx_x2[ip].x/ntp + (eta_p3[ip]-2*eta_s3[ip])*partvx_x3[ip].x/ntp +(tao_p1[ip]-2*tao_s1[ip])*partvx_x4[ip].x/ntp + (tao_p2[ip]-2*tao_s2[ip])*partvx_x5[ip].x/ntp)); phi_vy_yz[ip] = phi_vy_yz[ip] + dt*(-a_y[iy]*phi_vy_yz[ip] - b_y[iy]*( (eta_p1[ip]-2*eta_s1[ip])*partvy_y1[ip].x/ntp + (eta_p2[ip]-2*eta_s2[ip])*partvy_y2[ip].x/ntp + (eta_p3[ip]-2*eta_s3[ip])*partvy_y3[ip].x/ntp +(tao_p1[ip]-2*tao_s1[ip])*partvy_y4[ip].x/ntp + (tao_p2[ip]-2*tao_s2[ip])*partvy_y5[ip].x/ntp)); phi_vy_yy[ip] = phi_vy_yy[ip] + dt*(-a_y[iy]*phi_vy_yy[ip] - b_y[iy]*( eta_p1[ip]*partvy_y1[ip].x/ntp+eta_p2[ip]*partvy_y2[ip].x/ntp+eta_p3[ip]*partvy_y3[ip].x/ntp+tao_p1[ip]*partvy_y4[ip].x/ntp+tao_p2[ip]*partvy_y5[ip].x/ntp)); phi_vx_xy[ip] = phi_vx_xy[ip] + dt*(-a_x[ix]*phi_vx_xy[ip] - b_x[ix]*( (eta_p1[ip]-2*eta_s1[ip])*partvx_x1[ip].x/ntp + (eta_p2[ip]-2*eta_s2[ip])*partvx_x2[ip].x/ntp + (eta_p3[ip]-2*eta_s3[ip])*partvx_x3[ip].x/ntp +(tao_p1[ip]-2*tao_s1[ip])*partvx_x4[ip].x/ntp + (tao_p2[ip]-2*tao_s2[ip])*partvx_x5[ip].x/ntp)); phi_vz_zy[ip] = phi_vz_zy[ip] + dt*(-a_z[iz]*phi_vz_zy[ip] - b_z[iz]*( (eta_p1[ip]-2*eta_s1[ip])*partvz_z1[ip].x/ntp + (eta_p2[ip]-2*eta_s2[ip])*partvz_z2[ip].x/ntp + (eta_p3[ip]-2*eta_s3[ip])*partvz_z3[ip].x/ntp +(tao_p1[ip]-2*tao_s1[ip])*partvz_z4[ip].x/ntp + (tao_p2[ip]-2*tao_s2[ip])*partvz_z5[ip].x/ntp)); pxx[ip] = pxx[ip] + dt*( 1.0/gammax[ix]*(eta_p1[ip]*partvx_x1[ip].x/ntp+eta_p2[ip]*partvx_x2[ip].x/ntp+eta_p3[ip]*partvx_x3[ip].x/ntp+tao_p1[ip]*partvx_x4[ip].x/ntp+tao_p2[ip]*partvx_x5[ip].x/ntp)+ 1.0/gammay[iy]*((eta_p1[ip]-2*eta_s1[ip])*partvy_y1[ip].x/ntp+(eta_p2[ip]-2*eta_s2[ip])*partvy_y2[ip].x/ntp+(eta_p3[ip]-2*eta_s3[ip])*partvy_y3[ip].x/ntp+(tao_p1[ip]-2*tao_s1[ip])*partvy_y4[ip].x/ntp+(tao_p2[ip]-2*tao_s2[ip])*partvy_y5[ip].x/ntp)+ 1.0/gammaz[iz]*((eta_p1[ip]-2*eta_s1[ip])*partvz_z1[ip].x/ntp+(eta_p2[ip]-2*eta_s2[ip])*partvz_z2[ip].x/ntp+(eta_p3[ip]-2*eta_s3[ip])*partvz_z3[ip].x/ntp+(tao_p1[ip]-2*tao_s1[ip])*partvz_z4[ip].x/ntp+(tao_p2[ip]-2*tao_s2[ip])*partvz_z5[ip].x/ntp) +(phi_vx_xx[ip]+phi_vy_yx[ip]+phi_vz_zx[ip]) ); pyy[ip] = pyy[ip] + dt*( 
1.0/gammax[ix]*((eta_p1[ip]-2*eta_s1[ip])*partvx_x1[ip].x/ntp+(eta_p2[ip]-2*eta_s2[ip])*partvx_x2[ip].x/ntp+(eta_p3[ip]-2*eta_s3[ip])*partvx_x3[ip].x/ntp+(tao_p1[ip]-2*tao_s1[ip])*partvx_x4[ip].x/ntp+(tao_p2[ip]-2*tao_s2[ip])*partvx_x5[ip].x/ntp)+ 1.0/gammay[iy]*(eta_p1[ip]*partvy_y1[ip].x/ntp+eta_p2[ip]*partvy_y2[ip].x/ntp+eta_p3[ip]*partvy_y3[ip].x/ntp+tao_p1[ip]*partvy_y4[ip].x/ntp+tao_p2[ip]*partvy_y5[ip].x/ntp)+ 1.0/gammaz[iz]*((eta_p1[ip]-2*eta_s1[ip])*partvz_z1[ip].x/ntp+(eta_p2[ip]-2*eta_s2[ip])*partvz_z2[ip].x/ntp+(eta_p3[ip]-2*eta_s3[ip])*partvz_z3[ip].x/ntp+(tao_p1[ip]-2*tao_s1[ip])*partvz_z4[ip].x/ntp+(tao_p2[ip]-2*tao_s2[ip])*partvz_z5[ip].x/ntp) +(phi_vx_xy[ip]+phi_vy_yy[ip]+phi_vz_zy[ip]) ); pzz[ip] = pzz[ip] + dt*( 1.0/gammax[ix]*((eta_p1[ip]-2*eta_s1[ip])*partvx_x1[ip].x/ntp+(eta_p2[ip]-2*eta_s2[ip])*partvx_x2[ip].x/ntp+(eta_p3[ip]-2*eta_s3[ip])*partvx_x3[ip].x/ntp+(tao_p1[ip]-2*tao_s1[ip])*partvx_x4[ip].x/ntp+(tao_p2[ip]-2*tao_s2[ip])*partvx_x5[ip].x/ntp)+ 1.0/gammay[iy]*((eta_p1[ip]-2*eta_s1[ip])*partvy_y1[ip].x/ntp+(eta_p2[ip]-2*eta_s2[ip])*partvy_y2[ip].x/ntp+(eta_p3[ip]-2*eta_s3[ip])*partvy_y3[ip].x/ntp+(tao_p1[ip]-2*tao_s1[ip])*partvy_y4[ip].x/ntp+(tao_p2[ip]-2*tao_s2[ip])*partvy_y5[ip].x/ntp)+ 1.0/gammaz[iz]*(eta_p1[ip]*partvz_z1[ip].x/ntp+eta_p2[ip]*partvz_z2[ip].x/ntp+eta_p3[ip]*partvz_z3[ip].x/ntp+tao_p1[ip]*partvz_z4[ip].x/ntp+tao_p2[ip]*partvz_z5[ip].x/ntp) +(phi_vx_xz[ip]+phi_vy_yz[ip]+phi_vz_zz[ip]) ); phi_vx_z[ip] = phi_vx_z[ip] + dt*(-0.5*(a_z[iz]+a_z[iz+1])*phi_vx_z[ip] - 0.5*(b_z[iz]+b_z[iz+1])*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvx_z1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvx_z2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvx_z3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvx_z4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvx_z5[ip].x)/ntp ) ); phi_vz_x[ip] = phi_vz_x[ip] + dt*(-0.5*(a_x[ix]+a_x[ix+1])*phi_vz_x[ip] - 0.5*(b_x[ix]+b_x[ix+1])*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvz_x1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvz_x2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvz_x3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvz_x4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvz_x5[ip].x)/ntp ) ); phi_vx_y[ip] = phi_vx_y[ip] + dt*(-0.5*(a_y[iy]+a_y[iy+1])*phi_vx_y[ip] - 0.5*(b_y[iy]+b_y[iy+1])*( 
0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvx_y1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvx_y2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvx_y3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvx_y4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvx_y5[ip].x)/ntp ) ); phi_vy_x[ip] = phi_vy_x[ip] + dt*(-0.5*(a_x[ix]+a_x[ix+1])*phi_vy_x[ip] - 0.5*(b_x[ix]+b_x[ix+1])*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvy_x1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvy_x2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvy_x3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvy_x4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvy_x5[ip].x)/ntp ) ); phi_vy_z[ip] = phi_vy_z[ip] + dt*(-0.5*(a_z[iz]+a_z[iz+1])*phi_vy_z[ip] - 0.5*(b_z[iz]+b_z[iz+1])*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvy_z1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvy_z2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvy_z3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvy_z4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvy_z5[ip].x)/ntp ) ); phi_vz_y[ip] = phi_vz_y[ip] + dt*(-0.5*(a_y[iy]+a_y[iy+1])*phi_vz_y[ip] - 0.5*(b_y[iy]+b_y[iy+1])*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvz_y1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvz_y2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvz_y3[ip].x)/ntp+ 
0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvz_y4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvz_y5[ip].x)/ntp ) ); pxz[ip] = pxz[ip] + dt*( 1.0/(0.5*(gammaz[iz]+gammaz[iz+1]))*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvx_z1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvx_z2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvx_z3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvx_z4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvx_z5[ip].x)/ntp ) + phi_vx_z[ip] + 1.0/(0.5*(gammax[ix]+gammax[ix+1]))*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvz_x1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvz_x2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvz_x3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvz_x4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvz_x5[ip].x)/ntp ) + phi_vz_x[ip] ); pyz[ip] = pyz[ip] + dt*( 1.0/(0.5*(gammaz[iz]+gammaz[iz+1]))*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvy_z1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvy_z2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvy_z3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvy_z4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvy_z5[ip].x)/ntp ) + phi_vy_z[ip] + 1.0/(0.5*(gammay[iy]+gammay[iy+1]))*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvz_y1[ip].x)/ntp+ 
0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvz_y2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvz_y3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvz_y4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvz_y5[ip].x)/ntp ) + phi_vz_y[ip] ); pxy[ip] = pxy[ip] + dt*( 1.0/(0.5*(gammay[iy]+gammay[iy+1]))*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvx_y1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvx_y2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvx_y3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvx_y4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvx_y5[ip].x)/ntp ) + phi_vx_y[ip] + 1.0/(0.5*(gammax[ix]+gammax[ix+1]))*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvy_x1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvy_x2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvy_x3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvy_x4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvy_x5[ip].x)/ntp ) + phi_vy_x[ip] ); } __syncthreads(); } __global__ void cuda_kernel_calculate_v ( int ntx, int nty, int ntz, int ntp, float dt, float *rho, float *vx, float *vz, float *vy, float *gammax, float *a_x, float *b_x, float *gammay, float *a_y, float *b_y, float *gammaz, float *a_z, float *b_z, float *phi_pxx_x, float *phi_pxy_y, float *phi_pxz_z, float *phi_pxy_x, float *phi_pyy_y, float *phi_pyz_z, float *phi_pxz_x, float *phi_pyz_y, float *phi_pzz_z, hipfftComplex *partx1, hipfftComplex *partz1, hipfftComplex *party1, hipfftComplex *partx2, hipfftComplex *partz2, hipfftComplex *party2, hipfftComplex *partx3, hipfftComplex *partz3, hipfftComplex *party3 ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int ip=ix*nty*ntz+iy*ntz+iz; if(iz>=0 && iz<ntz-1 && ix>=0 && ix<ntx-1&& iy>=0 && iy<nty-1) { phi_pxx_x[ip] = phi_pxx_x[ip] + 
dt*(-0.5*(a_x[ix]+a_x[ix+1])*phi_pxx_x[ip]-0.5*(b_x[ix]+b_x[ix+1])*partx1[ip].x/ntp); phi_pxy_y[ip] = phi_pxy_y[ip] + dt*(-a_y[iy]*phi_pxy_y[ip]-b_y[iy]*party1[ip].x/ntp); phi_pxz_z[ip] = phi_pxz_z[ip] + dt*(-a_z[iz]*phi_pxz_z[ip]-b_z[iz]*partz1[ip].x/ntp); phi_pxy_x[ip] = phi_pxy_x[ip] + dt*(-a_x[ix]*phi_pxy_x[ip]-b_x[ix]*partx3[ip].x/ntp); phi_pyy_y[ip] = phi_pyy_y[ip] + dt*(-0.5*(a_y[iy]+a_y[iy+1])*phi_pyy_y[ip]-0.5*(b_y[iy]+b_y[iy+1])*party3[ip].x/ntp); phi_pyz_z[ip] = phi_pyz_z[ip] + dt*(-a_z[iz]*phi_pyz_z[ip]-b_z[iz]*partz3[ip].x/ntp); phi_pxz_x[ip] = phi_pxz_x[ip] + dt*(-a_x[ix]*phi_pxz_x[ip]-b_x[ix]*partx2[ip].x/ntp); phi_pyz_y[ip] = phi_pyz_y[ip] + dt*(-a_y[iy]*phi_pyz_y[ip]-b_y[iy]*party2[ip].x/ntp); phi_pzz_z[ip] = phi_pzz_z[ip] + dt*(-0.5*(a_z[iz]+a_z[iz+1])*phi_pzz_z[ip]-0.5*(b_z[iz]+b_z[iz+1])*partz2[ip].x/ntp); vx[ip] = vx[ip] + dt/(0.5*(rho[ip]+rho[ip+nty*ntz]))* ( 1.0/(0.5*(gammax[ix]+gammax[ix+1]))*partx1[ip].x/ntp + phi_pxx_x[ip] + 1.0/gammay[iy]*party1[ip].x/ntp + phi_pxy_y[ip] + 1.0/gammaz[iz]*partz1[ip].x/ntp + phi_pxz_z[ip] ); vy[ip] = vy[ip] + dt/(0.5*(rho[ip]+rho[ip+ntz]))* ( 1.0/gammax[ix]*partx3[ip].x/ntp + phi_pxy_x[ip] + 1.0/(0.5*(gammay[iy]+gammay[iy+1]))*party3[ip].x/ntp + phi_pyy_y[ip] + 1.0/gammaz[iz]*partz3[ip].x/ntp + phi_pyz_z[ip] ); vz[ip] = vz[ip] + dt/(0.5*(rho[ip]+rho[ip+1]))* ( 1.0/gammax[ix]*partx2[ip].x/ntp + phi_pxz_x[ip] + 1.0/gammay[iy]*party2[ip].x/ntp + phi_pyz_y[ip] + 1.0/(0.5*(gammaz[iz]+gammaz[iz+1]))*partz2[ip].x/ntp + phi_pzz_z[ip] ); } __syncthreads(); } __global__ void cuda_kernel_get_dv_renewed ( int ntx, int nty, int ntz, hipfftComplex *outx, hipfftComplex *outy, hipfftComplex *outz, hipfftComplex *dvx, hipfftComplex *dvy, hipfftComplex *dvz ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int ip=ix*nty*ntz+iy*ntz+iz; if(iz>=0 && iz<ntz && ix>=0 && ix<ntx && iy>=0 && iy<nty) { dvx[ip].x=outx[ip].x; dvx[ip].y=outx[ip].y; dvz[ip].x=outz[ip].x; dvz[ip].y=outz[ip].y; dvy[ip].x=outy[ip].x; dvy[ip].y=outy[ip].y; } __syncthreads(); } extern "C" void cuda_forward_acoustic_3D ( int myid, int is, int nt, int ntx, int nty, int ntz, int ntp, int nx, int ny, int nz, int pml, float dx, float dy, float dz, float dt, float f0, float w0, float velp_max, float *rik, float *velp, float *gama_p,float *vels, float *gama_s, float *rho, struct Source ss[], struct MultiGPU plan[], int GPU_N, int rnmax, int rnx_max, int rny_max, int dr ) { int i, it, ix, iy, iz; size_t size_model=sizeof(float)*ntp; char filename[150]; FILE *fp; // define multistream variable Multistream plans[GPU_N]; float *tmp; tmp=(float*)malloc(sizeof(float)*ntp); // block size 16*16; // grid size ntx/16*ntz/16 dim3 dimBlock(BLOCK_WIDTH,BLOCK_HEIGHT); dim3 dimGrid((ntx*ntz+dimBlock.x-1)/dimBlock.x,(nty+dimBlock.y-1)/dimBlock.y); for(i=0;i<GPU_N;i++) { hipSetDevice(i); // define streaming cufft handle (very important!!!) 
hipStreamCreate(&plans[i].stream); hipfftSetStream(plan[i].PLAN_FORWARD, plans[i].stream); hipfftSetStream(plan[i].PLAN_BACKWARD, plans[i].stream); } for(i=0;i<GPU_N;i++) { hipSetDevice(i); // copy the vectors from the host to the device hipMemcpyAsync(plan[i].d_r_ix,ss[is+i].r_ix,sizeof(float)*rnmax,hipMemcpyHostToDevice,plans[i].stream); hipMemcpyAsync(plan[i].d_r_iy,ss[is+i].r_iy,sizeof(float)*rnmax,hipMemcpyHostToDevice,plans[i].stream); hipMemcpyAsync(plan[i].d_velp,velp,size_model,hipMemcpyHostToDevice,plans[i].stream); hipMemcpyAsync(plan[i].d_gama_p,gama_p,size_model,hipMemcpyHostToDevice,plans[i].stream); hipMemcpyAsync(plan[i].d_vels,vels,size_model,hipMemcpyHostToDevice,plans[i].stream); hipMemcpyAsync(plan[i].d_gama_s,gama_s,size_model,hipMemcpyHostToDevice,plans[i].stream); hipMemcpyAsync(plan[i].d_rho,rho,size_model,hipMemcpyHostToDevice,plans[i].stream); hipMemcpyAsync(plan[i].d_rik,rik,sizeof(float)*nt,hipMemcpyHostToDevice,plans[i].stream); } for(i=0;i<GPU_N;i++) { hipSetDevice(i); //===============define wavenumber variables============// hipLaunchKernelGGL(( cuda_kernel_wavenumber), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dx, dy, dz, plan[i].d_kx, plan[i].d_ky, plan[i].d_kz, plan[i].d_k, plan[i].d_kvx_x, plan[i].d_kvx_z, plan[i].d_kvx_y, plan[i].d_kvz_x, plan[i].d_kvz_z, plan[i].d_kvz_y, plan[i].d_kvy_x, plan[i].d_kvy_z, plan[i].d_kvy_y ); //===============define viscoacoustic variables============// hipLaunchKernelGGL(( cuda_kernel_viscoacoustic_parameters), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dx, dy, dz, dt, w0, plan[i].d_velp, plan[i].d_vels, plan[i].d_rho, plan[i].d_k, plan[i].d_gama_p, plan[i].d_gama_s, plan[i].d_Ap1, plan[i].d_Ap2, plan[i].d_Ap3, plan[i].d_tao_p1, plan[i].d_tao_p2, plan[i].d_eta_p1, plan[i].d_eta_p2, plan[i].d_eta_p3, plan[i].d_tao_s1, plan[i].d_tao_s2, plan[i].d_eta_s1, plan[i].d_eta_s2, plan[i].d_eta_s3 ); //===============PML parameters============// hipLaunchKernelGGL(( cuda_kernel_pml_parameters), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, pml, dx, dy, dz, dt, f0, velp_max, plan[i].d_gammax, plan[i].d_alphax, plan[i].d_Omegax, plan[i].d_a_x, plan[i].d_b_x, plan[i].d_gammay, plan[i].d_alphay, plan[i].d_Omegay, plan[i].d_a_y, plan[i].d_b_y, plan[i].d_gammaz, plan[i].d_alphaz, plan[i].d_Omegaz, plan[i].d_a_z, plan[i].d_b_z ); //===============initialization============// hipLaunchKernelGGL(( cuda_kernel_initialization), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, plan[i].d_vx, plan[i].d_vy, plan[i].d_vz, plan[i].d_pxx, plan[i].d_pyy, plan[i].d_pzz, plan[i].d_pxy, plan[i].d_pyz, plan[i].d_pxz, plan[i].d_phi_vx_xx, plan[i].d_phi_vz_zx, plan[i].d_phi_vy_yx, plan[i].d_phi_vx_xy, plan[i].d_phi_vz_zy, plan[i].d_phi_vy_yy, plan[i].d_phi_vx_xz, plan[i].d_phi_vz_zz, plan[i].d_phi_vy_yz, plan[i].d_phi_vx_z, plan[i].d_phi_vz_x,plan[i].d_phi_vx_y, plan[i].d_phi_vy_x,plan[i].d_phi_vy_z, plan[i].d_phi_vz_y, plan[i].d_phi_pxx_x, plan[i].d_phi_pxy_y, plan[i].d_phi_pxz_z, plan[i].d_phi_pxy_x, plan[i].d_phi_pyy_y, plan[i].d_phi_pyz_z, plan[i].d_phi_pxz_x, plan[i].d_phi_pyz_y, plan[i].d_phi_pzz_z, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_partx1, plan[i].d_partz1, plan[i].d_party1, plan[i].d_partx2, plan[i].d_partz2, plan[i].d_party2, plan[i].d_partx3, plan[i].d_partz3, plan[i].d_party3, plan[i].d_partvx_x1, plan[i].d_partvx_x2, plan[i].d_partvx_x3, plan[i].d_partvx_x4, plan[i].d_partvx_x5, plan[i].d_partvz_z1, plan[i].d_partvz_z2, plan[i].d_partvz_z3, 
plan[i].d_partvz_z4, plan[i].d_partvz_z5, plan[i].d_partvy_y1, plan[i].d_partvy_y2, plan[i].d_partvy_y3, plan[i].d_partvy_y4, plan[i].d_partvy_y5, plan[i].d_partvx_y1, plan[i].d_partvx_y2, plan[i].d_partvx_y3, plan[i].d_partvx_y4, plan[i].d_partvx_y5, plan[i].d_partvy_x1, plan[i].d_partvy_x2, plan[i].d_partvy_x3, plan[i].d_partvy_x4, plan[i].d_partvy_x5, plan[i].d_partvy_z1, plan[i].d_partvy_z2, plan[i].d_partvy_z3, plan[i].d_partvy_z4, plan[i].d_partvy_z5, plan[i].d_partvz_y1, plan[i].d_partvz_y2, plan[i].d_partvz_y3, plan[i].d_partvz_y4, plan[i].d_partvz_y5, plan[i].d_partvx_z1, plan[i].d_partvx_z2, plan[i].d_partvx_z3, plan[i].d_partvx_z4, plan[i].d_partvx_z5, plan[i].d_partvz_x1, plan[i].d_partvz_x2, plan[i].d_partvz_x3, plan[i].d_partvz_x4, plan[i].d_partvz_x5 ); } //===================time begin===========================// //===================time begin===========================// for(it=0;it<nt;it++) { for(i=0;i<GPU_N;i++) { hipSetDevice(i); //===============calculate k-space spatial derivative============// //===============calculate k-space spatial derivative============// hipLaunchKernelGGL(( cuda_kernel_p_real_to_complex), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, plan[i].d_pxx, plan[i].d_pyy, plan[i].d_pzz, plan[i].d_pxy, plan[i].d_pxz, plan[i].d_pyz, plan[i].d_in_pxx, plan[i].d_in_pyy, plan[i].d_in_pzz, plan[i].d_in_pxy, plan[i].d_in_pxz, plan[i].d_in_pyz ); hipfftExecC2C(plan[i].PLAN_FORWARD,plan[i].d_in_pxx,plan[i].d_outpxx,HIPFFT_FORWARD); hipfftExecC2C(plan[i].PLAN_FORWARD,plan[i].d_in_pyy,plan[i].d_outpyy,HIPFFT_FORWARD); hipfftExecC2C(plan[i].PLAN_FORWARD,plan[i].d_in_pzz,plan[i].d_outpzz,HIPFFT_FORWARD); hipfftExecC2C(plan[i].PLAN_FORWARD,plan[i].d_in_pxy,plan[i].d_outpxy,HIPFFT_FORWARD); hipfftExecC2C(plan[i].PLAN_FORWARD,plan[i].d_in_pxz,plan[i].d_outpxz,HIPFFT_FORWARD); hipfftExecC2C(plan[i].PLAN_FORWARD,plan[i].d_in_pyz,plan[i].d_outpyz,HIPFFT_FORWARD); hipLaunchKernelGGL(( cuda_kernel_operate_k_v), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outpxx, plan[i].d_outpxy, plan[i].d_outpxz, plan[i].d_inx, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvz_x, plan[i].d_kvy_y, plan[i].d_kvz_z ); hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partx1, HIPFFT_BACKWARD); //dpxxdx hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_party1, HIPFFT_BACKWARD); //dpxydy hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partz1, HIPFFT_BACKWARD); //dpxzdz hipLaunchKernelGGL(( cuda_kernel_operate_k_v), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outpxz, plan[i].d_outpyz, plan[i].d_outpzz, plan[i].d_inx, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvx_x, plan[i].d_kvy_y, plan[i].d_kvx_z ); hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partx2, HIPFFT_BACKWARD); //dpxzdx hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_party2, HIPFFT_BACKWARD); //dpyzdy hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partz2, HIPFFT_BACKWARD); //dpzzdz hipLaunchKernelGGL(( cuda_kernel_operate_k_v), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outpxy, plan[i].d_outpyy, plan[i].d_outpyz, plan[i].d_inx, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvx_x, plan[i].d_kvz_y, plan[i].d_kvz_z ); hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partx3, HIPFFT_BACKWARD); //dpxydx hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_party3, HIPFFT_BACKWARD); //dpyydy hipfftExecC2C(plan[i].PLAN_BACKWARD, 
plan[i].d_inz,plan[i].d_partz3, HIPFFT_BACKWARD); //dpyzdz //===================calculate vx vy and vz==================// //===================calculate vx vy and vz==================// hipLaunchKernelGGL(( cuda_kernel_calculate_v), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, ntp, dt, plan[i].d_rho, plan[i].d_vx, plan[i].d_vz, plan[i].d_vy, plan[i].d_gammax, plan[i].d_a_x, plan[i].d_b_x, plan[i].d_gammay, plan[i].d_a_y, plan[i].d_b_y, plan[i].d_gammaz, plan[i].d_a_z, plan[i].d_b_z, plan[i].d_phi_pxx_x, plan[i].d_phi_pxy_y, plan[i].d_phi_pxz_z, plan[i].d_phi_pxy_x, plan[i].d_phi_pyy_y, plan[i].d_phi_pyz_z, plan[i].d_phi_pxz_x, plan[i].d_phi_pyz_y, plan[i].d_phi_pzz_z, plan[i].d_partx1, plan[i].d_partz1, plan[i].d_party1, plan[i].d_partx2, plan[i].d_partz2, plan[i].d_party2, plan[i].d_partx3, plan[i].d_partz3, plan[i].d_party3 ); //===============calculate k-space spatial derivatives============// //===============calculate k-space spatial derivatives============// hipLaunchKernelGGL(( cuda_kernel_vxvyvz_real_to_complex), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, plan[i].d_vx, plan[i].d_vy, plan[i].d_vz, plan[i].d_inx, plan[i].d_iny, plan[i].d_inz ); hipfftExecC2C(plan[i].PLAN_FORWARD,plan[i].d_inx,plan[i].d_outx,HIPFFT_FORWARD); hipfftExecC2C(plan[i].PLAN_FORWARD,plan[i].d_iny,plan[i].d_outy,HIPFFT_FORWARD); hipfftExecC2C(plan[i].PLAN_FORWARD,plan[i].d_inz,plan[i].d_outz,HIPFFT_FORWARD); //////////////////////////////////// sigma xx yy zz dispersion_3 parts ////////////////////////// hipLaunchKernelGGL(( cuda_kernel_operate_k_pxxpyypzz), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_inx, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvx_x, plan[i].d_kvy_y, plan[i].d_kvz_z, plan[i].d_Ap1, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_x1, HIPFFT_BACKWARD); //dvxdx, k^-0.5 hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_y1, HIPFFT_BACKWARD); //dvydy hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_z1, HIPFFT_BACKWARD); //dvzdz hipLaunchKernelGGL(( cuda_kernel_operate_k_pxxpyypzz), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_inx, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvx_x, plan[i].d_kvy_y, plan[i].d_kvz_z, plan[i].d_Ap2, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_x2, HIPFFT_BACKWARD); //dvxdx, 1* hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_y2, HIPFFT_BACKWARD); //dvydy hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_z2, HIPFFT_BACKWARD); //dvzdz hipLaunchKernelGGL(( cuda_kernel_operate_k_pxxpyypzz), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_inx, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvx_x, plan[i].d_kvy_y, plan[i].d_kvz_z, plan[i].d_Ap3, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. 
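// The pattern above is repeated for every stress group: the velocity spectra
// (d_outx/d_outy/d_outz) are multiplied by the weights d_Ap1, d_Ap2, d_Ap3
// (powers -1/2, 0 and +1/2 of the squared-wavenumber array d_k, set in
// cuda_kernel_viscoacoustic_parameters) together with the complex spectral
// derivative operators d_kv*_*, and each product is inverse-transformed
// (PLAN_BACKWARD) into the matching d_partv*_1/2/3 buffer. AorB=0 selects the
// dispersion branch that uses the current spectrum; the later AorB=1 launches
// instead use (out-dv)/dt, the first-order time derivative formed against the
// previous step's spectrum kept in d_dvx/d_dvy/d_dvz (refreshed each step by
// cuda_kernel_get_dv_renewed).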
hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_x3, HIPFFT_BACKWARD); //dvxdx, k^0.5* hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_y3, HIPFFT_BACKWARD); //dvydy hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_z3, HIPFFT_BACKWARD); //dvzdz //////////////////////////////////// sigma xz zx dispersion_3 parts ////////////////////////// hipLaunchKernelGGL(( cuda_kernel_operate_k_pxz), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvz, plan[i].d_inx, plan[i].d_inz, plan[i].d_kvx_z, plan[i].d_kvz_x, plan[i].d_Ap1, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_z1, HIPFFT_BACKWARD); //dvxdz, k^-0.5* hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_x1, HIPFFT_BACKWARD); //dvzdx hipLaunchKernelGGL(( cuda_kernel_operate_k_pxz), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvz, plan[i].d_inx, plan[i].d_inz, plan[i].d_kvx_z, plan[i].d_kvz_x, plan[i].d_Ap2, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_z2, HIPFFT_BACKWARD); //dvxdz, 1* hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_x2, HIPFFT_BACKWARD); //dvzdx hipLaunchKernelGGL(( cuda_kernel_operate_k_pxz), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvz, plan[i].d_inx, plan[i].d_inz, plan[i].d_kvx_z, plan[i].d_kvz_x, plan[i].d_Ap3, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_z3, HIPFFT_BACKWARD); //dvxdz, k^0.5* hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_x3, HIPFFT_BACKWARD); //dvzdx //////////////////////////////////// sigma xy yx dispersion_3 parts ////////////////////////// hipLaunchKernelGGL(( cuda_kernel_operate_k_pxy), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outy, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_inx, plan[i].d_iny, plan[i].d_kvx_y, plan[i].d_kvy_x, plan[i].d_Ap1, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_y1, HIPFFT_BACKWARD); //dvxdy, k^-0.5* hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_x1, HIPFFT_BACKWARD); //dvydx hipLaunchKernelGGL(( cuda_kernel_operate_k_pxy), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outy, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_inx, plan[i].d_iny, plan[i].d_kvx_y, plan[i].d_kvy_x, plan[i].d_Ap2, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. 
hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_y2, HIPFFT_BACKWARD); //dvxdy, 1* hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_x2, HIPFFT_BACKWARD); //dvydx hipLaunchKernelGGL(( cuda_kernel_operate_k_pxy), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outy, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_inx, plan[i].d_iny, plan[i].d_kvx_y, plan[i].d_kvy_x, plan[i].d_Ap3, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_y3, HIPFFT_BACKWARD); //dvxdy, k^0.5 hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_x3, HIPFFT_BACKWARD); //dvydx //////////////////////////////////// sigma yz zy dispersion_3 parts ////////////////////////// hipLaunchKernelGGL(( cuda_kernel_operate_k_pyz), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvy_z, plan[i].d_kvz_y, plan[i].d_Ap1, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_z1, HIPFFT_BACKWARD); //dvydz, k^-0.5 hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_y1, HIPFFT_BACKWARD); //dvzdy hipLaunchKernelGGL(( cuda_kernel_operate_k_pyz), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvy_z, plan[i].d_kvz_y, plan[i].d_Ap2, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_z2, HIPFFT_BACKWARD); //dvydz, 1* hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_y2, HIPFFT_BACKWARD); //dvzdy hipLaunchKernelGGL(( cuda_kernel_operate_k_pyz), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvy_z, plan[i].d_kvz_y, plan[i].d_Ap3, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_z3, HIPFFT_BACKWARD); //dvydz, k^0.5 hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_y3, HIPFFT_BACKWARD); //dvzdy //////////////////////////////////// sigma xx yy zz amplitude-loss_2 parts ////////////////////////// hipLaunchKernelGGL(( cuda_kernel_operate_k_pxxpyypzz), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_inx, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvx_x, plan[i].d_kvy_y, plan[i].d_kvz_z, plan[i].d_Ap1, 1 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. 
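// The launch above and those that follow pass AorB=1, so cuda_kernel_operate_k_*
// differences the velocity spectrum against d_dv* ((out-dv)/dt) before applying
// the operator. Only the d_Ap1 and d_Ap2 weights are needed for these
// amplitude-loss terms (hence the "_2 parts" labels); the results are
// inverse-transformed into the *_4 and *_5 buffers, which cuda_kernel_calculate_p
// weights by tao_p*/tao_s*, while the *_1/2/3 dispersion parts are weighted by
// eta_p*/eta_s*.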
hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_x4, HIPFFT_BACKWARD); //dvxdx, k^-0.5 hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_y4, HIPFFT_BACKWARD); //dvydy hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_z4, HIPFFT_BACKWARD); //dvzdz hipLaunchKernelGGL(( cuda_kernel_operate_k_pxxpyypzz), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_inx, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvx_x, plan[i].d_kvy_y, plan[i].d_kvz_z, plan[i].d_Ap2, 1 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_x5, HIPFFT_BACKWARD); //dvxdx, 1* hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_y5, HIPFFT_BACKWARD); //dvydy hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_z5, HIPFFT_BACKWARD); //dvzdz //////////////////////////////////// sigma xz zx amplitude-loss_2 parts ////////////////////////// hipLaunchKernelGGL(( cuda_kernel_operate_k_pxz), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvz, plan[i].d_inx, plan[i].d_inz, plan[i].d_kvx_z, plan[i].d_kvz_x, plan[i].d_Ap1, 1 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_z4, HIPFFT_BACKWARD); //dvxdz, k^-0.5* hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_x4, HIPFFT_BACKWARD); //dvzdx hipLaunchKernelGGL(( cuda_kernel_operate_k_pxz), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvz, plan[i].d_inx, plan[i].d_inz, plan[i].d_kvx_z, plan[i].d_kvz_x, plan[i].d_Ap2, 1 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_z5, HIPFFT_BACKWARD); //dvxdz, 1* hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_x5, HIPFFT_BACKWARD); //dvzdx //////////////////////////////////// sigma xy yx amplitude-loss_2 parts ////////////////////////// hipLaunchKernelGGL(( cuda_kernel_operate_k_pxy), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outy, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_inx, plan[i].d_iny, plan[i].d_kvx_y, plan[i].d_kvy_x, plan[i].d_Ap1, 1 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_y4, HIPFFT_BACKWARD); //dvxdy, k^-0.5* hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_x4, HIPFFT_BACKWARD); //dvydx hipLaunchKernelGGL(( cuda_kernel_operate_k_pxy), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outy, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_inx, plan[i].d_iny, plan[i].d_kvx_y, plan[i].d_kvy_x, plan[i].d_Ap2, 1 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. 
hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_y5, HIPFFT_BACKWARD); //dvxdy, 1* hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_x5, HIPFFT_BACKWARD); //dvydx //////////////////////////////////// sigma yz zy amplitude-loss_2 parts ////////////////////////// hipLaunchKernelGGL(( cuda_kernel_operate_k_pyz), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvy_z, plan[i].d_kvz_y, plan[i].d_Ap1, 1 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_z4, HIPFFT_BACKWARD); //dvydz, k^-0.5 hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_y4, HIPFFT_BACKWARD); //dvzdy hipLaunchKernelGGL(( cuda_kernel_operate_k_pyz), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, dt, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvy_z, plan[i].d_kvz_y, plan[i].d_Ap2, 1 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_z5, HIPFFT_BACKWARD); //dvydz, 1* hipfftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_y5, HIPFFT_BACKWARD); //dvzdy //===================calculate p ==================// //===================calculate p ==================// hipLaunchKernelGGL(( cuda_kernel_calculate_p), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, ntp, dt, plan[i].d_pxx, plan[i].d_pyy, plan[i].d_pzz, plan[i].d_pxy, plan[i].d_pxz, plan[i].d_pyz, plan[i].d_tao_p1, plan[i].d_tao_s1,plan[i].d_tao_p2,plan[i].d_tao_s2,plan[i].d_eta_p1,plan[i].d_eta_s1,plan[i].d_eta_p2,plan[i].d_eta_s2,plan[i].d_eta_p3,plan[i].d_eta_s3, plan[i].d_gammax, plan[i].d_a_x, plan[i].d_b_x, plan[i].d_gammay, plan[i].d_a_y, plan[i].d_b_y, plan[i].d_gammaz, plan[i].d_a_z, plan[i].d_b_z, plan[i].d_phi_vx_xx, plan[i].d_phi_vz_zx, plan[i].d_phi_vy_yx, plan[i].d_phi_vx_xy, plan[i].d_phi_vz_zy, plan[i].d_phi_vy_yy, plan[i].d_phi_vx_xz, plan[i].d_phi_vz_zz, plan[i].d_phi_vy_yz, plan[i].d_phi_vx_z, plan[i].d_phi_vz_x, plan[i].d_phi_vx_y, plan[i].d_phi_vy_x, plan[i].d_phi_vy_z, plan[i].d_phi_vz_y, plan[i].d_partvx_x1, plan[i].d_partvx_x2, plan[i].d_partvx_x3, plan[i].d_partvx_x4, plan[i].d_partvx_x5, plan[i].d_partvz_z1, plan[i].d_partvz_z2, plan[i].d_partvz_z3, plan[i].d_partvz_z4, plan[i].d_partvz_z5, plan[i].d_partvy_y1, plan[i].d_partvy_y2, plan[i].d_partvy_y3, plan[i].d_partvy_y4, plan[i].d_partvy_y5, plan[i].d_partvx_z1, plan[i].d_partvx_z2, plan[i].d_partvx_z3, plan[i].d_partvx_z4, plan[i].d_partvx_z5, plan[i].d_partvz_x1, plan[i].d_partvz_x2, plan[i].d_partvz_x3, plan[i].d_partvz_x4, plan[i].d_partvz_x5, plan[i].d_partvx_y1, plan[i].d_partvx_y2, plan[i].d_partvx_y3, plan[i].d_partvx_y4, plan[i].d_partvx_y5, plan[i].d_partvy_x1, plan[i].d_partvy_x2, plan[i].d_partvy_x3, plan[i].d_partvy_x4, plan[i].d_partvy_x5, plan[i].d_partvy_z1, plan[i].d_partvy_z2, plan[i].d_partvy_z3, plan[i].d_partvy_z4, plan[i].d_partvy_z5, plan[i].d_partvz_y1, plan[i].d_partvz_y2, plan[i].d_partvz_y3, plan[i].d_partvz_y4, plan[i].d_partvz_y5 ); hipLaunchKernelGGL(( cuda_kernel_forward_IO), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, ntp, pml, nt, it, dx, dy, dz, dt, ss[is+i].s_ix, ss[is+i].s_iy, ss[is+i].s_iz, plan[i].d_rik, 
plan[i].d_record, plan[i].d_record2, plan[i].d_record3, plan[i].d_r_ix, plan[i].d_r_iy, ss[is+i].r_iz, rnmax, rnx_max, rny_max, dr, ss[is+i].r_n, plan[i].d_pxx, plan[i].d_pyy, plan[i].d_pzz, plan[i].d_vx, plan[i].d_vy, plan[i].d_vz ); hipLaunchKernelGGL(( cuda_kernel_get_dv_renewed), dim3(dimGrid),dim3(dimBlock),0,plans[i].stream, ntx, nty, ntz, plan[i].d_outx, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_dvz ); //================updating wavefields==================// if(it%200==0 && myid==0 && i==0) { printf("forward using real model,is=%2d,it=%4d\n",is+i,it); /* hipMemcpyAsync(tmp,plan[i].d_p2,size_model,hipMemcpyDeviceToHost,plans[i].stream); sprintf(filename,"./output/shot%dsnap%d.bin",is+i,it); fp=fopen(filename,"wb"); for(ix=pml;ix<ntx-pml;ix++) for(iy=pml; iy<nty-pml; iy++) for(iz=pml; iz<ntz-pml; iz++) { fwrite(&tmp[ix*nty*ntz+iy*ntz+iz],sizeof(float),1,fp); } fclose(fp); */ } } //end of GPU_N loop } //end of time loop for(i=0;i<GPU_N;i++) { hipSetDevice(i); hipMemcpyAsync(plan[i].record, plan[i].d_record,sizeof(float)*nt*rnmax, hipMemcpyDeviceToHost,plans[i].stream); hipMemcpyAsync(plan[i].record2, plan[i].d_record2,sizeof(float)*nt*rnmax, hipMemcpyDeviceToHost,plans[i].stream); hipMemcpyAsync(plan[i].record3, plan[i].d_record3,sizeof(float)*nt*rnmax, hipMemcpyDeviceToHost,plans[i].stream); hipDeviceSynchronize(); hipStreamDestroy(plans[i].stream); } free(tmp); } //========================================================= // Initializating the memory for variables in device // ======================================================= extern "C" void cuda_Host_initialization ( int ntp, int nt, int rnmax, struct MultiGPU plan[], int GPU_N ) { int i; for(i=0;i<GPU_N;i++) { hipSetDevice(i); memset(plan[i].pxx, 0, ntp*sizeof(float)); memset(plan[i].pyy, 0, ntp*sizeof(float)); memset(plan[i].pzz, 0, ntp*sizeof(float)); memset(plan[i].pxy, 0, ntp*sizeof(float)); memset(plan[i].pxz, 0, ntp*sizeof(float)); memset(plan[i].pyz, 0, ntp*sizeof(float)); memset(plan[i].vx, 0, ntp*sizeof(float)); memset(plan[i].vy, 0, ntp*sizeof(float)); memset(plan[i].vz, 0, ntp*sizeof(float)); memset(plan[i].record, 0, nt*rnmax*sizeof(float)); memset(plan[i].record2, 0, nt*rnmax*sizeof(float)); memset(plan[i].record3, 0, nt*rnmax*sizeof(float)); } } //=================================================// // Allocate the memory for variables in device // ================================================// extern "C" void cuda_Device_malloc ( int ntx, int nty, int ntz, int ntp, int nx, int ny, int nz, int nt, int rnmax, struct MultiGPU plan[], int GPU_N ) { int i; size_t size_model=sizeof(float)*ntp; for(i=0;i<GPU_N;i++) { hipSetDevice(i); hipfftPlan3d(&plan[i].PLAN_FORWARD,ntx, nty, ntz,HIPFFT_C2C); hipfftPlan3d(&plan[i].PLAN_BACKWARD,ntx, nty, ntz,HIPFFT_C2C); //===========Host======================// //===========Host======================// hipHostMalloc((void **)&plan[i].pxx, size_model); hipHostMalloc((void **)&plan[i].pyy, size_model); hipHostMalloc((void **)&plan[i].pzz, size_model); hipHostMalloc((void **)&plan[i].pxy, size_model); hipHostMalloc((void **)&plan[i].pxz, size_model); hipHostMalloc((void **)&plan[i].pyz, size_model); hipHostMalloc((void **)&plan[i].vx, size_model); hipHostMalloc((void **)&plan[i].vy, size_model); hipHostMalloc((void **)&plan[i].vz, size_model); hipHostMalloc((void **)&plan[i].record, sizeof(float)*rnmax*nt); hipHostMalloc((void **)&plan[i].record2, sizeof(float)*rnmax*nt); hipHostMalloc((void **)&plan[i].record3, sizeof(float)*rnmax*nt); 
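// The host-side wavefield and record buffers above are allocated with
// hipHostMalloc (pinned memory), which is what lets the hipMemcpyAsync calls in
// cuda_forward_acoustic_3D overlap with work in each GPU's stream. If the model
// arrays passed in from the caller (velp, rho, rik, ...) are ordinary pageable
// allocations, their async copies still complete correctly but cannot be fully
// asynchronous with respect to the host.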
//===========device======================// //===========device======================// hipMalloc((void **)&plan[i].d_r_ix,sizeof(int)*rnmax); hipMalloc((void **)&plan[i].d_r_iy,sizeof(int)*rnmax); hipMalloc((void **)&plan[i].d_rik,sizeof(float)*nt); hipMalloc((void **)&plan[i].d_velp, size_model); hipMalloc((void **)&plan[i].d_gama_p, size_model); hipMalloc((void **)&plan[i].d_vels, size_model); hipMalloc((void **)&plan[i].d_gama_s, size_model); hipMalloc((void **)&plan[i].d_rho, size_model); hipMalloc((void **)&plan[i].d_pxx, size_model); hipMalloc((void **)&plan[i].d_pyy, size_model); hipMalloc((void **)&plan[i].d_pzz, size_model); hipMalloc((void **)&plan[i].d_pxy, size_model); hipMalloc((void **)&plan[i].d_pxz, size_model); hipMalloc((void **)&plan[i].d_pyz, size_model); hipMalloc((void **)&plan[i].d_vx, size_model); hipMalloc((void **)&plan[i].d_vy, size_model); hipMalloc((void **)&plan[i].d_vz, size_model); //////////////// pml ////////////// hipMalloc((void **)&plan[i].d_gammax,sizeof(float)*ntx); hipMalloc((void **)&plan[i].d_alphax,sizeof(float)*ntx); hipMalloc((void **)&plan[i].d_Omegax,sizeof(float)*ntx); hipMalloc((void **)&plan[i].d_a_x,sizeof(float)*ntx); hipMalloc((void **)&plan[i].d_b_x,sizeof(float)*ntx); hipMalloc((void **)&plan[i].d_gammay,sizeof(float)*nty); hipMalloc((void **)&plan[i].d_alphay,sizeof(float)*nty); hipMalloc((void **)&plan[i].d_Omegay,sizeof(float)*nty); hipMalloc((void **)&plan[i].d_a_y,sizeof(float)*nty); hipMalloc((void **)&plan[i].d_b_y,sizeof(float)*nty); hipMalloc((void **)&plan[i].d_gammaz,sizeof(float)*ntz); hipMalloc((void **)&plan[i].d_alphaz,sizeof(float)*ntz); hipMalloc((void **)&plan[i].d_Omegaz,sizeof(float)*ntz); hipMalloc((void **)&plan[i].d_a_z,sizeof(float)*ntz); hipMalloc((void **)&plan[i].d_b_z,sizeof(float)*ntz); hipMalloc((void**)&plan[i].d_phi_vx_xx,size_model); hipMalloc((void**)&plan[i].d_phi_vy_yx,size_model); hipMalloc((void**)&plan[i].d_phi_vz_zx,size_model); hipMalloc((void**)&plan[i].d_phi_vx_xy,size_model); hipMalloc((void**)&plan[i].d_phi_vy_yy,size_model); hipMalloc((void**)&plan[i].d_phi_vz_zy,size_model); hipMalloc((void**)&plan[i].d_phi_vx_xz,size_model); hipMalloc((void**)&plan[i].d_phi_vy_yz,size_model); hipMalloc((void**)&plan[i].d_phi_vz_zz,size_model); hipMalloc((void**)&plan[i].d_phi_vx_z,size_model); hipMalloc((void**)&plan[i].d_phi_vz_x,size_model); hipMalloc((void**)&plan[i].d_phi_vx_y,size_model); hipMalloc((void**)&plan[i].d_phi_vy_x,size_model); hipMalloc((void**)&plan[i].d_phi_vy_z,size_model); hipMalloc((void**)&plan[i].d_phi_vz_y,size_model); hipMalloc((void**)&plan[i].d_phi_pxx_x,size_model); hipMalloc((void**)&plan[i].d_phi_pxy_y,size_model); hipMalloc((void**)&plan[i].d_phi_pxz_z,size_model); hipMalloc((void**)&plan[i].d_phi_pxy_x,size_model); hipMalloc((void**)&plan[i].d_phi_pyy_y,size_model); hipMalloc((void**)&plan[i].d_phi_pyz_z,size_model); hipMalloc((void**)&plan[i].d_phi_pxz_x,size_model); hipMalloc((void**)&plan[i].d_phi_pyz_y,size_model); hipMalloc((void**)&plan[i].d_phi_pzz_z,size_model); /////////////////////////////////////////////////////////////////////// hipMalloc((void **)&plan[i].d_inx, sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_iny, sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_inz, sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_in_pxx, sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_in_pyy, sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_in_pzz, sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_in_pxy, 
sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_in_pxz, sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_in_pyz, sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_outx, sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_outy, sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_outz, sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_outpxx, sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_outpyy, sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_outpzz, sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_outpxy, sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_outpxz, sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_outpyz, sizeof(hipfftComplex)*ntp); hipMalloc((void**)&plan[i].d_kx,sizeof(float)*ntx); hipMalloc((void**)&plan[i].d_ky,sizeof(float)*nty); hipMalloc((void**)&plan[i].d_kz,sizeof(float)*ntz); hipMalloc((void**)&plan[i].d_k,size_model); hipMalloc((void **)&plan[i].d_kvx_x,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_kvy_y,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_kvz_z,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_kvx_z,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_kvz_x,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_kvx_y,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_kvy_x,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_kvy_z,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_kvz_y,sizeof(hipfftComplex)*ntp); ////////////////////////////////////////////////////////////// hipMalloc((void **)&plan[i].d_eta_p1,size_model); hipMalloc((void **)&plan[i].d_eta_p2,size_model); hipMalloc((void **)&plan[i].d_eta_p3,size_model); hipMalloc((void **)&plan[i].d_eta_s1,size_model); hipMalloc((void **)&plan[i].d_eta_s2,size_model); hipMalloc((void **)&plan[i].d_eta_s3,size_model); hipMalloc((void **)&plan[i].d_tao_p1,size_model); hipMalloc((void **)&plan[i].d_tao_p2,size_model); hipMalloc((void **)&plan[i].d_tao_s1,size_model); hipMalloc((void **)&plan[i].d_tao_s2,size_model); ////////////////////////////////////////////////////////////// //////////////////////////////////////// hipMalloc((void **)&plan[i].d_Ap1,size_model); hipMalloc((void **)&plan[i].d_Ap2,size_model); hipMalloc((void **)&plan[i].d_Ap3,size_model); /////////////////////////////////////// hipMalloc((void **)&plan[i].d_partx1,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_party1,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partz1,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partx2,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_party2,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partz2,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partx3,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_party3,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partz3,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvx_x1,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvx_x2,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvx_x3,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvx_x4,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvx_x5,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvy_y1,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvy_y2,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvy_y3,sizeof(hipfftComplex)*ntp); hipMalloc((void 
**)&plan[i].d_partvy_y4,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvy_y5,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvz_z1,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvz_z2,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvz_z3,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvz_z4,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvz_z5,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvx_y1,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvx_y2,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvx_y3,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvx_y4,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvx_y5,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvy_x1,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvy_x2,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvy_x3,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvy_x4,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvy_x5,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvz_x1,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvz_x2,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvz_x3,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvz_x4,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvz_x5,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvx_z1,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvx_z2,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvx_z3,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvx_z4,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvx_z5,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvy_z1,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvy_z2,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvy_z3,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvy_z4,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvy_z5,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvz_y1,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvz_y2,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvz_y3,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvz_y4,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_partvz_y5,sizeof(hipfftComplex)*ntp); //////////// hipMalloc((void **)&plan[i].d_record, sizeof(float)*rnmax*nt); hipMalloc((void **)&plan[i].d_record2, sizeof(float)*rnmax*nt); hipMalloc((void **)&plan[i].d_record3, sizeof(float)*rnmax*nt); hipMalloc((void **)&plan[i].d_dvx,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_dvy,sizeof(hipfftComplex)*ntp); hipMalloc((void **)&plan[i].d_dvz,sizeof(hipfftComplex)*ntp); } } //========================================================= // Free the memory for variables in device // ======================================================= extern "C" void cuda_Device_free ( struct MultiGPU plan[], int GPU_N ) { int i; for(i=0;i<GPU_N;i++) { hipSetDevice(i); hipfftDestroy(plan[i].PLAN_FORWARD); hipfftDestroy(plan[i].PLAN_BACKWARD); hipHostFree(plan[i].pxx); hipHostFree(plan[i].pyy); hipHostFree(plan[i].pzz); hipHostFree(plan[i].pxy); hipHostFree(plan[i].pxz); hipHostFree(plan[i].pyz); hipHostFree(plan[i].vx); hipHostFree(plan[i].vy); hipHostFree(plan[i].vz); hipHostFree(plan[i].record); hipHostFree(plan[i].record2); 
hipHostFree(plan[i].record3); hipFree(plan[i].d_r_ix); hipFree(plan[i].d_r_iy); hipFree(plan[i].d_rik); hipFree(plan[i].d_velp); hipFree(plan[i].d_gama_p); hipFree(plan[i].d_vels); hipFree(plan[i].d_gama_s); hipFree(plan[i].d_rho); hipFree(plan[i].d_pxx); hipFree(plan[i].d_pyy); hipFree(plan[i].d_pzz); hipFree(plan[i].d_pxy); hipFree(plan[i].d_pxz); hipFree(plan[i].d_pyz); hipFree(plan[i].d_vx); hipFree(plan[i].d_vy); hipFree(plan[i].d_vz); //////////////////pml ///////////////// hipFree(plan[i].d_gammax); hipFree(plan[i].d_alphax); hipFree(plan[i].d_Omegax); hipFree(plan[i].d_a_x); hipFree(plan[i].d_b_x); hipFree(plan[i].d_gammay); hipFree(plan[i].d_alphay); hipFree(plan[i].d_Omegay); hipFree(plan[i].d_a_y); hipFree(plan[i].d_b_y); hipFree(plan[i].d_gammaz); hipFree(plan[i].d_alphaz); hipFree(plan[i].d_Omegaz); hipFree(plan[i].d_a_z); hipFree(plan[i].d_b_z); hipFree(plan[i].d_phi_vx_xx); hipFree(plan[i].d_phi_vy_yx); hipFree(plan[i].d_phi_vz_zx); hipFree(plan[i].d_phi_vx_xy); hipFree(plan[i].d_phi_vy_yy); hipFree(plan[i].d_phi_vz_zy); hipFree(plan[i].d_phi_vx_xz); hipFree(plan[i].d_phi_vy_yz); hipFree(plan[i].d_phi_vz_zz); hipFree(plan[i].d_phi_vx_z); hipFree(plan[i].d_phi_vz_x); hipFree(plan[i].d_phi_vx_y); hipFree(plan[i].d_phi_vy_x); hipFree(plan[i].d_phi_vy_z); hipFree(plan[i].d_phi_vz_y); hipFree(plan[i].d_phi_pxx_x); hipFree(plan[i].d_phi_pxy_y); hipFree(plan[i].d_phi_pxz_z); hipFree(plan[i].d_phi_pxy_x); hipFree(plan[i].d_phi_pyy_y); hipFree(plan[i].d_phi_pyz_z); hipFree(plan[i].d_phi_pxz_x); hipFree(plan[i].d_phi_pyz_y); hipFree(plan[i].d_phi_pzz_z); ////////////////////////////////////////////////////// hipFree(plan[i].d_inx); hipFree(plan[i].d_iny); hipFree(plan[i].d_inz); hipFree(plan[i].d_in_pxx); hipFree(plan[i].d_in_pyy); hipFree(plan[i].d_in_pzz); hipFree(plan[i].d_in_pxy); hipFree(plan[i].d_in_pxz); hipFree(plan[i].d_in_pyz); hipFree(plan[i].d_outx); hipFree(plan[i].d_outy); hipFree(plan[i].d_outz); hipFree(plan[i].d_outpxx); hipFree(plan[i].d_outpyy); hipFree(plan[i].d_outpzz); hipFree(plan[i].d_outpxy); hipFree(plan[i].d_outpxz); hipFree(plan[i].d_outpyz); hipFree(plan[i].d_kx); hipFree(plan[i].d_ky); hipFree(plan[i].d_kz); hipFree(plan[i].d_k); hipFree(plan[i].d_kvx_x); hipFree(plan[i].d_kvy_y); hipFree(plan[i].d_kvz_z); hipFree(plan[i].d_kvx_z); hipFree(plan[i].d_kvz_x); hipFree(plan[i].d_kvy_z); hipFree(plan[i].d_kvz_y); hipFree(plan[i].d_kvx_y); hipFree(plan[i].d_kvy_x); hipFree(plan[i].d_eta_p1); hipFree(plan[i].d_eta_p2); hipFree(plan[i].d_eta_p3); hipFree(plan[i].d_eta_s1); hipFree(plan[i].d_eta_s2); hipFree(plan[i].d_eta_s3); hipFree(plan[i].d_tao_p1); hipFree(plan[i].d_tao_p2); hipFree(plan[i].d_tao_s1); hipFree(plan[i].d_tao_s2); hipFree(plan[i].d_Ap1); hipFree(plan[i].d_Ap2); hipFree(plan[i].d_Ap3); hipFree(plan[i].d_partx1); hipFree(plan[i].d_party1); hipFree(plan[i].d_partz1); hipFree(plan[i].d_partx2); hipFree(plan[i].d_party2); hipFree(plan[i].d_partz2); hipFree(plan[i].d_partx3); hipFree(plan[i].d_party3); hipFree(plan[i].d_partz3); hipFree(plan[i].d_partvx_x1); hipFree(plan[i].d_partvx_x2); hipFree(plan[i].d_partvx_x3); hipFree(plan[i].d_partvx_x4); hipFree(plan[i].d_partvx_x5); hipFree(plan[i].d_partvy_y1); hipFree(plan[i].d_partvy_y2); hipFree(plan[i].d_partvy_y3); hipFree(plan[i].d_partvy_y4); hipFree(plan[i].d_partvy_y5); hipFree(plan[i].d_partvz_z1); hipFree(plan[i].d_partvz_z2); hipFree(plan[i].d_partvz_z3); hipFree(plan[i].d_partvz_z4); hipFree(plan[i].d_partvz_z5); hipFree(plan[i].d_partvx_y1); hipFree(plan[i].d_partvx_y2); 
hipFree(plan[i].d_partvx_y3); hipFree(plan[i].d_partvx_y4); hipFree(plan[i].d_partvx_y5); hipFree(plan[i].d_partvy_x1); hipFree(plan[i].d_partvy_x2); hipFree(plan[i].d_partvy_x3); hipFree(plan[i].d_partvy_x4); hipFree(plan[i].d_partvy_x5); hipFree(plan[i].d_partvx_z1); hipFree(plan[i].d_partvx_z2); hipFree(plan[i].d_partvx_z3); hipFree(plan[i].d_partvx_z4); hipFree(plan[i].d_partvx_z5); hipFree(plan[i].d_partvz_x1); hipFree(plan[i].d_partvz_x2); hipFree(plan[i].d_partvz_x3); hipFree(plan[i].d_partvz_x4); hipFree(plan[i].d_partvz_x5); hipFree(plan[i].d_partvy_z1); hipFree(plan[i].d_partvy_z2); hipFree(plan[i].d_partvy_z3); hipFree(plan[i].d_partvy_z4); hipFree(plan[i].d_partvy_z5); hipFree(plan[i].d_partvz_y1); hipFree(plan[i].d_partvz_y2); hipFree(plan[i].d_partvz_y3); hipFree(plan[i].d_partvz_y4); hipFree(plan[i].d_partvz_y5); hipFree(plan[i].d_record); hipFree(plan[i].d_record2); hipFree(plan[i].d_record3); hipFree(plan[i].d_dvx); hipFree(plan[i].d_dvy); hipFree(plan[i].d_dvz); } } extern "C" void getdevice(int *GPU_N) { hipGetDeviceCount(GPU_N); }
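/* Expected usage of the extern "C" entry points above (the host driver is not
   shown here, so this ordering is an assumption based on the interfaces):
     getdevice(&GPU_N);               // query the number of available GPUs
     cuda_Device_malloc(...);         // create the 3D FFT plans and device/pinned-host buffers
     cuda_Host_initialization(...);   // zero the pinned host wavefield/record buffers
     cuda_forward_acoustic_3D(...);   // propagate shots is .. is+GPU_N-1, one per GPU
     cuda_Device_free(...);           // destroy the plans and release all memory
   cuda_Device_malloc / cuda_Device_free must bracket every use of the plan[] arrays. */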
d9916908c267133471ce51fed932f8fb2f173253.cu
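// Original CUDA source corresponding to the HIP translation above; the kernels
// and host wrappers below mirror it with cudaStream_t/cufftComplex in place of
// the hip* types.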
#include "stdio.h" #include "stdlib.h" #include "math.h" #include "time.h" #include "string.h" #include "cuda.h" #include "cufft.h" #include "Myfunctions.h" using namespace std; #define BLOCK_WIDTH 16 #define BLOCK_HEIGHT 16 //#define WITH_SHARED_MEMORY 0 #define pi 3.1415926 struct Multistream { cudaStream_t stream,stream_back; }; __global__ void cuda_kernel_wavenumber ( int ntx, int nty, int ntz, float dx, float dy, float dz, float *kx, float *kz, float *ky, float *k, cufftComplex *kvx_x, cufftComplex *kvx_z, cufftComplex *kvx_y, cufftComplex *kvz_x, cufftComplex *kvz_z, cufftComplex *kvz_y, cufftComplex *kvy_x, cufftComplex *kvy_z, cufftComplex *kvy_y ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; int ipt=iz*nty*ntx+ix*nty+iy; int iptt=iy*ntz*ntx+iz*ntx+ix; float dkx,dky,dkz; dkz=1.0/ntz/dz; dky=1.0/nty/dy; dkx=1.0/ntx/dx; float tmpx,tmpy,tmpz; tmpx=2*pi*dkx; tmpy=2*pi*dky; tmpz=2*pi*dkz; if(ix>=0 && ix<ntx && iy>=0 && iy< nty && iz>=0 && iz<ntz/2+1) kz[iz]=2*pi/ntz/dz*iz; if(ix>=0 && ix<ntx && iy>=0 && iy< nty && iz>=ntz/2+1 && iz<ntz) kz[iz]=2*pi/ntz/dz*(ntz-iz); if(iz>=0 && iz<ntz && iy>=0 && iy< nty && ix>=0 && ix<ntx/2+1) kx[ix]=2*pi/ntx/dx*ix; if(iz>=0 && iz<ntz && iy>=0 && iy< nty && ix>=ntx/2+1 && ix<ntx) kx[ix]=2*pi/ntx/dx*(ntx-ix); if(iz>=0 && iz<ntz && ix>=0 && ix<ntx && iy>=0 && iy< nty/2+1) ky[iy]=2*pi/nty/dy*iy; if(iz>=0 && iz<ntz && ix>=0 && ix<ntx && iy>=nty/2+1 && iy<nty) ky[iy]=2*pi/nty/dy*(nty-iy); if(iz>=0 && iz<ntz && iy>=0 && iy< nty && ix>=0 && ix<ntx) { k[ip]=pow(kx[ix],2)+pow(kz[iz],2)+pow(ky[iy],2); } k[0]=1e-10; if(ix>=0 && ix<ntx && iy>=0 && iy< nty && iz>=0 && iz<ntz/2+1) { kvz_z[ip].x=-tmpz*iz*sin(iz*pi/ntz); kvz_z[ip].y=tmpz*iz*cos(iz*pi/ntz); kvx_z[ip].x=tmpz*iz*sin(iz*pi/ntz); kvx_z[ip].y=tmpz*iz*cos(iz*pi/ntz); kvy_z[ip].x=tmpz*iz*sin(iz*pi/ntz); kvy_z[ip].y=tmpz*iz*cos(iz*pi/ntz); } if(ix>=0 && ix<ntx && iy>=0 && iy< nty && iz>=ntz/2+1 && iz<ntz) { kvz_z[ip].x=-tmpz*(ntz-iz)*sin((ntz-iz)*pi/ntz); kvz_z[ip].y=-tmpz*(ntz-iz)*cos((ntz-iz)*pi/ntz); kvx_z[ip].x=tmpz*(ntz-iz)*sin((ntz-iz)*pi/ntz); kvx_z[ip].y=-tmpz*(ntz-iz)*cos((ntz-iz)*pi/ntz); kvy_z[ip].x=tmpz*(ntz-iz)*sin((ntz-iz)*pi/ntz); kvy_z[ip].y=-tmpz*(ntz-iz)*cos((ntz-iz)*pi/ntz); } if(iz>=0 && iz<ntz && iy>=0 && iy< nty && ix>=0 && ix<ntx/2+1) { kvx_x[iptt].x=-tmpx*ix*sin(ix*pi/ntx); kvx_x[iptt].y=tmpx*ix*cos(ix*pi/ntx); kvz_x[iptt].x=tmpx*ix*sin(ix*pi/ntx); kvz_x[iptt].y=tmpx*ix*cos(ix*pi/ntx); kvy_x[iptt].x=tmpx*ix*sin(ix*pi/ntx); kvy_x[iptt].y=tmpx*ix*cos(ix*pi/ntx); } if(iz>=0 && iz<ntz && iy>=0 && iy< nty && ix>=ntx/2+1 && ix<ntx) { kvx_x[iptt].x=-tmpx*(ntx-ix)*sin((ntx-ix)*pi/ntx); kvx_x[iptt].y=-tmpx*(ntx-ix)*cos((ntx-ix)*pi/ntx); kvz_x[iptt].x=tmpx*(ntx-ix)*sin((ntx-ix)*pi/ntx); kvz_x[iptt].y=-tmpx*(ntx-ix)*cos((ntx-ix)*pi/ntx); kvy_x[iptt].x=tmpx*(ntx-ix)*sin((ntx-ix)*pi/ntx); kvy_x[iptt].y=-tmpx*(ntx-ix)*cos((ntx-ix)*pi/ntx); } if(iz>=0 && iz<ntz && ix>=0 && ix<ntx && iy>=0 && iy< nty/2+1) { kvy_y[ipt].x=-tmpy*iy*sin(iy*pi/nty); kvy_y[ipt].y=tmpy*iy*cos(iy*pi/nty); kvz_y[ipt].x=tmpy*iy*sin(iy*pi/nty); kvz_y[ipt].y=tmpy*iy*cos(iy*pi/nty); kvx_y[ipt].x=tmpy*iy*sin(iy*pi/nty); kvx_y[ipt].y=tmpy*iy*cos(iy*pi/nty); } if(iz>=0 && iz<ntz && ix>=0 && ix<ntx && iy>=nty/2+1 && iy<nty) { kvy_y[ipt].x=-tmpy*(nty-iy)*sin((nty-iy)*pi/nty); kvy_y[ipt].y=-tmpy*(nty-iy)*cos((nty-iy)*pi/nty); 
kvz_y[ipt].x=tmpy*(nty-iy)*sin((nty-iy)*pi/nty); kvz_y[ipt].y=-tmpy*(nty-iy)*cos((nty-iy)*pi/nty); kvx_y[ipt].x=tmpy*(nty-iy)*sin((nty-iy)*pi/nty); kvx_y[ipt].y=-tmpy*(nty-iy)*cos((nty-iy)*pi/nty); } __syncthreads(); } __global__ void cuda_kernel_viscoacoustic_parameters ( int ntx, int nty, int ntz, float dx, float dy, float dz, float dt, float w0, float *velp, float *vels, float *rho, float *k, float *gama_p, float *gama_s, float *Ap1, float *Ap2, float *Ap3, float *tao_p1, float *tao_p2, float *eta_p1, float *eta_p2, float *eta_p3, float *tao_s1, float *tao_s2, float *eta_s1, float *eta_s2, float *eta_s3 ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; float sinc2nd; float vel; if(iz>=0 && iz<ntz && iy>=0 && iy< nty && ix>=0 && ix<ntx) { // sinc2nd =1.0; //pow(sin(velp_max*powf(k[ip],0.5)*dt/2)/(velp_max*powf(k[ip],0.5)*dt/2),2); sinc2nd =1.0; //pow(sin(vel*powf(k[ip],0.5)*dt/2)/(vel*powf(k[ip],0.5)*dt/2),2); ////////////////////////////// Ap1[ip]=sinc2nd*powf(k[ip],-0.5); Ap2[ip]=sinc2nd; Ap3[ip]=sinc2nd*powf(k[ip],0.5); ///////////////////////////// ////////////////////////////////// tao_p1[ip]=rho[ip]*pow(velp[ip]*cos(gama_p[ip]*pi/2.0),1)*gama_p[ip]*pi; tao_s1[ip]=rho[ip]*pow(vels[ip]*cos(gama_s[ip]*pi/2.0),1)*gama_s[ip]*pi; tao_p2[ip]=rho[ip]*pow(velp[ip]*cos(gama_p[ip]*pi/2.0),2)*pow(gama_p[ip],2)*pi/w0; tao_s2[ip]=rho[ip]*pow(vels[ip]*cos(gama_s[ip]*pi/2.0),2)*pow(gama_s[ip],2)*pi/w0; eta_p1[ip]=-rho[ip]*pow(velp[ip]*cos(gama_p[ip]*pi/2.0),1)*gama_p[ip]*w0; eta_s1[ip]=-rho[ip]*pow(vels[ip]*cos(gama_s[ip]*pi/2.0),1)*gama_s[ip]*w0; eta_p2[ip]=rho[ip]*pow(velp[ip]*cos(gama_p[ip]*pi/2.0),2); eta_s2[ip]=rho[ip]*pow(vels[ip]*cos(gama_s[ip]*pi/2.0),2); eta_p3[ip]=rho[ip]*pow(velp[ip]*cos(gama_p[ip]*pi/2.0),3)*gama_p[ip]/w0; eta_s3[ip]=rho[ip]*pow(vels[ip]*cos(gama_s[ip]*pi/2.0),3)*gama_s[ip]/w0; ///////////////////////////////// } __syncthreads(); } __global__ void cuda_kernel_pml_parameters ( int ntx, int nty, int ntz, int pml, float dx, float dy, float dz, float dt, float f0, float velp_max, float *gammax, float *alphax, float *Omegax, float *a_x, float *b_x, float *gammay, float *alphay, float *Omegay, float *a_y, float *b_y, float *gammaz, float *alphaz, float *Omegaz, float *a_z, float *b_z ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int n1=2; int n2=1; int n3=2; float R=1e-2; velp_max=5000; // float gamma_max = 1.0; float alpha_max = pi*f0; float Omegax_max = (1+n1+n2)*velp_max*log(1.0/R)/((n1+n2-1)*pml*dx); float Omegay_max = (1+n1+n2)*velp_max*log(1.0/R)/((n1+n2-1)*pml*dy); float Omegaz_max = (1+n1+n2)*velp_max*log(1.0/R)/((n1+n2-1)*pml*dz); if(ix>=0&&ix<=pml-1) { gammax[ix] = 1.0;// + (gamma_max-1)*powf(1.0*ix/(pml-1),n1); alphax[ix] = alpha_max*powf(1.0*ix/(pml-1),n3); Omegax[ix] = Omegax_max*powf(1.0*(pml-1-ix)/pml,n1+n2); gammax[ntx-1-ix] = gammax[ix]; alphax[ntx-1-ix] = alphax[ix]; Omegax[ntx-1-ix] = Omegax[ix]; } if(ix>=pml&&ix<=ntx-1-pml) { gammax[ix] = 1.0; alphax[ix] = alpha_max; Omegax[ix] = 0.0; } if(iy>=0&&iy<=pml-1) { gammay[iy] = 1.0;// + (gamma_max-1)*powf(1.0*ix/(pml-1),n1); alphay[iy] = alpha_max*powf(1.0*iy/(pml-1),n3); Omegay[iy] = Omegay_max*powf(1.0*(pml-1-iy)/pml,n1+n2); gammay[nty-1-iy] = gammay[iy]; alphay[nty-1-iy] = alphay[iy]; Omegay[nty-1-iy] = Omegay[iy]; } 
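// The graded profiles are mirrored onto the far boundary (index nty-1-iy) above;
// interior points handled below get gamma = 1 and Omega = 0, so with b_* = 0 the
// CPML memory variables phi_* (initialized to zero) stay zero outside the
// absorbing layers. The coefficients a_* = alpha + Omega/gamma and
// b_* = Omega/gamma^2 computed at the end of this kernel are the ones used in the
// phi_* recursions of cuda_kernel_calculate_p and cuda_kernel_calculate_v.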
if(iy>=pml&&iy<=nty-1-pml) { gammay[iy] = 1.0; alphay[iy] = alpha_max; Omegay[iy] = 0.0; } if(iz>=0&&iz<=pml-1) { gammaz[iz] = 1.0;// + (gamma_max-1)*gamma_max*powf(1.0*iz/(pml-1),n1); alphaz[iz] = alpha_max*powf(1.0*iz/(pml-1),n3); Omegaz[iz] = Omegaz_max*powf(1.0*(pml-1-iz)/pml,n1+n2); gammaz[ntz-1-iz] = gammaz[iz]; alphaz[ntz-1-iz] = alphaz[iz]; Omegaz[ntz-1-iz] = Omegaz[iz]; } if(iz>=pml&&iz<=ntz-1-pml) { gammaz[iz] = 1.0; alphaz[iz] = alpha_max; Omegaz[iz] = 0.0; } if(ix>=0&&ix<=ntx-1) { a_x[ix] = alphax[ix] + Omegax[ix]/gammax[ix]; b_x[ix] = Omegax[ix]/powf(gammax[ix],2.0); } if(iy>=0&&iy<=nty-1) { a_y[iy] = alphay[iy] + Omegay[iy]/gammay[iy]; b_y[iy] = Omegay[iy]/powf(gammay[iy],2.0); } if(iz>=0&&iz<=ntz-1) { a_z[iz] = alphaz[iz] + Omegaz[iz]/gammaz[iz]; b_z[iz] = Omegaz[iz]/powf(gammaz[iz],2.0); } __syncthreads(); } __global__ void cuda_kernel_initialization ( int ntx, int nty, int ntz, float *vx, float *vy, float *vz, float *pxx, float *pyy, float *pzz, float *pxy, float *pyz, float *pxz, float *phi_vx_xx, float *phi_vz_zx, float *phi_vy_yx, float *phi_vx_xy, float *phi_vz_zy, float *phi_vy_yy, float *phi_vx_xz, float *phi_vz_zz, float *phi_vy_yz, float *phi_vx_z, float *phi_vz_x, float *phi_vx_y, float *phi_vy_x, float *phi_vy_z, float *phi_vz_y, float *phi_pxx_x, float *phi_pxy_y, float *phi_pxz_z, float *phi_pxy_x, float *phi_pyy_y, float *phi_pyz_z, float *phi_pxz_x, float *phi_pyz_y, float *phi_pzz_z, cufftComplex *dvx, cufftComplex *dvy, cufftComplex *dvz, cufftComplex *partx1, cufftComplex *partz1, cufftComplex *party1, cufftComplex *partx2, cufftComplex *partz2, cufftComplex *party2, cufftComplex *partx3, cufftComplex *partz3, cufftComplex *party3, cufftComplex *partvx_x1, cufftComplex *partvx_x2, cufftComplex *partvx_x3, cufftComplex *partvx_x4, cufftComplex *partvx_x5, cufftComplex *partvz_z1, cufftComplex *partvz_z2, cufftComplex *partvz_z3, cufftComplex *partvz_z4, cufftComplex *partvz_z5, cufftComplex *partvy_y1, cufftComplex *partvy_y2, cufftComplex *partvy_y3, cufftComplex *partvy_y4, cufftComplex *partvy_y5, cufftComplex *partvx_y1, cufftComplex *partvx_y2, cufftComplex *partvx_y3, cufftComplex *partvx_y4, cufftComplex *partvx_y5, cufftComplex *partvy_x1, cufftComplex *partvy_x2, cufftComplex *partvy_x3, cufftComplex *partvy_x4, cufftComplex *partvy_x5, cufftComplex *partvy_z1, cufftComplex *partvy_z2, cufftComplex *partvy_z3, cufftComplex *partvy_z4, cufftComplex *partvy_z5, cufftComplex *partvz_y1, cufftComplex *partvz_y2, cufftComplex *partvz_y3, cufftComplex *partvz_y4, cufftComplex *partvz_y5, cufftComplex *partvx_z1, cufftComplex *partvx_z2, cufftComplex *partvx_z3, cufftComplex *partvx_z4, cufftComplex *partvx_z5, cufftComplex *partvz_x1, cufftComplex *partvz_x2, cufftComplex *partvz_x3, cufftComplex *partvz_x4, cufftComplex *partvz_x5 ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; if(iz>=0 && iz<ntz && iy>=0 && iy< nty && ix>=0 && ix<ntx) { vx[ip]=0.0;vy[ip]=0.0;vz[ip]=0.0; pxx[ip]=0.0; pyy[ip]=0.0; pzz[ip]=0.0; pxy[ip]=0.0; pyz[ip]=0.0; pxz[ip]=0.0; phi_vx_xx[ip]=0.0; phi_vz_zx[ip]=0.0; phi_vy_yx[ip]=0.0; phi_vx_xy[ip]=0.0; phi_vz_zy[ip]=0.0; phi_vy_yy[ip]=0.0; phi_vx_xz[ip]=0.0; phi_vz_zz[ip]=0.0; phi_vy_yz[ip]=0.0; phi_vx_z[ip]=0.0; phi_vz_x[ip]=0.0; phi_vx_y[ip]=0.0; phi_vy_x[ip]=0.0; phi_vy_z[ip]=0.0; phi_vz_y[ip]=0.0; phi_pxx_x[ip]=0.0; phi_pxy_y[ip]=0.0; phi_pxz_z[ip]=0.0; 
phi_pxy_x[ip]=0.0; phi_pyy_y[ip]=0.0; phi_pyz_z[ip]=0.0; phi_pxz_x[ip]=0.0; phi_pyz_y[ip]=0.0; phi_pzz_z[ip]=0.0; partx1[ip].x=0.0; partx1[ip].y=0.0; party1[ip].x=0.0; party1[ip].y=0.0; partz1[ip].x=0.0; partz1[ip].y=0.0; partx2[ip].x=0.0; partx2[ip].y=0.0; party2[ip].x=0.0; party2[ip].y=0.0; partz2[ip].x=0.0; partz2[ip].y=0.0; partx3[ip].x=0.0; partx3[ip].y=0.0; party3[ip].x=0.0; party3[ip].y=0.0; partz3[ip].x=0.0; partz3[ip].y=0.0; partvx_x1[ip].x=0.0; partvx_x1[ip].y=0.0; partvz_z1[ip].x=0.0; partvz_z1[ip].y=0.0; partvy_y1[ip].x=0.0; partvy_y1[ip].y=0.0; partvx_x2[ip].x=0.0; partvx_x2[ip].y=0.0; partvz_z2[ip].x=0.0; partvz_z2[ip].y=0.0; partvy_y2[ip].x=0.0; partvy_y2[ip].y=0.0; partvx_x3[ip].x=0.0; partvx_x3[ip].y=0.0; partvz_z3[ip].x=0.0; partvz_z3[ip].y=0.0; partvy_y3[ip].x=0.0; partvy_y3[ip].y=0.0; partvx_x4[ip].x=0.0; partvx_x4[ip].y=0.0; partvz_z4[ip].x=0.0; partvz_z4[ip].y=0.0; partvy_y4[ip].x=0.0; partvy_y4[ip].y=0.0; partvx_x5[ip].x=0.0; partvx_x5[ip].y=0.0; partvz_z5[ip].x=0.0; partvz_z5[ip].y=0.0; partvy_y5[ip].x=0.0; partvy_y5[ip].y=0.0; partvx_y1[ip].x=0.0; partvx_y1[ip].y=0.0; partvy_x1[ip].x=0.0; partvy_x1[ip].y=0.0; partvx_y2[ip].x=0.0; partvx_y2[ip].y=0.0; partvy_x2[ip].x=0.0; partvy_x2[ip].y=0.0; partvx_y3[ip].x=0.0; partvx_y3[ip].y=0.0; partvy_x3[ip].x=0.0; partvy_x3[ip].y=0.0; partvx_y4[ip].x=0.0; partvx_y4[ip].y=0.0; partvy_x4[ip].x=0.0; partvy_x4[ip].y=0.0; partvx_y5[ip].x=0.0; partvx_y5[ip].y=0.0; partvy_x5[ip].x=0.0; partvy_x5[ip].y=0.0; partvy_z1[ip].x=0.0; partvy_z1[ip].y=0.0; partvz_y1[ip].x=0.0; partvz_y1[ip].y=0.0; partvy_z2[ip].x=0.0; partvy_z2[ip].y=0.0; partvz_y2[ip].x=0.0; partvz_y2[ip].y=0.0; partvy_z3[ip].x=0.0; partvy_z3[ip].y=0.0; partvz_y3[ip].x=0.0; partvz_y3[ip].y=0.0; partvy_z4[ip].x=0.0; partvy_z4[ip].y=0.0; partvz_y4[ip].x=0.0; partvz_y4[ip].y=0.0; partvy_z5[ip].x=0.0; partvy_z5[ip].y=0.0; partvz_y5[ip].x=0.0; partvz_y5[ip].y=0.0; partvz_x1[ip].x=0.0; partvz_x1[ip].y=0.0; partvx_z1[ip].x=0.0; partvx_z1[ip].y=0.0; partvz_x2[ip].x=0.0; partvz_x2[ip].y=0.0; partvx_z2[ip].x=0.0; partvx_z2[ip].y=0.0; partvz_x3[ip].x=0.0; partvz_x3[ip].y=0.0; partvx_z3[ip].x=0.0; partvx_z3[ip].y=0.0; partvz_x4[ip].x=0.0; partvz_x4[ip].y=0.0; partvx_z4[ip].x=0.0; partvx_z4[ip].y=0.0; partvz_x5[ip].x=0.0; partvz_x5[ip].y=0.0; partvx_z5[ip].x=0.0; partvx_z5[ip].y=0.0; dvx[ip].x=0.0; dvx[ip].y=0.0; dvy[ip].x=0.0; dvy[ip].y=0.0; dvz[ip].x=0.0; dvz[ip].y=0.0; } __syncthreads(); } __global__ void cuda_kernel_p_real_to_complex ( int ntx, int nty, int ntz, float *real_pxx, float *real_pyy, float *real_pzz, float *real_pxy, float *real_pxz, float *real_pyz, cufftComplex *in_pxx, cufftComplex *in_pyy, cufftComplex *in_pzz, cufftComplex *in_pxy, cufftComplex *in_pxz, cufftComplex *in_pyz ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; if(iz>=0 && iz<ntz && iy>=0 && iy< nty && ix>=0 && ix<ntx) { in_pxx[ip].x=real_pxx[ip]; in_pxx[ip].y=0.0; in_pyy[ip].x=real_pyy[ip]; in_pyy[ip].y=0.0; in_pzz[ip].x=real_pzz[ip]; in_pzz[ip].y=0.0; in_pxy[ip].x=real_pxy[ip]; in_pxy[ip].y=0.0; in_pyz[ip].x=real_pyz[ip]; in_pyz[ip].y=0.0; in_pxz[ip].x=real_pxz[ip]; in_pxz[ip].y=0.0; } __syncthreads(); } __global__ void cuda_kernel_vxvyvz_real_to_complex ( int ntx, int nty, int ntz, float *real_x, float *real_y, float *real_z, cufftComplex *inx, cufftComplex *iny, cufftComplex *inz ) { int bx=blockIdx.x; int by=blockIdx.y; int 
tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; if(iz>=0 && iz<ntz && iy>=0 && iy< nty && ix>=0 && ix<ntx) { inx[ip].x=real_x[ip]; inx[ip].y=0.0; iny[ip].x=real_y[ip]; iny[ip].y=0.0; inz[ip].x=real_z[ip]; inz[ip].y=0.0; } __syncthreads(); } __global__ void cuda_kernel_operate_k_pxxpyypzz ( int ntx, int nty, int ntz, float dt, cufftComplex *outx, cufftComplex *outy, cufftComplex *outz, cufftComplex *dvx, cufftComplex *dvy, cufftComplex *dvz, cufftComplex *inx, cufftComplex *iny, cufftComplex *inz, cufftComplex *k_x, cufftComplex *k_y, cufftComplex *k_z, float *k2, int AorB ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; int ipt=iz*nty*ntx+ix*nty+iy; int iptt=iy*ntz*ntx+iz*ntx+ix; cufftComplex tmpx, tmpy, tmpz; if(iz>=0 && iz<ntz && ix>=0 && ix<ntx && iy>=0 && iy< nty) { if(AorB==0) { inx[ip].x=k2[ip]*(k_x[iptt].x*outx[ip].x - k_x[iptt].y*outx[ip].y); inx[ip].y=k2[ip]*(k_x[iptt].x*outx[ip].y + k_x[iptt].y*outx[ip].x); iny[ip].x=k2[ip]*(k_y[ipt].x*outy[ip].x - k_y[ipt].y*outy[ip].y); iny[ip].y=k2[ip]*(k_y[ipt].x*outy[ip].y + k_y[ipt].y*outy[ip].x); inz[ip].x=k2[ip]*(k_z[ip].x*outz[ip].x - k_z[ip].y*outz[ip].y); inz[ip].y=k2[ip]*(k_z[ip].x*outz[ip].y + k_z[ip].y*outz[ip].x); } if(AorB==1) { tmpx.x=(outx[ip].x-dvx[ip].x)/dt; tmpx.y=(outx[ip].y-dvx[ip].y)/dt; tmpy.x=(outy[ip].x-dvy[ip].x)/dt; tmpy.y=(outy[ip].y-dvy[ip].y)/dt; tmpz.x=(outz[ip].x-dvz[ip].x)/dt; tmpz.y=(outz[ip].y-dvz[ip].y)/dt; inx[ip].x=k2[ip]*(k_x[iptt].x*tmpx.x - k_x[iptt].y*tmpx.y); inx[ip].y=k2[ip]*(k_x[iptt].x*tmpx.y + k_x[iptt].y*tmpx.x); iny[ip].x=k2[ip]*(k_y[ipt].x*tmpy.x - k_y[ipt].y*tmpy.y); iny[ip].y=k2[ip]*(k_y[ipt].x*tmpy.y + k_y[ipt].y*tmpy.x); inz[ip].x=k2[ip]*(k_z[ip].x*tmpz.x - k_z[ip].y*tmpz.y); inz[ip].y=k2[ip]*(k_z[ip].x*tmpz.y + k_z[ip].y*tmpz.x); } } __syncthreads(); } __global__ void cuda_kernel_operate_k_pxz ( int ntx, int nty, int ntz, float dt, cufftComplex *outx, cufftComplex *outz, cufftComplex *dvx, cufftComplex *dvz, cufftComplex *inx, cufftComplex *inz, cufftComplex *k_x, cufftComplex *k_z, float *k2, int AorB ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; int ipt=iz*nty*ntx+ix*nty+iy; int iptt=iy*ntz*ntx+iz*ntx+ix; cufftComplex tmpx, tmpy, tmpz; if(iz>=0 && iz<ntz && ix>=0 && ix<ntx && iy>=0 && iy< nty) { if(AorB==0) { inx[ip].x=k2[ip]*(k_x[ip].x*outx[ip].x - k_x[ip].y*outx[ip].y); inx[ip].y=k2[ip]*(k_x[ip].x*outx[ip].y + k_x[ip].y*outx[ip].x); inz[ip].x=k2[ip]*(k_z[iptt].x*outz[ip].x - k_z[iptt].y*outz[ip].y); inz[ip].y=k2[ip]*(k_z[iptt].x*outz[ip].y + k_z[iptt].y*outz[ip].x); } if(AorB==1) { tmpx.x=(outx[ip].x-dvx[ip].x)/dt; tmpx.y=(outx[ip].y-dvx[ip].y)/dt; tmpz.x=(outz[ip].x-dvz[ip].x)/dt; tmpz.y=(outz[ip].y-dvz[ip].y)/dt; inx[ip].x=k2[ip]*(k_x[ip].x*tmpx.x - k_x[ip].y*tmpx.y); inx[ip].y=k2[ip]*(k_x[ip].x*tmpx.y + k_x[ip].y*tmpx.x); inz[ip].x=k2[ip]*(k_z[iptt].x*tmpz.x - k_z[iptt].y*tmpz.y); inz[ip].y=k2[ip]*(k_z[iptt].x*tmpz.y + k_z[iptt].y*tmpz.x); } } __syncthreads(); } __global__ void cuda_kernel_operate_k_pxy ( int ntx, int nty, int ntz, float dt, cufftComplex *outx, cufftComplex *outy, cufftComplex *dvx, cufftComplex *dvy, 
cufftComplex *inx, cufftComplex *iny, cufftComplex *k_x, cufftComplex *k_y, float *k2, int AorB ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; int ipt=iz*nty*ntx+ix*nty+iy; int iptt=iy*ntz*ntx+iz*ntx+ix; cufftComplex tmpx, tmpy, tmpz; if(iz>=0 && iz<ntz && ix>=0 && ix<ntx && iy>=0 && iy< nty) { if(AorB==0) { inx[ip].x=k2[ip]*(k_x[ipt].x*outx[ip].x - k_x[ipt].y*outx[ip].y); inx[ip].y=k2[ip]*(k_x[ipt].x*outx[ip].y + k_x[ipt].y*outx[ip].x); iny[ip].x=k2[ip]*(k_y[iptt].x*outy[ip].x - k_y[iptt].y*outy[ip].y); iny[ip].y=k2[ip]*(k_y[iptt].x*outy[ip].y + k_y[iptt].y*outy[ip].x); } if(AorB==1) { tmpx.x=(outx[ip].x-dvx[ip].x)/dt; tmpx.y=(outx[ip].y-dvx[ip].y)/dt; tmpy.x=(outy[ip].x-dvy[ip].x)/dt; tmpy.y=(outy[ip].y-dvy[ip].y)/dt; inx[ip].x=k2[ip]*(k_x[ipt].x*tmpx.x - k_x[ipt].y*tmpx.y); inx[ip].y=k2[ip]*(k_x[ipt].x*tmpx.y + k_x[ipt].y*tmpx.x); iny[ip].x=k2[ip]*(k_y[iptt].x*tmpy.x - k_y[iptt].y*tmpy.y); iny[ip].y=k2[ip]*(k_y[iptt].x*tmpy.y + k_y[iptt].y*tmpy.x); } } __syncthreads(); } __global__ void cuda_kernel_operate_k_pyz ( int ntx, int nty, int ntz, float dt, cufftComplex *outy, cufftComplex *outz, cufftComplex *dvy, cufftComplex *dvz, cufftComplex *iny, cufftComplex *inz, cufftComplex *k_y, cufftComplex *k_z, float *k2, int AorB ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; int ipt=iz*nty*ntx+ix*nty+iy; int iptt=iy*ntz*ntx+iz*ntx+ix; cufftComplex tmpx, tmpy, tmpz; if(iz>=0 && iz<ntz && ix>=0 && ix<ntx && iy>=0 && iy< nty) { if(AorB==0) { iny[ip].x=k2[ip]*(k_y[ip].x*outy[ip].x - k_y[ip].y*outy[ip].y); iny[ip].y=k2[ip]*(k_y[ip].x*outy[ip].y + k_y[ip].y*outy[ip].x); inz[ip].x=k2[ip]*(k_z[ipt].x*outz[ip].x - k_z[ipt].y*outz[ip].y); inz[ip].y=k2[ip]*(k_z[ipt].x*outz[ip].y + k_z[ipt].y*outz[ip].x); } if(AorB==1) { tmpy.x=(outy[ip].x-dvy[ip].x)/dt; tmpy.y=(outy[ip].y-dvy[ip].y)/dt; tmpz.x=(outz[ip].x-dvz[ip].x)/dt; tmpz.y=(outz[ip].y-dvz[ip].y)/dt; iny[ip].x=k2[ip]*(k_y[ip].x*tmpy.x - k_y[ip].y*tmpy.y); iny[ip].y=k2[ip]*(k_y[ip].x*tmpy.y + k_y[ip].y*tmpy.x); inz[ip].x=k2[ip]*(k_z[ipt].x*tmpz.x - k_z[ipt].y*tmpz.y); inz[ip].y=k2[ip]*(k_z[ipt].x*tmpz.y + k_z[ipt].y*tmpz.x); } } __syncthreads(); } __global__ void cuda_kernel_operate_k_v ( int ntx, int nty, int ntz, float dt, cufftComplex *outx, cufftComplex *outy, cufftComplex *outz, cufftComplex *inx, cufftComplex *iny, cufftComplex *inz, cufftComplex *k_x, cufftComplex *k_y, cufftComplex *k_z ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; int ipt=iz*nty*ntx+ix*nty+iy; int iptt=iy*ntz*ntx+iz*ntx+ix; cufftComplex tmpx, tmpy, tmpz; if(iz>=0 && iz<ntz && ix>=0 && ix<ntx && iy>=0 && iy< nty) { inx[ip].x=k_x[iptt].x*outx[ip].x - k_x[iptt].y*outx[ip].y; inx[ip].y=k_x[iptt].x*outx[ip].y + k_x[iptt].y*outx[ip].x; iny[ip].x=k_y[ipt].x*outy[ip].x - k_y[ipt].y*outy[ip].y; iny[ip].y=k_y[ipt].x*outy[ip].y + k_y[ipt].y*outy[ip].x; inz[ip].x=k_z[ip].x*outz[ip].x - k_z[ip].y*outz[ip].y; inz[ip].y=k_z[ip].x*outz[ip].y + k_z[ip].y*outz[ip].x; } __syncthreads(); } __global__ void cuda_kernel_forward_IO ( int ntx, int nty, int ntz, int ntp, int pml, int nt, int it, 
float dx, float dy, float dz, float dt, int s_ix, int s_iy, int s_iz, float *rik, float *record, float *record2, float *record3, int *r_ix, int *r_iy, int r_iz, int rnmax, int rnx_max, int rny_max, int dr, int r_n, float *pxx, float *pyy, float *pzz, float *vx, float *vy, float *vz ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; int ip11=(ix+15)*area+(iy+15)*ntz+iz; int ir; //============Add source==============// if(iz==s_iz+10 && ix==s_ix && iy==s_iy) { vx[ip]+=rik[it]; //pyy[ip]+=rik[it]; //pzz[ip]+=rik[it]; } //===============seismic record=================// if(ix>=0&&ix<rnx_max && iy>=0&&iy<rny_max&& iz==r_iz) { /* ir=ix*rny_max+iy; // record[it*rnmax+(r_iy[ir]-r_iy[0])/dr*rnx_max+(r_ix[ir]-r_ix[0])/dr]=p2[iz*area+r_iy[ir]*ntx+r_ix[ir]]; record[ir*nt+it]=vx[r_ix[ir]*area+r_iy[ir]*ntz+iz];*/ ir=ix*rny_max+iy; // record[it*rnmax+(r_iy[ir]-r_iy[0])/dr*rnx_max+(r_ix[ir]-r_ix[0])/dr]=p2[iz*area+r_iy[ir]*ntx+r_ix[ir]]; record[it*rny_max*rnx_max+ir]=vx[r_ix[ir]*area+r_iy[ir]*ntz+iz]; record2[it*rny_max*rnx_max+ir]=vy[r_ix[ir]*area+r_iy[ir]*ntz+iz]; record3[it*rny_max*rnx_max+ir]=vz[r_ix[ir]*area+r_iy[ir]*ntz+iz]; } __syncthreads(); } __global__ void cuda_kernel_calculate_p ( int ntx, int nty, int ntz, int ntp, float dt, float *pxx, float *pyy, float *pzz, float *pxy, float *pxz, float *pyz, float *tao_p1, float *tao_s1, float *tao_p2, float *tao_s2, float *eta_p1, float *eta_s1, float *eta_p2, float *eta_s2, float *eta_p3, float *eta_s3, float *gammax, float *a_x, float *b_x, float *gammay, float *a_y, float *b_y, float *gammaz, float *a_z, float *b_z, float *phi_vx_xx, float *phi_vz_zx, float *phi_vy_yx, float *phi_vx_xy, float *phi_vz_zy, float *phi_vy_yy, float *phi_vx_xz, float *phi_vz_zz, float *phi_vy_yz, float *phi_vx_z, float *phi_vz_x, float *phi_vx_y, float *phi_vy_x, float *phi_vy_z, float *phi_vz_y, cufftComplex *partvx_x1, cufftComplex *partvx_x2, cufftComplex *partvx_x3, cufftComplex *partvx_x4, cufftComplex *partvx_x5, cufftComplex *partvz_z1, cufftComplex *partvz_z2, cufftComplex *partvz_z3, cufftComplex *partvz_z4, cufftComplex *partvz_z5, cufftComplex *partvy_y1, cufftComplex *partvy_y2, cufftComplex *partvy_y3, cufftComplex *partvy_y4, cufftComplex *partvy_y5, cufftComplex *partvx_z1, cufftComplex *partvx_z2, cufftComplex *partvx_z3, cufftComplex *partvx_z4, cufftComplex *partvx_z5, cufftComplex *partvz_x1, cufftComplex *partvz_x2, cufftComplex *partvz_x3, cufftComplex *partvz_x4, cufftComplex *partvz_x5, cufftComplex *partvx_y1, cufftComplex *partvx_y2, cufftComplex *partvx_y3, cufftComplex *partvx_y4, cufftComplex *partvx_y5, cufftComplex *partvy_x1, cufftComplex *partvy_x2, cufftComplex *partvy_x3, cufftComplex *partvy_x4, cufftComplex *partvy_x5, cufftComplex *partvy_z1, cufftComplex *partvy_z2, cufftComplex *partvy_z3, cufftComplex *partvy_z4, cufftComplex *partvy_z5, cufftComplex *partvz_y1, cufftComplex *partvz_y2, cufftComplex *partvz_y3, cufftComplex *partvz_y4, cufftComplex *partvz_y5 ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int area=nty*ntz; int ip=ix*area+iy*ntz+iz; float alpha=1.0; float w, s, t11, t12, t13; float sign_of_tao; if(iz>=0 && iz<ntz-1 && ix>=0 && ix<ntx-1 && iy>=0 && iy<nty-1) { phi_vx_xx[ip] = phi_vx_xx[ip] + dt*(-a_x[ix]*phi_vx_xx[ip] - b_x[ix]*( 
eta_p1[ip]*partvx_x1[ip].x/ntp+eta_p2[ip]*partvx_x2[ip].x/ntp+eta_p3[ip]*partvx_x3[ip].x/ntp+tao_p1[ip]*partvx_x4[ip].x/ntp+tao_p2[ip]*partvx_x5[ip].x/ntp)); phi_vz_zx[ip] = phi_vz_zx[ip] + dt*(-a_z[iz]*phi_vz_zx[ip] - b_z[iz]*( (eta_p1[ip]-2*eta_s1[ip])*partvz_z1[ip].x/ntp + (eta_p2[ip]-2*eta_s2[ip])*partvz_z2[ip].x/ntp + (eta_p3[ip]-2*eta_s3[ip])*partvz_z3[ip].x/ntp +(tao_p1[ip]-2*tao_s1[ip])*partvz_z4[ip].x/ntp + (tao_p2[ip]-2*tao_s2[ip])*partvz_z5[ip].x/ntp)); phi_vy_yx[ip] = phi_vy_yx[ip] + dt*(-a_y[iy]*phi_vy_yx[ip] - b_y[iy]*( (eta_p1[ip]-2*eta_s1[ip])*partvy_y1[ip].x/ntp + (eta_p2[ip]-2*eta_s2[ip])*partvy_y2[ip].x/ntp + (eta_p3[ip]-2*eta_s3[ip])*partvy_y3[ip].x/ntp +(tao_p1[ip]-2*tao_s1[ip])*partvy_y4[ip].x/ntp + (tao_p2[ip]-2*tao_s2[ip])*partvy_y5[ip].x/ntp)); phi_vz_zz[ip] = phi_vz_zz[ip] + dt*(-a_z[iz]*phi_vz_zz[ip] - b_z[iz]*( eta_p1[ip]*partvz_z1[ip].x/ntp+eta_p2[ip]*partvz_z2[ip].x/ntp+eta_p3[ip]*partvz_z3[ip].x/ntp+tao_p1[ip]*partvz_z4[ip].x/ntp+tao_p2[ip]*partvz_z5[ip].x/ntp)); phi_vx_xz[ip] = phi_vx_xz[ip] + dt*(-a_x[ix]*phi_vx_xz[ip] - b_x[ix]*( (eta_p1[ip]-2*eta_s1[ip])*partvx_x1[ip].x/ntp + (eta_p2[ip]-2*eta_s2[ip])*partvx_x2[ip].x/ntp + (eta_p3[ip]-2*eta_s3[ip])*partvx_x3[ip].x/ntp +(tao_p1[ip]-2*tao_s1[ip])*partvx_x4[ip].x/ntp + (tao_p2[ip]-2*tao_s2[ip])*partvx_x5[ip].x/ntp)); phi_vy_yz[ip] = phi_vy_yz[ip] + dt*(-a_y[iy]*phi_vy_yz[ip] - b_y[iy]*( (eta_p1[ip]-2*eta_s1[ip])*partvy_y1[ip].x/ntp + (eta_p2[ip]-2*eta_s2[ip])*partvy_y2[ip].x/ntp + (eta_p3[ip]-2*eta_s3[ip])*partvy_y3[ip].x/ntp +(tao_p1[ip]-2*tao_s1[ip])*partvy_y4[ip].x/ntp + (tao_p2[ip]-2*tao_s2[ip])*partvy_y5[ip].x/ntp)); phi_vy_yy[ip] = phi_vy_yy[ip] + dt*(-a_y[iy]*phi_vy_yy[ip] - b_y[iy]*( eta_p1[ip]*partvy_y1[ip].x/ntp+eta_p2[ip]*partvy_y2[ip].x/ntp+eta_p3[ip]*partvy_y3[ip].x/ntp+tao_p1[ip]*partvy_y4[ip].x/ntp+tao_p2[ip]*partvy_y5[ip].x/ntp)); phi_vx_xy[ip] = phi_vx_xy[ip] + dt*(-a_x[ix]*phi_vx_xy[ip] - b_x[ix]*( (eta_p1[ip]-2*eta_s1[ip])*partvx_x1[ip].x/ntp + (eta_p2[ip]-2*eta_s2[ip])*partvx_x2[ip].x/ntp + (eta_p3[ip]-2*eta_s3[ip])*partvx_x3[ip].x/ntp +(tao_p1[ip]-2*tao_s1[ip])*partvx_x4[ip].x/ntp + (tao_p2[ip]-2*tao_s2[ip])*partvx_x5[ip].x/ntp)); phi_vz_zy[ip] = phi_vz_zy[ip] + dt*(-a_z[iz]*phi_vz_zy[ip] - b_z[iz]*( (eta_p1[ip]-2*eta_s1[ip])*partvz_z1[ip].x/ntp + (eta_p2[ip]-2*eta_s2[ip])*partvz_z2[ip].x/ntp + (eta_p3[ip]-2*eta_s3[ip])*partvz_z3[ip].x/ntp +(tao_p1[ip]-2*tao_s1[ip])*partvz_z4[ip].x/ntp + (tao_p2[ip]-2*tao_s2[ip])*partvz_z5[ip].x/ntp)); pxx[ip] = pxx[ip] + dt*( 1.0/gammax[ix]*(eta_p1[ip]*partvx_x1[ip].x/ntp+eta_p2[ip]*partvx_x2[ip].x/ntp+eta_p3[ip]*partvx_x3[ip].x/ntp+tao_p1[ip]*partvx_x4[ip].x/ntp+tao_p2[ip]*partvx_x5[ip].x/ntp)+ 1.0/gammay[iy]*((eta_p1[ip]-2*eta_s1[ip])*partvy_y1[ip].x/ntp+(eta_p2[ip]-2*eta_s2[ip])*partvy_y2[ip].x/ntp+(eta_p3[ip]-2*eta_s3[ip])*partvy_y3[ip].x/ntp+(tao_p1[ip]-2*tao_s1[ip])*partvy_y4[ip].x/ntp+(tao_p2[ip]-2*tao_s2[ip])*partvy_y5[ip].x/ntp)+ 1.0/gammaz[iz]*((eta_p1[ip]-2*eta_s1[ip])*partvz_z1[ip].x/ntp+(eta_p2[ip]-2*eta_s2[ip])*partvz_z2[ip].x/ntp+(eta_p3[ip]-2*eta_s3[ip])*partvz_z3[ip].x/ntp+(tao_p1[ip]-2*tao_s1[ip])*partvz_z4[ip].x/ntp+(tao_p2[ip]-2*tao_s2[ip])*partvz_z5[ip].x/ntp) +(phi_vx_xx[ip]+phi_vy_yx[ip]+phi_vz_zx[ip]) ); pyy[ip] = pyy[ip] + dt*( 1.0/gammax[ix]*((eta_p1[ip]-2*eta_s1[ip])*partvx_x1[ip].x/ntp+(eta_p2[ip]-2*eta_s2[ip])*partvx_x2[ip].x/ntp+(eta_p3[ip]-2*eta_s3[ip])*partvx_x3[ip].x/ntp+(tao_p1[ip]-2*tao_s1[ip])*partvx_x4[ip].x/ntp+(tao_p2[ip]-2*tao_s2[ip])*partvx_x5[ip].x/ntp)+ 
1.0/gammay[iy]*(eta_p1[ip]*partvy_y1[ip].x/ntp+eta_p2[ip]*partvy_y2[ip].x/ntp+eta_p3[ip]*partvy_y3[ip].x/ntp+tao_p1[ip]*partvy_y4[ip].x/ntp+tao_p2[ip]*partvy_y5[ip].x/ntp)+ 1.0/gammaz[iz]*((eta_p1[ip]-2*eta_s1[ip])*partvz_z1[ip].x/ntp+(eta_p2[ip]-2*eta_s2[ip])*partvz_z2[ip].x/ntp+(eta_p3[ip]-2*eta_s3[ip])*partvz_z3[ip].x/ntp+(tao_p1[ip]-2*tao_s1[ip])*partvz_z4[ip].x/ntp+(tao_p2[ip]-2*tao_s2[ip])*partvz_z5[ip].x/ntp) +(phi_vx_xy[ip]+phi_vy_yy[ip]+phi_vz_zy[ip]) ); pzz[ip] = pzz[ip] + dt*( 1.0/gammax[ix]*((eta_p1[ip]-2*eta_s1[ip])*partvx_x1[ip].x/ntp+(eta_p2[ip]-2*eta_s2[ip])*partvx_x2[ip].x/ntp+(eta_p3[ip]-2*eta_s3[ip])*partvx_x3[ip].x/ntp+(tao_p1[ip]-2*tao_s1[ip])*partvx_x4[ip].x/ntp+(tao_p2[ip]-2*tao_s2[ip])*partvx_x5[ip].x/ntp)+ 1.0/gammay[iy]*((eta_p1[ip]-2*eta_s1[ip])*partvy_y1[ip].x/ntp+(eta_p2[ip]-2*eta_s2[ip])*partvy_y2[ip].x/ntp+(eta_p3[ip]-2*eta_s3[ip])*partvy_y3[ip].x/ntp+(tao_p1[ip]-2*tao_s1[ip])*partvy_y4[ip].x/ntp+(tao_p2[ip]-2*tao_s2[ip])*partvy_y5[ip].x/ntp)+ 1.0/gammaz[iz]*(eta_p1[ip]*partvz_z1[ip].x/ntp+eta_p2[ip]*partvz_z2[ip].x/ntp+eta_p3[ip]*partvz_z3[ip].x/ntp+tao_p1[ip]*partvz_z4[ip].x/ntp+tao_p2[ip]*partvz_z5[ip].x/ntp) +(phi_vx_xz[ip]+phi_vy_yz[ip]+phi_vz_zz[ip]) ); phi_vx_z[ip] = phi_vx_z[ip] + dt*(-0.5*(a_z[iz]+a_z[iz+1])*phi_vx_z[ip] - 0.5*(b_z[iz]+b_z[iz+1])*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvx_z1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvx_z2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvx_z3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvx_z4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvx_z5[ip].x)/ntp ) ); phi_vz_x[ip] = phi_vz_x[ip] + dt*(-0.5*(a_x[ix]+a_x[ix+1])*phi_vz_x[ip] - 0.5*(b_x[ix]+b_x[ix+1])*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvz_x1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvz_x2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvz_x3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvz_x4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvz_x5[ip].x)/ntp ) ); phi_vx_y[ip] = phi_vx_y[ip] + dt*(-0.5*(a_y[iy]+a_y[iy+1])*phi_vx_y[ip] - 0.5*(b_y[iy]+b_y[iy+1])*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvx_y1[ip].x)/ntp+ 
0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvx_y2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvx_y3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvx_y4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvx_y5[ip].x)/ntp ) ); phi_vy_x[ip] = phi_vy_x[ip] + dt*(-0.5*(a_x[ix]+a_x[ix+1])*phi_vy_x[ip] - 0.5*(b_x[ix]+b_x[ix+1])*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvy_x1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvy_x2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvy_x3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvy_x4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvy_x5[ip].x)/ntp ) ); phi_vy_z[ip] = phi_vy_z[ip] + dt*(-0.5*(a_z[iz]+a_z[iz+1])*phi_vy_z[ip] - 0.5*(b_z[iz]+b_z[iz+1])*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvy_z1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvy_z2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvy_z3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvy_z4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvy_z5[ip].x)/ntp ) ); phi_vz_y[ip] = phi_vz_y[ip] + dt*(-0.5*(a_y[iy]+a_y[iy+1])*phi_vz_y[ip] - 0.5*(b_y[iy]+b_y[iy+1])*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvz_y1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvz_y2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvz_y3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvz_y4[ip].x)/ntp+ 
0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvz_y5[ip].x)/ntp ) ); pxz[ip] = pxz[ip] + dt*( 1.0/(0.5*(gammaz[iz]+gammaz[iz+1]))*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvx_z1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvx_z2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvx_z3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvx_z4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvx_z5[ip].x)/ntp ) + phi_vx_z[ip] + 1.0/(0.5*(gammax[ix]+gammax[ix+1]))*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvz_x1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvz_x2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvz_x3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvz_x4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvz_x5[ip].x)/ntp ) + phi_vz_x[ip] ); pyz[ip] = pyz[ip] + dt*( 1.0/(0.5*(gammaz[iz]+gammaz[iz+1]))*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvy_z1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvy_z2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvy_z3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvy_z4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvy_z5[ip].x)/ntp ) + phi_vy_z[ip] + 1.0/(0.5*(gammay[iy]+gammay[iy+1]))*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvz_y1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvz_y2[ip].x)/ntp+ 
0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvz_y3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvz_y4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvz_y5[ip].x)/ntp ) + phi_vz_y[ip] ); pxy[ip] = pxy[ip] + dt*( 1.0/(0.5*(gammay[iy]+gammay[iy+1]))*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvx_y1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvx_y2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvx_y3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvx_y4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvx_y5[ip].x)/ntp ) + phi_vx_y[ip] + 1.0/(0.5*(gammax[ix]+gammax[ix+1]))*( 0.125*(eta_s1[ip]+eta_s1[ip+1]+eta_s1[ip+ntz]+eta_s1[ip+1+ntz]+eta_s1[ip+nty*ntz]+eta_s1[ip+nty*ntz+1]+eta_s1[ip+nty*ntz+ntz]+eta_s1[ip+nty*ntz+1+ntz])*(partvy_x1[ip].x)/ntp+ 0.125*(eta_s2[ip]+eta_s2[ip+1]+eta_s2[ip+ntz]+eta_s2[ip+1+ntz]+eta_s2[ip+nty*ntz]+eta_s2[ip+nty*ntz+1]+eta_s2[ip+nty*ntz+ntz]+eta_s2[ip+nty*ntz+1+ntz])*(partvy_x2[ip].x)/ntp+ 0.125*(eta_s3[ip]+eta_s3[ip+1]+eta_s3[ip+ntz]+eta_s3[ip+1+ntz]+eta_s3[ip+nty*ntz]+eta_s3[ip+nty*ntz+1]+eta_s3[ip+nty*ntz+ntz]+eta_s3[ip+nty*ntz+1+ntz])*(partvy_x3[ip].x)/ntp+ 0.125*(tao_s1[ip]+tao_s1[ip+1]+tao_s1[ip+ntz]+tao_s1[ip+1+ntz]+tao_s1[ip+nty*ntz]+tao_s1[ip+nty*ntz+1]+tao_s1[ip+nty*ntz+ntz]+tao_s1[ip+nty*ntz+1+ntz])*(partvy_x4[ip].x)/ntp+ 0.125*(tao_s2[ip]+tao_s2[ip+1]+tao_s2[ip+ntz]+tao_s2[ip+1+ntz]+tao_s2[ip+nty*ntz]+tao_s2[ip+nty*ntz+1]+tao_s2[ip+nty*ntz+ntz]+tao_s2[ip+nty*ntz+1+ntz])*(partvy_x5[ip].x)/ntp ) + phi_vy_x[ip] ); } __syncthreads(); } __global__ void cuda_kernel_calculate_v ( int ntx, int nty, int ntz, int ntp, float dt, float *rho, float *vx, float *vz, float *vy, float *gammax, float *a_x, float *b_x, float *gammay, float *a_y, float *b_y, float *gammaz, float *a_z, float *b_z, float *phi_pxx_x, float *phi_pxy_y, float *phi_pxz_z, float *phi_pxy_x, float *phi_pyy_y, float *phi_pyz_z, float *phi_pxz_x, float *phi_pyz_y, float *phi_pzz_z, cufftComplex *partx1, cufftComplex *partz1, cufftComplex *party1, cufftComplex *partx2, cufftComplex *partz2, cufftComplex *party2, cufftComplex *partx3, cufftComplex *partz3, cufftComplex *party3 ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int ip=ix*nty*ntz+iy*ntz+iz; if(iz>=0 && iz<ntz-1 && ix>=0 && ix<ntx-1&& iy>=0 && iy<nty-1) { phi_pxx_x[ip] = phi_pxx_x[ip] + dt*(-0.5*(a_x[ix]+a_x[ix+1])*phi_pxx_x[ip]-0.5*(b_x[ix]+b_x[ix+1])*partx1[ip].x/ntp); phi_pxy_y[ip] = phi_pxy_y[ip] + dt*(-a_y[iy]*phi_pxy_y[ip]-b_y[iy]*party1[ip].x/ntp); phi_pxz_z[ip] = 
phi_pxz_z[ip] + dt*(-a_z[iz]*phi_pxz_z[ip]-b_z[iz]*partz1[ip].x/ntp); phi_pxy_x[ip] = phi_pxy_x[ip] + dt*(-a_x[ix]*phi_pxy_x[ip]-b_x[ix]*partx3[ip].x/ntp); phi_pyy_y[ip] = phi_pyy_y[ip] + dt*(-0.5*(a_y[iy]+a_y[iy+1])*phi_pyy_y[ip]-0.5*(b_y[iy]+b_y[iy+1])*party3[ip].x/ntp); phi_pyz_z[ip] = phi_pyz_z[ip] + dt*(-a_z[iz]*phi_pyz_z[ip]-b_z[iz]*partz3[ip].x/ntp); phi_pxz_x[ip] = phi_pxz_x[ip] + dt*(-a_x[ix]*phi_pxz_x[ip]-b_x[ix]*partx2[ip].x/ntp); phi_pyz_y[ip] = phi_pyz_y[ip] + dt*(-a_y[iy]*phi_pyz_y[ip]-b_y[iy]*party2[ip].x/ntp); phi_pzz_z[ip] = phi_pzz_z[ip] + dt*(-0.5*(a_z[iz]+a_z[iz+1])*phi_pzz_z[ip]-0.5*(b_z[iz]+b_z[iz+1])*partz2[ip].x/ntp); vx[ip] = vx[ip] + dt/(0.5*(rho[ip]+rho[ip+nty*ntz]))* ( 1.0/(0.5*(gammax[ix]+gammax[ix+1]))*partx1[ip].x/ntp + phi_pxx_x[ip] + 1.0/gammay[iy]*party1[ip].x/ntp + phi_pxy_y[ip] + 1.0/gammaz[iz]*partz1[ip].x/ntp + phi_pxz_z[ip] ); vy[ip] = vy[ip] + dt/(0.5*(rho[ip]+rho[ip+ntz]))* ( 1.0/gammax[ix]*partx3[ip].x/ntp + phi_pxy_x[ip] + 1.0/(0.5*(gammay[iy]+gammay[iy+1]))*party3[ip].x/ntp + phi_pyy_y[ip] + 1.0/gammaz[iz]*partz3[ip].x/ntp + phi_pyz_z[ip] ); vz[ip] = vz[ip] + dt/(0.5*(rho[ip]+rho[ip+1]))* ( 1.0/gammax[ix]*partx2[ip].x/ntp + phi_pxz_x[ip] + 1.0/gammay[iy]*party2[ip].x/ntp + phi_pyz_y[ip] + 1.0/(0.5*(gammaz[iz]+gammaz[iz+1]))*partz2[ip].x/ntp + phi_pzz_z[ip] ); } __syncthreads(); } __global__ void cuda_kernel_get_dv_renewed ( int ntx, int nty, int ntz, cufftComplex *outx, cufftComplex *outy, cufftComplex *outz, cufftComplex *dvx, cufftComplex *dvy, cufftComplex *dvz ) { int bx=blockIdx.x; int by=blockIdx.y; int tx=threadIdx.x; int ty=threadIdx.y; int iy=by*BLOCK_HEIGHT+ty; int colt=bx*BLOCK_WIDTH+tx; int iz=colt/ntx; int ix=colt-iz*ntx; int ip=ix*nty*ntz+iy*ntz+iz; if(iz>=0 && iz<ntz && ix>=0 && ix<ntx && iy>=0 && iy<nty) { dvx[ip].x=outx[ip].x; dvx[ip].y=outx[ip].y; dvz[ip].x=outz[ip].x; dvz[ip].y=outz[ip].y; dvy[ip].x=outy[ip].x; dvy[ip].y=outy[ip].y; } __syncthreads(); } extern "C" void cuda_forward_acoustic_3D ( int myid, int is, int nt, int ntx, int nty, int ntz, int ntp, int nx, int ny, int nz, int pml, float dx, float dy, float dz, float dt, float f0, float w0, float velp_max, float *rik, float *velp, float *gama_p,float *vels, float *gama_s, float *rho, struct Source ss[], struct MultiGPU plan[], int GPU_N, int rnmax, int rnx_max, int rny_max, int dr ) { int i, it, ix, iy, iz; size_t size_model=sizeof(float)*ntp; char filename[150]; FILE *fp; // define multistream variable Multistream plans[GPU_N]; float *tmp; tmp=(float*)malloc(sizeof(float)*ntp); // block size 16*16; // grid size ntx/16*ntz/16 dim3 dimBlock(BLOCK_WIDTH,BLOCK_HEIGHT); dim3 dimGrid((ntx*ntz+dimBlock.x-1)/dimBlock.x,(nty+dimBlock.y-1)/dimBlock.y); for(i=0;i<GPU_N;i++) { cudaSetDevice(i); // define streaming cufft handle (very important!!!) 
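		// Binding both cuFFT plans to the per-GPU stream below means every
		// cufftExecC2C in the time loop is enqueued on plans[i].stream, so the
		// FFTs and the hand-written kernels launched with <<<...,0,plans[i].stream>>>
		// execute in issue order on each device without extra synchronization,
		// while the GPU_N devices still run concurrently with one another.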
cudaStreamCreate(&plans[i].stream); cufftSetStream(plan[i].PLAN_FORWARD, plans[i].stream); cufftSetStream(plan[i].PLAN_BACKWARD, plans[i].stream); } for(i=0;i<GPU_N;i++) { cudaSetDevice(i); // copy the vectors from the host to the device cudaMemcpyAsync(plan[i].d_r_ix,ss[is+i].r_ix,sizeof(float)*rnmax,cudaMemcpyHostToDevice,plans[i].stream); cudaMemcpyAsync(plan[i].d_r_iy,ss[is+i].r_iy,sizeof(float)*rnmax,cudaMemcpyHostToDevice,plans[i].stream); cudaMemcpyAsync(plan[i].d_velp,velp,size_model,cudaMemcpyHostToDevice,plans[i].stream); cudaMemcpyAsync(plan[i].d_gama_p,gama_p,size_model,cudaMemcpyHostToDevice,plans[i].stream); cudaMemcpyAsync(plan[i].d_vels,vels,size_model,cudaMemcpyHostToDevice,plans[i].stream); cudaMemcpyAsync(plan[i].d_gama_s,gama_s,size_model,cudaMemcpyHostToDevice,plans[i].stream); cudaMemcpyAsync(plan[i].d_rho,rho,size_model,cudaMemcpyHostToDevice,plans[i].stream); cudaMemcpyAsync(plan[i].d_rik,rik,sizeof(float)*nt,cudaMemcpyHostToDevice,plans[i].stream); } for(i=0;i<GPU_N;i++) { cudaSetDevice(i); //===============define wavenumber variables============// cuda_kernel_wavenumber<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dx, dy, dz, plan[i].d_kx, plan[i].d_ky, plan[i].d_kz, plan[i].d_k, plan[i].d_kvx_x, plan[i].d_kvx_z, plan[i].d_kvx_y, plan[i].d_kvz_x, plan[i].d_kvz_z, plan[i].d_kvz_y, plan[i].d_kvy_x, plan[i].d_kvy_z, plan[i].d_kvy_y ); //===============define viscoacoustic variables============// cuda_kernel_viscoacoustic_parameters<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dx, dy, dz, dt, w0, plan[i].d_velp, plan[i].d_vels, plan[i].d_rho, plan[i].d_k, plan[i].d_gama_p, plan[i].d_gama_s, plan[i].d_Ap1, plan[i].d_Ap2, plan[i].d_Ap3, plan[i].d_tao_p1, plan[i].d_tao_p2, plan[i].d_eta_p1, plan[i].d_eta_p2, plan[i].d_eta_p3, plan[i].d_tao_s1, plan[i].d_tao_s2, plan[i].d_eta_s1, plan[i].d_eta_s2, plan[i].d_eta_s3 ); //===============PML parameters============// cuda_kernel_pml_parameters<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, pml, dx, dy, dz, dt, f0, velp_max, plan[i].d_gammax, plan[i].d_alphax, plan[i].d_Omegax, plan[i].d_a_x, plan[i].d_b_x, plan[i].d_gammay, plan[i].d_alphay, plan[i].d_Omegay, plan[i].d_a_y, plan[i].d_b_y, plan[i].d_gammaz, plan[i].d_alphaz, plan[i].d_Omegaz, plan[i].d_a_z, plan[i].d_b_z ); //===============initialization============// cuda_kernel_initialization<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, plan[i].d_vx, plan[i].d_vy, plan[i].d_vz, plan[i].d_pxx, plan[i].d_pyy, plan[i].d_pzz, plan[i].d_pxy, plan[i].d_pyz, plan[i].d_pxz, plan[i].d_phi_vx_xx, plan[i].d_phi_vz_zx, plan[i].d_phi_vy_yx, plan[i].d_phi_vx_xy, plan[i].d_phi_vz_zy, plan[i].d_phi_vy_yy, plan[i].d_phi_vx_xz, plan[i].d_phi_vz_zz, plan[i].d_phi_vy_yz, plan[i].d_phi_vx_z, plan[i].d_phi_vz_x,plan[i].d_phi_vx_y, plan[i].d_phi_vy_x,plan[i].d_phi_vy_z, plan[i].d_phi_vz_y, plan[i].d_phi_pxx_x, plan[i].d_phi_pxy_y, plan[i].d_phi_pxz_z, plan[i].d_phi_pxy_x, plan[i].d_phi_pyy_y, plan[i].d_phi_pyz_z, plan[i].d_phi_pxz_x, plan[i].d_phi_pyz_y, plan[i].d_phi_pzz_z, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_partx1, plan[i].d_partz1, plan[i].d_party1, plan[i].d_partx2, plan[i].d_partz2, plan[i].d_party2, plan[i].d_partx3, plan[i].d_partz3, plan[i].d_party3, plan[i].d_partvx_x1, plan[i].d_partvx_x2, plan[i].d_partvx_x3, plan[i].d_partvx_x4, plan[i].d_partvx_x5, plan[i].d_partvz_z1, plan[i].d_partvz_z2, plan[i].d_partvz_z3, plan[i].d_partvz_z4, plan[i].d_partvz_z5, plan[i].d_partvy_y1, plan[i].d_partvy_y2, plan[i].d_partvy_y3, 
plan[i].d_partvy_y4, plan[i].d_partvy_y5, plan[i].d_partvx_y1, plan[i].d_partvx_y2, plan[i].d_partvx_y3, plan[i].d_partvx_y4, plan[i].d_partvx_y5, plan[i].d_partvy_x1, plan[i].d_partvy_x2, plan[i].d_partvy_x3, plan[i].d_partvy_x4, plan[i].d_partvy_x5, plan[i].d_partvy_z1, plan[i].d_partvy_z2, plan[i].d_partvy_z3, plan[i].d_partvy_z4, plan[i].d_partvy_z5, plan[i].d_partvz_y1, plan[i].d_partvz_y2, plan[i].d_partvz_y3, plan[i].d_partvz_y4, plan[i].d_partvz_y5, plan[i].d_partvx_z1, plan[i].d_partvx_z2, plan[i].d_partvx_z3, plan[i].d_partvx_z4, plan[i].d_partvx_z5, plan[i].d_partvz_x1, plan[i].d_partvz_x2, plan[i].d_partvz_x3, plan[i].d_partvz_x4, plan[i].d_partvz_x5 ); } //===================time begin===========================// //===================time begin===========================// for(it=0;it<nt;it++) { for(i=0;i<GPU_N;i++) { cudaSetDevice(i); //===============calculate k-space spatial derivative============// //===============calculate k-space spatial derivative============// cuda_kernel_p_real_to_complex<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, plan[i].d_pxx, plan[i].d_pyy, plan[i].d_pzz, plan[i].d_pxy, plan[i].d_pxz, plan[i].d_pyz, plan[i].d_in_pxx, plan[i].d_in_pyy, plan[i].d_in_pzz, plan[i].d_in_pxy, plan[i].d_in_pxz, plan[i].d_in_pyz ); cufftExecC2C(plan[i].PLAN_FORWARD,plan[i].d_in_pxx,plan[i].d_outpxx,CUFFT_FORWARD); cufftExecC2C(plan[i].PLAN_FORWARD,plan[i].d_in_pyy,plan[i].d_outpyy,CUFFT_FORWARD); cufftExecC2C(plan[i].PLAN_FORWARD,plan[i].d_in_pzz,plan[i].d_outpzz,CUFFT_FORWARD); cufftExecC2C(plan[i].PLAN_FORWARD,plan[i].d_in_pxy,plan[i].d_outpxy,CUFFT_FORWARD); cufftExecC2C(plan[i].PLAN_FORWARD,plan[i].d_in_pxz,plan[i].d_outpxz,CUFFT_FORWARD); cufftExecC2C(plan[i].PLAN_FORWARD,plan[i].d_in_pyz,plan[i].d_outpyz,CUFFT_FORWARD); cuda_kernel_operate_k_v<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outpxx, plan[i].d_outpxy, plan[i].d_outpxz, plan[i].d_inx, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvz_x, plan[i].d_kvy_y, plan[i].d_kvz_z ); cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partx1, CUFFT_INVERSE); //dpxxdx cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_party1, CUFFT_INVERSE); //dpxydy cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partz1, CUFFT_INVERSE); //dpxzdz cuda_kernel_operate_k_v<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outpxz, plan[i].d_outpyz, plan[i].d_outpzz, plan[i].d_inx, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvx_x, plan[i].d_kvy_y, plan[i].d_kvx_z ); cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partx2, CUFFT_INVERSE); //dpxzdx cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_party2, CUFFT_INVERSE); //dpyzdy cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partz2, CUFFT_INVERSE); //dpzzdz cuda_kernel_operate_k_v<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outpxy, plan[i].d_outpyy, plan[i].d_outpyz, plan[i].d_inx, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvx_x, plan[i].d_kvz_y, plan[i].d_kvz_z ); cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partx3, CUFFT_INVERSE); //dpxydx cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_party3, CUFFT_INVERSE); //dpyydy cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partz3, CUFFT_INVERSE); //dpyzdz //===================calculate vx vy and vz==================// //===================calculate vx vy and vz==================// cuda_kernel_calculate_v<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, ntp, dt, 
plan[i].d_rho, plan[i].d_vx, plan[i].d_vz, plan[i].d_vy, plan[i].d_gammax, plan[i].d_a_x, plan[i].d_b_x, plan[i].d_gammay, plan[i].d_a_y, plan[i].d_b_y, plan[i].d_gammaz, plan[i].d_a_z, plan[i].d_b_z, plan[i].d_phi_pxx_x, plan[i].d_phi_pxy_y, plan[i].d_phi_pxz_z, plan[i].d_phi_pxy_x, plan[i].d_phi_pyy_y, plan[i].d_phi_pyz_z, plan[i].d_phi_pxz_x, plan[i].d_phi_pyz_y, plan[i].d_phi_pzz_z, plan[i].d_partx1, plan[i].d_partz1, plan[i].d_party1, plan[i].d_partx2, plan[i].d_partz2, plan[i].d_party2, plan[i].d_partx3, plan[i].d_partz3, plan[i].d_party3 ); //===============calculate k-space spatial derivatives============// //===============calculate k-space spatial derivatives============// cuda_kernel_vxvyvz_real_to_complex<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, plan[i].d_vx, plan[i].d_vy, plan[i].d_vz, plan[i].d_inx, plan[i].d_iny, plan[i].d_inz ); cufftExecC2C(plan[i].PLAN_FORWARD,plan[i].d_inx,plan[i].d_outx,CUFFT_FORWARD); cufftExecC2C(plan[i].PLAN_FORWARD,plan[i].d_iny,plan[i].d_outy,CUFFT_FORWARD); cufftExecC2C(plan[i].PLAN_FORWARD,plan[i].d_inz,plan[i].d_outz,CUFFT_FORWARD); //////////////////////////////////// sigma xx yy zz dispersion_3 parts ////////////////////////// cuda_kernel_operate_k_pxxpyypzz<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_inx, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvx_x, plan[i].d_kvy_y, plan[i].d_kvz_z, plan[i].d_Ap1, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_x1, CUFFT_INVERSE); //dvxdx, k^-0.5 cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_y1, CUFFT_INVERSE); //dvydy cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_z1, CUFFT_INVERSE); //dvzdz cuda_kernel_operate_k_pxxpyypzz<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_inx, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvx_x, plan[i].d_kvy_y, plan[i].d_kvz_z, plan[i].d_Ap2, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_x2, CUFFT_INVERSE); //dvxdx, 1* cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_y2, CUFFT_INVERSE); //dvydy cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_z2, CUFFT_INVERSE); //dvzdz cuda_kernel_operate_k_pxxpyypzz<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_inx, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvx_x, plan[i].d_kvy_y, plan[i].d_kvz_z, plan[i].d_Ap3, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. 
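			// The cuda_kernel_operate_k_* launches in this loop multiply the velocity
			// spectra by a complex wavenumber operator and a real weight (passed in as
			// k2), i.e. they form k2 * k_op * V(k) with the usual complex product; the
			// inverse cufftExecC2C calls that follow are unnormalized, which is why
			// every part* buffer is divided by ntp when it is consumed in
			// cuda_kernel_calculate_p / cuda_kernel_calculate_v.
			/* Illustrative sketch (not compiled here) of that complex multiply,
			   assuming k_op already stores the i*k-type factor as a cufftComplex:

			       __device__ cufftComplex spectral_mul(float w, cufftComplex k_op, cufftComplex v_hat)
			       {
			           cufftComplex r;
			           r.x = w*(k_op.x*v_hat.x - k_op.y*v_hat.y);   // real part of w*k_op*v_hat
			           r.y = w*(k_op.x*v_hat.y + k_op.y*v_hat.x);   // imaginary part
			           return r;
			       }
			*/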
cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_x3, CUFFT_INVERSE); //dvxdx, k^0.5* cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_y3, CUFFT_INVERSE); //dvydy cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_z3, CUFFT_INVERSE); //dvzdz //////////////////////////////////// sigma xz zx dispersion_3 parts ////////////////////////// cuda_kernel_operate_k_pxz<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvz, plan[i].d_inx, plan[i].d_inz, plan[i].d_kvx_z, plan[i].d_kvz_x, plan[i].d_Ap1, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_z1, CUFFT_INVERSE); //dvxdz, k^-0.5* cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_x1, CUFFT_INVERSE); //dvzdx cuda_kernel_operate_k_pxz<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvz, plan[i].d_inx, plan[i].d_inz, plan[i].d_kvx_z, plan[i].d_kvz_x, plan[i].d_Ap2, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_z2, CUFFT_INVERSE); //dvxdz, 1* cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_x2, CUFFT_INVERSE); //dvzdx cuda_kernel_operate_k_pxz<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvz, plan[i].d_inx, plan[i].d_inz, plan[i].d_kvx_z, plan[i].d_kvz_x, plan[i].d_Ap3, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_z3, CUFFT_INVERSE); //dvxdz, k^0.5* cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_x3, CUFFT_INVERSE); //dvzdx //////////////////////////////////// sigma xy yx dispersion_3 parts ////////////////////////// cuda_kernel_operate_k_pxy<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outy, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_inx, plan[i].d_iny, plan[i].d_kvx_y, plan[i].d_kvy_x, plan[i].d_Ap1, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_y1, CUFFT_INVERSE); //dvxdy, k^-0.5* cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_x1, CUFFT_INVERSE); //dvydx cuda_kernel_operate_k_pxy<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outy, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_inx, plan[i].d_iny, plan[i].d_kvx_y, plan[i].d_kvy_x, plan[i].d_Ap2, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_y2, CUFFT_INVERSE); //dvxdy, 1* cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_x2, CUFFT_INVERSE); //dvydx cuda_kernel_operate_k_pxy<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outy, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_inx, plan[i].d_iny, plan[i].d_kvx_y, plan[i].d_kvy_x, plan[i].d_Ap3, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. 
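			// The three dispersion passes per stress component differ only in the
			// weight volume handed to the kernel as k2 (d_Ap1, d_Ap2, d_Ap3), which the
			// inline comments identify with the k^-0.5, 1 and k^+0.5 terms of the
			// stress update; each pass reuses the d_in* arrays as scratch and writes
			// its own d_partv*_1..3 buffer through a separate inverse FFT.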
cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_y3, CUFFT_INVERSE); //dvxdy, k^0.5 cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_x3, CUFFT_INVERSE); //dvydx //////////////////////////////////// sigma yz zy dispersion_3 parts ////////////////////////// cuda_kernel_operate_k_pyz<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvy_z, plan[i].d_kvz_y, plan[i].d_Ap1, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_z1, CUFFT_INVERSE); //dvydz, k^-0.5 cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_y1, CUFFT_INVERSE); //dvzdy cuda_kernel_operate_k_pyz<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvy_z, plan[i].d_kvz_y, plan[i].d_Ap2, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_z2, CUFFT_INVERSE); //dvydz, 1* cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_y2, CUFFT_INVERSE); //dvzdy cuda_kernel_operate_k_pyz<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvy_z, plan[i].d_kvz_y, plan[i].d_Ap3, 0 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_z3, CUFFT_INVERSE); //dvydz, k^0.5 cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_y3, CUFFT_INVERSE); //dvzdy //////////////////////////////////// sigma xx yy zz amplitude-loss_2 parts ////////////////////////// cuda_kernel_operate_k_pxxpyypzz<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_inx, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvx_x, plan[i].d_kvy_y, plan[i].d_kvz_z, plan[i].d_Ap1, 1 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_x4, CUFFT_INVERSE); //dvxdx, k^-0.5 cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_y4, CUFFT_INVERSE); //dvydy cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_z4, CUFFT_INVERSE); //dvzdz cuda_kernel_operate_k_pxxpyypzz<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_inx, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvx_x, plan[i].d_kvy_y, plan[i].d_kvz_z, plan[i].d_Ap2, 1 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. 
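			// From here on the passes are launched with AorB=1: instead of operating
			// on V(k) directly, cuda_kernel_operate_k_* first forms the first-order
			// time derivative of the velocity spectrum from the current transform
			// (d_out*) and the previous step saved in d_dvx/d_dvy/d_dvz by
			// cuda_kernel_get_dv_renewed, and then applies the same wavenumber operator.
			/* Sketch of the AorB==1 branch (same complex multiply as before):

			       tmp.x = (outx[ip].x - dvx[ip].x)/dt;   // Re{ dV/dt } in k-space
			       tmp.y = (outx[ip].y - dvx[ip].y)/dt;   // Im{ dV/dt }
			       inx[ip].x = k2[ip]*(k_x[iptt].x*tmp.x - k_x[iptt].y*tmp.y);
			       inx[ip].y = k2[ip]*(k_x[iptt].x*tmp.y + k_x[iptt].y*tmp.x);
			*/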
cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_x5, CUFFT_INVERSE); //dvxdx, 1* cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_y5, CUFFT_INVERSE); //dvydy cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_z5, CUFFT_INVERSE); //dvzdz //////////////////////////////////// sigma xz zx amplitude-loss_2 parts ////////////////////////// cuda_kernel_operate_k_pxz<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvz, plan[i].d_inx, plan[i].d_inz, plan[i].d_kvx_z, plan[i].d_kvz_x, plan[i].d_Ap1, 1 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_z4, CUFFT_INVERSE); //dvxdz, k^-0.5* cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_x4, CUFFT_INVERSE); //dvzdx cuda_kernel_operate_k_pxz<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvz, plan[i].d_inx, plan[i].d_inz, plan[i].d_kvx_z, plan[i].d_kvz_x, plan[i].d_Ap2, 1 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_z5, CUFFT_INVERSE); //dvxdz, 1* cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_x5, CUFFT_INVERSE); //dvzdx //////////////////////////////////// sigma xy yx amplitude-loss_2 parts ////////////////////////// cuda_kernel_operate_k_pxy<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outy, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_inx, plan[i].d_iny, plan[i].d_kvx_y, plan[i].d_kvy_x, plan[i].d_Ap1, 1 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_y4, CUFFT_INVERSE); //dvxdy, k^-0.5* cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_x4, CUFFT_INVERSE); //dvydx cuda_kernel_operate_k_pxy<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outx, plan[i].d_outy, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_inx, plan[i].d_iny, plan[i].d_kvx_y, plan[i].d_kvy_x, plan[i].d_Ap2, 1 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inx,plan[i].d_partvx_y5, CUFFT_INVERSE); //dvxdy, 1* cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_x5, CUFFT_INVERSE); //dvydx //////////////////////////////////// sigma yz zy amplitude-loss_2 parts ////////////////////////// cuda_kernel_operate_k_pyz<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvy_z, plan[i].d_kvz_y, plan[i].d_Ap1, 1 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. 
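			// Note on indexing inside the operate_k kernels: the operator volumes are
			// read with three different orderings (ip, ipt = iz*nty*ntx+ix*nty+iy,
			// iptt = iy*ntz*ntx+iz*ntx+ix) depending on which spatial axis the
			// derivative acts along; presumably this matches how cuda_kernel_wavenumber
			// lays out d_kvx_*, d_kvy_* and d_kvz_* and encodes the half-cell
			// staggering of the velocity/stress grids.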
cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_z4, CUFFT_INVERSE); //dvydz, k^-0.5 cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_y4, CUFFT_INVERSE); //dvzdy cuda_kernel_operate_k_pyz<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, dt, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvy, plan[i].d_dvz, plan[i].d_iny, plan[i].d_inz, plan[i].d_kvy_z, plan[i].d_kvz_y, plan[i].d_Ap2, 1 ); //0 or 1 here stand for parameter AorB, where 1 stand for the first order time derivative of k-space variables. cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_iny,plan[i].d_partvy_z5, CUFFT_INVERSE); //dvydz, 1* cufftExecC2C(plan[i].PLAN_BACKWARD, plan[i].d_inz,plan[i].d_partvz_y5, CUFFT_INVERSE); //dvzdy //===================calculate p ==================// //===================calculate p ==================// cuda_kernel_calculate_p<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, ntp, dt, plan[i].d_pxx, plan[i].d_pyy, plan[i].d_pzz, plan[i].d_pxy, plan[i].d_pxz, plan[i].d_pyz, plan[i].d_tao_p1, plan[i].d_tao_s1,plan[i].d_tao_p2,plan[i].d_tao_s2,plan[i].d_eta_p1,plan[i].d_eta_s1,plan[i].d_eta_p2,plan[i].d_eta_s2,plan[i].d_eta_p3,plan[i].d_eta_s3, plan[i].d_gammax, plan[i].d_a_x, plan[i].d_b_x, plan[i].d_gammay, plan[i].d_a_y, plan[i].d_b_y, plan[i].d_gammaz, plan[i].d_a_z, plan[i].d_b_z, plan[i].d_phi_vx_xx, plan[i].d_phi_vz_zx, plan[i].d_phi_vy_yx, plan[i].d_phi_vx_xy, plan[i].d_phi_vz_zy, plan[i].d_phi_vy_yy, plan[i].d_phi_vx_xz, plan[i].d_phi_vz_zz, plan[i].d_phi_vy_yz, plan[i].d_phi_vx_z, plan[i].d_phi_vz_x, plan[i].d_phi_vx_y, plan[i].d_phi_vy_x, plan[i].d_phi_vy_z, plan[i].d_phi_vz_y, plan[i].d_partvx_x1, plan[i].d_partvx_x2, plan[i].d_partvx_x3, plan[i].d_partvx_x4, plan[i].d_partvx_x5, plan[i].d_partvz_z1, plan[i].d_partvz_z2, plan[i].d_partvz_z3, plan[i].d_partvz_z4, plan[i].d_partvz_z5, plan[i].d_partvy_y1, plan[i].d_partvy_y2, plan[i].d_partvy_y3, plan[i].d_partvy_y4, plan[i].d_partvy_y5, plan[i].d_partvx_z1, plan[i].d_partvx_z2, plan[i].d_partvx_z3, plan[i].d_partvx_z4, plan[i].d_partvx_z5, plan[i].d_partvz_x1, plan[i].d_partvz_x2, plan[i].d_partvz_x3, plan[i].d_partvz_x4, plan[i].d_partvz_x5, plan[i].d_partvx_y1, plan[i].d_partvx_y2, plan[i].d_partvx_y3, plan[i].d_partvx_y4, plan[i].d_partvx_y5, plan[i].d_partvy_x1, plan[i].d_partvy_x2, plan[i].d_partvy_x3, plan[i].d_partvy_x4, plan[i].d_partvy_x5, plan[i].d_partvy_z1, plan[i].d_partvy_z2, plan[i].d_partvy_z3, plan[i].d_partvy_z4, plan[i].d_partvy_z5, plan[i].d_partvz_y1, plan[i].d_partvz_y2, plan[i].d_partvz_y3, plan[i].d_partvz_y4, plan[i].d_partvz_y5 ); cuda_kernel_forward_IO<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, ntp, pml, nt, it, dx, dy, dz, dt, ss[is+i].s_ix, ss[is+i].s_iy, ss[is+i].s_iz, plan[i].d_rik, plan[i].d_record, plan[i].d_record2, plan[i].d_record3, plan[i].d_r_ix, plan[i].d_r_iy, ss[is+i].r_iz, rnmax, rnx_max, rny_max, dr, ss[is+i].r_n, plan[i].d_pxx, plan[i].d_pyy, plan[i].d_pzz, plan[i].d_vx, plan[i].d_vy, plan[i].d_vz ); cuda_kernel_get_dv_renewed<<<dimGrid,dimBlock,0,plans[i].stream>>> ( ntx, nty, ntz, plan[i].d_outx, plan[i].d_outy, plan[i].d_outz, plan[i].d_dvx, plan[i].d_dvy, plan[i].d_dvz ); //================updating wavefields==================// if(it%200==0 && myid==0 && i==0) { printf("forward using real model,is=%2d,it=%4d\n",is+i,it); /* cudaMemcpyAsync(tmp,plan[i].d_p2,size_model,cudaMemcpyDeviceToHost,plans[i].stream); sprintf(filename,"./output/shot%dsnap%d.bin",is+i,it); fp=fopen(filename,"wb"); for(ix=pml;ix<ntx-pml;ix++) for(iy=pml; 
iy<nty-pml; iy++) for(iz=pml; iz<ntz-pml; iz++) { fwrite(&tmp[ix*nty*ntz+iy*ntz+iz],sizeof(float),1,fp); } fclose(fp); */ } } //end of GPU_N loop } //end of time loop for(i=0;i<GPU_N;i++) { cudaSetDevice(i); cudaMemcpyAsync(plan[i].record, plan[i].d_record,sizeof(float)*nt*rnmax, cudaMemcpyDeviceToHost,plans[i].stream); cudaMemcpyAsync(plan[i].record2, plan[i].d_record2,sizeof(float)*nt*rnmax, cudaMemcpyDeviceToHost,plans[i].stream); cudaMemcpyAsync(plan[i].record3, plan[i].d_record3,sizeof(float)*nt*rnmax, cudaMemcpyDeviceToHost,plans[i].stream); cudaDeviceSynchronize(); cudaStreamDestroy(plans[i].stream); } free(tmp); } //========================================================= // Initializating the memory for variables in device // ======================================================= extern "C" void cuda_Host_initialization ( int ntp, int nt, int rnmax, struct MultiGPU plan[], int GPU_N ) { int i; for(i=0;i<GPU_N;i++) { cudaSetDevice(i); memset(plan[i].pxx, 0, ntp*sizeof(float)); memset(plan[i].pyy, 0, ntp*sizeof(float)); memset(plan[i].pzz, 0, ntp*sizeof(float)); memset(plan[i].pxy, 0, ntp*sizeof(float)); memset(plan[i].pxz, 0, ntp*sizeof(float)); memset(plan[i].pyz, 0, ntp*sizeof(float)); memset(plan[i].vx, 0, ntp*sizeof(float)); memset(plan[i].vy, 0, ntp*sizeof(float)); memset(plan[i].vz, 0, ntp*sizeof(float)); memset(plan[i].record, 0, nt*rnmax*sizeof(float)); memset(plan[i].record2, 0, nt*rnmax*sizeof(float)); memset(plan[i].record3, 0, nt*rnmax*sizeof(float)); } } //=================================================// // Allocate the memory for variables in device // ================================================// extern "C" void cuda_Device_malloc ( int ntx, int nty, int ntz, int ntp, int nx, int ny, int nz, int nt, int rnmax, struct MultiGPU plan[], int GPU_N ) { int i; size_t size_model=sizeof(float)*ntp; for(i=0;i<GPU_N;i++) { cudaSetDevice(i); cufftPlan3d(&plan[i].PLAN_FORWARD,ntx, nty, ntz,CUFFT_C2C); cufftPlan3d(&plan[i].PLAN_BACKWARD,ntx, nty, ntz,CUFFT_C2C); //===========Host======================// //===========Host======================// cudaMallocHost((void **)&plan[i].pxx, size_model); cudaMallocHost((void **)&plan[i].pyy, size_model); cudaMallocHost((void **)&plan[i].pzz, size_model); cudaMallocHost((void **)&plan[i].pxy, size_model); cudaMallocHost((void **)&plan[i].pxz, size_model); cudaMallocHost((void **)&plan[i].pyz, size_model); cudaMallocHost((void **)&plan[i].vx, size_model); cudaMallocHost((void **)&plan[i].vy, size_model); cudaMallocHost((void **)&plan[i].vz, size_model); cudaMallocHost((void **)&plan[i].record, sizeof(float)*rnmax*nt); cudaMallocHost((void **)&plan[i].record2, sizeof(float)*rnmax*nt); cudaMallocHost((void **)&plan[i].record3, sizeof(float)*rnmax*nt); //===========device======================// //===========device======================// cudaMalloc((void **)&plan[i].d_r_ix,sizeof(int)*rnmax); cudaMalloc((void **)&plan[i].d_r_iy,sizeof(int)*rnmax); cudaMalloc((void **)&plan[i].d_rik,sizeof(float)*nt); cudaMalloc((void **)&plan[i].d_velp, size_model); cudaMalloc((void **)&plan[i].d_gama_p, size_model); cudaMalloc((void **)&plan[i].d_vels, size_model); cudaMalloc((void **)&plan[i].d_gama_s, size_model); cudaMalloc((void **)&plan[i].d_rho, size_model); cudaMalloc((void **)&plan[i].d_pxx, size_model); cudaMalloc((void **)&plan[i].d_pyy, size_model); cudaMalloc((void **)&plan[i].d_pzz, size_model); cudaMalloc((void **)&plan[i].d_pxy, size_model); cudaMalloc((void **)&plan[i].d_pxz, size_model); cudaMalloc((void **)&plan[i].d_pyz, 
size_model); cudaMalloc((void **)&plan[i].d_vx, size_model); cudaMalloc((void **)&plan[i].d_vy, size_model); cudaMalloc((void **)&plan[i].d_vz, size_model); //////////////// pml ////////////// cudaMalloc((void **)&plan[i].d_gammax,sizeof(float)*ntx); cudaMalloc((void **)&plan[i].d_alphax,sizeof(float)*ntx); cudaMalloc((void **)&plan[i].d_Omegax,sizeof(float)*ntx); cudaMalloc((void **)&plan[i].d_a_x,sizeof(float)*ntx); cudaMalloc((void **)&plan[i].d_b_x,sizeof(float)*ntx); cudaMalloc((void **)&plan[i].d_gammay,sizeof(float)*nty); cudaMalloc((void **)&plan[i].d_alphay,sizeof(float)*nty); cudaMalloc((void **)&plan[i].d_Omegay,sizeof(float)*nty); cudaMalloc((void **)&plan[i].d_a_y,sizeof(float)*nty); cudaMalloc((void **)&plan[i].d_b_y,sizeof(float)*nty); cudaMalloc((void **)&plan[i].d_gammaz,sizeof(float)*ntz); cudaMalloc((void **)&plan[i].d_alphaz,sizeof(float)*ntz); cudaMalloc((void **)&plan[i].d_Omegaz,sizeof(float)*ntz); cudaMalloc((void **)&plan[i].d_a_z,sizeof(float)*ntz); cudaMalloc((void **)&plan[i].d_b_z,sizeof(float)*ntz); cudaMalloc((void**)&plan[i].d_phi_vx_xx,size_model); cudaMalloc((void**)&plan[i].d_phi_vy_yx,size_model); cudaMalloc((void**)&plan[i].d_phi_vz_zx,size_model); cudaMalloc((void**)&plan[i].d_phi_vx_xy,size_model); cudaMalloc((void**)&plan[i].d_phi_vy_yy,size_model); cudaMalloc((void**)&plan[i].d_phi_vz_zy,size_model); cudaMalloc((void**)&plan[i].d_phi_vx_xz,size_model); cudaMalloc((void**)&plan[i].d_phi_vy_yz,size_model); cudaMalloc((void**)&plan[i].d_phi_vz_zz,size_model); cudaMalloc((void**)&plan[i].d_phi_vx_z,size_model); cudaMalloc((void**)&plan[i].d_phi_vz_x,size_model); cudaMalloc((void**)&plan[i].d_phi_vx_y,size_model); cudaMalloc((void**)&plan[i].d_phi_vy_x,size_model); cudaMalloc((void**)&plan[i].d_phi_vy_z,size_model); cudaMalloc((void**)&plan[i].d_phi_vz_y,size_model); cudaMalloc((void**)&plan[i].d_phi_pxx_x,size_model); cudaMalloc((void**)&plan[i].d_phi_pxy_y,size_model); cudaMalloc((void**)&plan[i].d_phi_pxz_z,size_model); cudaMalloc((void**)&plan[i].d_phi_pxy_x,size_model); cudaMalloc((void**)&plan[i].d_phi_pyy_y,size_model); cudaMalloc((void**)&plan[i].d_phi_pyz_z,size_model); cudaMalloc((void**)&plan[i].d_phi_pxz_x,size_model); cudaMalloc((void**)&plan[i].d_phi_pyz_y,size_model); cudaMalloc((void**)&plan[i].d_phi_pzz_z,size_model); /////////////////////////////////////////////////////////////////////// cudaMalloc((void **)&plan[i].d_inx, sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_iny, sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_inz, sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_in_pxx, sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_in_pyy, sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_in_pzz, sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_in_pxy, sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_in_pxz, sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_in_pyz, sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_outx, sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_outy, sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_outz, sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_outpxx, sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_outpyy, sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_outpzz, sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_outpxy, sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_outpxz, sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_outpyz, 
sizeof(cufftComplex)*ntp); cudaMalloc((void**)&plan[i].d_kx,sizeof(float)*ntx); cudaMalloc((void**)&plan[i].d_ky,sizeof(float)*nty); cudaMalloc((void**)&plan[i].d_kz,sizeof(float)*ntz); cudaMalloc((void**)&plan[i].d_k,size_model); cudaMalloc((void **)&plan[i].d_kvx_x,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_kvy_y,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_kvz_z,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_kvx_z,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_kvz_x,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_kvx_y,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_kvy_x,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_kvy_z,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_kvz_y,sizeof(cufftComplex)*ntp); ////////////////////////////////////////////////////////////// cudaMalloc((void **)&plan[i].d_eta_p1,size_model); cudaMalloc((void **)&plan[i].d_eta_p2,size_model); cudaMalloc((void **)&plan[i].d_eta_p3,size_model); cudaMalloc((void **)&plan[i].d_eta_s1,size_model); cudaMalloc((void **)&plan[i].d_eta_s2,size_model); cudaMalloc((void **)&plan[i].d_eta_s3,size_model); cudaMalloc((void **)&plan[i].d_tao_p1,size_model); cudaMalloc((void **)&plan[i].d_tao_p2,size_model); cudaMalloc((void **)&plan[i].d_tao_s1,size_model); cudaMalloc((void **)&plan[i].d_tao_s2,size_model); ////////////////////////////////////////////////////////////// //////////////////////////////////////// cudaMalloc((void **)&plan[i].d_Ap1,size_model); cudaMalloc((void **)&plan[i].d_Ap2,size_model); cudaMalloc((void **)&plan[i].d_Ap3,size_model); /////////////////////////////////////// cudaMalloc((void **)&plan[i].d_partx1,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_party1,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partz1,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partx2,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_party2,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partz2,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partx3,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_party3,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partz3,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvx_x1,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvx_x2,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvx_x3,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvx_x4,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvx_x5,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvy_y1,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvy_y2,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvy_y3,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvy_y4,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvy_y5,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvz_z1,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvz_z2,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvz_z3,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvz_z4,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvz_z5,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvx_y1,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvx_y2,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvx_y3,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvx_y4,sizeof(cufftComplex)*ntp); 
cudaMalloc((void **)&plan[i].d_partvx_y5,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvy_x1,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvy_x2,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvy_x3,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvy_x4,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvy_x5,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvz_x1,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvz_x2,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvz_x3,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvz_x4,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvz_x5,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvx_z1,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvx_z2,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvx_z3,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvx_z4,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvx_z5,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvy_z1,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvy_z2,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvy_z3,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvy_z4,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvy_z5,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvz_y1,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvz_y2,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvz_y3,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvz_y4,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_partvz_y5,sizeof(cufftComplex)*ntp); //////////// cudaMalloc((void **)&plan[i].d_record, sizeof(float)*rnmax*nt); cudaMalloc((void **)&plan[i].d_record2, sizeof(float)*rnmax*nt); cudaMalloc((void **)&plan[i].d_record3, sizeof(float)*rnmax*nt); cudaMalloc((void **)&plan[i].d_dvx,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_dvy,sizeof(cufftComplex)*ntp); cudaMalloc((void **)&plan[i].d_dvz,sizeof(cufftComplex)*ntp); } } //========================================================= // Free the memory for variables in device // ======================================================= extern "C" void cuda_Device_free ( struct MultiGPU plan[], int GPU_N ) { int i; for(i=0;i<GPU_N;i++) { cudaSetDevice(i); cufftDestroy(plan[i].PLAN_FORWARD); cufftDestroy(plan[i].PLAN_BACKWARD); cudaFreeHost(plan[i].pxx); cudaFreeHost(plan[i].pyy); cudaFreeHost(plan[i].pzz); cudaFreeHost(plan[i].pxy); cudaFreeHost(plan[i].pxz); cudaFreeHost(plan[i].pyz); cudaFreeHost(plan[i].vx); cudaFreeHost(plan[i].vy); cudaFreeHost(plan[i].vz); cudaFreeHost(plan[i].record); cudaFreeHost(plan[i].record2); cudaFreeHost(plan[i].record3); cudaFree(plan[i].d_r_ix); cudaFree(plan[i].d_r_iy); cudaFree(plan[i].d_rik); cudaFree(plan[i].d_velp); cudaFree(plan[i].d_gama_p); cudaFree(plan[i].d_vels); cudaFree(plan[i].d_gama_s); cudaFree(plan[i].d_rho); cudaFree(plan[i].d_pxx); cudaFree(plan[i].d_pyy); cudaFree(plan[i].d_pzz); cudaFree(plan[i].d_pxy); cudaFree(plan[i].d_pxz); cudaFree(plan[i].d_pyz); cudaFree(plan[i].d_vx); cudaFree(plan[i].d_vy); cudaFree(plan[i].d_vz); //////////////////pml ///////////////// cudaFree(plan[i].d_gammax); cudaFree(plan[i].d_alphax); cudaFree(plan[i].d_Omegax); cudaFree(plan[i].d_a_x); cudaFree(plan[i].d_b_x); cudaFree(plan[i].d_gammay); cudaFree(plan[i].d_alphay); cudaFree(plan[i].d_Omegay); 
cudaFree(plan[i].d_a_y); cudaFree(plan[i].d_b_y); cudaFree(plan[i].d_gammaz); cudaFree(plan[i].d_alphaz); cudaFree(plan[i].d_Omegaz); cudaFree(plan[i].d_a_z); cudaFree(plan[i].d_b_z); cudaFree(plan[i].d_phi_vx_xx); cudaFree(plan[i].d_phi_vy_yx); cudaFree(plan[i].d_phi_vz_zx); cudaFree(plan[i].d_phi_vx_xy); cudaFree(plan[i].d_phi_vy_yy); cudaFree(plan[i].d_phi_vz_zy); cudaFree(plan[i].d_phi_vx_xz); cudaFree(plan[i].d_phi_vy_yz); cudaFree(plan[i].d_phi_vz_zz); cudaFree(plan[i].d_phi_vx_z); cudaFree(plan[i].d_phi_vz_x); cudaFree(plan[i].d_phi_vx_y); cudaFree(plan[i].d_phi_vy_x); cudaFree(plan[i].d_phi_vy_z); cudaFree(plan[i].d_phi_vz_y); cudaFree(plan[i].d_phi_pxx_x); cudaFree(plan[i].d_phi_pxy_y); cudaFree(plan[i].d_phi_pxz_z); cudaFree(plan[i].d_phi_pxy_x); cudaFree(plan[i].d_phi_pyy_y); cudaFree(plan[i].d_phi_pyz_z); cudaFree(plan[i].d_phi_pxz_x); cudaFree(plan[i].d_phi_pyz_y); cudaFree(plan[i].d_phi_pzz_z); ////////////////////////////////////////////////////// cudaFree(plan[i].d_inx); cudaFree(plan[i].d_iny); cudaFree(plan[i].d_inz); cudaFree(plan[i].d_in_pxx); cudaFree(plan[i].d_in_pyy); cudaFree(plan[i].d_in_pzz); cudaFree(plan[i].d_in_pxy); cudaFree(plan[i].d_in_pxz); cudaFree(plan[i].d_in_pyz); cudaFree(plan[i].d_outx); cudaFree(plan[i].d_outy); cudaFree(plan[i].d_outz); cudaFree(plan[i].d_outpxx); cudaFree(plan[i].d_outpyy); cudaFree(plan[i].d_outpzz); cudaFree(plan[i].d_outpxy); cudaFree(plan[i].d_outpxz); cudaFree(plan[i].d_outpyz); cudaFree(plan[i].d_kx); cudaFree(plan[i].d_ky); cudaFree(plan[i].d_kz); cudaFree(plan[i].d_k); cudaFree(plan[i].d_kvx_x); cudaFree(plan[i].d_kvy_y); cudaFree(plan[i].d_kvz_z); cudaFree(plan[i].d_kvx_z); cudaFree(plan[i].d_kvz_x); cudaFree(plan[i].d_kvy_z); cudaFree(plan[i].d_kvz_y); cudaFree(plan[i].d_kvx_y); cudaFree(plan[i].d_kvy_x); cudaFree(plan[i].d_eta_p1); cudaFree(plan[i].d_eta_p2); cudaFree(plan[i].d_eta_p3); cudaFree(plan[i].d_eta_s1); cudaFree(plan[i].d_eta_s2); cudaFree(plan[i].d_eta_s3); cudaFree(plan[i].d_tao_p1); cudaFree(plan[i].d_tao_p2); cudaFree(plan[i].d_tao_s1); cudaFree(plan[i].d_tao_s2); cudaFree(plan[i].d_Ap1); cudaFree(plan[i].d_Ap2); cudaFree(plan[i].d_Ap3); cudaFree(plan[i].d_partx1); cudaFree(plan[i].d_party1); cudaFree(plan[i].d_partz1); cudaFree(plan[i].d_partx2); cudaFree(plan[i].d_party2); cudaFree(plan[i].d_partz2); cudaFree(plan[i].d_partx3); cudaFree(plan[i].d_party3); cudaFree(plan[i].d_partz3); cudaFree(plan[i].d_partvx_x1); cudaFree(plan[i].d_partvx_x2); cudaFree(plan[i].d_partvx_x3); cudaFree(plan[i].d_partvx_x4); cudaFree(plan[i].d_partvx_x5); cudaFree(plan[i].d_partvy_y1); cudaFree(plan[i].d_partvy_y2); cudaFree(plan[i].d_partvy_y3); cudaFree(plan[i].d_partvy_y4); cudaFree(plan[i].d_partvy_y5); cudaFree(plan[i].d_partvz_z1); cudaFree(plan[i].d_partvz_z2); cudaFree(plan[i].d_partvz_z3); cudaFree(plan[i].d_partvz_z4); cudaFree(plan[i].d_partvz_z5); cudaFree(plan[i].d_partvx_y1); cudaFree(plan[i].d_partvx_y2); cudaFree(plan[i].d_partvx_y3); cudaFree(plan[i].d_partvx_y4); cudaFree(plan[i].d_partvx_y5); cudaFree(plan[i].d_partvy_x1); cudaFree(plan[i].d_partvy_x2); cudaFree(plan[i].d_partvy_x3); cudaFree(plan[i].d_partvy_x4); cudaFree(plan[i].d_partvy_x5); cudaFree(plan[i].d_partvx_z1); cudaFree(plan[i].d_partvx_z2); cudaFree(plan[i].d_partvx_z3); cudaFree(plan[i].d_partvx_z4); cudaFree(plan[i].d_partvx_z5); cudaFree(plan[i].d_partvz_x1); cudaFree(plan[i].d_partvz_x2); cudaFree(plan[i].d_partvz_x3); cudaFree(plan[i].d_partvz_x4); cudaFree(plan[i].d_partvz_x5); cudaFree(plan[i].d_partvy_z1); 
cudaFree(plan[i].d_partvy_z2); cudaFree(plan[i].d_partvy_z3); cudaFree(plan[i].d_partvy_z4); cudaFree(plan[i].d_partvy_z5); cudaFree(plan[i].d_partvz_y1); cudaFree(plan[i].d_partvz_y2); cudaFree(plan[i].d_partvz_y3); cudaFree(plan[i].d_partvz_y4); cudaFree(plan[i].d_partvz_y5); cudaFree(plan[i].d_record); cudaFree(plan[i].d_record2); cudaFree(plan[i].d_record3); cudaFree(plan[i].d_dvx); cudaFree(plan[i].d_dvy); cudaFree(plan[i].d_dvz); } } extern "C" void getdevice(int *GPU_N) { cudaGetDeviceCount(GPU_N); }
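// A minimal call-order sketch for the extern "C" helpers above (getdevice,
// cuda_Device_malloc, cuda_Host_initialization, cuda_Device_free), kept as a
// comment because struct MultiGPU is defined in a project header that is not
// shown here. All sizes below are hypothetical placeholders.
/*
    int GPU_N = 0;
    getdevice(&GPU_N);                         // number of visible CUDA devices

    int ntx = 256, nty = 256, ntz = 256;       // padded (PML-including) model size, assumed
    int ntp = ntx * nty * ntz;
    int nx = 200, ny = 200, nz = 200;          // interior model size, assumed
    int nt = 3000, rnmax = 2048;               // time samples and max receivers, assumed

    struct MultiGPU plan[MAX_GPU];             // MAX_GPU assumed to be >= GPU_N

    cuda_Device_malloc(ntx, nty, ntz, ntp, nx, ny, nz, nt, rnmax, plan, GPU_N);
    cuda_Host_initialization(ntp, nt, rnmax, plan, GPU_N);

    // ... the time loop / wavefield extrapolation earlier in this file runs here ...

    cuda_Device_free(plan, GPU_N);
*/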
1d6528e2daf1a41704a2a7baeec50445f3b3502e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <linalg/power.cuh> #include <raft/random/rng.cuh> #include "test_utils.h" namespace MLCommon { namespace LinAlg { template <typename Type> __global__ void naivePowerElemKernel(Type *out, const Type *in1, const Type *in2, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) { out[idx] = raft::myPow(in1[idx], in2[idx]); } } template <typename Type> void naivePowerElem(Type *out, const Type *in1, const Type *in2, int len, hipStream_t stream) { static const int TPB = 64; int nblks = raft::ceildiv(len, TPB); hipLaunchKernelGGL(( naivePowerElemKernel<Type>), dim3(nblks), dim3(TPB), 0, stream, out, in1, in2, len); CUDA_CHECK(hipPeekAtLastError()); } template <typename Type> __global__ void naivePowerScalarKernel(Type *out, const Type *in1, const Type in2, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) { out[idx] = raft::myPow(in1[idx], in2); } } template <typename Type> void naivePowerScalar(Type *out, const Type *in1, const Type in2, int len, hipStream_t stream) { static const int TPB = 64; int nblks = raft::ceildiv(len, TPB); hipLaunchKernelGGL(( naivePowerScalarKernel<Type>), dim3(nblks), dim3(TPB), 0, stream, out, in1, in2, len); CUDA_CHECK(hipPeekAtLastError()); } template <typename T> struct PowerInputs { T tolerance; int len; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const PowerInputs<T> &dims) { return os; } template <typename T> class PowerTest : public ::testing::TestWithParam<PowerInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<PowerInputs<T>>::GetParam(); raft::random::Rng r(params.seed); int len = params.len; hipStream_t stream; CUDA_CHECK(hipStreamCreate(&stream)); raft::allocate(in1, len); raft::allocate(in2, len); raft::allocate(out_ref, len); raft::allocate(out, len); r.uniform(in1, len, T(1.0), T(2.0), stream); r.uniform(in2, len, T(1.0), T(2.0), stream); naivePowerElem(out_ref, in1, in2, len, stream); naivePowerScalar(out_ref, out_ref, T(2), len, stream); power(out, in1, in2, len, stream); powerScalar(out, out, T(2), len, stream); power(in1, in1, in2, len, stream); powerScalar(in1, in1, T(2), len, stream); CUDA_CHECK(hipStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(hipFree(in1)); CUDA_CHECK(hipFree(in2)); CUDA_CHECK(hipFree(out_ref)); CUDA_CHECK(hipFree(out)); } protected: PowerInputs<T> params; T *in1, *in2, *out_ref, *out; int device_count = 0; }; const std::vector<PowerInputs<float>> inputsf2 = { {0.000001f, 1024 * 1024, 1234ULL}}; const std::vector<PowerInputs<double>> inputsd2 = { {0.00000001, 1024 * 1024, 1234ULL}}; typedef PowerTest<float> PowerTestF; TEST_P(PowerTestF, Result) { ASSERT_TRUE(raft::devArrMatch(out_ref, out, params.len, 
raft::CompareApprox<float>(params.tolerance))); ASSERT_TRUE(raft::devArrMatch(out_ref, in1, params.len, raft::CompareApprox<float>(params.tolerance))); } typedef PowerTest<double> PowerTestD; TEST_P(PowerTestD, Result) { ASSERT_TRUE(raft::devArrMatch(out_ref, out, params.len, raft::CompareApprox<double>(params.tolerance))); ASSERT_TRUE(raft::devArrMatch(out_ref, in1, params.len, raft::CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(PowerTests, PowerTestF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(PowerTests, PowerTestD, ::testing::ValuesIn(inputsd2)); } // end namespace LinAlg } // end namespace MLCommon
1d6528e2daf1a41704a2a7baeec50445f3b3502e.cu
/* * Copyright (c) 2018-2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <gtest/gtest.h> #include <raft/cudart_utils.h> #include <linalg/power.cuh> #include <raft/random/rng.cuh> #include "test_utils.h" namespace MLCommon { namespace LinAlg { template <typename Type> __global__ void naivePowerElemKernel(Type *out, const Type *in1, const Type *in2, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) { out[idx] = raft::myPow(in1[idx], in2[idx]); } } template <typename Type> void naivePowerElem(Type *out, const Type *in1, const Type *in2, int len, cudaStream_t stream) { static const int TPB = 64; int nblks = raft::ceildiv(len, TPB); naivePowerElemKernel<Type><<<nblks, TPB, 0, stream>>>(out, in1, in2, len); CUDA_CHECK(cudaPeekAtLastError()); } template <typename Type> __global__ void naivePowerScalarKernel(Type *out, const Type *in1, const Type in2, int len) { int idx = threadIdx.x + blockIdx.x * blockDim.x; if (idx < len) { out[idx] = raft::myPow(in1[idx], in2); } } template <typename Type> void naivePowerScalar(Type *out, const Type *in1, const Type in2, int len, cudaStream_t stream) { static const int TPB = 64; int nblks = raft::ceildiv(len, TPB); naivePowerScalarKernel<Type><<<nblks, TPB, 0, stream>>>(out, in1, in2, len); CUDA_CHECK(cudaPeekAtLastError()); } template <typename T> struct PowerInputs { T tolerance; int len; unsigned long long int seed; }; template <typename T> ::std::ostream &operator<<(::std::ostream &os, const PowerInputs<T> &dims) { return os; } template <typename T> class PowerTest : public ::testing::TestWithParam<PowerInputs<T>> { protected: void SetUp() override { params = ::testing::TestWithParam<PowerInputs<T>>::GetParam(); raft::random::Rng r(params.seed); int len = params.len; cudaStream_t stream; CUDA_CHECK(cudaStreamCreate(&stream)); raft::allocate(in1, len); raft::allocate(in2, len); raft::allocate(out_ref, len); raft::allocate(out, len); r.uniform(in1, len, T(1.0), T(2.0), stream); r.uniform(in2, len, T(1.0), T(2.0), stream); naivePowerElem(out_ref, in1, in2, len, stream); naivePowerScalar(out_ref, out_ref, T(2), len, stream); power(out, in1, in2, len, stream); powerScalar(out, out, T(2), len, stream); power(in1, in1, in2, len, stream); powerScalar(in1, in1, T(2), len, stream); CUDA_CHECK(cudaStreamDestroy(stream)); } void TearDown() override { CUDA_CHECK(cudaFree(in1)); CUDA_CHECK(cudaFree(in2)); CUDA_CHECK(cudaFree(out_ref)); CUDA_CHECK(cudaFree(out)); } protected: PowerInputs<T> params; T *in1, *in2, *out_ref, *out; int device_count = 0; }; const std::vector<PowerInputs<float>> inputsf2 = { {0.000001f, 1024 * 1024, 1234ULL}}; const std::vector<PowerInputs<double>> inputsd2 = { {0.00000001, 1024 * 1024, 1234ULL}}; typedef PowerTest<float> PowerTestF; TEST_P(PowerTestF, Result) { ASSERT_TRUE(raft::devArrMatch(out_ref, out, params.len, raft::CompareApprox<float>(params.tolerance))); ASSERT_TRUE(raft::devArrMatch(out_ref, in1, params.len, raft::CompareApprox<float>(params.tolerance))); } typedef PowerTest<double> 
PowerTestD; TEST_P(PowerTestD, Result) { ASSERT_TRUE(raft::devArrMatch(out_ref, out, params.len, raft::CompareApprox<double>(params.tolerance))); ASSERT_TRUE(raft::devArrMatch(out_ref, in1, params.len, raft::CompareApprox<double>(params.tolerance))); } INSTANTIATE_TEST_CASE_P(PowerTests, PowerTestF, ::testing::ValuesIn(inputsf2)); INSTANTIATE_TEST_CASE_P(PowerTests, PowerTestD, ::testing::ValuesIn(inputsd2)); } // end namespace LinAlg } // end namespace MLCommon
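// A small host-side restatement (not part of the original test) of the reference value the
// fixture builds on the GPU: naivePowerElem followed by naivePowerScalar(.., 2) amounts to
// pow(pow(in1[i], in2[i]), 2), which both the out-of-place (out) and in-place (in1) results
// are compared against. The function name power_reference is hypothetical.
#include <cmath>
#include <vector>

inline std::vector<float> power_reference(const std::vector<float> &in1,
                                          const std::vector<float> &in2) {
  std::vector<float> ref(in1.size());
  for (std::size_t i = 0; i < in1.size(); ++i) {
    // element-wise pow, then square, matching the GPU reference path
    ref[i] = std::pow(std::pow(in1[i], in2[i]), 2.0f);
  }
  return ref;
}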
23f52f3443a6d322393594abb71658e925ebfd96.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <opencv4/opencv2/imgcodecs.hpp> #include <opencv4/opencv2/core/utility.hpp> #include <opencv4/opencv2/highgui.hpp> #include <opencv4/opencv2/imgproc/imgproc.hpp> #include <../include/kernel.h> #include <../include/Filter.h> #include <../include/colors.h> #include <stdio.h> #include <math.h> #include <iostream> #define BLOCK_SIZE 32 #define GRID_SIZE 128 #define KERNEL_SIZE 3 /*Kernels*/ __global__ void kernelConvolutionSobel(unsigned char* src_img, unsigned char* dst_img, int width_img, int height_img){ int sobel_x[KERNEL_SIZE][KERNEL_SIZE] = {{-1, 0, 1}, {-2, 0, 2}, {-1, 0, 1}}; int sobel_y[KERNEL_SIZE][KERNEL_SIZE] = {{-1, -2, -1}, {0, 0, 0}, {1, 2, 1}}; int num_row = blockIdx.x * blockDim.x + threadIdx.x; int num_col = blockIdx.y * blockDim.y + threadIdx.y; int index = num_row * width_img + num_col; if(num_col < (width_img - 1) && num_row < (height_img - 1)){ float grad_x= (src_img[index] * sobel_x[0][0]) + (src_img[index+1] * sobel_x[0][1]) + (src_img[index+2] * sobel_x[0][2]) + (src_img[index] * sobel_x[1][0]) + (src_img[index+1] * sobel_x[1][1]) + (src_img[index+2] * sobel_x[1][2]) + (src_img[index] * sobel_x[2][0]) + (src_img[index+1] * sobel_x[2][1]) + (src_img[index+2] * sobel_x[2][2]); float grad_y= (src_img[index] * sobel_y[0][0]) + (src_img[index+1] * sobel_y[0][1]) + (src_img[index+2] * sobel_y[0][2]) + (src_img[index] * sobel_y[1][0]) + (src_img[index+1] * sobel_y[1][1]) + (src_img[index+2] * sobel_y[1][2]) + (src_img[index] * sobel_y[2][0]) + (src_img[index+1] * sobel_y[2][1]) + (src_img[index+2] * sobel_y[2][2]); float gradient = sqrtf(grad_x * grad_x + grad_y * grad_y); if(gradient > 255) gradient = 255; if(gradient < 0) gradient = 0; __syncthreads(); dst_img[index] = gradient; } } __global__ void kernelConvolutionSharpen(unsigned char* src_img, unsigned char* dst_img, int width_img, int height_img){ int sharpen[KERNEL_SIZE][KERNEL_SIZE] = {{0, -1, 0}, {-1, 5, -1}, {0, -1, 0}}; int num_row = blockIdx.x * blockDim.x + threadIdx.x; int num_col = blockIdx.y * blockDim.y + threadIdx.y; int index = num_row * width_img + num_col; if(num_col < (width_img - 1) && num_row < (height_img - 1)){ float sum = (src_img[index] * sharpen[0][0]) + (src_img[index+1] * sharpen[0][1]) + (src_img[index+2] * sharpen[0][2]) + (src_img[index] * sharpen[1][0]) + (src_img[index+1] * sharpen[1][1]) + (src_img[index+2] * sharpen[1][2]) + (src_img[index] * sharpen[2][0]) + (src_img[index+1] * sharpen[2][1]) + (src_img[index+2] * sharpen[2][2]); if(sum > 255) sum = 255; if(sum < 0)sum = 0; __syncthreads(); dst_img[index] = sum; } } hipError_t Filter::testCuErr(hipError_t dst_img){ if (dst_img != hipSuccess) { printf("CUDA Runtime Error: %s\n", hipGetErrorString(dst_img)); assert(dst_img == hipSuccess); } return dst_img; } __host__ void Filter::applyFilter(cv::Mat *src_img, std::string type_filter){ hipFree(0); unsigned char *dev_src, *dev_sobel; int img_size = src_img->rows * src_img->cols * sizeof(unsigned char); hipEvent_t start, end; testCuErr(hipEventCreate(&start)); testCuErr(hipEventCreate(&end)); dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 numBlocks(GRID_SIZE, GRID_SIZE); testCuErr(hipMalloc((void**)&dev_src, img_size)); testCuErr(hipMalloc((void**)&dev_sobel, img_size)); testCuErr(hipMemcpy(dev_src, src_img->data, img_size, hipMemcpyHostToDevice)); testCuErr(hipEventRecord(start)); if(type_filter.compare("sobel") == 0) hipLaunchKernelGGL(( kernelConvolutionSobel), 
dim3(numBlocks),dim3(threadsPerBlock), 0, 0, dev_src, dev_sobel, src_img->cols, src_img->rows); if(type_filter.compare("sharpen") == 0) hipLaunchKernelGGL(( kernelConvolutionSharpen), dim3(numBlocks),dim3(threadsPerBlock), 0, 0, dev_src, dev_sobel, src_img->cols, src_img->rows); testCuErr(hipGetLastError()); testCuErr(hipEventRecord(end)); testCuErr(hipEventSynchronize(end)); float milliseconds = 0; testCuErr(hipEventElapsedTime(&milliseconds, start, end)); std::cout << CYAN << "Elapsed time: " << RESET << milliseconds << " ms" << std::endl; testCuErr(hipMemcpy(src_img->data, dev_sobel, img_size, hipMemcpyDeviceToHost)); testCuErr(hipFree(dev_src)); testCuErr(hipFree(dev_sobel)); }
23f52f3443a6d322393594abb71658e925ebfd96.cu
#include <opencv4/opencv2/imgcodecs.hpp> #include <opencv4/opencv2/core/utility.hpp> #include <opencv4/opencv2/highgui.hpp> #include <opencv4/opencv2/imgproc/imgproc.hpp> #include <../include/kernel.h> #include <../include/Filter.h> #include <../include/colors.h> #include <stdio.h> #include <math.h> #include <iostream> #define BLOCK_SIZE 32 #define GRID_SIZE 128 #define KERNEL_SIZE 3 /*Kernels*/ __global__ void kernelConvolutionSobel(unsigned char* src_img, unsigned char* dst_img, int width_img, int height_img){ int sobel_x[KERNEL_SIZE][KERNEL_SIZE] = {{-1, 0, 1}, {-2, 0, 2}, {-1, 0, 1}}; int sobel_y[KERNEL_SIZE][KERNEL_SIZE] = {{-1, -2, -1}, {0, 0, 0}, {1, 2, 1}}; int num_row = blockIdx.x * blockDim.x + threadIdx.x; int num_col = blockIdx.y * blockDim.y + threadIdx.y; int index = num_row * width_img + num_col; if(num_col < (width_img - 1) && num_row < (height_img - 1)){ float grad_x= (src_img[index] * sobel_x[0][0]) + (src_img[index+1] * sobel_x[0][1]) + (src_img[index+2] * sobel_x[0][2]) + (src_img[index] * sobel_x[1][0]) + (src_img[index+1] * sobel_x[1][1]) + (src_img[index+2] * sobel_x[1][2]) + (src_img[index] * sobel_x[2][0]) + (src_img[index+1] * sobel_x[2][1]) + (src_img[index+2] * sobel_x[2][2]); float grad_y= (src_img[index] * sobel_y[0][0]) + (src_img[index+1] * sobel_y[0][1]) + (src_img[index+2] * sobel_y[0][2]) + (src_img[index] * sobel_y[1][0]) + (src_img[index+1] * sobel_y[1][1]) + (src_img[index+2] * sobel_y[1][2]) + (src_img[index] * sobel_y[2][0]) + (src_img[index+1] * sobel_y[2][1]) + (src_img[index+2] * sobel_y[2][2]); float gradient = sqrtf(grad_x * grad_x + grad_y * grad_y); if(gradient > 255) gradient = 255; if(gradient < 0) gradient = 0; __syncthreads(); dst_img[index] = gradient; } } __global__ void kernelConvolutionSharpen(unsigned char* src_img, unsigned char* dst_img, int width_img, int height_img){ int sharpen[KERNEL_SIZE][KERNEL_SIZE] = {{0, -1, 0}, {-1, 5, -1}, {0, -1, 0}}; int num_row = blockIdx.x * blockDim.x + threadIdx.x; int num_col = blockIdx.y * blockDim.y + threadIdx.y; int index = num_row * width_img + num_col; if(num_col < (width_img - 1) && num_row < (height_img - 1)){ float sum = (src_img[index] * sharpen[0][0]) + (src_img[index+1] * sharpen[0][1]) + (src_img[index+2] * sharpen[0][2]) + (src_img[index] * sharpen[1][0]) + (src_img[index+1] * sharpen[1][1]) + (src_img[index+2] * sharpen[1][2]) + (src_img[index] * sharpen[2][0]) + (src_img[index+1] * sharpen[2][1]) + (src_img[index+2] * sharpen[2][2]); if(sum > 255) sum = 255; if(sum < 0)sum = 0; __syncthreads(); dst_img[index] = sum; } } cudaError_t Filter::testCuErr(cudaError_t dst_img){ if (dst_img != cudaSuccess) { printf("CUDA Runtime Error: %s\n", cudaGetErrorString(dst_img)); assert(dst_img == cudaSuccess); } return dst_img; } __host__ void Filter::applyFilter(cv::Mat *src_img, std::string type_filter){ cudaFree(0); unsigned char *dev_src, *dev_sobel; int img_size = src_img->rows * src_img->cols * sizeof(unsigned char); cudaEvent_t start, end; testCuErr(cudaEventCreate(&start)); testCuErr(cudaEventCreate(&end)); dim3 threadsPerBlock(BLOCK_SIZE, BLOCK_SIZE); dim3 numBlocks(GRID_SIZE, GRID_SIZE); testCuErr(cudaMalloc((void**)&dev_src, img_size)); testCuErr(cudaMalloc((void**)&dev_sobel, img_size)); testCuErr(cudaMemcpy(dev_src, src_img->data, img_size, cudaMemcpyHostToDevice)); testCuErr(cudaEventRecord(start)); if(type_filter.compare("sobel") == 0) kernelConvolutionSobel<<<numBlocks,threadsPerBlock>>>(dev_src, dev_sobel, src_img->cols, src_img->rows); if(type_filter.compare("sharpen") == 0) 
kernelConvolutionSharpen<<<numBlocks,threadsPerBlock>>>(dev_src, dev_sobel, src_img->cols, src_img->rows); testCuErr(cudaGetLastError()); testCuErr(cudaEventRecord(end)); testCuErr(cudaEventSynchronize(end)); float milliseconds = 0; testCuErr(cudaEventElapsedTime(&milliseconds, start, end)); std::cout << CYAN << "Elapsed time: " << RESET << milliseconds << " ms" << std::endl; testCuErr(cudaMemcpy(src_img->data, dev_sobel, img_size, cudaMemcpyDeviceToHost)); testCuErr(cudaFree(dev_src)); testCuErr(cudaFree(dev_sobel)); }
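// A usage sketch (not part of the original sources) showing how Filter::applyFilter above
// might be driven. It assumes Filter is default-constructible (Filter.h is not shown) and
// uses placeholder file names; the kernels expect a single-channel 8-bit image, and the
// fixed 128x128 grid of 32x32 blocks covers at most 4096x4096 pixels.
#include <../include/Filter.h>
#include <opencv4/opencv2/imgcodecs.hpp>
#include <iostream>

int example_filter_main() {
  cv::Mat img = cv::imread("input.png", cv::IMREAD_GRAYSCALE);   // CV_8UC1
  if (img.empty()) { std::cerr << "could not read input.png" << std::endl; return 1; }

  Filter f;                       // assumed default constructor
  f.applyFilter(&img, "sobel");   // or "sharpen"; the result overwrites img.data

  cv::imwrite("sobel.png", img);
  return 0;
}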
304e5d57ca17e8d62b49ce8d710745085691d61f.hip
// !!! This is a file automatically generated by hipify!!! #include <device_matrix/device_matrix.h> #include <glog/logging.h> #include <memory> using namespace cuda; int main(int argc, char* argv[]) { google::InitGoogleLogging(argv[0]); const hipStream_t stream = 0; // default CUDA stream. std::unique_ptr<device_matrix<float32>> a( device_matrix<float32>::create( stream, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}, 2 /* num_rows */, 3 /* num_columns */)); std::unique_ptr<device_matrix<float32>> b( device_matrix<float32>::create( stream, {7.0, 8.0, 9.0, 10.0, 11.0, 12.0}, 3 /* num_rows */, 2 /* num_columns */)); device_matrix<float32> c( 2 /* num_rows */, 2 /* num_columns */, stream); matrix_mult(stream, *a, HIPBLAS_OP_N, *b, HIPBLAS_OP_N, &c); hipDeviceSynchronize(); print_matrix(c); }
304e5d57ca17e8d62b49ce8d710745085691d61f.cu
#include <device_matrix/device_matrix.h> #include <glog/logging.h> #include <memory> using namespace cuda; int main(int argc, char* argv[]) { google::InitGoogleLogging(argv[0]); const cudaStream_t stream = 0; // default CUDA stream. std::unique_ptr<device_matrix<float32>> a( device_matrix<float32>::create( stream, {1.0, 2.0, 3.0, 4.0, 5.0, 6.0}, 2 /* num_rows */, 3 /* num_columns */)); std::unique_ptr<device_matrix<float32>> b( device_matrix<float32>::create( stream, {7.0, 8.0, 9.0, 10.0, 11.0, 12.0}, 3 /* num_rows */, 2 /* num_columns */)); device_matrix<float32> c( 2 /* num_rows */, 2 /* num_columns */, stream); matrix_mult(stream, *a, CUBLAS_OP_N, *b, CUBLAS_OP_N, &c); cudaDeviceSynchronize(); print_matrix(c); }
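// The 2x2 product printed above can be checked by hand, but the numbers depend on how
// device_matrix stores the initializer list, which is not shown here. If the values are
// laid out column-major (the usual cuBLAS convention), A = [1 3 5; 2 4 6] and
// B = [7 10; 8 11; 9 12], so C = A*B = [76 103; 100 136]. If they are laid out row-major,
// A = [1 2 3; 4 5 6] and B = [7 8; 9 10; 11 12], so C = [58 64; 139 154].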
b63adff8942bd8c578ad192ac0477e014a79d432.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Matrix multiplication: C = A * B. * Device code. */ #ifndef _MATRIXMUL_KERNEL_H_ #define _MATRIXMUL_KERNEL_H_ #include <fcuda.h> #include "../matrixMul.h" #include <string.h> #define CHECK_BANK_CONFLICTS 0 #if CHECK_BANK_CONFLICTS #define AS(i, j) cutilBankChecker(((DATATYPE*)&As[0][0]), (BLOCK_SIZE * i + j)) #define BS(i, j) cutilBankChecker(((DATATYPE*)&Bs[0][0]), (BLOCK_SIZE * i + j)) #else #define AS(i, j) As[i][j] #define BS(i, j) Bs[i][j] #endif //////////////////////////////////////////////////////////////////////////////// //! Matrix multiplication on the device: C = A * B //! wA is A's width and wB is B's width //////////////////////////////////////////////////////////////////////////////// #pragma FCUDA GRID x_dim=16 y_dim=16 #pragma FCUDA COREINFO num_cores=1 pipeline=no //#pragma FCUDA TBLK bdim_num=2 bdim_x=16 bdim_y=16 gdims=2 //#pragma FCUDA GRID x_dim=2 y_dim=2 concur=0 //#pragma FCUDA PORTMERGE remove_port_name=A port_id=0 //#pragma FCUDA PORTMERGE remove_port_name=B port_id=0 //#pragma FCUDA PORTMERGE remove_port_name=C port_id=0 __global__ void matrixMul( DATATYPE *C, DATATYPE *A, DATATYPE *B, int wA, int wB) { #pragma HLS INTERFACE ap_bus port=A depth=3840 #pragma HLS INTERFACE ap_bus port=B depth=6144 #pragma HLS INTERFACE ap_bus port=C depth=10240 // Block index int bx = blockIdx.x; int by = blockIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; //#pragma FCUDA COMPUTE cores=[1] begin name=vec_init unroll=1 mpart=1 array_split=[Csub_block|As] DATATYPE Csub = 0; //#pragma FCUDA COMPUTE cores=[1] end name=vec_init unroll=1 mpart=1 array_split=[Csub_block|As] int a = 0, b = 0, k = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ DATATYPE As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ DATATYPE Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix //#pragma FCUDA TRANSFER cores=[1] type=burst dir=[0|0] pointer=[A|B] size=[16|16] begin name=fetch unroll=1 mpart=1 array_split=[Csub_block|As] AS(threadIdx.y, threadIdx.x) = A[a + wA * threadIdx.y + threadIdx.x]; BS(threadIdx.y, threadIdx.x) = B[b + wB * threadIdx.y + threadIdx.x]; //#pragma FCUDA TRANSFER cores=[1] type=burst dir=[0|0] pointer=[A|B] size=[16|16] end name=fetch unroll=1 mpart=1 array_split=[Csub_block|As] // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix //#pragma FCUDA COMPUTE cores=[1] begin name=vec_blk array_split=[Csub_block|As] unroll=1 mpart=1 lp1: for (k = 0; k < BLOCK_SIZE; ++k) Csub += AS(threadIdx.y, k) * BS(k, threadIdx.x); //#pragma FCUDA COMPUTE cores=[1] end name=vec_blk array_split=[Csub_block|As] unroll=1 mpart=1 // Synchronize to make 
sure that the preceding // computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; //#pragma FCUDA TRANSFER cores=[1] type=burst dir=[1] pointer=[C] size=[16] begin name=write unroll=1 mpart=1 array_split=[Csub_block|As] C[c + wB * threadIdx.y + threadIdx.x] = Csub; //#pragma FCUDA TRANSFER cores=[1] type=burst dir=[1] pointer=[C] size=[16] end name=write unroll=1 mpart=1 array_split=[Csub_block|As] } #endif // #ifndef _MATRIXMUL_KERNEL_H_
b63adff8942bd8c578ad192ac0477e014a79d432.cu
/* Matrix multiplication: C = A * B. * Device code. */ #ifndef _MATRIXMUL_KERNEL_H_ #define _MATRIXMUL_KERNEL_H_ #include <fcuda.h> #include "../matrixMul.h" #include <string.h> #define CHECK_BANK_CONFLICTS 0 #if CHECK_BANK_CONFLICTS #define AS(i, j) cutilBankChecker(((DATATYPE*)&As[0][0]), (BLOCK_SIZE * i + j)) #define BS(i, j) cutilBankChecker(((DATATYPE*)&Bs[0][0]), (BLOCK_SIZE * i + j)) #else #define AS(i, j) As[i][j] #define BS(i, j) Bs[i][j] #endif //////////////////////////////////////////////////////////////////////////////// //! Matrix multiplication on the device: C = A * B //! wA is A's width and wB is B's width //////////////////////////////////////////////////////////////////////////////// #pragma FCUDA GRID x_dim=16 y_dim=16 #pragma FCUDA COREINFO num_cores=1 pipeline=no //#pragma FCUDA TBLK bdim_num=2 bdim_x=16 bdim_y=16 gdims=2 //#pragma FCUDA GRID x_dim=2 y_dim=2 concur=0 //#pragma FCUDA PORTMERGE remove_port_name=A port_id=0 //#pragma FCUDA PORTMERGE remove_port_name=B port_id=0 //#pragma FCUDA PORTMERGE remove_port_name=C port_id=0 __global__ void matrixMul( DATATYPE *C, DATATYPE *A, DATATYPE *B, int wA, int wB) { #pragma HLS INTERFACE ap_bus port=A depth=3840 #pragma HLS INTERFACE ap_bus port=B depth=6144 #pragma HLS INTERFACE ap_bus port=C depth=10240 // Block index int bx = blockIdx.x; int by = blockIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = BLOCK_SIZE * wB; //#pragma FCUDA COMPUTE cores=[1] begin name=vec_init unroll=1 mpart=1 array_split=[Csub_block|As] DATATYPE Csub = 0; //#pragma FCUDA COMPUTE cores=[1] end name=vec_init unroll=1 mpart=1 array_split=[Csub_block|As] int a = 0, b = 0, k = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ DATATYPE As[BLOCK_SIZE][BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ DATATYPE Bs[BLOCK_SIZE][BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix //#pragma FCUDA TRANSFER cores=[1] type=burst dir=[0|0] pointer=[A|B] size=[16|16] begin name=fetch unroll=1 mpart=1 array_split=[Csub_block|As] AS(threadIdx.y, threadIdx.x) = A[a + wA * threadIdx.y + threadIdx.x]; BS(threadIdx.y, threadIdx.x) = B[b + wB * threadIdx.y + threadIdx.x]; //#pragma FCUDA TRANSFER cores=[1] type=burst dir=[0|0] pointer=[A|B] size=[16|16] end name=fetch unroll=1 mpart=1 array_split=[Csub_block|As] // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix //#pragma FCUDA COMPUTE cores=[1] begin name=vec_blk array_split=[Csub_block|As] unroll=1 mpart=1 lp1: for (k = 0; k < BLOCK_SIZE; ++k) Csub += AS(threadIdx.y, k) * BS(k, threadIdx.x); //#pragma FCUDA COMPUTE cores=[1] end name=vec_blk array_split=[Csub_block|As] unroll=1 mpart=1 // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A 
and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx; //#pragma FCUDA TRANSFER cores=[1] type=burst dir=[1] pointer=[C] size=[16] begin name=write unroll=1 mpart=1 array_split=[Csub_block|As] C[c + wB * threadIdx.y + threadIdx.x] = Csub; //#pragma FCUDA TRANSFER cores=[1] type=burst dir=[1] pointer=[C] size=[16] end name=write unroll=1 mpart=1 array_split=[Csub_block|As] } #endif // #ifndef _MATRIXMUL_KERNEL_H_
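// A host-side launch sketch (not part of the original FCUDA source). matrixMul above uses
// one thread per element of C with BLOCK_SIZE x BLOCK_SIZE blocks; BLOCK_SIZE and DATATYPE
// come from ../matrixMul.h (not shown), with the pragmas suggesting BLOCK_SIZE == 16. The
// interface depths (3840 / 6144 / 10240) are consistent with an 80x48 A, 48x128 B and
// 80x128 C, which is the assumed shape below.
void example_matrixMul_launch(DATATYPE *d_C, DATATYPE *d_A, DATATYPE *d_B) {
  const int HA = 80, WA = 48, WB = 128;     // assumed from the HLS depth pragmas
  dim3 threads(BLOCK_SIZE, BLOCK_SIZE);     // 16x16 per the FCUDA GRID pragma
  dim3 grid(WB / BLOCK_SIZE, HA / BLOCK_SIZE);
  matrixMul<<<grid, threads>>>(d_C, d_A, d_B, WA, WB);
  cudaDeviceSynchronize();
}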
60f855db78726e29398026c088d9bb40b4ce3c16.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "initSquare.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *a = NULL; hipMalloc(&a, XSIZE*YSIZE); float *x = NULL; hipMalloc(&x, XSIZE*YSIZE); float totalX = 1; int n = XSIZE*YSIZE; int ghosts = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( initSquare), dim3(gridBlock),dim3(threadBlock), 0, 0, a,x,totalX,n,ghosts); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( initSquare), dim3(gridBlock),dim3(threadBlock), 0, 0, a,x,totalX,n,ghosts); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( initSquare), dim3(gridBlock),dim3(threadBlock), 0, 0, a,x,totalX,n,ghosts); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
60f855db78726e29398026c088d9bb40b4ce3c16.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "initSquare.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *a = NULL; cudaMalloc(&a, XSIZE*YSIZE); float *x = NULL; cudaMalloc(&x, XSIZE*YSIZE); float totalX = 1; int n = XSIZE*YSIZE; int ghosts = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); initSquare<<<gridBlock,threadBlock>>>(a,x,totalX,n,ghosts); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { initSquare<<<gridBlock,threadBlock>>>(a,x,totalX,n,ghosts); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { initSquare<<<gridBlock,threadBlock>>>(a,x,totalX,n,ghosts); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
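// An optional timing variant (not part of the original benchmark). Kernel launches are
// asynchronous, so the steady_clock interval above can close before the queued kernels
// have finished; CUDA events time on the device instead. The helper assumes it lives in
// the same translation unit as the benchmark (so initSquare from "initSquare.cu" is
// visible) and mirrors the benchmark's kernel arguments.
static float time_initSquare_with_events(dim3 gridBlock, dim3 threadBlock,
                                         float *a, float *x, float totalX, int n, int ghosts) {
  cudaEvent_t evStart, evStop;
  cudaEventCreate(&evStart);
  cudaEventCreate(&evStop);
  cudaEventRecord(evStart);
  for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
    initSquare<<<gridBlock,threadBlock>>>(a, x, totalX, n, ghosts);
  }
  cudaEventRecord(evStop);
  cudaEventSynchronize(evStop);                 // wait for all 1000 launches to complete
  float ms = 0.0f;
  cudaEventElapsedTime(&ms, evStart, evStop);   // elapsed device time in milliseconds
  cudaEventDestroy(evStart);
  cudaEventDestroy(evStop);
  return ms;
}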
f4c1f89dcfc576f249fd27e3f4f13d28508cd40f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include <iostream> #include <cmath> __global__ void vector_add_kernel(float * r, float * v1, float * v2, int size) { // // TODO // complete kernel // } void vector_add_cpu(float * r, float * v1, float * v2, int size) { for(std::size_t i=0; i < size; ++i) { r[i] = v1[i] + v2[i]; } } void fill_vector(std::vector<float> & v) { for(std::size_t i=0; i < v.size(); ++i) { v[i] = i; } } bool compare_vectors(std::vector<float> const& v1, std::vector<float> const& v2) { if(v1.size() != v2.size()) { std::cout << "ERROR: Vector sizes mismatch!" << std::endl; return false; } bool ok = true; for(std::size_t i=0; i < v1.size(); ++i) { if(std::abs(v1[i]-v2[i]) > 1e-5) { std::cout << "ERROR: element " << i << " mismatch: " << v1[i] << " != " << v2[i] << std::endl; ok = false; } } return ok; } int main() { int const N = 1000; std::vector<float> a(N); std::vector<float> b(N); std::vector<float> c(N); fill_vector(a); fill_vector(b); vector_add_cpu(&c[0], &a[0], &b[0], N); // Create copies on GPU device // Allocate memory float * d_a; float * d_b; float * d_c; hipMalloc(&d_a, N*sizeof(float)); hipMalloc(&d_b, N*sizeof(float)); hipMalloc(&d_c, N*sizeof(float)); // Copy hipMemcpy(d_a, &a[0], N*sizeof(float), hipMemcpyHostToDevice); hipMemcpy(d_b, &b[0], N*sizeof(float), hipMemcpyHostToDevice); // // TODO // Kernel call // // Get result std::vector<float> c_from_gpu(N); hipMemcpy(&c_from_gpu[0], d_c, N*sizeof(float), hipMemcpyDeviceToHost); bool ok = compare_vectors(c, c_from_gpu); // Free the device memory hipFree(d_c); hipFree(d_b); hipFree(d_a); if(ok) std::cout << "Results match... It works!" << std::endl; return ok ? 0 : 1; }
f4c1f89dcfc576f249fd27e3f4f13d28508cd40f.cu
#include <vector> #include <iostream> #include <cmath> __global__ void vector_add_kernel(float * r, float * v1, float * v2, int size) { // // TODO // complete kernel // } void vector_add_cpu(float * r, float * v1, float * v2, int size) { for(std::size_t i=0; i < size; ++i) { r[i] = v1[i] + v2[i]; } } void fill_vector(std::vector<float> & v) { for(std::size_t i=0; i < v.size(); ++i) { v[i] = i; } } bool compare_vectors(std::vector<float> const& v1, std::vector<float> const& v2) { if(v1.size() != v2.size()) { std::cout << "ERROR: Vector sizes mismatch!" << std::endl; return false; } bool ok = true; for(std::size_t i=0; i < v1.size(); ++i) { if(std::abs(v1[i]-v2[i]) > 1e-5) { std::cout << "ERROR: element " << i << " mismatch: " << v1[i] << " != " << v2[i] << std::endl; ok = false; } } return ok; } int main() { int const N = 1000; std::vector<float> a(N); std::vector<float> b(N); std::vector<float> c(N); fill_vector(a); fill_vector(b); vector_add_cpu(&c[0], &a[0], &b[0], N); // Create copies on GPU device // Allocate memory float * d_a; float * d_b; float * d_c; cudaMalloc(&d_a, N*sizeof(float)); cudaMalloc(&d_b, N*sizeof(float)); cudaMalloc(&d_c, N*sizeof(float)); // Copy cudaMemcpy(d_a, &a[0], N*sizeof(float), cudaMemcpyHostToDevice); cudaMemcpy(d_b, &b[0], N*sizeof(float), cudaMemcpyHostToDevice); // // TODO // Kernel call // // Get result std::vector<float> c_from_gpu(N); cudaMemcpy(&c_from_gpu[0], d_c, N*sizeof(float), cudaMemcpyDeviceToHost); bool ok = compare_vectors(c, c_from_gpu); // Free the device memory cudaFree(d_c); cudaFree(d_b); cudaFree(d_a); if(ok) std::cout << "Results match... It works!" << std::endl; return ok ? 0 : 1; }
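// One possible completion of the two TODOs above (a sketch, not the exercise's official
// solution). The kernel is renamed vector_add_kernel_completed so it does not clash with
// the stub; the block size of 256 is an arbitrary choice.
__global__ void vector_add_kernel_completed(float * r, float * v1, float * v2, int size)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // one element per thread
    if(i < size)
    {
        r[i] = v1[i] + v2[i];
    }
}

// Corresponding launch for the "Kernel call" TODO in main():
//   int const threads = 256;
//   int const blocks = (N + threads - 1) / threads;
//   vector_add_kernel_completed<<<blocks, threads>>>(d_c, d_a, d_b, N);
//   cudaDeviceSynchronize();   // the cudaMemcpy that follows would also synchronize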
97bafee9023970d35ed1043e4aff40b50115363d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> using namespace std; const int N = 16; const int CORES = 16; __global__ void hello(char* s){ if ((s[blockIdx.x] >= 'a')&&(s[blockIdx.x] <= 'z')) { s[blockIdx.x] -= 32; } } int main(int argc, char const *argv[]) { char cpu_string[N] = "hello world!"; char* gpu_string; hipMalloc((void**)&gpu_string, N * sizeof(char)); hipMemcpy(gpu_string, cpu_string, N * sizeof(char), hipMemcpyHostToDevice); hipLaunchKernelGGL(( hello), dim3(CORES),dim3(1), 0, 0, gpu_string); hipMemcpy(cpu_string, gpu_string, N * sizeof(char), hipMemcpyDeviceToHost); hipFree(gpu_string); cout << cpu_string << endl; return 0; }
97bafee9023970d35ed1043e4aff40b50115363d.cu
#include <iostream> using namespace std; const int N = 16; const int CORES = 16; __global__ void hello(char* s){ if ((s[blockIdx.x] >= 'a')&&(s[blockIdx.x] <= 'z')) { s[blockIdx.x] -= 32; } } int main(int argc, char const *argv[]) { char cpu_string[N] = "hello world!"; char* gpu_string; cudaMalloc((void**)&gpu_string, N * sizeof(char)); cudaMemcpy(gpu_string, cpu_string, N * sizeof(char), cudaMemcpyHostToDevice); hello<<<CORES,1>>>(gpu_string); cudaMemcpy(cpu_string, gpu_string, N * sizeof(char), cudaMemcpyDeviceToHost); cudaFree(gpu_string); cout << cpu_string << endl; return 0; }
3902f1b6db3739f7a233b4e30c6203fb48cc8faf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct hipComplex { float r; float i; __device__ hipComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ hipComplex operator*(const hipComplex& a) { return hipComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ hipComplex operator-(const hipComplex& a) { return hipComplex(r-a.r, i-a.i); } __device__ hipComplex operator+(const hipComplex& a) { return hipComplex(r+a.r, i+a.i); } __device__ hipComplex operator/(const hipComplex& a) { return hipComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ hipComplex conj(hipComplex m) { hipComplex out(m.r,-m.i); return out; } __device__ hipComplex nor(hipComplex m) { hipComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(hipComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ hipComplex qpoch(hipComplex a, hipComplex q) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex qp(hipComplex a, hipComplex q, int n) { hipComplex out(1.0,0.0); hipComplex unity(1.0,0.0); int i = 0; hipComplex Q = q; if(q.magnitude2()>1.0) { return hipComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ hipComplex ramphi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ hipComplex rampsi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ hipComplex ramchi(hipComplex q) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex mq = mone*q; return qpoch(mq,q*q); } __device__ hipComplex ramf(hipComplex a, hipComplex b) { hipComplex out(1.0,0.0); hipComplex mone(-1.0,0.0); hipComplex ma = mone*a; hipComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ hipComplex expc(hipComplex m) { hipComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ hipComplex powc(hipComplex ag, hipComplex bg) { hipComplex out(0.0,0.0); hipComplex mesp(0.0,0.0); hipComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ hipComplex cosc(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.5,0.0); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ hipComplex sins(hipComplex m) { hipComplex ai(0.0,1.0); hipComplex ot(0.0,0.5); hipComplex mone(-1.0,0.0); hipComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; 
} __device__ hipComplex tans(hipComplex m) { return sins(m)/cosc(m); } __device__ hipComplex moeb(hipComplex t, hipComplex a, hipComplex z) { hipComplex out(0.0,0.0); hipComplex ai(0.0,1.0); hipComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ hipComplex bnewt(hipComplex z) { hipComplex three(3.0,0.0); hipComplex unity(1.0,0.0); hipComplex out(0.0,0.0); hipComplex Z =z; hipComplex L(0.0,0.0); hipComplex R(0.62348980185873359,0.7818314824680298); hipComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ hipComplex they3(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex wahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ hipComplex dwahi(hipComplex z) { int u; hipComplex un(1.0,0.0); hipComplex ne(1.0,0.0); hipComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ hipComplex they3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ hipComplex h3ey3p(hipComplex z, hipComplex q) { int u; hipComplex out(0.0,0.0); hipComplex aut(0.0,0.0); hipComplex enn(-20.0,0.0); hipComplex onn(1.0,0.0); hipComplex dui(0.0,1.0); hipComplex vel(0.0,0.0); hipComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ hipComplex thess(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the1(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ hipComplex the2(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ hipComplex the3(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex the4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { 
qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ hipComplex qin(hipComplex a, hipComplex q) { hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ hipComplex geffa(hipComplex z, hipComplex q) { hipComplex out(0.0,0.0); hipComplex unity(1.0,0.0); hipComplex wu(0.0,0.0); hipComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ hipComplex thratd(hipComplex z, hipComplex q) { int n; hipComplex fau(4.0,0.0); hipComplex too(2.0,0.0); hipComplex unity(1.0,0.0); hipComplex ennn(1.0,0.0); hipComplex ni(-1.0,0.0); hipComplex noo(-1.0,0.0); hipComplex out(0.0,0.0); hipComplex loo = q; hipComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ hipComplex thess4(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex thesk(hipComplex z, hipComplex q, hipComplex r) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); hipComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ hipComplex thass(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex rogers( hipComplex q) { hipComplex onf(0.2,0.0); hipComplex Q5 = q*q*q*q*q; hipComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ hipComplex flat(hipComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); hipComplex out(m.r/ua,m.i/ua); return out; } __device__ hipComplex eff(hipComplex z, hipComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ hipComplex thete(float R, hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); hipComplex ann(1.0,0.0); hipComplex bnn(1.0,0.0); hipComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * 
cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ hipComplex thetta(hipComplex tau, hipComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ hipComplex A(0.0,0.0); /* miscellaneous setup */ hipComplex pai(3.14159265353898,0.0); hipComplex ai(0.0,1.0); hipComplex oo(1.0,0.0); hipComplex oot(2.0,0.0); hipComplex nini(9.0,0.0); hipComplex eigh(-18.0,0.0); /* hipComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ hipComplex frann(1.0,0.0); frann = pai * ai * tau ; hipComplex shenn(1.0,0.0); shenn = oot * ai * z; hipComplex plenn(1.0,0.0); hipComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the hipComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ hipComplex mitlef(hipComplex z,hipComplex c) { hipComplex out(0.0,0.0); hipComplex Z(1.0,0.0); hipComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ hipComplex helva(hipComplex z) { hipComplex out(j0f(z.r),j1f(z.i)); return out; } __device__ hipComplex hilva(hipComplex z) { hipComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ hipComplex hinva(hipComplex z) { hipComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ hipComplex henga(hipComplex z) { hipComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ hipComplex holva(hipComplex z) { hipComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ hipComplex arago(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex irigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ hipComplex urigo(hipComplex z, hipComplex q) { int v; hipComplex unity(1.0,0.0); hipComplex out(1.0,0.0); hipComplex tw(2.0,0.0); hipComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * (hilva(z)*helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 
0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; hipComplex ip(pi,0.0); const float scale = 10.0; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); hipComplex effx(fx,0.0); hipComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); hipComplex mouse(LA,LB); hipComplex moux(LA,0.0); hipComplex mouy(0.0,LB); hipComplex q(fx,fy); /* hipComplex tik(sin(ticks/40.0f),0.0);*/ /* hipComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); hipComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); hipComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ hipComplex fixon(.029348,.828934); hipComplex faxon(.029348,-.828934); hipComplex unity(1.0,0.0); hipComplex ai(0.0,1.0); hipComplex aon = expc(ai*moux); hipComplex uon= expc(mouy); hipComplex flurn(0.0,0.0); hipComplex accume(1.0,0.0); hipComplex eccume(0.0,0.0); hipComplex rhun(1.02871376821872462237195122725097462534904479,0.0); hipComplex cue = q; hipComplex lam(0.73736887807831963, -0.67549029426152396); hipComplex due(3.0,0.0); hipComplex tir(2.0,0.0); hipComplex selga(3.5,0.0); hipComplex vro(-1.0,0.0); hipComplex tle(1.0,0.0); hipComplex sle(4.0,0.0); hipComplex cherra(0.62348980185873359, 0.7818314824680298); hipComplex lerra = cherra*cherra; hipComplex ferra = lerra * cherra; hipComplex terra = ferra * cherra; hipComplex zerra = terra * cherra; hipComplex nerra = zerra * cherra; hipComplex vlarv(1/3.0,0.0); hipComplex sugna(0.70710678118654757, 0.70710678118654746); hipComplex regna(0.99966573338968745, 0.025853848581176047); hipComplex spa(sqrtf(2.0),0.0); hipComplex spb(sqrtf(3.0),0.0); hipComplex spc(sqrtf(4.0),0.0); hipComplex spd(sqrtf(5.0),0.0); hipComplex mrun(1/2.0,0.0); hipComplex gloon (4.0,0.0); hipComplex plenod(-.01,0.0); hipComplex nue = cue; hipComplex bor(-10.0,0.0); hipComplex nat(0.0,-10.0); hipComplex rhus(1.0,0.0); hipComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ for(v=0;v<10;v++) { cue = cue - urigo(cue,aon*fixon)*urigo(cue,uon*faxon); accume = accume * uon*urigo(cue,aon*faxon); } cue = accume; double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); hipLaunchKernelGGL(( distanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux 
)*(expc(unity-cue/moux))-expc(cue-unity/mouy));
}
cue = accume;*/

/*cue = ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/

/*for(v=0;v<60;v++){
    cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
    accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/

/*
One for (x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0

The other for cos(x)-x
Eripgrunna
*/
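For reference, the pixel write-out at the end of distanceKernel above is a domain coloring of the accumulated complex value cue: only the argument of cue reaches the framebuffer, and the three color channels are phase-shifted squared sines of that argument. Restated from the kernel (nothing new is introduced here), with w standing for cue:

\[
\vartheta = \frac{\operatorname{atan2}(\operatorname{Im} w,\ \operatorname{Re} w) - \pi}{2\pi},
\qquad
(R,G,B) = 255\,\bigl(\sin^2(\pi\vartheta),\ \sin^2(\pi\vartheta + \tfrac{\pi}{3}),\ \sin^2(\pi\vartheta + \tfrac{2\pi}{3})\bigr),
\qquad A = 255 .
\]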
3902f1b6db3739f7a233b4e30c6203fb48cc8faf.cu
#include "kernel.h" #define TX 32 #define TY 32 #define DIM 2100 struct cuComplex { float r; float i; __device__ cuComplex( float a, float b ) : r(a), i(b) {} __device__ float magnitude2( void ) { return r * r + i * i; } __device__ cuComplex operator*(const cuComplex& a) { return cuComplex(r*a.r - i*a.i, i*a.r + r*a.i); } __device__ cuComplex operator-(const cuComplex& a) { return cuComplex(r-a.r, i-a.i); } __device__ cuComplex operator+(const cuComplex& a) { return cuComplex(r+a.r, i+a.i); } __device__ cuComplex operator/(const cuComplex& a) { return cuComplex((r*a.r + i*a.i)/(a.r*a.r + a.i*a.i), (i*a.r - r*a.i)/(a.r*a.r + a.i*a.i)); } }; __device__ cuComplex conj(cuComplex m) { cuComplex out(m.r,-m.i); return out; } __device__ cuComplex nor(cuComplex m) { cuComplex out(m.r*m.r+m.i*m.i,0.0); return out; } __device__ float norg(cuComplex m) { return sqrtf(m.r*m.r+m.i*m.i); } __device__ cuComplex qpoch(cuComplex a, cuComplex q) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<80;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex qp(cuComplex a, cuComplex q, int n) { cuComplex out(1.0,0.0); cuComplex unity(1.0,0.0); int i = 0; cuComplex Q = q; if(q.magnitude2()>1.0) { return cuComplex(0.0,0.0); } // We want to formally match the definition of a q-pochhammer symbol. for(i=1;i<n;i++) { out = out * (unity - a*Q); Q = q * Q; } return out; } __device__ cuComplex ramphi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,mq)/qpoch(q,mq); } __device__ cuComplex rampsi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q)*qpoch(q*q,q*q); } __device__ cuComplex ramchi(cuComplex q) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex mq = mone*q; return qpoch(mq,q*q); } __device__ cuComplex ramf(cuComplex a, cuComplex b) { cuComplex out(1.0,0.0); cuComplex mone(-1.0,0.0); cuComplex ma = mone*a; cuComplex mb = mone*b; return qpoch(ma,a*b)*qpoch(mb,a*b)*qpoch(a*b,a*b); } // complex exponential __device__ cuComplex expc(cuComplex m) { cuComplex out(expf(m.r) * cosf(m.i),expf(m.r) * sinf(m.i)); return out; } __device__ cuComplex powc(cuComplex ag, cuComplex bg) { cuComplex out(0.0,0.0); cuComplex mesp(0.0,0.0); cuComplex frim(0.0,0.0); double radiu, thet; /* get the proper polar form of the complex number */ radiu = sqrtf(ag.r*ag.r + ag.i*ag.i); thet = atan2f(ag.i,ag.r); /* mesp gives R^(c+di) */ mesp.r = powf(radiu,bg.r)*cosf(bg.i*logf(radiu)); mesp.i = powf(radiu,bg.r)*sinf(bg.i*logf(radiu)); /* frim gives e^(i theta (c+di)) */ /* now since we already have the machinery for performing complex exponentiation (just exp), we can just call that here */ frim.r = -1.0 * bg.i * thet; frim.i = bg.r * thet; frim = expc(frim); out = mesp*frim; return out; } // cosine (nothing algorithmically clean) __device__ cuComplex cosc(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.5,0.0); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) + expc(mone*m*ai)); return out; } __device__ cuComplex sins(cuComplex m) { cuComplex ai(0.0,1.0); cuComplex ot(0.0,0.5); cuComplex mone(-1.0,0.0); cuComplex out = ot*(expc(m*ai) - expc(mone*m*ai)); return out; } __device__ cuComplex tans(cuComplex m) { return sins(m)/cosc(m); } __device__ cuComplex moeb(cuComplex t, cuComplex a, cuComplex z) { cuComplex out(0.0,0.0); 
cuComplex ai(0.0,1.0); cuComplex unity(1.0,0.0); out = expc(ai*t) * (z-a)/(unity-conj(a)*z); return out; } __device__ cuComplex bnewt(cuComplex z) { cuComplex three(3.0,0.0); cuComplex unity(1.0,0.0); cuComplex out(0.0,0.0); cuComplex Z =z; cuComplex L(0.0,0.0); cuComplex R(0.62348980185873359,0.7818314824680298); cuComplex v(0.62348980185873359,0.7818314824680298); int i; for(i=0;i<100;i++) { L = sins(expc(Z)-cosc(Z))-Z; out = out + v*L; v = R * v; Z = Z - L/((expc(Z)+sins(Z))*cosc(expc(Z)-cosc(Z))-unity); } return out; } __device__ cuComplex they3(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + powc(q,enn*enn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex wahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne); ne = ne + un; } out = out + un; return out; } __device__ cuComplex dwahi(cuComplex z) { int u; cuComplex un(1.0,0.0); cuComplex ne(1.0,0.0); cuComplex out(0.0,0.0); for(u=1;u<40;u++) { out = out + powc(z/ne,ne-un); ne = ne + un; } return out; } __device__ cuComplex they3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); for(u=-20;u<20;u++) { out = out + (enn*enn)*powc(q,enn*enn-onn)*expc(dui*enn*z); enn = enn + onn; } return out; } __device__ cuComplex h3ey3p(cuComplex z, cuComplex q) { int u; cuComplex out(0.0,0.0); cuComplex aut(0.0,0.0); cuComplex enn(-20.0,0.0); cuComplex onn(1.0,0.0); cuComplex dui(0.0,1.0); cuComplex vel(0.0,0.0); cuComplex rav(0.0,0.0); for(u=-40;u<40;u++) { vel = expc(dui*enn*z); rav = powc(q,enn*enn); aut = aut + (enn*enn)*rav/q*vel; out = out + rav*vel; enn = enn + onn; } return out/aut; } __device__ cuComplex thess(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the1(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*sins(z); } __device__ cuComplex the2(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex rt(0.25,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return tw*out*powc(q,rt)*cosc(z); } __device__ cuComplex the3(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex the4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } /* routine to generate q-integers */ __device__ cuComplex qin(cuComplex a, cuComplex q) { cuComplex unity(1.0,0.0); cuComplex 
out(1.0,0.0); out = (unity - powc(q, a))/(unity-q); return out; } /* generating function for n^2 */ __device__ cuComplex geffa(cuComplex z, cuComplex q) { cuComplex out(0.0,0.0); cuComplex unity(1.0,0.0); cuComplex wu(0.0,0.0); cuComplex Z=unity; int v; for(v=0;v<20;v++) { out = out + qin(wu*wu,q)* Z; wu = wu + unity; Z = z * Z; } return out; } __device__ cuComplex thratd(cuComplex z, cuComplex q) { int n; cuComplex fau(4.0,0.0); cuComplex too(2.0,0.0); cuComplex unity(1.0,0.0); cuComplex ennn(1.0,0.0); cuComplex ni(-1.0,0.0); cuComplex noo(-1.0,0.0); cuComplex out(0.0,0.0); cuComplex loo = q; cuComplex qoo =q*q; for(n=0;n<80;n++) { out = out + noo*(loo/(unity-qoo))*sins(too*ennn*z); qoo = qoo * q*q; loo = loo * q; ennn = ennn +unity; noo = ni * noo; } return out*fau; } __device__ cuComplex thess4(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity - tw * qoo/q * cosc(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex thesk(cuComplex z, cuComplex q, cuComplex r) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); cuComplex roo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; roo = roo * r * r ; out = out * (unity - qoo) * (unity + tw * qoo/q * cosc(tw*z) + roo*roo/(r*r)); } return out; } __device__ cuComplex thass(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<20;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * sins(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex rogers( cuComplex q) { cuComplex onf(0.2,0.0); cuComplex Q5 = q*q*q*q*q; cuComplex out = powc(q,onf)* qpoch(q,Q5) * qpoch(q*q*q*q,Q5)/ (qpoch(q*q,Q5)*qpoch(q*q*q,Q5)); return out; } __device__ cuComplex flat(cuComplex m) { float ua = sqrtf(m.r*m.r + m.i*m.i); cuComplex out(m.r/ua,m.i/ua); return out; } __device__ cuComplex eff(cuComplex z, cuComplex lambda) { return z*z*z*z+ lambda/(z*z*z*z); } __device__ cuComplex thete(float R, cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); cuComplex ann(1.0,0.0); cuComplex bnn(1.0,0.0); cuComplex scrunn(1.0,0.0); float ca, cb,cc; int a, b; for(a=-10;a<10;a++) { ann.r = a; for(b=-10;b<10;b++) { bnn.r = b; if(((a+b)%2)==0) { scrunn.r = a*a + b*b; A = A + expc(frann* scrunn) * expc(shenn* (ann+bnn)); } else { ca = 5.0 + a*a + b*b; cb = 2*(a * cos(R)- b * sin(R)); cc = 4*(b * cos(R)+a*sin(R)); scrunn.r = ca + cb + cc; A = A + expc(frann*scrunn)*expc(shenn*(ann+bnn)); } } } return A; } __device__ cuComplex thetta(cuComplex tau, cuComplex z) { /* note that as I'm not immediately doing this on the unit circle, as the real action is considered to happen on the z-plane, we don't yet need to fret about 
whether I'm looking at things in terms of tau or in terms of q, next revision */ /* set accumulant to zero */ cuComplex A(0.0,0.0); /* miscellaneous setup */ cuComplex pai(3.14159265353898,0.0); cuComplex ai(0.0,1.0); cuComplex oo(1.0,0.0); cuComplex oot(2.0,0.0); cuComplex nini(9.0,0.0); cuComplex eigh(-18.0,0.0); /* cuComplex arr(cos(2*3.1415926535897f*R/2048.0),0.0) */ cuComplex frann(1.0,0.0); frann = pai * ai * tau ; cuComplex shenn(1.0,0.0); shenn = oot * ai * z; cuComplex plenn(1.0,0.0); cuComplex enn(1.0,0.0); int n; for(n=-10;n<10;n++) { enn.r = n; plenn = enn * enn; /* this get the cuComplex out of the event loop */ A = A + expc(frann* plenn) * expc(shenn* enn); } return A; } __device__ cuComplex mitlef(cuComplex z,cuComplex c) { cuComplex out(0.0,0.0); cuComplex Z(1.0,0.0); cuComplex frove(0.0,0.0); int v; for(v=0;v<20;v++) { frove.r = tgammaf(c.r*v+c.i); out = out + Z/frove; Z = Z * z; } return out; } __device__ cuComplex helva(cuComplex z) { cuComplex out(j0f(z.r),j1f(z.i)); return out; } __device__ cuComplex hilva(cuComplex z) { cuComplex out(j1f(z.r),j0f(z.i)); return out; } __device__ cuComplex hinva(cuComplex z) { cuComplex out(j1f(z.r),j1f(z.i)); return out; } __device__ cuComplex henga(cuComplex z) { cuComplex out(acoshf(z.r),asinhf(z.i)); return out; } __device__ cuComplex holva(cuComplex z) { cuComplex out(y0f(z.r),y1f(z.i)); return out; } __device__ cuComplex arago(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * hinva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex irigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * holva(tw*z) + qoo*qoo/(q*q)); } return out; } __device__ cuComplex urigo(cuComplex z, cuComplex q) { int v; cuComplex unity(1.0,0.0); cuComplex out(1.0,0.0); cuComplex tw(2.0,0.0); cuComplex qoo(1.0,0.0); for(v=0;v<10;v++) { qoo = qoo * q * q; out = out * (unity - qoo) * (unity + tw * qoo/q * (hilva(z)*helva(q*z)) + qoo*qoo/(q*q)); } return out; } __device__ unsigned char clip(int n) { return n > 255 ? 255 : (n < 0 ? 
0 : n); } __global__ void distanceKernel(uchar4 *d_out, int w, int h, int2 pos) { const int c = blockIdx.x*blockDim.x + threadIdx.x; const int r= blockIdx.y*blockDim.y + threadIdx.y; const int i = c + r*w; // 1D indexing float pi = 3.1415926535898; cuComplex ip(pi,0.0); const float scale = 10.0; float fx = -scale * (float)(DIM/2 - c)/(DIM/2); float fy = scale * (float)(DIM/2 - r)/(DIM/2); cuComplex effx(fx,0.0); cuComplex effy(fy,0.0); float LA = -scale * (float)(DIM/2 - pos.x)/(DIM/2); float LB = scale * (float)(DIM/2 - pos.y)/(DIM/2); cuComplex mouse(LA,LB); cuComplex moux(LA,0.0); cuComplex mouy(0.0,LB); cuComplex q(fx,fy); /* cuComplex tik(sin(ticks/40.0f),0.0);*/ /* cuComplex uon(cosf(-2*pi*ticks/16384.0),sinf(-2*pi*ticks/16384.0)); cuComplex aon(cosf(2.6457513110645912*2*pi*ticks/1024),sinf(2.645751311064591*2*pi*ticks/1024)); cuComplex eon(cosf(-2.6457513110645912*2*pi*ticks/1024.0),sinf(2.645751311064591*2*pi*ticks/1024.0));*/ cuComplex fixon(.029348,.828934); cuComplex faxon(.029348,-.828934); cuComplex unity(1.0,0.0); cuComplex ai(0.0,1.0); cuComplex aon = expc(ai*moux); cuComplex uon= expc(mouy); cuComplex flurn(0.0,0.0); cuComplex accume(1.0,0.0); cuComplex eccume(0.0,0.0); cuComplex rhun(1.02871376821872462237195122725097462534904479,0.0); cuComplex cue = q; cuComplex lam(0.73736887807831963, -0.67549029426152396); cuComplex due(3.0,0.0); cuComplex tir(2.0,0.0); cuComplex selga(3.5,0.0); cuComplex vro(-1.0,0.0); cuComplex tle(1.0,0.0); cuComplex sle(4.0,0.0); cuComplex cherra(0.62348980185873359, 0.7818314824680298); cuComplex lerra = cherra*cherra; cuComplex ferra = lerra * cherra; cuComplex terra = ferra * cherra; cuComplex zerra = terra * cherra; cuComplex nerra = zerra * cherra; cuComplex vlarv(1/3.0,0.0); cuComplex sugna(0.70710678118654757, 0.70710678118654746); cuComplex regna(0.99966573338968745, 0.025853848581176047); cuComplex spa(sqrtf(2.0),0.0); cuComplex spb(sqrtf(3.0),0.0); cuComplex spc(sqrtf(4.0),0.0); cuComplex spd(sqrtf(5.0),0.0); cuComplex mrun(1/2.0,0.0); cuComplex gloon (4.0,0.0); cuComplex plenod(-.01,0.0); cuComplex nue = cue; cuComplex bor(-10.0,0.0); cuComplex nat(0.0,-10.0); cuComplex rhus(1.0,0.0); cuComplex D(0.739085133215160641655312087674,0.0); /* if ((c >= w) || (r >= h)) return; // Check if within image bounds const int i = c + r*w; // 1D indexing const int dist = sqrtf((c - pos.x)*(c - pos.x) + (r - pos.y)*(r - pos.y)); const unsigned char intensity = clip(255 - dist);*/ // theta function varying on constant // cue =thess(cue,fixon*mouse); int v=1; int axa=-10; /*while((v<100)&&norg(cue)<2.0) { cue = cue*(cue-mouy)*(cue-moux) -cue * q; v++; }*/ for(v=0;v<10;v++) { cue = cue - urigo(cue,aon*fixon)*urigo(cue,uon*faxon); accume = accume * uon*urigo(cue,aon*faxon); } cue = accume; double tha; tha = ((atan2(cue.i,cue.r) - pi)/(2.0*pi)); d_out[i].x = (unsigned char) (255.0*pow(sin(pi*tha),2)); d_out[i].y = (unsigned char) (255.0*pow(sin(pi*tha+pi/3),2)); d_out[i].z = (unsigned char) (255.0*pow(sin(pi*tha+2*pi/3),2)); d_out[i].w = 255; } void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos) { const dim3 blockSize(TX, TY); const dim3 gridSize = dim3((w + TX - 1)/TX, (h + TY - 1)/TY); distanceKernel<<<gridSize, blockSize>>>(d_out, w, h, pos); } /*for(v=1;v<5;v++) { cue = cue - cue * (expc(unity-cue/moux)+expc(cue-unity/mouy))/((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); accume = accume + ((vlarv-unity/moux )*(expc(unity-cue/moux))-expc(cue-unity/mouy)); } cue = accume;*/ /*cue = 
ramchi(moeb(unity,uon*fixon,q))*rampsi(moeb(unity,uon*fixon,q));
rhus = ramchi(uon/moeb(unity,uon*faxon,unity/q))*ramphi(uon/moeb(unity,uon*faxon,unity/q));
cue = rhus+cue;
cue = cosc(unity/(unity-uon*cue))*rampsi(moeb(unity,uon*fixon,q));*/

/*for(v=0;v<60;v++){
    cue = moeb(aon,fixon,cue) - aon/((expc(uon*cue-sins(cue))-cue)/((aon+cosc(cue)) * expc(uon*cue-sins(cue))-aon));
    accume = accume *(unity - (expc(aon*moeb(uon,faxon,cue))-sins(moeb(aon,fixon,cue))-cue));
}
cue = accume;*/

/*
One for (x+d)/cos(d) -cos(x)/d
Tungilipa
D = cos(D)
cos(sqrt(x*D))/D -1 = 0.0

The other for cos(x)-x
Eripgrunna
*/
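The record above defines only the device code and its launcher; the host side (kernel.h and the window/interop loop that supplies the mouse position) is not part of this pair. Below is a minimal sketch of how kernelLauncher could be driven stand-alone, assuming nothing beyond the signature and the hard-coded DIM = 2100 visible above; main, wPad/hPad, and the printed pixel are illustrative additions, not a reconstruction of the original host code.

// Hypothetical driver for the kernel above; a sketch, not the original host code.
#include <cuda_runtime.h>
#include <cstdio>

void kernelLauncher(uchar4 *d_out, int w, int h, int2 pos);  // defined in the file above

int main() {
    const int w = 2100, h = 2100;            // should match the DIM used inside the kernel
    // The in-kernel bounds check is commented out, so every launched thread writes;
    // pad the allocation to whole 32x32 blocks so those writes stay inside the buffer.
    const int wPad = ((w + 31) / 32) * 32;
    const int hPad = ((h + 31) / 32) * 32;
    uchar4 *d_out = nullptr;
    cudaMalloc(&d_out, (size_t)wPad * hPad * sizeof(uchar4));

    int2 pos = {w / 2, h / 2};               // stands in for the interactive mouse position
    kernelLauncher(d_out, w, h, pos);
    cudaDeviceSynchronize();

    uchar4 first;                            // round-trip one pixel; a real harness would dump a PPM
    cudaMemcpy(&first, d_out, sizeof(uchar4), cudaMemcpyDeviceToHost);
    printf("pixel(0,0) = %d %d %d %d\n", first.x, first.y, first.z, first.w);

    cudaFree(d_out);
    return 0;
}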
ab9f6dc7d839962dccf2e19af6f507662339726f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifdef LINUX // Only supported by gcc on Linux (defined in Makefile) #define JITIFY_ENABLE_EMBEDDED_FILES 1 #endif #define JITIFY_PRINT_INSTANTIATION 1 #define JITIFY_PRINT_SOURCE 1 #define JITIFY_PRINT_LOG 1 #define JITIFY_PRINT_PTX 1 #define JITIFY_PRINT_LINKER_LOG 1 #define JITIFY_PRINT_LAUNCH 1 #define JITIFY_PRINT_HEADER_PATHS 1 #include "jitify.hpp" #include "example_headers/class_arg_kernel.cuh" #include "example_headers/my_header1.cuh.jit" #ifdef LINUX // Only supported by gcc on Linux (defined in Makefile) JITIFY_INCLUDE_EMBEDDED_FILE(example_headers_my_header2_cuh); #endif #include "gtest/gtest.h" #include <cstdio> #include <fstream> #include <iostream> #include <memory> #define CHECK_CUDA(call) \ do { \ hipError_t status = call; \ if (status != hipSuccess) { \ const char* str; \ hipGetErrorName(status, &str); \ std::cout << "(CUDA) returned " << str; \ std::cout << " (" << __FILE__ << ":" << __LINE__ << ":" << __func__ \ << "())" << std::endl; \ ASSERT_EQ(status, hipSuccess); \ } \ } while (0) #define CHECK_CUDART(call) \ do { \ hipError_t status = call; \ if (status != hipSuccess) { \ std::cout << "(CUDART) returned " << hipGetErrorString(status); \ std::cout << " (" << __FILE__ << ":" << __LINE__ << ":" << __func__ \ << "())" << std::endl; \ ASSERT_EQ(status, hipSuccess); \ } \ } while (0) std::istream* file_callback(std::string filename, std::iostream& tmp_stream) { // User returns NULL or pointer to stream containing file source // Note: tmp_stream is provided for convenience if (filename == "example_headers/my_header4.cuh") { tmp_stream << "#pragma once\n" "template<typename T>\n" "T pointless_func(T x) {\n" " return x;\n" "}\n"; return &tmp_stream; } else { // Find this file through other mechanisms return 0; } } static const char* const simple_program_source = "my_program\n" "template<int N, typename T>\n" "__global__\n" "void my_kernel(T* data) {\n" " if (blockIdx.x != 0 || threadIdx.x != 
0) return;\n" " T data0 = data[0];\n" " for( int i=0; i<N-1; ++i ) {\n" " data[0] *= data0;\n" " }\n" "}\n"; TEST(JitifyTest, Simple) { static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program(simple_program_source); typedef float T; T* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(T))); dim3 grid(1); dim3 block(1); using jitify::reflection::type_of; auto kernel_inst = program.kernel("my_kernel").instantiate(3, type_of(*d_data)); T h_data = 5; CHECK_CUDART(hipMemcpy(d_data, &h_data, sizeof(T), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(grid, block).launch(d_data)); CHECK_CUDART(hipMemcpy(&h_data, d_data, sizeof(T), hipMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); h_data = 5; CHECK_CUDART(hipMemcpy(d_data, &h_data, sizeof(T), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure_1d_max_occupancy().launch(d_data)); CHECK_CUDART(hipMemcpy(&h_data, d_data, sizeof(T), hipMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); CHECK_CUDART(hipFree(d_data)); } TEST(JitifyTest, Simple_experimental) { std::vector<std::string> opts; jitify::experimental::Program program_orig(simple_program_source, {}, opts); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); typedef float T; T* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(T))); dim3 grid(1); dim3 block(1); using jitify::reflection::type_of; auto kernel_inst_orig = program.kernel("my_kernel").instantiate(3, type_of(*d_data)); auto kernel_inst = jitify::experimental::KernelInstantiation::deserialize( kernel_inst_orig.serialize()); T h_data = 5; CHECK_CUDART(hipMemcpy(d_data, &h_data, sizeof(T), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(grid, block).launch(d_data)); CHECK_CUDART(hipMemcpy(&h_data, d_data, sizeof(T), hipMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); h_data = 5; CHECK_CUDART(hipMemcpy(d_data, &h_data, sizeof(T), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure_1d_max_occupancy().launch(d_data)); CHECK_CUDART(hipMemcpy(&h_data, d_data, sizeof(T), hipMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); CHECK_CUDART(hipFree(d_data)); } static const char* const multiple_kernels_program_source = "my_program1\n" "#include \"example_headers/my_header1.cuh\"\n" "#include \"example_headers/my_header2.cuh\"\n" "#include \"example_headers/my_header3.cuh\"\n" "#include \"example_headers/my_header4.cuh\"\n" "\n" "__global__\n" "void my_kernel1(float const* indata, float* outdata) {\n" " outdata[0] = indata[0] + 1;\n" " outdata[0] -= 1;\n" "}\n" "\n" "template<int C, typename T>\n" "__global__\n" "void my_kernel2(float const* indata, float* outdata) {\n" " for( int i=0; i<C; ++i ) {\n" " outdata[0] = " "pointless_func(identity(sqrt(square(negate(indata[0])))));\n" " }\n" "}\n"; TEST(JitifyTest, MultipleKernels) { using jitify::reflection::instance_of; using jitify::reflection::NonType; using jitify::reflection::reflect; using jitify::reflection::Type; using jitify::reflection::type_of; thread_local static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program( multiple_kernels_program_source, // Code string specified above {example_headers_my_header1_cuh}, // Code string generated by stringify {"--use_fast_math", "-I" CUDA_INC_DIR}, file_callback); typedef float T; T* indata; T* outdata; CHECK_CUDART(hipMalloc((void**)&indata, sizeof(T))); CHECK_CUDART(hipMalloc((void**)&outdata, sizeof(T))); T inval = 3.14159f; CHECK_CUDART(hipMemcpy(indata, &inval, sizeof(T), hipMemcpyHostToDevice)); dim3 
grid(1); dim3 block(1); CHECK_CUDA(program.kernel("my_kernel1") .instantiate() .configure(grid, block) .launch(indata, outdata)); enum { C = 123 }; // These invocations are all equivalent and will come from cache after the 1st CHECK_CUDA((program.kernel("my_kernel2") .instantiate<NonType<int, C>, T>() .configure(grid, block) .launch(indata, outdata))); CHECK_CUDA(program.kernel("my_kernel2") .instantiate({reflect((int)C), reflect<T>()}) .configure(grid, block) .launch(indata, outdata)); // Recommended versions CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, Type<T>()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, type_of(*indata)) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, instance_of(*indata)) .configure(grid, block) .launch(indata, outdata)); T outval = 0; CHECK_CUDART(hipMemcpy(&outval, outdata, sizeof(T), hipMemcpyDeviceToHost)); CHECK_CUDART(hipFree(outdata)); CHECK_CUDART(hipFree(indata)); EXPECT_FLOAT_EQ(inval, outval); } TEST(JitifyTest, MultipleKernels_experimental) { using jitify::reflection::instance_of; using jitify::reflection::NonType; using jitify::reflection::reflect; using jitify::reflection::Type; using jitify::reflection::type_of; jitify::experimental::Program program_orig( multiple_kernels_program_source, // Code string specified above {example_headers_my_header1_cuh}, // Code string generated by stringify {"--use_fast_math", "-I" CUDA_INC_DIR}, file_callback); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); typedef float T; T* indata; T* outdata; CHECK_CUDART(hipMalloc((void**)&indata, sizeof(T))); CHECK_CUDART(hipMalloc((void**)&outdata, sizeof(T))); T inval = 3.14159f; CHECK_CUDART(hipMemcpy(indata, &inval, sizeof(T), hipMemcpyHostToDevice)); dim3 grid(1); dim3 block(1); CHECK_CUDA(program.kernel("my_kernel1") .instantiate() .configure(grid, block) .launch(indata, outdata)); enum { C = 123 }; // These invocations are all equivalent. 
CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate<NonType<int, C>, T>() .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate({reflect((int)C), reflect<T>()}) .serialize()) .configure(grid, block) .launch(indata, outdata)); // Recommended versions CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, Type<T>()) .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, type_of(*indata)) .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, instance_of(*indata)) .serialize()) .configure(grid, block) .launch(indata, outdata)); T outval = 0; CHECK_CUDART(hipMemcpy(&outval, outdata, sizeof(T), hipMemcpyDeviceToHost)); CHECK_CUDART(hipFree(outdata)); CHECK_CUDART(hipFree(indata)); EXPECT_FLOAT_EQ(inval, outval); } static const char* const constmem_program_source = "constmem_program\n" "#pragma once\n" "\n" "__constant__ int a;\n" "__device__ int d;\n" "namespace b { __constant__ int a; __device__ int d; }\n" "namespace c { namespace b { __constant__ int a; __device__ int d; } }\n" "namespace x { __constant__ int a = 3; __device__ int d = 7; }\n" "namespace y { __constant__ int a[] = {4, 5}; __device__ int d[] = {8, 9}; " "}\n" "\n" "__global__ void constant_test(int *x) {\n" " x[0] = a;\n" " x[1] = b::a;\n" " x[2] = c::b::a;\n" " x[3] = d;\n" " x[4] = b::d;\n" " x[5] = c::b::d;\n" " x[6] = x::a;\n" " x[7] = x::d;\n" " x[8] = y::a[0];\n" " x[9] = y::a[1];\n" " x[10] = y::d[0];\n" " x[11] = y::d[1];\n" "}\n"; TEST(JitifyTest, ConstantMemory) { using jitify::reflection::Type; thread_local static jitify::JitCache kernel_cache; constexpr int n_const = 12; int* outdata; CHECK_CUDART(hipMalloc((void**)&outdata, n_const * sizeof(int))); dim3 grid(1); dim3 block(1); { // test __constant__ look up in kernel string using diffrent namespaces jitify::Program program = kernel_cache.program( constmem_program_source, 0, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto instance = program.kernel("constant_test").instantiate(); int inval[] = {2, 4, 8, 12, 14, 18, 22, 26, 30, 34, 38, 42}; int dval; CHECK_CUDA(instance.get_global_value("x::a", &dval)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(dval, 3); CHECK_CUDA(instance.get_global_value("x::d", &dval)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(dval, 7); int darr[2]; CHECK_CUDA(instance.get_global_array("y::a", &darr[0], 2)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(darr[0], 4); EXPECT_EQ(darr[1], 5); CHECK_CUDA(instance.get_global_value("y::d", &darr)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(darr[0], 8); EXPECT_EQ(darr[1], 9); CHECK_CUDA(instance.set_global_value("a", inval[0])); CHECK_CUDA(instance.set_global_value("b::a", inval[1])); CHECK_CUDA(instance.set_global_value("c::b::a", inval[2])); CHECK_CUDA(instance.set_global_value("d", inval[3])); CHECK_CUDA(instance.set_global_value("b::d", inval[4])); CHECK_CUDA(instance.set_global_value("c::b::d", inval[5])); CHECK_CUDA(instance.set_global_value("x::a", inval[6])); CHECK_CUDA(instance.set_global_value("x::d", inval[7])); CHECK_CUDA(instance.set_global_array("y::a", &inval[8], 2)); int inarr[] = 
{inval[10], inval[11]}; CHECK_CUDA(instance.set_global_value("y::d", inarr)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); CHECK_CUDART(hipDeviceSynchronize()); int outval[n_const]; CHECK_CUDART( hipMemcpy(outval, outdata, sizeof(outval), hipMemcpyDeviceToHost)); for (int i = 0; i < n_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } { // test __constant__ array look up in header nested in both anonymous and // explicit namespace jitify::Program program = kernel_cache.program("example_headers/constant_header.cuh", 0, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto instance = program.kernel("constant_test2").instantiate(); constexpr int n_anon_const = 6; int inval[] = {3, 5, 9, 13, 15, 19}; CHECK_CUDA( cuMemcpyHtoD(instance.get_constant_ptr("(anonymous namespace)::b::a"), inval, sizeof(inval) / 2)); CHECK_CUDA( cuMemcpyHtoD(instance.get_global_ptr("(anonymous namespace)::b::d"), inval + 3, sizeof(inval) / 2)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); int outval[n_anon_const]; CHECK_CUDART( hipMemcpy(outval, outdata, sizeof(outval), hipMemcpyDeviceToHost)); for (int i = 0; i < n_anon_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } CHECK_CUDART(hipFree(outdata)); } TEST(JitifyTest, ConstantMemory_experimental) { using jitify::reflection::Type; constexpr int n_const = 12; int* outdata; CHECK_CUDART(hipMalloc((void**)&outdata, n_const * sizeof(int))); dim3 grid(1); dim3 block(1); { // test __constant__ look up in kernel string using different namespaces jitify::experimental::Program program_orig( constmem_program_source, {}, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); auto instance = jitify::experimental::KernelInstantiation::deserialize( program.kernel("constant_test").instantiate().serialize()); int inval[] = {2, 4, 8, 12, 14, 18, 22, 26, 30, 34, 38, 42}; int dval; CHECK_CUDA(instance.get_global_value("x::a", &dval)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(dval, 3); CHECK_CUDA(instance.get_global_value("x::d", &dval)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(dval, 7); int darr[2]; CHECK_CUDA(instance.get_global_array("y::a", &darr[0], 2)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(darr[0], 4); EXPECT_EQ(darr[1], 5); CHECK_CUDA(instance.get_global_value("y::d", &darr)); CHECK_CUDART(hipDeviceSynchronize()); EXPECT_EQ(darr[0], 8); EXPECT_EQ(darr[1], 9); CHECK_CUDA(instance.set_global_value("a", inval[0])); CHECK_CUDA(instance.set_global_value("b::a", inval[1])); CHECK_CUDA(instance.set_global_value("c::b::a", inval[2])); CHECK_CUDA(instance.set_global_value("d", inval[3])); CHECK_CUDA(instance.set_global_value("b::d", inval[4])); CHECK_CUDA(instance.set_global_value("c::b::d", inval[5])); CHECK_CUDA(instance.set_global_value("x::a", inval[6])); CHECK_CUDA(instance.set_global_value("x::d", inval[7])); CHECK_CUDA(instance.set_global_array("y::a", &inval[8], 2)); int inarr[] = {inval[10], inval[11]}; CHECK_CUDA(instance.set_global_value("y::d", inarr)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); CHECK_CUDART(hipDeviceSynchronize()); int outval[n_const]; CHECK_CUDART( hipMemcpy(outval, outdata, sizeof(outval), hipMemcpyDeviceToHost)); for (int i = 0; i < n_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } { // test __constant__ array look up in header nested in both anonymous and // explicit namespace jitify::experimental::Program program_orig( "example_headers/constant_header.cuh", {}, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto program = 
jitify::experimental::Program::deserialize(program_orig.serialize()); auto instance = jitify::experimental::KernelInstantiation::deserialize( program.kernel("constant_test2").instantiate().serialize()); constexpr int n_anon_const = 6; int inval[] = {3, 5, 9, 13, 15, 19}; CHECK_CUDA( cuMemcpyHtoD(instance.get_constant_ptr("(anonymous namespace)::b::a"), inval, sizeof(inval) / 2)); CHECK_CUDA( cuMemcpyHtoD(instance.get_global_ptr("(anonymous namespace)::b::d"), inval + 3, sizeof(inval) / 2)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); int outval[n_anon_const]; CHECK_CUDART( hipMemcpy(outval, outdata, sizeof(outval), hipMemcpyDeviceToHost)); for (int i = 0; i < n_anon_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } CHECK_CUDART(hipFree(outdata)); } TEST(JitifyTest, ParallelFor) { int n = 10000; typedef float T; T* d_out; CHECK_CUDART(hipMalloc((void**)&d_out, n * sizeof(T))); T val = 3.14159f; jitify::ExecutionPolicy policy(jitify::DEVICE); auto lambda = JITIFY_LAMBDA((d_out, val), d_out[i] = (float)i * val); CHECK_CUDA(jitify::parallel_for(policy, 0, n, lambda)); std::vector<T> h_out(n); CHECK_CUDART( hipMemcpy(&h_out[0], d_out, n * sizeof(T), hipMemcpyDeviceToHost)); CHECK_CUDART(hipFree(d_out)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_out[i], (T)i * val); } } TEST(JitifyTest, InvalidPrograms) { jitify::JitCache kernel_cache; auto program_v1 = kernel_cache.program("empty_program\n"); // OK EXPECT_THROW(auto program_v2 = kernel_cache.program("missing_filename"), std::runtime_error); EXPECT_THROW( auto program_v3 = kernel_cache.program("bad_program\nNOT CUDA C!"), std::runtime_error); jitify::experimental::Program program_v4("empty_program\n"); // OK EXPECT_THROW(jitify::experimental::Program program_v5("missing_filename"), std::runtime_error); EXPECT_THROW( jitify::experimental::Program program_v6("bad_program\nNOT CUDA C!"), std::runtime_error); } static const char* const pragma_repl_program_source = R"(my_program template <int N, typename T> __global__ void my_kernel(T* data) { if (blockIdx.x != 0 || threadIdx.x != 0) return; T data0 = data[0]; #pragma unroll for (int i = 0; i < N - 1; ++i) data[0] *= data0; #pragma unroll 1 for (int i = 0; i < N - 1; ++i) data[0] *= data0; #pragma unroll 1 // Make sure parsing works with comments for (int i = 0; i < N - 1; ++i) data[0] *= data0; // TODO: Add support for block comments. //#pragma unroll 1 /* Make sure parsing works with comments */ //for (int i = 0; i < N - 1; ++i) data[0] *= data0; } )"; TEST(JitifyTest, PragmaReplacement) { static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program(pragma_repl_program_source); typedef float T; T* d_data = nullptr; using jitify::reflection::type_of; auto kernel_inst = program.kernel("my_kernel").instantiate(3, type_of(*d_data)); } // TODO: Expand this to include more Thrust code. static const char* const thrust_program_source = "thrust_program\n" "#include <thrust/iterator/counting_iterator.h>\n" "__global__ void my_kernel(thrust::counting_iterator<int> begin,\n" " thrust::counting_iterator<int> end) {\n" "}\n"; TEST(JitifyTest, ThrustHeaders) { // Checks that basic Thrust headers can be compiled. 
jitify::JitCache kernel_cache; #if TORCH_HIP_VERSION < 11000 const char* cppstd = "-std=c++98"; #else const char* cppstd = "-std=c++11"; #endif auto program_v1 = kernel_cache.program(thrust_program_source, {}, {"-I" CUDA_INC_DIR, cppstd}); auto program_v2 = jitify::experimental::Program(thrust_program_source, {}, {"-I" CUDA_INC_DIR, cppstd}); } static const char* const cub_program_source = "cub_program\n" "#include <hipcub/hipcub.hpp>\n" "#include <cub/block/block_radix_sort.cuh>\n" "#include <hipcub/hipcub.hpp>\n" "#include <cub/block/block_store.cuh>\n" "\n" "template<int BLOCK_SIZE, int PER_THREAD>\n" "__global__ void my_kernel(float* data) {\n" " typedef cub::BlockLoad<float, BLOCK_SIZE, PER_THREAD,\n" " cub::BLOCK_LOAD_VECTORIZE> BlockLoad;\n" " typedef cub::BlockRadixSort<float, BLOCK_SIZE, PER_THREAD>\n" " BlockSort;\n" " typedef hipcub::BlockReduce<float, BLOCK_SIZE> BlockReduce;\n" " typedef cub::BlockStore<float, BLOCK_SIZE, PER_THREAD,\n" " cub::BLOCK_STORE_VECTORIZE> BlockStore;\n" " __shared__ union {\n" " typename BlockLoad::TempStorage load;\n" " typename BlockSort::TempStorage sort;\n" " typename BlockReduce::TempStorage reduce;\n" " typename BlockStore::TempStorage store;\n" " float sum;\n" " } temp_storage;\n" " float thread_data[PER_THREAD];\n" " BlockLoad(temp_storage.load).Load(data, thread_data);\n" " __syncthreads();\n" " BlockSort(temp_storage.sort).Sort(thread_data);\n" " __syncthreads();\n" " float sum = BlockReduce(temp_storage.reduce).Sum(thread_data);\n" " __syncthreads();\n" " if (threadIdx.x == 0) {\n" " temp_storage.sum = sum;\n" " }\n" " __syncthreads();\n" " sum = temp_storage.sum;\n" " #pragma unroll\n" " for (int i = 0; i < PER_THREAD; ++i) {\n" " thread_data[i] *= 1.f / sum;\n" " }\n" " __syncthreads();\n" " BlockStore(temp_storage.store).Store(data, thread_data);\n" "}\n"; TEST(JitifyTest, CubBlockPrimitives) { int block_size = 64; int per_thread = 4; int n = block_size * per_thread; std::vector<float> h_data(n); float sum = 0; for (int i = 0; i < n; ++i) { // Start with values sorted in reverse. h_data[i] = (float)(n - 1 - i); sum += h_data[i]; } // Shuffle the values a bit. std::swap(h_data[3], h_data[7]); std::swap(h_data[10], h_data[20]); std::vector<float> h_expected(n); for (int i = 0; i < n; ++i) { // Expected sorted and normalized. 
h_expected[i] = (float)i / sum; } std::vector<float> h_result(n); float* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, n * sizeof(float))); jitify::JitCache kernel_cache; auto program_v1 = kernel_cache.program(cub_program_source, {}, {"-I" CUB_DIR, "-I" CUDA_INC_DIR}); CHECK_CUDART(hipMemcpy(d_data, h_data.data(), n * sizeof(float), hipMemcpyHostToDevice)); CHECK_CUDA(program_v1.kernel("my_kernel") .instantiate(block_size, per_thread) .configure(1, block_size) .launch(d_data)); CHECK_CUDART(hipMemcpy(h_result.data(), d_data, n * sizeof(float), hipMemcpyDeviceToHost)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_result[i], h_expected[i]); } auto program_v2 = jitify::experimental::Program::deserialize( jitify::experimental::Program(cub_program_source, {}, {"-I" CUB_DIR, "-I" CUDA_INC_DIR}) .serialize()); auto kernel_inst_v2 = jitify::experimental::KernelInstantiation::deserialize( program_v2.kernel("my_kernel") .instantiate(block_size, per_thread) .serialize()); CHECK_CUDART(hipMemcpy(d_data, h_data.data(), n * sizeof(float), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, block_size).launch(d_data)); CHECK_CUDART(hipMemcpy(h_result.data(), d_data, n * sizeof(float), hipMemcpyDeviceToHost)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_result[i], h_expected[i]); } CHECK_CUDART(hipFree(d_data)); } static const char* const unused_globals_source = "unused_globals_program\n" "struct Foo { static const int value = 7; };\n" "struct Bar { int a; double b; };\n" "__device__ float used_scalar;\n" "__device__ float used_array[2];\n" "__device__ Bar used_struct;\n" "__device__ float unused_scalar;\n" "__device__ float unused_array[3];\n" "__device__ Bar unused_struct;\n" "__device__ float reg, ret, bra;\n" // Tricky names "__global__ void foo_kernel(int* data) {\n" " if (blockIdx.x != 0 || threadIdx.x != 0) return;\n" " used_scalar = 1.f;\n" " used_array[1] = 2.f;\n" " used_struct.b = 3.f;\n" " __syncthreads();\n" " *data += Foo::value + used_scalar + used_array[1] + used_struct.b;\n" " printf(\"*data = %i\\n\", *data);\n" // Produces global symbols named // $str "}\n"; TEST(JitifyTest, RemoveUnusedGlobals) { hipFree(0); auto program_v2 = jitify::experimental::Program( unused_globals_source, {}, // Note: Flag added twice to test handling of repeats. {"-remove-unused-globals", "--remove-unused-globals"}); auto kernel_inst_v2 = program_v2.kernel("foo_kernel").instantiate(); std::string ptx = kernel_inst_v2.ptx(); EXPECT_TRUE(ptx.find(".global .align 4 .f32 used_scalar;") != std::string::npos); // Note: PTX represents arrays and structs as .b8 instead of the actual type. 
EXPECT_TRUE(ptx.find(".global .align 4 .b8 used_array[8];") != std::string::npos); EXPECT_TRUE(ptx.find(".global .align 8 .b8 used_struct[16];") != std::string::npos); EXPECT_FALSE(ptx.find("_ZN3Foo5valueE") != std::string::npos); EXPECT_FALSE(ptx.find("unused_scalar;") != std::string::npos); EXPECT_FALSE(ptx.find("unused_array;") != std::string::npos); EXPECT_FALSE(ptx.find("unused_struct;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 reg;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 ret;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 bra;") != std::string::npos); int* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( hipMemcpy(d_data, &h_data, sizeof(int), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, 1).launch(d_data)); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(h_data, 16); CHECK_CUDART(hipFree(d_data)); } static const char* const curand_program_source = "curand_program\n" "#include <hiprand/hiprand_kernel.h>\n" "__global__ void my_kernel() {}\n" "\n"; TEST(JitifyTest, CuRandKernel) { auto program_v2 = jitify::experimental::Program( curand_program_source, {}, // Note: --remove-unused-globals is added to remove huge precomputed // arrays that come from CURAND. {"-I" CUDA_INC_DIR, "--remove-unused-globals"}); auto kernel_inst_v2 = program_v2.kernel("my_kernel").instantiate(); // TODO: Expand this test to actually call hiprand kernels and check outputs. } static const char* const linktest_program1_source = "linktest_program1\n" "__constant__ int c = 5;\n" "__device__ int d = 7;\n" "__device__ int f(int i) { return i + 11; }\n" "\n"; static const char* const linktest_program2_source = "linktest_program2\n" "extern __constant__ int c;\n" "extern __device__ int d;\n" "extern __device__ int f(int);\n" "__global__ void my_kernel(int* data) {\n" " *data = f(*data + c + d);\n" "}\n" "\n"; TEST(JitifyTest, LinkExternalFiles) { hipFree(0); // Ensure temporary file is deleted at the end. 
std::unique_ptr<const char, int (*)(const char*)> ptx_filename( "example_headers/linktest.ptx", std::remove); { std::ofstream ptx_file(ptx_filename.get()); ptx_file.exceptions(std::ofstream::failbit | std::ofstream::badbit); ptx_file << jitify::experimental::Program(linktest_program1_source, {}, {"-rdc=true"}) .kernel("") .instantiate() .ptx(); } auto program_v2 = jitify::experimental::Program( linktest_program2_source, {}, {"-rdc=true", "-Lexample_headers", "-llinktest.ptx"}); auto kernel_inst_v2 = program_v2.kernel("my_kernel").instantiate(); int* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( hipMemcpy(d_data, &h_data, sizeof(int), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, 1).launch(d_data)); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(h_data, 26); CHECK_CUDART(hipFree(d_data)); } namespace a { __host__ __device__ int external_device_func(int i) { return i + 1; } } // namespace a static const char* const selflink_program_source = "selflink_program\n" "namespace a {\n" "extern __device__ int external_device_func(int);\n" "}\n" "__global__ void my_kernel(int* data) {\n" " *data = a::external_device_func(*data);\n" "}\n" "\n"; TEST(JitifyTest, LinkCurrentExecutable) { hipFree(0); using namespace jitify::experimental; auto program = Program(selflink_program_source, {}, {"-l."}); auto kernel_inst = program.kernel("my_kernel").instantiate(); int* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( hipMemcpy(d_data, &h_data, sizeof(int), hipMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(1, 1).launch(d_data)); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(h_data, 4); CHECK_CUDART(hipFree(d_data)); } static const char* const reflection_program_source = "reflection_program\n" "struct Base { virtual ~Base() {} };\n" "template <typename T>\n" "struct Derived : public Base {};\n" "template<typename T>\n" "__global__ void type_kernel() {}\n" "template<unsigned short N>\n" "__global__ void nontype_kernel() {}\n" "\n"; struct Base { virtual ~Base() {} }; template <typename T> struct Derived : public Base {}; TEST(JitifyTest, Reflection) { hipFree(0); using namespace jitify::experimental; using jitify::reflection::instance_of; Program program(reflection_program_source); auto type_kernel = program.kernel("type_kernel"); #define JITIFY_TYPE_REFLECTION_TEST(T) \ EXPECT_EQ(type_kernel.instantiate<T>().mangled_name(), \ type_kernel.instantiate({#T}).mangled_name()) JITIFY_TYPE_REFLECTION_TEST(const volatile float); JITIFY_TYPE_REFLECTION_TEST(const volatile float*); JITIFY_TYPE_REFLECTION_TEST(const volatile float&); JITIFY_TYPE_REFLECTION_TEST(Base * (const volatile float)); JITIFY_TYPE_REFLECTION_TEST(const volatile float[4]); #undef JITIFY_TYPE_REFLECTION_TEST typedef Derived<float> derived_type; const Base& base = derived_type(); EXPECT_EQ(type_kernel.instantiate(instance_of(base)).mangled_name(), type_kernel.instantiate<derived_type>().mangled_name()); auto nontype_kernel = program.kernel("nontype_kernel"); #define JITIFY_NONTYPE_REFLECTION_TEST(N) \ EXPECT_EQ(nontype_kernel.instantiate(N).mangled_name(), \ nontype_kernel.instantiate({#N}).mangled_name()) JITIFY_NONTYPE_REFLECTION_TEST(7); JITIFY_NONTYPE_REFLECTION_TEST('J'); #undef JITIFY_NONTYPE_REFLECTION_TEST } static const char* const builtin_numeric_limits_program_source = "builtin_numeric_limits_program\n" "#include <limits>\n" "struct MyType 
{};\n" "namespace std {\n" "template<> class numeric_limits<MyType> {\n" " public:\n" " static MyType min() { return {}; }\n" " static MyType max() { return {}; }\n" "};\n" "} // namespace std\n" "template <typename T>\n" "__global__ void my_kernel(T* data) {\n" " data[0] = std::numeric_limits<T>::min();\n" " data[1] = std::numeric_limits<T>::max();\n" "}\n"; TEST(JitifyTest, BuiltinNumericLimitsHeader) { hipFree(0); using namespace jitify::experimental; auto program = Program(builtin_numeric_limits_program_source); for (const auto& type : {"float", "double", "char", "signed char", "unsigned char", "short", "unsigned short", "int", "unsigned int", "long", "unsigned long", "long long", "unsigned long long", "MyType"}) { program.kernel("my_kernel").instantiate({type}); } } TEST(JitifyTest, ClassKernelArg) { using jitify::reflection::Type; thread_local static jitify::JitCache kernel_cache; int h_data; int* d_data; CHECK_CUDART(hipMalloc((void**)&d_data, sizeof(int))); dim3 grid(1); dim3 block(1); jitify::Program program = kernel_cache.program("example_headers/class_arg_kernel.cuh", 0, {"--use_fast_math", "-I" CUDA_INC_DIR}); { // test that we can pass an arg object to a kernel Arg arg(-1); CHECK_CUDA(program.kernel("class_arg_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, arg)); CHECK_CUDART(hipDeviceSynchronize()); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(arg.x, h_data); } { // test that we can pass an arg object rvalue to a kernel int value = -2; CHECK_CUDA(program.kernel("class_arg_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, Arg(value))); CHECK_CUDART(hipDeviceSynchronize()); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(value, h_data); } { // test that we can pass an arg object reference to a kernel Arg* arg = new Arg(-3); // references are passed as pointers since refernces are just pointers from // an ABI point of view CHECK_CUDA(program.kernel("class_arg_ref_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, arg)); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(arg->x, h_data); delete (arg); } { // test that we can pass an arg object reference to a kernel Arg* arg = new Arg(-4); CHECK_CUDA(program.kernel("class_arg_ptr_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, arg)); CHECK_CUDART( hipMemcpy(&h_data, d_data, sizeof(int), hipMemcpyDeviceToHost)); EXPECT_EQ(arg->x, h_data); delete (arg); } CHECK_CUDART(hipFree(d_data)); } static const char* const assert_program_source = R"( #include <cassert> __global__ void my_assert_kernel() { assert(0 == 1); } )"; static const char* const get_attribute_program_source = R"( __global__ void get_attribute_kernel(int *out, int *in) { __shared__ int buffer[4096]; buffer[threadIdx.x] = in[threadIdx.x]; __syncthreads(); out[threadIdx.y] = buffer[threadIdx.x]; } )"; TEST(JitifyTest, GetAttribute) { // Checks that we can get function attributes jitify::JitCache kernel_cache; auto program = kernel_cache.program(get_attribute_program_source, {}, {"-I" CUDA_INC_DIR}); auto instance = program.kernel("get_attribute_kernel").instantiate(); EXPECT_EQ(4096 * (int)sizeof(int), instance.get_func_attribute(hipFuncAttributeSharedSizeBytes)); } static const char* const set_attribute_program_source = R"( __global__ void set_attribute_kernel(int *out, int *in) { extern __shared__ int buffer[]; buffer[threadIdx.x] = in[threadIdx.x]; 
__syncthreads(); out[threadIdx.y] = buffer[threadIdx.x]; } )"; TEST(JitifyTest, SetAttribute) { // Checks that we can set function attributes jitify::JitCache kernel_cache; int* in; CHECK_CUDART(hipMalloc((void**)&in, sizeof(int))); int* out; CHECK_CUDART(hipMalloc((void**)&out, sizeof(int))); // query the maximum supported shared bytes per block hipDevice_t device; CHECK_CUDA(hipDeviceGet(&device, 0)); int shared_bytes; CHECK_CUDA(hipDeviceGetAttribute( &shared_bytes, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN, device)); auto program = kernel_cache.program(set_attribute_program_source, {}, {"-I" CUDA_INC_DIR}); auto instance = program.kernel("set_attribute_kernel").instantiate(); instance.set_func_attribute(CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, shared_bytes); dim3 grid(1); dim3 block(1); // this kernel will fail on Volta+ unless the set attribute succeeded CHECK_CUDA(instance.configure(grid, block, shared_bytes).launch(out, in)); CHECK_CUDART(hipFree(out)); CHECK_CUDART(hipFree(in)); } TEST(JitifyTest, EnvVarOptions) { setenv("JITIFY_OPTIONS", "-bad_option", true); EXPECT_THROW(jitify::JitCache kernel_cache; auto program = kernel_cache.program(simple_program_source), std::runtime_error); EXPECT_THROW(jitify::experimental::Program program(simple_program_source), std::runtime_error); setenv("JITIFY_OPTIONS", "", true); } // NOTE: This MUST be the last test in the file, due to sticky CUDA error. TEST(JitifyTest, AssertHeader) { // Checks that cassert works as expected jitify::JitCache kernel_cache; auto program = kernel_cache.program(assert_program_source, {}, {"-I" CUDA_INC_DIR}); dim3 grid(1); dim3 block(1); CHECK_CUDA((program.kernel("my_assert_kernel") .instantiate<>() .configure(grid, block) .launch())); }
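The hipified test file above keeps the gtest-based CHECK_CUDART wrapper around HIP runtime calls. As a standalone illustration of that pattern outside the test harness, here is a minimal HIP error-checking macro in the same spirit; CHECK_HIP, the tiny main, and the allocation it checks are placeholders for this sketch, not part of the dataset.

#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

// Print the failing call's location and abort, mirroring what the test macros
// do with ASSERT_EQ. CHECK_HIP is a name chosen for this sketch only.
#define CHECK_HIP(call)                                               \
  do {                                                                \
    hipError_t status_ = (call);                                      \
    if (status_ != hipSuccess) {                                      \
      fprintf(stderr, "(HIP) returned %s (%s:%d)\n",                  \
              hipGetErrorString(status_), __FILE__, __LINE__);        \
      exit(EXIT_FAILURE);                                             \
    }                                                                 \
  } while (0)

int main() {
  int* d_data = nullptr;
  CHECK_HIP(hipMalloc((void**)&d_data, sizeof(int)));
  CHECK_HIP(hipMemset(d_data, 0, sizeof(int)));
  CHECK_HIP(hipFree(d_data));
  return 0;
}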
ab9f6dc7d839962dccf2e19af6f507662339726f.cu
/* * Copyright (c) 2017-2020, NVIDIA CORPORATION. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of NVIDIA CORPORATION nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #ifdef LINUX // Only supported by gcc on Linux (defined in Makefile) #define JITIFY_ENABLE_EMBEDDED_FILES 1 #endif #define JITIFY_PRINT_INSTANTIATION 1 #define JITIFY_PRINT_SOURCE 1 #define JITIFY_PRINT_LOG 1 #define JITIFY_PRINT_PTX 1 #define JITIFY_PRINT_LINKER_LOG 1 #define JITIFY_PRINT_LAUNCH 1 #define JITIFY_PRINT_HEADER_PATHS 1 #include "jitify.hpp" #include "example_headers/class_arg_kernel.cuh" #include "example_headers/my_header1.cuh.jit" #ifdef LINUX // Only supported by gcc on Linux (defined in Makefile) JITIFY_INCLUDE_EMBEDDED_FILE(example_headers_my_header2_cuh); #endif #include "gtest/gtest.h" #include <cstdio> #include <fstream> #include <iostream> #include <memory> #define CHECK_CUDA(call) \ do { \ CUresult status = call; \ if (status != CUDA_SUCCESS) { \ const char* str; \ cuGetErrorName(status, &str); \ std::cout << "(CUDA) returned " << str; \ std::cout << " (" << __FILE__ << ":" << __LINE__ << ":" << __func__ \ << "())" << std::endl; \ ASSERT_EQ(status, CUDA_SUCCESS); \ } \ } while (0) #define CHECK_CUDART(call) \ do { \ cudaError_t status = call; \ if (status != cudaSuccess) { \ std::cout << "(CUDART) returned " << cudaGetErrorString(status); \ std::cout << " (" << __FILE__ << ":" << __LINE__ << ":" << __func__ \ << "())" << std::endl; \ ASSERT_EQ(status, cudaSuccess); \ } \ } while (0) std::istream* file_callback(std::string filename, std::iostream& tmp_stream) { // User returns NULL or pointer to stream containing file source // Note: tmp_stream is provided for convenience if (filename == "example_headers/my_header4.cuh") { tmp_stream << "#pragma once\n" "template<typename T>\n" "T pointless_func(T x) {\n" " return x;\n" "}\n"; return &tmp_stream; } else { // Find this file through other mechanisms return 0; } } static const char* const simple_program_source = "my_program\n" "template<int N, typename T>\n" "__global__\n" "void my_kernel(T* data) {\n" " if (blockIdx.x != 0 || threadIdx.x != 0) return;\n" " T data0 = data[0];\n" " for( int i=0; i<N-1; ++i ) {\n" " data[0] 
*= data0;\n" " }\n" "}\n"; TEST(JitifyTest, Simple) { static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program(simple_program_source); typedef float T; T* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(T))); dim3 grid(1); dim3 block(1); using jitify::reflection::type_of; auto kernel_inst = program.kernel("my_kernel").instantiate(3, type_of(*d_data)); T h_data = 5; CHECK_CUDART(cudaMemcpy(d_data, &h_data, sizeof(T), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(grid, block).launch(d_data)); CHECK_CUDART(cudaMemcpy(&h_data, d_data, sizeof(T), cudaMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); h_data = 5; CHECK_CUDART(cudaMemcpy(d_data, &h_data, sizeof(T), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure_1d_max_occupancy().launch(d_data)); CHECK_CUDART(cudaMemcpy(&h_data, d_data, sizeof(T), cudaMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); CHECK_CUDART(cudaFree(d_data)); } TEST(JitifyTest, Simple_experimental) { std::vector<std::string> opts; jitify::experimental::Program program_orig(simple_program_source, {}, opts); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); typedef float T; T* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(T))); dim3 grid(1); dim3 block(1); using jitify::reflection::type_of; auto kernel_inst_orig = program.kernel("my_kernel").instantiate(3, type_of(*d_data)); auto kernel_inst = jitify::experimental::KernelInstantiation::deserialize( kernel_inst_orig.serialize()); T h_data = 5; CHECK_CUDART(cudaMemcpy(d_data, &h_data, sizeof(T), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(grid, block).launch(d_data)); CHECK_CUDART(cudaMemcpy(&h_data, d_data, sizeof(T), cudaMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); h_data = 5; CHECK_CUDART(cudaMemcpy(d_data, &h_data, sizeof(T), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure_1d_max_occupancy().launch(d_data)); CHECK_CUDART(cudaMemcpy(&h_data, d_data, sizeof(T), cudaMemcpyDeviceToHost)); EXPECT_FLOAT_EQ(h_data, 125.f); CHECK_CUDART(cudaFree(d_data)); } static const char* const multiple_kernels_program_source = "my_program1\n" "#include \"example_headers/my_header1.cuh\"\n" "#include \"example_headers/my_header2.cuh\"\n" "#include \"example_headers/my_header3.cuh\"\n" "#include \"example_headers/my_header4.cuh\"\n" "\n" "__global__\n" "void my_kernel1(float const* indata, float* outdata) {\n" " outdata[0] = indata[0] + 1;\n" " outdata[0] -= 1;\n" "}\n" "\n" "template<int C, typename T>\n" "__global__\n" "void my_kernel2(float const* indata, float* outdata) {\n" " for( int i=0; i<C; ++i ) {\n" " outdata[0] = " "pointless_func(identity(sqrt(square(negate(indata[0])))));\n" " }\n" "}\n"; TEST(JitifyTest, MultipleKernels) { using jitify::reflection::instance_of; using jitify::reflection::NonType; using jitify::reflection::reflect; using jitify::reflection::Type; using jitify::reflection::type_of; thread_local static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program( multiple_kernels_program_source, // Code string specified above {example_headers_my_header1_cuh}, // Code string generated by stringify {"--use_fast_math", "-I" CUDA_INC_DIR}, file_callback); typedef float T; T* indata; T* outdata; CHECK_CUDART(cudaMalloc((void**)&indata, sizeof(T))); CHECK_CUDART(cudaMalloc((void**)&outdata, sizeof(T))); T inval = 3.14159f; CHECK_CUDART(cudaMemcpy(indata, &inval, sizeof(T), cudaMemcpyHostToDevice)); dim3 grid(1); dim3 block(1); 
CHECK_CUDA(program.kernel("my_kernel1") .instantiate() .configure(grid, block) .launch(indata, outdata)); enum { C = 123 }; // These invocations are all equivalent and will come from cache after the 1st CHECK_CUDA((program.kernel("my_kernel2") .instantiate<NonType<int, C>, T>() .configure(grid, block) .launch(indata, outdata))); CHECK_CUDA(program.kernel("my_kernel2") .instantiate({reflect((int)C), reflect<T>()}) .configure(grid, block) .launch(indata, outdata)); // Recommended versions CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, Type<T>()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, type_of(*indata)) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(program.kernel("my_kernel2") .instantiate((int)C, instance_of(*indata)) .configure(grid, block) .launch(indata, outdata)); T outval = 0; CHECK_CUDART(cudaMemcpy(&outval, outdata, sizeof(T), cudaMemcpyDeviceToHost)); CHECK_CUDART(cudaFree(outdata)); CHECK_CUDART(cudaFree(indata)); EXPECT_FLOAT_EQ(inval, outval); } TEST(JitifyTest, MultipleKernels_experimental) { using jitify::reflection::instance_of; using jitify::reflection::NonType; using jitify::reflection::reflect; using jitify::reflection::Type; using jitify::reflection::type_of; jitify::experimental::Program program_orig( multiple_kernels_program_source, // Code string specified above {example_headers_my_header1_cuh}, // Code string generated by stringify {"--use_fast_math", "-I" CUDA_INC_DIR}, file_callback); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); typedef float T; T* indata; T* outdata; CHECK_CUDART(cudaMalloc((void**)&indata, sizeof(T))); CHECK_CUDART(cudaMalloc((void**)&outdata, sizeof(T))); T inval = 3.14159f; CHECK_CUDART(cudaMemcpy(indata, &inval, sizeof(T), cudaMemcpyHostToDevice)); dim3 grid(1); dim3 block(1); CHECK_CUDA(program.kernel("my_kernel1") .instantiate() .configure(grid, block) .launch(indata, outdata)); enum { C = 123 }; // These invocations are all equivalent. 
CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate<NonType<int, C>, T>() .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate({reflect((int)C), reflect<T>()}) .serialize()) .configure(grid, block) .launch(indata, outdata)); // Recommended versions CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, Type<T>()) .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, type_of(*indata)) .serialize()) .configure(grid, block) .launch(indata, outdata)); CHECK_CUDA(jitify::experimental::KernelInstantiation::deserialize( program.kernel("my_kernel2") .instantiate((int)C, instance_of(*indata)) .serialize()) .configure(grid, block) .launch(indata, outdata)); T outval = 0; CHECK_CUDART(cudaMemcpy(&outval, outdata, sizeof(T), cudaMemcpyDeviceToHost)); CHECK_CUDART(cudaFree(outdata)); CHECK_CUDART(cudaFree(indata)); EXPECT_FLOAT_EQ(inval, outval); } static const char* const constmem_program_source = "constmem_program\n" "#pragma once\n" "\n" "__constant__ int a;\n" "__device__ int d;\n" "namespace b { __constant__ int a; __device__ int d; }\n" "namespace c { namespace b { __constant__ int a; __device__ int d; } }\n" "namespace x { __constant__ int a = 3; __device__ int d = 7; }\n" "namespace y { __constant__ int a[] = {4, 5}; __device__ int d[] = {8, 9}; " "}\n" "\n" "__global__ void constant_test(int *x) {\n" " x[0] = a;\n" " x[1] = b::a;\n" " x[2] = c::b::a;\n" " x[3] = d;\n" " x[4] = b::d;\n" " x[5] = c::b::d;\n" " x[6] = x::a;\n" " x[7] = x::d;\n" " x[8] = y::a[0];\n" " x[9] = y::a[1];\n" " x[10] = y::d[0];\n" " x[11] = y::d[1];\n" "}\n"; TEST(JitifyTest, ConstantMemory) { using jitify::reflection::Type; thread_local static jitify::JitCache kernel_cache; constexpr int n_const = 12; int* outdata; CHECK_CUDART(cudaMalloc((void**)&outdata, n_const * sizeof(int))); dim3 grid(1); dim3 block(1); { // test __constant__ look up in kernel string using diffrent namespaces jitify::Program program = kernel_cache.program( constmem_program_source, 0, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto instance = program.kernel("constant_test").instantiate(); int inval[] = {2, 4, 8, 12, 14, 18, 22, 26, 30, 34, 38, 42}; int dval; CHECK_CUDA(instance.get_global_value("x::a", &dval)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(dval, 3); CHECK_CUDA(instance.get_global_value("x::d", &dval)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(dval, 7); int darr[2]; CHECK_CUDA(instance.get_global_array("y::a", &darr[0], 2)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(darr[0], 4); EXPECT_EQ(darr[1], 5); CHECK_CUDA(instance.get_global_value("y::d", &darr)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(darr[0], 8); EXPECT_EQ(darr[1], 9); CHECK_CUDA(instance.set_global_value("a", inval[0])); CHECK_CUDA(instance.set_global_value("b::a", inval[1])); CHECK_CUDA(instance.set_global_value("c::b::a", inval[2])); CHECK_CUDA(instance.set_global_value("d", inval[3])); CHECK_CUDA(instance.set_global_value("b::d", inval[4])); CHECK_CUDA(instance.set_global_value("c::b::d", inval[5])); CHECK_CUDA(instance.set_global_value("x::a", inval[6])); CHECK_CUDA(instance.set_global_value("x::d", inval[7])); CHECK_CUDA(instance.set_global_array("y::a", &inval[8], 2)); int 
inarr[] = {inval[10], inval[11]}; CHECK_CUDA(instance.set_global_value("y::d", inarr)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); CHECK_CUDART(cudaDeviceSynchronize()); int outval[n_const]; CHECK_CUDART( cudaMemcpy(outval, outdata, sizeof(outval), cudaMemcpyDeviceToHost)); for (int i = 0; i < n_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } { // test __constant__ array look up in header nested in both anonymous and // explicit namespace jitify::Program program = kernel_cache.program("example_headers/constant_header.cuh", 0, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto instance = program.kernel("constant_test2").instantiate(); constexpr int n_anon_const = 6; int inval[] = {3, 5, 9, 13, 15, 19}; CHECK_CUDA( cuMemcpyHtoD(instance.get_constant_ptr("(anonymous namespace)::b::a"), inval, sizeof(inval) / 2)); CHECK_CUDA( cuMemcpyHtoD(instance.get_global_ptr("(anonymous namespace)::b::d"), inval + 3, sizeof(inval) / 2)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); int outval[n_anon_const]; CHECK_CUDART( cudaMemcpy(outval, outdata, sizeof(outval), cudaMemcpyDeviceToHost)); for (int i = 0; i < n_anon_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } CHECK_CUDART(cudaFree(outdata)); } TEST(JitifyTest, ConstantMemory_experimental) { using jitify::reflection::Type; constexpr int n_const = 12; int* outdata; CHECK_CUDART(cudaMalloc((void**)&outdata, n_const * sizeof(int))); dim3 grid(1); dim3 block(1); { // test __constant__ look up in kernel string using different namespaces jitify::experimental::Program program_orig( constmem_program_source, {}, {"--use_fast_math", "-I" CUDA_INC_DIR}); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); auto instance = jitify::experimental::KernelInstantiation::deserialize( program.kernel("constant_test").instantiate().serialize()); int inval[] = {2, 4, 8, 12, 14, 18, 22, 26, 30, 34, 38, 42}; int dval; CHECK_CUDA(instance.get_global_value("x::a", &dval)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(dval, 3); CHECK_CUDA(instance.get_global_value("x::d", &dval)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(dval, 7); int darr[2]; CHECK_CUDA(instance.get_global_array("y::a", &darr[0], 2)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(darr[0], 4); EXPECT_EQ(darr[1], 5); CHECK_CUDA(instance.get_global_value("y::d", &darr)); CHECK_CUDART(cudaDeviceSynchronize()); EXPECT_EQ(darr[0], 8); EXPECT_EQ(darr[1], 9); CHECK_CUDA(instance.set_global_value("a", inval[0])); CHECK_CUDA(instance.set_global_value("b::a", inval[1])); CHECK_CUDA(instance.set_global_value("c::b::a", inval[2])); CHECK_CUDA(instance.set_global_value("d", inval[3])); CHECK_CUDA(instance.set_global_value("b::d", inval[4])); CHECK_CUDA(instance.set_global_value("c::b::d", inval[5])); CHECK_CUDA(instance.set_global_value("x::a", inval[6])); CHECK_CUDA(instance.set_global_value("x::d", inval[7])); CHECK_CUDA(instance.set_global_array("y::a", &inval[8], 2)); int inarr[] = {inval[10], inval[11]}; CHECK_CUDA(instance.set_global_value("y::d", inarr)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); CHECK_CUDART(cudaDeviceSynchronize()); int outval[n_const]; CHECK_CUDART( cudaMemcpy(outval, outdata, sizeof(outval), cudaMemcpyDeviceToHost)); for (int i = 0; i < n_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } { // test __constant__ array look up in header nested in both anonymous and // explicit namespace jitify::experimental::Program program_orig( "example_headers/constant_header.cuh", {}, {"--use_fast_math", "-I" 
CUDA_INC_DIR}); auto program = jitify::experimental::Program::deserialize(program_orig.serialize()); auto instance = jitify::experimental::KernelInstantiation::deserialize( program.kernel("constant_test2").instantiate().serialize()); constexpr int n_anon_const = 6; int inval[] = {3, 5, 9, 13, 15, 19}; CHECK_CUDA( cuMemcpyHtoD(instance.get_constant_ptr("(anonymous namespace)::b::a"), inval, sizeof(inval) / 2)); CHECK_CUDA( cuMemcpyHtoD(instance.get_global_ptr("(anonymous namespace)::b::d"), inval + 3, sizeof(inval) / 2)); CHECK_CUDA(instance.configure(grid, block).launch(outdata)); int outval[n_anon_const]; CHECK_CUDART( cudaMemcpy(outval, outdata, sizeof(outval), cudaMemcpyDeviceToHost)); for (int i = 0; i < n_anon_const; i++) { EXPECT_EQ(inval[i], outval[i]); } } CHECK_CUDART(cudaFree(outdata)); } TEST(JitifyTest, ParallelFor) { int n = 10000; typedef float T; T* d_out; CHECK_CUDART(cudaMalloc((void**)&d_out, n * sizeof(T))); T val = 3.14159f; jitify::ExecutionPolicy policy(jitify::DEVICE); auto lambda = JITIFY_LAMBDA((d_out, val), d_out[i] = (float)i * val); CHECK_CUDA(jitify::parallel_for(policy, 0, n, lambda)); std::vector<T> h_out(n); CHECK_CUDART( cudaMemcpy(&h_out[0], d_out, n * sizeof(T), cudaMemcpyDeviceToHost)); CHECK_CUDART(cudaFree(d_out)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_out[i], (T)i * val); } } TEST(JitifyTest, InvalidPrograms) { jitify::JitCache kernel_cache; auto program_v1 = kernel_cache.program("empty_program\n"); // OK EXPECT_THROW(auto program_v2 = kernel_cache.program("missing_filename"), std::runtime_error); EXPECT_THROW( auto program_v3 = kernel_cache.program("bad_program\nNOT CUDA C!"), std::runtime_error); jitify::experimental::Program program_v4("empty_program\n"); // OK EXPECT_THROW(jitify::experimental::Program program_v5("missing_filename"), std::runtime_error); EXPECT_THROW( jitify::experimental::Program program_v6("bad_program\nNOT CUDA C!"), std::runtime_error); } static const char* const pragma_repl_program_source = R"(my_program template <int N, typename T> __global__ void my_kernel(T* data) { if (blockIdx.x != 0 || threadIdx.x != 0) return; T data0 = data[0]; #pragma unroll for (int i = 0; i < N - 1; ++i) data[0] *= data0; #pragma unroll 1 for (int i = 0; i < N - 1; ++i) data[0] *= data0; #pragma unroll 1 // Make sure parsing works with comments for (int i = 0; i < N - 1; ++i) data[0] *= data0; // TODO: Add support for block comments. //#pragma unroll 1 /* Make sure parsing works with comments */ //for (int i = 0; i < N - 1; ++i) data[0] *= data0; } )"; TEST(JitifyTest, PragmaReplacement) { static jitify::JitCache kernel_cache; jitify::Program program = kernel_cache.program(pragma_repl_program_source); typedef float T; T* d_data = nullptr; using jitify::reflection::type_of; auto kernel_inst = program.kernel("my_kernel").instantiate(3, type_of(*d_data)); } // TODO: Expand this to include more Thrust code. static const char* const thrust_program_source = "thrust_program\n" "#include <thrust/iterator/counting_iterator.h>\n" "__global__ void my_kernel(thrust::counting_iterator<int> begin,\n" " thrust::counting_iterator<int> end) {\n" "}\n"; TEST(JitifyTest, ThrustHeaders) { // Checks that basic Thrust headers can be compiled. 
jitify::JitCache kernel_cache; #if CUDA_VERSION < 11000 const char* cppstd = "-std=c++98"; #else const char* cppstd = "-std=c++11"; #endif auto program_v1 = kernel_cache.program(thrust_program_source, {}, {"-I" CUDA_INC_DIR, cppstd}); auto program_v2 = jitify::experimental::Program(thrust_program_source, {}, {"-I" CUDA_INC_DIR, cppstd}); } static const char* const cub_program_source = "cub_program\n" "#include <cub/block/block_load.cuh>\n" "#include <cub/block/block_radix_sort.cuh>\n" "#include <cub/block/block_reduce.cuh>\n" "#include <cub/block/block_store.cuh>\n" "\n" "template<int BLOCK_SIZE, int PER_THREAD>\n" "__global__ void my_kernel(float* data) {\n" " typedef cub::BlockLoad<float, BLOCK_SIZE, PER_THREAD,\n" " cub::BLOCK_LOAD_VECTORIZE> BlockLoad;\n" " typedef cub::BlockRadixSort<float, BLOCK_SIZE, PER_THREAD>\n" " BlockSort;\n" " typedef cub::BlockReduce<float, BLOCK_SIZE> BlockReduce;\n" " typedef cub::BlockStore<float, BLOCK_SIZE, PER_THREAD,\n" " cub::BLOCK_STORE_VECTORIZE> BlockStore;\n" " __shared__ union {\n" " typename BlockLoad::TempStorage load;\n" " typename BlockSort::TempStorage sort;\n" " typename BlockReduce::TempStorage reduce;\n" " typename BlockStore::TempStorage store;\n" " float sum;\n" " } temp_storage;\n" " float thread_data[PER_THREAD];\n" " BlockLoad(temp_storage.load).Load(data, thread_data);\n" " __syncthreads();\n" " BlockSort(temp_storage.sort).Sort(thread_data);\n" " __syncthreads();\n" " float sum = BlockReduce(temp_storage.reduce).Sum(thread_data);\n" " __syncthreads();\n" " if (threadIdx.x == 0) {\n" " temp_storage.sum = sum;\n" " }\n" " __syncthreads();\n" " sum = temp_storage.sum;\n" " #pragma unroll\n" " for (int i = 0; i < PER_THREAD; ++i) {\n" " thread_data[i] *= 1.f / sum;\n" " }\n" " __syncthreads();\n" " BlockStore(temp_storage.store).Store(data, thread_data);\n" "}\n"; TEST(JitifyTest, CubBlockPrimitives) { int block_size = 64; int per_thread = 4; int n = block_size * per_thread; std::vector<float> h_data(n); float sum = 0; for (int i = 0; i < n; ++i) { // Start with values sorted in reverse. h_data[i] = (float)(n - 1 - i); sum += h_data[i]; } // Shuffle the values a bit. std::swap(h_data[3], h_data[7]); std::swap(h_data[10], h_data[20]); std::vector<float> h_expected(n); for (int i = 0; i < n; ++i) { // Expected sorted and normalized. 
h_expected[i] = (float)i / sum; } std::vector<float> h_result(n); float* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, n * sizeof(float))); jitify::JitCache kernel_cache; auto program_v1 = kernel_cache.program(cub_program_source, {}, {"-I" CUB_DIR, "-I" CUDA_INC_DIR}); CHECK_CUDART(cudaMemcpy(d_data, h_data.data(), n * sizeof(float), cudaMemcpyHostToDevice)); CHECK_CUDA(program_v1.kernel("my_kernel") .instantiate(block_size, per_thread) .configure(1, block_size) .launch(d_data)); CHECK_CUDART(cudaMemcpy(h_result.data(), d_data, n * sizeof(float), cudaMemcpyDeviceToHost)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_result[i], h_expected[i]); } auto program_v2 = jitify::experimental::Program::deserialize( jitify::experimental::Program(cub_program_source, {}, {"-I" CUB_DIR, "-I" CUDA_INC_DIR}) .serialize()); auto kernel_inst_v2 = jitify::experimental::KernelInstantiation::deserialize( program_v2.kernel("my_kernel") .instantiate(block_size, per_thread) .serialize()); CHECK_CUDART(cudaMemcpy(d_data, h_data.data(), n * sizeof(float), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, block_size).launch(d_data)); CHECK_CUDART(cudaMemcpy(h_result.data(), d_data, n * sizeof(float), cudaMemcpyDeviceToHost)); for (int i = 0; i < n; ++i) { EXPECT_FLOAT_EQ(h_result[i], h_expected[i]); } CHECK_CUDART(cudaFree(d_data)); } static const char* const unused_globals_source = "unused_globals_program\n" "struct Foo { static const int value = 7; };\n" "struct Bar { int a; double b; };\n" "__device__ float used_scalar;\n" "__device__ float used_array[2];\n" "__device__ Bar used_struct;\n" "__device__ float unused_scalar;\n" "__device__ float unused_array[3];\n" "__device__ Bar unused_struct;\n" "__device__ float reg, ret, bra;\n" // Tricky names "__global__ void foo_kernel(int* data) {\n" " if (blockIdx.x != 0 || threadIdx.x != 0) return;\n" " used_scalar = 1.f;\n" " used_array[1] = 2.f;\n" " used_struct.b = 3.f;\n" " __syncthreads();\n" " *data += Foo::value + used_scalar + used_array[1] + used_struct.b;\n" " printf(\"*data = %i\\n\", *data);\n" // Produces global symbols named // $str "}\n"; TEST(JitifyTest, RemoveUnusedGlobals) { cudaFree(0); auto program_v2 = jitify::experimental::Program( unused_globals_source, {}, // Note: Flag added twice to test handling of repeats. {"-remove-unused-globals", "--remove-unused-globals"}); auto kernel_inst_v2 = program_v2.kernel("foo_kernel").instantiate(); std::string ptx = kernel_inst_v2.ptx(); EXPECT_TRUE(ptx.find(".global .align 4 .f32 used_scalar;") != std::string::npos); // Note: PTX represents arrays and structs as .b8 instead of the actual type. 
EXPECT_TRUE(ptx.find(".global .align 4 .b8 used_array[8];") != std::string::npos); EXPECT_TRUE(ptx.find(".global .align 8 .b8 used_struct[16];") != std::string::npos); EXPECT_FALSE(ptx.find("_ZN3Foo5valueE") != std::string::npos); EXPECT_FALSE(ptx.find("unused_scalar;") != std::string::npos); EXPECT_FALSE(ptx.find("unused_array;") != std::string::npos); EXPECT_FALSE(ptx.find("unused_struct;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 reg;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 ret;") != std::string::npos); EXPECT_FALSE(ptx.find(".global .align 4 .f32 bra;") != std::string::npos); int* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( cudaMemcpy(d_data, &h_data, sizeof(int), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, 1).launch(d_data)); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(h_data, 16); CHECK_CUDART(cudaFree(d_data)); } static const char* const curand_program_source = "curand_program\n" "#include <curand_kernel.h>\n" "__global__ void my_kernel() {}\n" "\n"; TEST(JitifyTest, CuRandKernel) { auto program_v2 = jitify::experimental::Program( curand_program_source, {}, // Note: --remove-unused-globals is added to remove huge precomputed // arrays that come from CURAND. {"-I" CUDA_INC_DIR, "--remove-unused-globals"}); auto kernel_inst_v2 = program_v2.kernel("my_kernel").instantiate(); // TODO: Expand this test to actually call curand kernels and check outputs. } static const char* const linktest_program1_source = "linktest_program1\n" "__constant__ int c = 5;\n" "__device__ int d = 7;\n" "__device__ int f(int i) { return i + 11; }\n" "\n"; static const char* const linktest_program2_source = "linktest_program2\n" "extern __constant__ int c;\n" "extern __device__ int d;\n" "extern __device__ int f(int);\n" "__global__ void my_kernel(int* data) {\n" " *data = f(*data + c + d);\n" "}\n" "\n"; TEST(JitifyTest, LinkExternalFiles) { cudaFree(0); // Ensure temporary file is deleted at the end. 
std::unique_ptr<const char, int (*)(const char*)> ptx_filename( "example_headers/linktest.ptx", std::remove); { std::ofstream ptx_file(ptx_filename.get()); ptx_file.exceptions(std::ofstream::failbit | std::ofstream::badbit); ptx_file << jitify::experimental::Program(linktest_program1_source, {}, {"-rdc=true"}) .kernel("") .instantiate() .ptx(); } auto program_v2 = jitify::experimental::Program( linktest_program2_source, {}, {"-rdc=true", "-Lexample_headers", "-llinktest.ptx"}); auto kernel_inst_v2 = program_v2.kernel("my_kernel").instantiate(); int* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( cudaMemcpy(d_data, &h_data, sizeof(int), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst_v2.configure(1, 1).launch(d_data)); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(h_data, 26); CHECK_CUDART(cudaFree(d_data)); } namespace a { __host__ __device__ int external_device_func(int i) { return i + 1; } } // namespace a static const char* const selflink_program_source = "selflink_program\n" "namespace a {\n" "extern __device__ int external_device_func(int);\n" "}\n" "__global__ void my_kernel(int* data) {\n" " *data = a::external_device_func(*data);\n" "}\n" "\n"; TEST(JitifyTest, LinkCurrentExecutable) { cudaFree(0); using namespace jitify::experimental; auto program = Program(selflink_program_source, {}, {"-l."}); auto kernel_inst = program.kernel("my_kernel").instantiate(); int* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(int))); int h_data = 3; CHECK_CUDART( cudaMemcpy(d_data, &h_data, sizeof(int), cudaMemcpyHostToDevice)); CHECK_CUDA(kernel_inst.configure(1, 1).launch(d_data)); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(h_data, 4); CHECK_CUDART(cudaFree(d_data)); } static const char* const reflection_program_source = "reflection_program\n" "struct Base { virtual ~Base() {} };\n" "template <typename T>\n" "struct Derived : public Base {};\n" "template<typename T>\n" "__global__ void type_kernel() {}\n" "template<unsigned short N>\n" "__global__ void nontype_kernel() {}\n" "\n"; struct Base { virtual ~Base() {} }; template <typename T> struct Derived : public Base {}; TEST(JitifyTest, Reflection) { cudaFree(0); using namespace jitify::experimental; using jitify::reflection::instance_of; Program program(reflection_program_source); auto type_kernel = program.kernel("type_kernel"); #define JITIFY_TYPE_REFLECTION_TEST(T) \ EXPECT_EQ(type_kernel.instantiate<T>().mangled_name(), \ type_kernel.instantiate({#T}).mangled_name()) JITIFY_TYPE_REFLECTION_TEST(const volatile float); JITIFY_TYPE_REFLECTION_TEST(const volatile float*); JITIFY_TYPE_REFLECTION_TEST(const volatile float&); JITIFY_TYPE_REFLECTION_TEST(Base * (const volatile float)); JITIFY_TYPE_REFLECTION_TEST(const volatile float[4]); #undef JITIFY_TYPE_REFLECTION_TEST typedef Derived<float> derived_type; const Base& base = derived_type(); EXPECT_EQ(type_kernel.instantiate(instance_of(base)).mangled_name(), type_kernel.instantiate<derived_type>().mangled_name()); auto nontype_kernel = program.kernel("nontype_kernel"); #define JITIFY_NONTYPE_REFLECTION_TEST(N) \ EXPECT_EQ(nontype_kernel.instantiate(N).mangled_name(), \ nontype_kernel.instantiate({#N}).mangled_name()) JITIFY_NONTYPE_REFLECTION_TEST(7); JITIFY_NONTYPE_REFLECTION_TEST('J'); #undef JITIFY_NONTYPE_REFLECTION_TEST } static const char* const builtin_numeric_limits_program_source = "builtin_numeric_limits_program\n" "#include <limits>\n" 
"struct MyType {};\n" "namespace std {\n" "template<> class numeric_limits<MyType> {\n" " public:\n" " static MyType min() { return {}; }\n" " static MyType max() { return {}; }\n" "};\n" "} // namespace std\n" "template <typename T>\n" "__global__ void my_kernel(T* data) {\n" " data[0] = std::numeric_limits<T>::min();\n" " data[1] = std::numeric_limits<T>::max();\n" "}\n"; TEST(JitifyTest, BuiltinNumericLimitsHeader) { cudaFree(0); using namespace jitify::experimental; auto program = Program(builtin_numeric_limits_program_source); for (const auto& type : {"float", "double", "char", "signed char", "unsigned char", "short", "unsigned short", "int", "unsigned int", "long", "unsigned long", "long long", "unsigned long long", "MyType"}) { program.kernel("my_kernel").instantiate({type}); } } TEST(JitifyTest, ClassKernelArg) { using jitify::reflection::Type; thread_local static jitify::JitCache kernel_cache; int h_data; int* d_data; CHECK_CUDART(cudaMalloc((void**)&d_data, sizeof(int))); dim3 grid(1); dim3 block(1); jitify::Program program = kernel_cache.program("example_headers/class_arg_kernel.cuh", 0, {"--use_fast_math", "-I" CUDA_INC_DIR}); { // test that we can pass an arg object to a kernel Arg arg(-1); CHECK_CUDA(program.kernel("class_arg_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, arg)); CHECK_CUDART(cudaDeviceSynchronize()); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(arg.x, h_data); } { // test that we can pass an arg object rvalue to a kernel int value = -2; CHECK_CUDA(program.kernel("class_arg_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, Arg(value))); CHECK_CUDART(cudaDeviceSynchronize()); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(value, h_data); } { // test that we can pass an arg object reference to a kernel Arg* arg = new Arg(-3); // references are passed as pointers since refernces are just pointers from // an ABI point of view CHECK_CUDA(program.kernel("class_arg_ref_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, arg)); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(arg->x, h_data); delete (arg); } { // test that we can pass an arg object reference to a kernel Arg* arg = new Arg(-4); CHECK_CUDA(program.kernel("class_arg_ptr_kernel") .instantiate(Type<Arg>()) .configure(grid, block) .launch(d_data, arg)); CHECK_CUDART( cudaMemcpy(&h_data, d_data, sizeof(int), cudaMemcpyDeviceToHost)); EXPECT_EQ(arg->x, h_data); delete (arg); } CHECK_CUDART(cudaFree(d_data)); } static const char* const assert_program_source = R"( #include <cassert> __global__ void my_assert_kernel() { assert(0 == 1); } )"; static const char* const get_attribute_program_source = R"( __global__ void get_attribute_kernel(int *out, int *in) { __shared__ int buffer[4096]; buffer[threadIdx.x] = in[threadIdx.x]; __syncthreads(); out[threadIdx.y] = buffer[threadIdx.x]; } )"; TEST(JitifyTest, GetAttribute) { // Checks that we can get function attributes jitify::JitCache kernel_cache; auto program = kernel_cache.program(get_attribute_program_source, {}, {"-I" CUDA_INC_DIR}); auto instance = program.kernel("get_attribute_kernel").instantiate(); EXPECT_EQ(4096 * (int)sizeof(int), instance.get_func_attribute(CU_FUNC_ATTRIBUTE_SHARED_SIZE_BYTES)); } static const char* const set_attribute_program_source = R"( __global__ void set_attribute_kernel(int *out, int *in) { extern __shared__ int buffer[]; 
buffer[threadIdx.x] = in[threadIdx.x]; __syncthreads(); out[threadIdx.y] = buffer[threadIdx.x]; } )"; TEST(JitifyTest, SetAttribute) { // Checks that we can set function attributes jitify::JitCache kernel_cache; int* in; CHECK_CUDART(cudaMalloc((void**)&in, sizeof(int))); int* out; CHECK_CUDART(cudaMalloc((void**)&out, sizeof(int))); // query the maximum supported shared bytes per block CUdevice device; CHECK_CUDA(cuDeviceGet(&device, 0)); int shared_bytes; CHECK_CUDA(cuDeviceGetAttribute( &shared_bytes, CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN, device)); auto program = kernel_cache.program(set_attribute_program_source, {}, {"-I" CUDA_INC_DIR}); auto instance = program.kernel("set_attribute_kernel").instantiate(); instance.set_func_attribute(CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES, shared_bytes); dim3 grid(1); dim3 block(1); // this kernel will fail on Volta+ unless the set attribute succeeded CHECK_CUDA(instance.configure(grid, block, shared_bytes).launch(out, in)); CHECK_CUDART(cudaFree(out)); CHECK_CUDART(cudaFree(in)); } TEST(JitifyTest, EnvVarOptions) { setenv("JITIFY_OPTIONS", "-bad_option", true); EXPECT_THROW(jitify::JitCache kernel_cache; auto program = kernel_cache.program(simple_program_source), std::runtime_error); EXPECT_THROW(jitify::experimental::Program program(simple_program_source), std::runtime_error); setenv("JITIFY_OPTIONS", "", true); } // NOTE: This MUST be the last test in the file, due to sticky CUDA error. TEST(JitifyTest, AssertHeader) { // Checks that cassert works as expected jitify::JitCache kernel_cache; auto program = kernel_cache.program(assert_program_source, {}, {"-I" CUDA_INC_DIR}); dim3 grid(1); dim3 block(1); CHECK_CUDA((program.kernel("my_assert_kernel") .instantiate<>() .configure(grid, block) .launch())); }
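The two jitify test files above exercise the runtime-compilation flow end to end: build a Program from a source string, instantiate a template kernel, configure a launch, and run it. A stripped-down sketch of that flow using the same calls that appear in the tests; the source string, kernel name, and run_sketch() helper are illustrative, not taken from the dataset.

#include "jitify.hpp"
#include <cuda_runtime.h>

// Minimal program string in the style of simple_program_source above:
// the first line is the program name, followed by a templated kernel.
static const char* const sketch_source =
    "sketch_program\n"
    "template<typename T>\n"
    "__global__ void add_one(T* data) { data[0] += T(1); }\n";

void run_sketch() {
  static jitify::JitCache cache;                 // caches compiled programs
  jitify::Program program = cache.program(sketch_source);

  float* d_data = nullptr;
  cudaMalloc((void**)&d_data, sizeof(float));
  cudaMemset(d_data, 0, sizeof(float));

  using jitify::reflection::type_of;
  program.kernel("add_one")
      .instantiate(type_of(*d_data))             // add_one<float>
      .configure(dim3(1), dim3(1))               // grid, block
      .launch(d_data);

  cudaFree(d_data);
}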
28da5c007d8c15ae60bb50cf6416df797e5d538e.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <assert.h> #include <stdio.h> // this method rewinds a matrix template <int batch_per_block> __global__ static void _cwc_kern_reorder_matrix_major_per_block_rows(float* a, float* b, const int count, const int channels, const int batch) { const int thidx = blockIdx.y * batch_per_block + threadIdx.y; b[(blockIdx.y * count + blockIdx.x) * channels * batch_per_block + threadIdx.y * channels + threadIdx.x] = a[(threadIdx.x * count + blockIdx.x) * batch + thidx]; } // this method rewinds a matrix template <int channel_per_block, int batch_per_block, int batch_group_per_block> __global__ static void _cwc_kern_reorder_matrix_major_per_block(float* a, float* b, const int count, const int channels, const int batch) { const int batch_group_idx = blockIdx.y % (batch / (batch_per_block * batch_group_per_block)); const int channel_group_idx = blockIdx.y / (batch / (batch_per_block * batch_group_per_block)); a += (channel_group_idx * channel_per_block * count + blockIdx.x) * batch + batch_group_idx * batch_per_block * batch_group_per_block; b += (batch_group_idx * batch_group_per_block * count + blockIdx.x) * channels * batch_per_block + channel_group_idx * channel_per_block; __shared__ float prod[channel_per_block][batch_per_block * batch_group_per_block]; int i, j; #pragma unroll for (i = 0; i < channel_per_block; i++) prod[i][threadIdx.x] = a[i * count * batch + threadIdx.x]; __syncthreads(); if (threadIdx.x < channel_per_block) #pragma unroll for (i = 0; i < batch_group_per_block; i++) #pragma unroll for (j = 0; j < batch_per_block; j++) b[(i * count * batch_per_block + j) * channels + threadIdx.x] = prod[threadIdx.x][i * batch_per_block + j]; } int main(int argc, char** argv) { float* in = 0; float* out = 0; hipMalloc(&in, sizeof(float) * (225 * 225 * 3 * 256)); hipMalloc(&out, sizeof(float) * (111 * 111 * 96 * 256)); float* in_host = 0; float* out_host = 0; int i, j, c, k; hipHostMalloc(&in_host, sizeof(float) * 225 * 225 * 3 * 256); for (i = 0; i < 225; i++) for (j = 0; j < 225; j++) for (c = 0; c < 3; c++) for (k = 0; k < 128; k++) in_host[i * 225 * 3 * 128 + j * 3 * 128 + c * 128 + k] = c * k; hipMemcpy(in, in_host, sizeof(float) * 225 * 225 * 3 * 128, hipMemcpyHostToDevice); hipHostMalloc(&out_host, sizeof(float) * 111 * 111 * 96 * 128); for (i = 0; i < 111; i++) for (j = 0; j < 111; j++) for (c = 0; c < 96; c++) for (k = 0; k < 128; k++) out_host[i * 111 * 96 * 128 + j * 96 * 128 + c * 128 + k] = c * k; hipMemcpy(out, out_host, sizeof(float) * 111 * 111 * 96 * 128, hipMemcpyHostToDevice); float* chin = 0; float* chout = 0; hipMalloc(&chin, sizeof(float) * (225 * 225 * 3 * 256)); hipMalloc(&chout, sizeof(float) * (111 * 111 * 96 * 256)); hipLaunchKernelGGL(( _cwc_kern_reorder_matrix_major_per_block_rows <8>) , dim3(dim3(225 * 225, 128 / (8 * 2))), dim3(dim3(3, 8)), 0, 0, in, chin, 225 * 225, 3, 128); hipLaunchKernelGGL(( _cwc_kern_reorder_matrix_major_per_block <3, 8, 2>) , dim3(dim3(225 * 225, 128 / (8 * 2))), dim3(16), sizeof(float) * 3 * 8 * 2, 0, in, chin, 225 * 225, 3, 128); hipLaunchKernelGGL(( _cwc_kern_reorder_matrix_major_per_block <16, 8, 2>) , dim3(dim3(111 * 111, (96 / 16) * (128 / (8 * 2)))), dim3(16), sizeof(float) * 16 * 8 * 2, 0, out, chout, 111 * 111, 96, 128); hipFree(out); hipFree(in); hipHostFree(out_host); hipHostFree(in_host); return 0; }
28da5c007d8c15ae60bb50cf6416df797e5d538e.cu
#include <cuda.h> #include <assert.h> #include <stdio.h> // this method rewinds a matrix template <int batch_per_block> __global__ static void _cwc_kern_reorder_matrix_major_per_block_rows(float* a, float* b, const int count, const int channels, const int batch) { const int thidx = blockIdx.y * batch_per_block + threadIdx.y; b[(blockIdx.y * count + blockIdx.x) * channels * batch_per_block + threadIdx.y * channels + threadIdx.x] = a[(threadIdx.x * count + blockIdx.x) * batch + thidx]; } // this method rewinds a matrix template <int channel_per_block, int batch_per_block, int batch_group_per_block> __global__ static void _cwc_kern_reorder_matrix_major_per_block(float* a, float* b, const int count, const int channels, const int batch) { const int batch_group_idx = blockIdx.y % (batch / (batch_per_block * batch_group_per_block)); const int channel_group_idx = blockIdx.y / (batch / (batch_per_block * batch_group_per_block)); a += (channel_group_idx * channel_per_block * count + blockIdx.x) * batch + batch_group_idx * batch_per_block * batch_group_per_block; b += (batch_group_idx * batch_group_per_block * count + blockIdx.x) * channels * batch_per_block + channel_group_idx * channel_per_block; __shared__ float prod[channel_per_block][batch_per_block * batch_group_per_block]; int i, j; #pragma unroll for (i = 0; i < channel_per_block; i++) prod[i][threadIdx.x] = a[i * count * batch + threadIdx.x]; __syncthreads(); if (threadIdx.x < channel_per_block) #pragma unroll for (i = 0; i < batch_group_per_block; i++) #pragma unroll for (j = 0; j < batch_per_block; j++) b[(i * count * batch_per_block + j) * channels + threadIdx.x] = prod[threadIdx.x][i * batch_per_block + j]; } int main(int argc, char** argv) { float* in = 0; float* out = 0; cudaMalloc(&in, sizeof(float) * (225 * 225 * 3 * 256)); cudaMalloc(&out, sizeof(float) * (111 * 111 * 96 * 256)); float* in_host = 0; float* out_host = 0; int i, j, c, k; cudaMallocHost(&in_host, sizeof(float) * 225 * 225 * 3 * 256); for (i = 0; i < 225; i++) for (j = 0; j < 225; j++) for (c = 0; c < 3; c++) for (k = 0; k < 128; k++) in_host[i * 225 * 3 * 128 + j * 3 * 128 + c * 128 + k] = c * k; cudaMemcpy(in, in_host, sizeof(float) * 225 * 225 * 3 * 128, cudaMemcpyHostToDevice); cudaMallocHost(&out_host, sizeof(float) * 111 * 111 * 96 * 128); for (i = 0; i < 111; i++) for (j = 0; j < 111; j++) for (c = 0; c < 96; c++) for (k = 0; k < 128; k++) out_host[i * 111 * 96 * 128 + j * 96 * 128 + c * 128 + k] = c * k; cudaMemcpy(out, out_host, sizeof(float) * 111 * 111 * 96 * 128, cudaMemcpyHostToDevice); float* chin = 0; float* chout = 0; cudaMalloc(&chin, sizeof(float) * (225 * 225 * 3 * 256)); cudaMalloc(&chout, sizeof(float) * (111 * 111 * 96 * 256)); _cwc_kern_reorder_matrix_major_per_block_rows <8> <<<dim3(225 * 225, 128 / (8 * 2)), dim3(3, 8)>>> (in, chin, 225 * 225, 3, 128); _cwc_kern_reorder_matrix_major_per_block <3, 8, 2> <<<dim3(225 * 225, 128 / (8 * 2)), 16, sizeof(float) * 3 * 8 * 2>>> (in, chin, 225 * 225, 3, 128); _cwc_kern_reorder_matrix_major_per_block <16, 8, 2> <<<dim3(111 * 111, (96 / 16) * (128 / (8 * 2))), 16, sizeof(float) * 16 * 8 * 2>>> (out, chout, 111 * 111, 96, 128); cudaFree(out); cudaFree(in); cudaFreeHost(out_host); cudaFreeHost(in_host); return 0; }
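This .hip/.cu pair makes the core hipify rewrites easy to compare: cuda.h becomes hip/hip_runtime.h, cudaMalloc/cudaMallocHost/cudaFreeHost become hipMalloc/hipHostMalloc/hipHostFree, and the triple-chevron launch becomes hipLaunchKernelGGL. Below is a self-contained HIP sketch of the launch rewrite; scale_kernel and the sizes are made up for illustration and do not come from the files above.

#include <hip/hip_runtime.h>

__global__ void scale_kernel(float* data, float factor) {
  data[threadIdx.x] *= factor;
}

int main() {
  const int n = 32;
  float* d = nullptr;
  hipMalloc((void**)&d, n * sizeof(float));
  hipMemset(d, 0, n * sizeof(float));

  // CUDA form:  scale_kernel<<<dim3(1), dim3(n), 0, 0>>>(d, 2.0f);
  // HIP form produced by hipify: kernel, grid, block, dynamic shared bytes,
  // stream, then the kernel arguments.
  hipLaunchKernelGGL(scale_kernel, dim3(1), dim3(n), 0, 0, d, 2.0f);

  hipDeviceSynchronize();
  hipFree(d);
  return 0;
}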
43c42cc76d1ddec27e07f0a4030b650ce052f6cb.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <benchmark/benchmark.h> #include "init/init.hpp" #include "unsafe_reduction/args.hpp" #include "utils/utils.hpp" #include "kernel_hip.cuh" using namespace wmma_unsafe_reduction; template <size_t SEGMENT_SIZE, int WARPS_PER_BLOCK> void tryCUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N(benchmark::State &state) { const size_t num_segments = state.range(0); const size_t segment_size = state.range(1); if (segment_size != SEGMENT_SIZE) { state.SkipWithError(fmt::format("segment_size={} must be equal to SEGMENT_SIZE={} ", segment_size, SEGMENT_SIZE) .c_str()); return; } const int BLOCK_DIM = WARPS_PER_BLOCK * WARP_SIZE; const size_t num_elements = num_segments * segment_size; const int segments_per_block = WARPS_PER_BLOCK * 16; defer(hipDeviceReset()); half *d_in_fp16 = nullptr; half *d_out = nullptr; hipEvent_t start, stop; try { PRINT_IF_ERROR(hipMalloc(&d_in_fp16, num_elements * sizeof(half))); PRINT_IF_ERROR(hipMalloc(&d_out, num_segments * sizeof(half))); cuda_memory_set(d_in_fp16, 0.001f, num_elements); dim3 gridDim, blockDim; blockDim.x = BLOCK_DIM; gridDim.x = (num_segments + segments_per_block - 1) / segments_per_block; if (gridDim.x >= CUDA_MAX_GRID_SIZE) { state.SkipWithError( fmt::format("gridDim.x={} is greater than CUDA_MAX_GRID_SIZE", gridDim.x) .c_str()); return; } PRINT_IF_ERROR(hipEventCreate(&start)); PRINT_IF_ERROR(hipEventCreate(&stop)); defer(hipEventDestroy(start)); defer(hipEventDestroy(stop)); for (auto _ : state) { PRINT_IF_ERROR(hipEventRecord(start)); hipLaunchKernelGGL(( compute_wmma_segmented_reduction_16n<SEGMENT_SIZE, WARPS_PER_BLOCK, BLOCK_DIM>) , dim3(gridDim), dim3(blockDim), 0, 0, d_in_fp16, d_out, num_segments); PRINT_IF_ERROR(hipEventRecord(stop)); PRINT_IF_ERROR(hipEventSynchronize(stop)); /* state.SkipWithError("break"); */ state.PauseTiming(); float msecTotal = 0.0f; PRINT_IF_ERROR(hipEventElapsedTime(&msecTotal, start, stop)); state.SetIterationTime(msecTotal / 1000); state.ResumeTiming(); } state.counters.insert({{"num_segments", num_segments}, {"segment_size", segment_size}, {"num_elements", num_segments * segment_size}, {"warps_per_block", WARPS_PER_BLOCK}, {"flops", {state.iterations() * 1.0 * num_segments * segment_size, benchmark::Counter::kAvgThreadsRate}}}); #if 0 half *h_out = new half[num_segments]; PRINT_IF_ERROR(hipMemcpy(h_out, d_out, num_segments * sizeof(half), hipMemcpyDeviceToHost)); int errors = 0; for (int j = 0; j < num_segments; j++) { float correct_segment_sum = 0; for (int i = 0; i < segment_size; i++) { correct_segment_sum += h_in[j * segment_size + i]; } if (fabs(half_to_float(h_out[j]) - correct_segment_sum) > 0.1) { errors++; printf("Expected %f, get h_out[%d] = %f\n", correct_segment_sum, j, half_to_float(h_out[j])); } } if (errors > 0) { printf( "CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N does not agree with SEQUENTIAL! %d errors!\n", errors); } else { printf("Results verified: they agree.\n\n"); } delete h_out; #endif hipFree(d_in_fp16); hipFree(d_out); } catch (...) 
{ hipFree(d_in_fp16); hipFree(d_out); hipDeviceReset(); const auto p = std::current_exception(); std::rethrow_exception(p); } } template <size_t SEGMENT_SIZE, int WARPS_PER_BLOCK> static void iCUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N(benchmark::State &state) { hipDeviceReset(); try { tryCUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N<SEGMENT_SIZE, WARPS_PER_BLOCK>(state); } catch (const std::exception &e) { state.SkipWithError(e.what()); } catch (const std::string &e) { state.SkipWithError(e.c_str()); } catch (...) { state.SkipWithError("unknown exception"); } } template <int WARPS_PER_BLOCK> static void CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N(benchmark::State &state) { const size_t segment_size = state.range(1); switch (segment_size) { #define Dispatch(N) \ case N: \ iCUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N<N, WARPS_PER_BLOCK>(state); \ break Dispatch(16); Dispatch(32); Dispatch(64); Dispatch(128); Dispatch(256); Dispatch(512); Dispatch(1024); Dispatch(2048); Dispatch(4096); Dispatch(8192); Dispatch(16384); Dispatch(32768); Dispatch(65536); Dispatch(131072); Dispatch(262144); Dispatch(524288); Dispatch(1048576); Dispatch(2097152); Dispatch(4194304); Dispatch(8388608); Dispatch(16777216); Dispatch(33554432); Dispatch(67108864); Dispatch(134217728); Dispatch(268435456); Dispatch(536870912); Dispatch(1073741824); default: static_assert(true, "invalid segment size"); state.SkipWithError("invalid segment size"); #undef DISPATCH } } template <int WARPS_PER_BLOCK> static void CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_16N(benchmark::State &state) { CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N<WARPS_PER_BLOCK>(state); } #define RUN_CUDA_UNSAFE_WMMA_TUNE(TUNE_ARGS) \ BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_16N, 1) \ ->Apply(TUNE_ARGS) \ ->UseManualTime(); \ BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_16N, 2) \ ->Apply(TUNE_ARGS) \ ->UseManualTime(); \ BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_16N, 4) \ ->Apply(TUNE_ARGS) \ ->UseManualTime(); \ BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_16N, 8) \ ->Apply(TUNE_ARGS) \ ->UseManualTime(); \ BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_16N, 16) \ ->Apply(TUNE_ARGS) \ ->UseManualTime(); // RUN_CUDA_UNSAFE_WMMA_TUNE(Tuning16_x_14); // RUN_CUDA_UNSAFE_WMMA_TUNE(Tuning16_x_18); // RUN_CUDA_UNSAFE_WMMA_TUNE(Tuning16_x_22); // RUN_CUDA_UNSAFE_WMMA_TUNE(Tuning16_x_26); RUN_CUDA_UNSAFE_WMMA_TUNE(Tuning16_x_30); #define RUN_CUDA_UNSAFE_WMMA(Args) \ BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N, 1) \ ->Args() \ ->UseManualTime(); \ BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N, 2) \ ->Args() \ ->UseManualTime(); \ BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N, 4) \ ->Args() \ ->UseManualTime(); \ BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N, 8) \ ->Args() \ ->UseManualTime(); \ BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N, 16) \ ->Args() \ ->UseManualTime(); RUN_CUDA_UNSAFE_WMMA(SEG_16_ARGS); RUN_CUDA_UNSAFE_WMMA(SEG_32_ARGS); RUN_CUDA_UNSAFE_WMMA(SEG_64_ARGS); RUN_CUDA_UNSAFE_WMMA(SEG_128_ARGS); RUN_CUDA_UNSAFE_WMMA(SEG_256_ARGS); RUN_CUDA_UNSAFE_WMMA(SEG_512_ARGS); RUN_CUDA_UNSAFE_WMMA(SEG_1024_ARGS);
43c42cc76d1ddec27e07f0a4030b650ce052f6cb.cu
#include <benchmark/benchmark.h> #include "init/init.hpp" #include "unsafe_reduction/args.hpp" #include "utils/utils.hpp" #include "kernel.cuh" using namespace wmma_unsafe_reduction; template <size_t SEGMENT_SIZE, int WARPS_PER_BLOCK> void tryCUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N(benchmark::State &state) { const size_t num_segments = state.range(0); const size_t segment_size = state.range(1); if (segment_size != SEGMENT_SIZE) { state.SkipWithError(fmt::format("segment_size={} must be equal to SEGMENT_SIZE={} ", segment_size, SEGMENT_SIZE) .c_str()); return; } const int BLOCK_DIM = WARPS_PER_BLOCK * WARP_SIZE; const size_t num_elements = num_segments * segment_size; const int segments_per_block = WARPS_PER_BLOCK * 16; defer(cudaDeviceReset()); half *d_in_fp16 = nullptr; half *d_out = nullptr; cudaEvent_t start, stop; try { PRINT_IF_ERROR(cudaMalloc(&d_in_fp16, num_elements * sizeof(half))); PRINT_IF_ERROR(cudaMalloc(&d_out, num_segments * sizeof(half))); cuda_memory_set(d_in_fp16, 0.001f, num_elements); dim3 gridDim, blockDim; blockDim.x = BLOCK_DIM; gridDim.x = (num_segments + segments_per_block - 1) / segments_per_block; if (gridDim.x >= CUDA_MAX_GRID_SIZE) { state.SkipWithError( fmt::format("gridDim.x={} is greater than CUDA_MAX_GRID_SIZE", gridDim.x) .c_str()); return; } PRINT_IF_ERROR(cudaEventCreate(&start)); PRINT_IF_ERROR(cudaEventCreate(&stop)); defer(cudaEventDestroy(start)); defer(cudaEventDestroy(stop)); for (auto _ : state) { PRINT_IF_ERROR(cudaEventRecord(start)); compute_wmma_segmented_reduction_16n<SEGMENT_SIZE, WARPS_PER_BLOCK, BLOCK_DIM> <<<gridDim, blockDim>>>(d_in_fp16, d_out, num_segments); PRINT_IF_ERROR(cudaEventRecord(stop)); PRINT_IF_ERROR(cudaEventSynchronize(stop)); /* state.SkipWithError("break"); */ state.PauseTiming(); float msecTotal = 0.0f; PRINT_IF_ERROR(cudaEventElapsedTime(&msecTotal, start, stop)); state.SetIterationTime(msecTotal / 1000); state.ResumeTiming(); } state.counters.insert({{"num_segments", num_segments}, {"segment_size", segment_size}, {"num_elements", num_segments * segment_size}, {"warps_per_block", WARPS_PER_BLOCK}, {"flops", {state.iterations() * 1.0 * num_segments * segment_size, benchmark::Counter::kAvgThreadsRate}}}); #if 0 half *h_out = new half[num_segments]; PRINT_IF_ERROR(cudaMemcpy(h_out, d_out, num_segments * sizeof(half), cudaMemcpyDeviceToHost)); int errors = 0; for (int j = 0; j < num_segments; j++) { float correct_segment_sum = 0; for (int i = 0; i < segment_size; i++) { correct_segment_sum += h_in[j * segment_size + i]; } if (fabs(half_to_float(h_out[j]) - correct_segment_sum) > 0.1) { errors++; printf("Expected %f, get h_out[%d] = %f\n", correct_segment_sum, j, half_to_float(h_out[j])); } } if (errors > 0) { printf( "CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N does not agree with SEQUENTIAL! %d errors!\n", errors); } else { printf("Results verified: they agree.\n\n"); } delete h_out; #endif cudaFree(d_in_fp16); cudaFree(d_out); } catch (...) { cudaFree(d_in_fp16); cudaFree(d_out); cudaDeviceReset(); const auto p = std::current_exception(); std::rethrow_exception(p); } } template <size_t SEGMENT_SIZE, int WARPS_PER_BLOCK> static void iCUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N(benchmark::State &state) { cudaDeviceReset(); try { tryCUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N<SEGMENT_SIZE, WARPS_PER_BLOCK>(state); } catch (const std::exception &e) { state.SkipWithError(e.what()); } catch (const std::string &e) { state.SkipWithError(e.c_str()); } catch (...) 
{ state.SkipWithError("unknown exception"); } } template <int WARPS_PER_BLOCK> static void CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N(benchmark::State &state) { const size_t segment_size = state.range(1); switch (segment_size) { #define Dispatch(N) \ case N: \ iCUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N<N, WARPS_PER_BLOCK>(state); \ break Dispatch(16); Dispatch(32); Dispatch(64); Dispatch(128); Dispatch(256); Dispatch(512); Dispatch(1024); Dispatch(2048); Dispatch(4096); Dispatch(8192); Dispatch(16384); Dispatch(32768); Dispatch(65536); Dispatch(131072); Dispatch(262144); Dispatch(524288); Dispatch(1048576); Dispatch(2097152); Dispatch(4194304); Dispatch(8388608); Dispatch(16777216); Dispatch(33554432); Dispatch(67108864); Dispatch(134217728); Dispatch(268435456); Dispatch(536870912); Dispatch(1073741824); default: static_assert(true, "invalid segment size"); state.SkipWithError("invalid segment size"); #undef DISPATCH } } template <int WARPS_PER_BLOCK> static void CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_16N(benchmark::State &state) { CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N<WARPS_PER_BLOCK>(state); } #define RUN_CUDA_UNSAFE_WMMA_TUNE(TUNE_ARGS) \ BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_16N, 1) \ ->Apply(TUNE_ARGS) \ ->UseManualTime(); \ BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_16N, 2) \ ->Apply(TUNE_ARGS) \ ->UseManualTime(); \ BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_16N, 4) \ ->Apply(TUNE_ARGS) \ ->UseManualTime(); \ BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_16N, 8) \ ->Apply(TUNE_ARGS) \ ->UseManualTime(); \ BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_TUNE_SEGMENTED_REDUCTION_16N, 16) \ ->Apply(TUNE_ARGS) \ ->UseManualTime(); // RUN_CUDA_UNSAFE_WMMA_TUNE(Tuning16_x_14); // RUN_CUDA_UNSAFE_WMMA_TUNE(Tuning16_x_18); // RUN_CUDA_UNSAFE_WMMA_TUNE(Tuning16_x_22); // RUN_CUDA_UNSAFE_WMMA_TUNE(Tuning16_x_26); RUN_CUDA_UNSAFE_WMMA_TUNE(Tuning16_x_30); #define RUN_CUDA_UNSAFE_WMMA(Args) \ BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N, 1) \ ->Args() \ ->UseManualTime(); \ BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N, 2) \ ->Args() \ ->UseManualTime(); \ BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N, 4) \ ->Args() \ ->UseManualTime(); \ BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N, 8) \ ->Args() \ ->UseManualTime(); \ BENCHMARK_TEMPLATE(CUDA_UNSAFE_WMMA_SEGMENTED_REDUCTION_16N, 16) \ ->Args() \ ->UseManualTime(); RUN_CUDA_UNSAFE_WMMA(SEG_16_ARGS); RUN_CUDA_UNSAFE_WMMA(SEG_32_ARGS); RUN_CUDA_UNSAFE_WMMA(SEG_64_ARGS); RUN_CUDA_UNSAFE_WMMA(SEG_128_ARGS); RUN_CUDA_UNSAFE_WMMA(SEG_256_ARGS); RUN_CUDA_UNSAFE_WMMA(SEG_512_ARGS); RUN_CUDA_UNSAFE_WMMA(SEG_1024_ARGS);
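The benchmark above times each kernel launch with CUDA events (hipEvent in the .hip version) and reports the result through google-benchmark's manual-time mode. The event-timing pattern on its own, reduced to a plain CUDA program with a placeholder dummy_kernel:

#include <cuda_runtime.h>
#include <cstdio>

__global__ void dummy_kernel() {}

int main() {
  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);

  cudaEventRecord(start);          // enqueue start marker on the default stream
  dummy_kernel<<<1, 1>>>();
  cudaEventRecord(stop);           // enqueue stop marker after the kernel
  cudaEventSynchronize(stop);      // block until the kernel and stop marker finish

  float msec = 0.0f;
  cudaEventElapsedTime(&msec, start, stop);   // elapsed GPU time in milliseconds
  printf("kernel time: %f ms\n", msec);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  return 0;
}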
4fa54a758152132609012427eb9c1d09a6925f17.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//Added by Karel Adamek

#include "headers/params.h"
#include "device_MSD_BLN_grid_kernel.hip"

void MSD_BLN_grid_init(){
	//---------> Specific nVidia stuff
	hipDeviceSetCacheConfig (hipFuncCachePreferShared);
	hipDeviceSetSharedMemConfig (hipSharedMemBankSizeFourByte);
}

int MSD_BLN_grid(float *d_input, float *d_MSD, int CellDim_x, int CellDim_y, int nDMs, int nTimesamples, int offset, float multiplier){
	//---------> Task specific
	int GridSize_x, GridSize_y, x_steps, y_steps, nThreads;
	GridSize_x=(nTimesamples-offset)/CellDim_x;
	GridSize_y=nDMs/CellDim_y;
	x_steps=CellDim_x/WARP;
	if(CellDim_y<HALF_WARP) {
		y_steps = 1;
		nThreads = WARP*CellDim_y;
	}
	else {
		nThreads = WARP*HALF_WARP;
		y_steps = CellDim_y/HALF_WARP;
	}

	//---------> Initial phase
	dim3 gridSize(GridSize_x, GridSize_y, 1);
	dim3 blockSize(nThreads, 1, 1);

	//---------> Final phase
	dim3 final_gridSize(1, 1, 1);
	dim3 final_blockSize(WARP*WARP, 1, 1);

	//---------> Allocation of temporary memory
	float *d_output;
	hipMalloc((void **) &d_output, GridSize_x*GridSize_y*3*sizeof(float));

	//---------> MSD
	MSD_BLN_grid_init();
	hipLaunchKernelGGL(( MSD_BLN_grid_calculate_partials), dim3(gridSize),dim3(blockSize),nThreads*8, 0, d_input, d_output, x_steps, y_steps, nTimesamples, 0);
	hipLaunchKernelGGL(( MSD_BLN_grid_outlier_rejection), dim3(final_gridSize), dim3(final_blockSize), 0, 0, d_output, d_MSD, GridSize_x*GridSize_y, (float) (CellDim_x*CellDim_y), multiplier);

	//---------> De-allocation of temporary memory
	hipFree(d_output);

	return(1);
}
4fa54a758152132609012427eb9c1d09a6925f17.cu
//Added by Karel Adamek #include "headers/params.h" #include "device_MSD_BLN_grid_kernel.cu" void MSD_BLN_grid_init(){ //---------> Specific nVidia stuff cudaDeviceSetCacheConfig (cudaFuncCachePreferShared); cudaDeviceSetSharedMemConfig (cudaSharedMemBankSizeFourByte); } int MSD_BLN_grid(float *d_input, float *d_MSD, int CellDim_x, int CellDim_y, int nDMs, int nTimesamples, int offset, float multiplier){ //---------> Task specific int GridSize_x, GridSize_y, x_steps, y_steps, nThreads; GridSize_x=(nTimesamples-offset)/CellDim_x; GridSize_y=nDMs/CellDim_y; x_steps=CellDim_x/WARP; if(CellDim_y<HALF_WARP) { y_steps = 1; nThreads = WARP*CellDim_y; } else { nThreads = WARP*HALF_WARP; y_steps = CellDim_y/HALF_WARP; } //---------> Initial phase dim3 gridSize(GridSize_x, GridSize_y, 1); dim3 blockSize(nThreads, 1, 1); //---------> Final phase dim3 final_gridSize(1, 1, 1); dim3 final_blockSize(WARP*WARP, 1, 1); //---------> Allocation of temporary memory float *d_output; cudaMalloc((void **) &d_output, GridSize_x*GridSize_y*3*sizeof(float)); //---------> MSD MSD_BLN_grid_init(); MSD_BLN_grid_calculate_partials<<<gridSize,blockSize,nThreads*8>>>(d_input, d_output, x_steps, y_steps, nTimesamples, 0); MSD_BLN_grid_outlier_rejection<<<final_gridSize, final_blockSize>>>(d_output, d_MSD, GridSize_x*GridSize_y, (float) (CellDim_x*CellDim_y), multiplier); //---------> De-allocation of temporary memory cudaFree(d_output); return(1); }
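This pair shows the main mechanical change hipify makes to a launch: the CUDA <<<grid, block, sharedMem>>> triple-chevron becomes a hipLaunchKernelGGL call with explicit dim3 and stream arguments. Below is a small, self-contained CUDA sketch of the same launch shape, a kernel with dynamically sized shared memory; the kernel name partial_sums and the sizes are invented for illustration, and the HIP form appears only as a comment.

// Launching a kernel with dynamic shared memory, sized at launch time.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void partial_sums(const float* in, float* out, int n) {
  extern __shared__ float s_buf[];  // size supplied by the launch
  int tid = threadIdx.x;
  int i = blockIdx.x * blockDim.x + tid;
  s_buf[tid] = (i < n) ? in[i] : 0.0f;
  __syncthreads();
  if (tid == 0) {
    float sum = 0.0f;
    for (int t = 0; t < blockDim.x; ++t) sum += s_buf[t];
    out[blockIdx.x] = sum;
  }
}

int main() {
  const int n = 1 << 10, threads = 128;
  const int blocks = (n + threads - 1) / threads;
  float *d_in, *d_out;
  cudaMalloc(&d_in, n * sizeof(float));
  cudaMalloc(&d_out, blocks * sizeof(float));
  cudaMemset(d_in, 0, n * sizeof(float));
  // HIP equivalent produced by hipify:
  // hipLaunchKernelGGL(partial_sums, dim3(blocks), dim3(threads), threads * sizeof(float), 0, d_in, d_out, n);
  partial_sums<<<blocks, threads, threads * sizeof(float)>>>(d_in, d_out, n);
  cudaDeviceSynchronize();
  std::printf("launch status: %s\n", cudaGetErrorString(cudaGetLastError()));
  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}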
8801c3970863c0846396d874b2886168a7dd0c02.hip
// !!! This is a file automatically generated by hipify!!! // ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Author: Milad Rakhsha // ============================================================================= // // Base class for processing sph force in fsi system.// // ============================================================================= #include <thrust/extrema.h> #include <thrust/sort.h> #include "chrono_fsi/physics/ChFsiForce.cuh" #include "chrono_fsi/utils/ChUtilsDevice.cuh" #include "chrono_fsi/physics/ChSphGeneral.cuh" //========================================================================================================================================== namespace chrono { namespace fsi { ChFsiForce::ChFsiForce(std::shared_ptr<ChBce> otherBceWorker, std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD, std::shared_ptr<ProximityDataD> otherMarkersProximityD, std::shared_ptr<FsiGeneralData> otherFsiGeneralData, std::shared_ptr<SimParams> otherParamsH, std::shared_ptr<NumberOfObjects> otherNumObjects) : bceWorker(otherBceWorker), sortedSphMarkersD(otherSortedSphMarkersD), markersProximityD(otherMarkersProximityD), fsiGeneralData(otherFsiGeneralData), numObjectsH(otherNumObjects), paramsH(otherParamsH) { fsiCollisionSystem = chrono_types::make_shared<ChCollisionSystemFsi>(sortedSphMarkersD, markersProximityD, paramsH, numObjectsH); sphMarkersD = NULL; } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForce::Finalize() { hipMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams)); hipMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(NumberOfObjects)); printf("ChFsiForce::Finalize() number of all particles = %zd\n", numObjectsH->numAllMarkers); vel_XSPH_Sorted_D.resize(numObjectsH->numAllMarkers); vel_vis_Sorted_D.resize(numObjectsH->numAllMarkers); derivVelRhoD_Sorted_D.resize(numObjectsH->numAllMarkers); fsiCollisionSystem->Finalize(); } //-------------------------------------------------------------------------------------------------------------------------------- ChFsiForce::~ChFsiForce() {} void ChFsiForce::SetLinearSolver(ChFsiLinearSolver::SolverType other_solverType) { switch (other_solverType) { case ChFsiLinearSolver::SolverType::BICGSTAB: myLinearSolver = chrono_types::make_shared<ChFsiLinearSolverBiCGStab>(); break; case ChFsiLinearSolver::SolverType::GMRES: myLinearSolver = chrono_types::make_shared<ChFsiLinearSolverGMRES>(); break; default: myLinearSolver = chrono_types::make_shared<ChFsiLinearSolverBiCGStab>(); std::cout << "The ChFsiLinearSolver you chose has not been implemented, reverting back to " "ChFsiLinearSolverBiCGStab\n"; } } //-------------------------------------------------------------------------------------------------------------------------------- // Use invasive to avoid one extra copy. // However, keep in mind that sorted is changed. 
void ChFsiForce::CopySortedToOriginal_Invasive_R3(thrust::device_vector<Real3>& original, thrust::device_vector<Real3>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<uint> dummyMarkerIndex = gridMarkerIndex; thrust::sort_by_key(dummyMarkerIndex.begin(), dummyMarkerIndex.end(), sorted.begin()); dummyMarkerIndex.clear(); thrust::copy(sorted.begin(), sorted.end(), original.begin()); } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForce::CopySortedToOriginal_NonInvasive_R3(thrust::device_vector<Real3>& original, const thrust::device_vector<Real3>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<Real3> dummySorted = sorted; CopySortedToOriginal_Invasive_R3(original, dummySorted, gridMarkerIndex); } //-------------------------------------------------------------------------------------------------------------------------------- // Use invasive to avoid one extra copy. // However, keep in mind that sorted is changed. void ChFsiForce::CopySortedToOriginal_Invasive_R4(thrust::device_vector<Real4>& original, thrust::device_vector<Real4>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<uint> dummyMarkerIndex = gridMarkerIndex; thrust::sort_by_key(dummyMarkerIndex.begin(), dummyMarkerIndex.end(), sorted.begin()); dummyMarkerIndex.clear(); thrust::copy(sorted.begin(), sorted.end(), original.begin()); } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForce::CopySortedToOriginal_NonInvasive_R4(thrust::device_vector<Real4>& original, thrust::device_vector<Real4>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<Real4> dummySorted = sorted; CopySortedToOriginal_Invasive_R4(original, dummySorted, gridMarkerIndex); } //-------------------------------------------------------------------------------------------------------------------------------- } // namespace fsi } // namespace chrono
8801c3970863c0846396d874b2886168a7dd0c02.cu
// ============================================================================= // PROJECT CHRONO - http://projectchrono.org // // Copyright (c) 2014 projectchrono.org // All rights reserved. // // Use of this source code is governed by a BSD-style license that can be found // in the LICENSE file at the top level of the distribution and at // http://projectchrono.org/license-chrono.txt. // // ============================================================================= // Author: Milad Rakhsha // ============================================================================= // // Base class for processing sph force in fsi system.// // ============================================================================= #include <thrust/extrema.h> #include <thrust/sort.h> #include "chrono_fsi/physics/ChFsiForce.cuh" #include "chrono_fsi/utils/ChUtilsDevice.cuh" #include "chrono_fsi/physics/ChSphGeneral.cuh" //========================================================================================================================================== namespace chrono { namespace fsi { ChFsiForce::ChFsiForce(std::shared_ptr<ChBce> otherBceWorker, std::shared_ptr<SphMarkerDataD> otherSortedSphMarkersD, std::shared_ptr<ProximityDataD> otherMarkersProximityD, std::shared_ptr<FsiGeneralData> otherFsiGeneralData, std::shared_ptr<SimParams> otherParamsH, std::shared_ptr<NumberOfObjects> otherNumObjects) : bceWorker(otherBceWorker), sortedSphMarkersD(otherSortedSphMarkersD), markersProximityD(otherMarkersProximityD), fsiGeneralData(otherFsiGeneralData), numObjectsH(otherNumObjects), paramsH(otherParamsH) { fsiCollisionSystem = chrono_types::make_shared<ChCollisionSystemFsi>(sortedSphMarkersD, markersProximityD, paramsH, numObjectsH); sphMarkersD = NULL; } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForce::Finalize() { cudaMemcpyToSymbolAsync(paramsD, paramsH.get(), sizeof(SimParams)); cudaMemcpyToSymbolAsync(numObjectsD, numObjectsH.get(), sizeof(NumberOfObjects)); printf("ChFsiForce::Finalize() number of all particles = %zd\n", numObjectsH->numAllMarkers); vel_XSPH_Sorted_D.resize(numObjectsH->numAllMarkers); vel_vis_Sorted_D.resize(numObjectsH->numAllMarkers); derivVelRhoD_Sorted_D.resize(numObjectsH->numAllMarkers); fsiCollisionSystem->Finalize(); } //-------------------------------------------------------------------------------------------------------------------------------- ChFsiForce::~ChFsiForce() {} void ChFsiForce::SetLinearSolver(ChFsiLinearSolver::SolverType other_solverType) { switch (other_solverType) { case ChFsiLinearSolver::SolverType::BICGSTAB: myLinearSolver = chrono_types::make_shared<ChFsiLinearSolverBiCGStab>(); break; case ChFsiLinearSolver::SolverType::GMRES: myLinearSolver = chrono_types::make_shared<ChFsiLinearSolverGMRES>(); break; default: myLinearSolver = chrono_types::make_shared<ChFsiLinearSolverBiCGStab>(); std::cout << "The ChFsiLinearSolver you chose has not been implemented, reverting back to " "ChFsiLinearSolverBiCGStab\n"; } } //-------------------------------------------------------------------------------------------------------------------------------- // Use invasive to avoid one extra copy. // However, keep in mind that sorted is changed. 
void ChFsiForce::CopySortedToOriginal_Invasive_R3(thrust::device_vector<Real3>& original, thrust::device_vector<Real3>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<uint> dummyMarkerIndex = gridMarkerIndex; thrust::sort_by_key(dummyMarkerIndex.begin(), dummyMarkerIndex.end(), sorted.begin()); dummyMarkerIndex.clear(); thrust::copy(sorted.begin(), sorted.end(), original.begin()); } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForce::CopySortedToOriginal_NonInvasive_R3(thrust::device_vector<Real3>& original, const thrust::device_vector<Real3>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<Real3> dummySorted = sorted; CopySortedToOriginal_Invasive_R3(original, dummySorted, gridMarkerIndex); } //-------------------------------------------------------------------------------------------------------------------------------- // Use invasive to avoid one extra copy. // However, keep in mind that sorted is changed. void ChFsiForce::CopySortedToOriginal_Invasive_R4(thrust::device_vector<Real4>& original, thrust::device_vector<Real4>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<uint> dummyMarkerIndex = gridMarkerIndex; thrust::sort_by_key(dummyMarkerIndex.begin(), dummyMarkerIndex.end(), sorted.begin()); dummyMarkerIndex.clear(); thrust::copy(sorted.begin(), sorted.end(), original.begin()); } //-------------------------------------------------------------------------------------------------------------------------------- void ChFsiForce::CopySortedToOriginal_NonInvasive_R4(thrust::device_vector<Real4>& original, thrust::device_vector<Real4>& sorted, const thrust::device_vector<uint>& gridMarkerIndex) { thrust::device_vector<Real4> dummySorted = sorted; CopySortedToOriginal_Invasive_R4(original, dummySorted, gridMarkerIndex); } //-------------------------------------------------------------------------------------------------------------------------------- } // namespace fsi } // namespace chrono
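CopySortedToOriginal_Invasive_R3/R4 above undo an earlier reordering: sorting the sorted values by a copy of the grid marker indices restores the original particle order before the result is copied out. A tiny standalone sketch of that idiom follows; the four-element data set is invented purely to make the permutation visible.

// Restoring original order with thrust::sort_by_key on a copy of the keys.
#include <cstdio>
#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/sort.h>

int main() {
  // Original order is 0,1,2,3; pretend an earlier pass sorted by keys 2,0,3,1.
  thrust::device_vector<unsigned int> gridMarkerIndex(4);
  gridMarkerIndex[0] = 2; gridMarkerIndex[1] = 0;
  gridMarkerIndex[2] = 3; gridMarkerIndex[3] = 1;

  thrust::device_vector<float> sorted(4);
  sorted[0] = 20.f; sorted[1] = 0.f; sorted[2] = 30.f; sorted[3] = 10.f;

  thrust::device_vector<float> original(4);

  // Invasive variant: the keys are copied, but "sorted" itself is permuted.
  thrust::device_vector<unsigned int> dummyIndex = gridMarkerIndex;
  thrust::sort_by_key(dummyIndex.begin(), dummyIndex.end(), sorted.begin());
  thrust::copy(sorted.begin(), sorted.end(), original.begin());

  for (int i = 0; i < 4; ++i)
    std::printf("original[%d] = %.0f\n", i, (float)original[i]);  // 0 10 20 30
  return 0;
}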
e04c2ee88a32bb64559af7d6c31fc978b3144a50.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2021-2022 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "cunumeric/matrix/syrk.h" #include "cunumeric/matrix/syrk_template.inl" #include "cunumeric/cuda_help.h" namespace cunumeric { using namespace legate; template <typename Syrk, typename VAL, typename CONS> static inline void syrk_template( Syrk syrk, VAL* lhs, const VAL* rhs, int32_t m, int32_t n, CONS _fake_param_for_type_inference) { auto context = get_cublas(); auto stream = get_cached_stream(); CHECK_CUBLAS(hipblasSetStream(context, stream)); auto uplo = HIPBLAS_FILL_MODE_LOWER; auto trans = HIPBLAS_OP_N; CONS alpha = -1.0; CONS beta = 1.0; CHECK_CUBLAS(syrk(context, uplo, trans, m, n, &alpha, rhs, m, &beta, lhs, m)); CHECK_CUDA_STREAM(stream); } template <> struct SyrkImplBody<VariantKind::GPU, Type::Code::FLOAT32> { void operator()(float* lhs, const float* rhs, int32_t m, int32_t n) { syrk_template(hipblasSsyrk, lhs, rhs, m, n, static_cast<float>(0)); } }; template <> struct SyrkImplBody<VariantKind::GPU, Type::Code::FLOAT64> { void operator()(double* lhs, const double* rhs, int32_t m, int32_t n) { syrk_template(hipblasDsyrk, lhs, rhs, m, n, static_cast<double>(0)); } }; template <> struct SyrkImplBody<VariantKind::GPU, Type::Code::COMPLEX64> { void operator()(complex<float>* lhs_, const complex<float>* rhs_, int32_t m, int32_t n) { auto lhs = reinterpret_cast<hipComplex*>(lhs_); auto rhs = reinterpret_cast<const hipComplex*>(rhs_); syrk_template(hipblasCherk, lhs, rhs, m, n, static_cast<float>(0)); } }; template <> struct SyrkImplBody<VariantKind::GPU, Type::Code::COMPLEX128> { void operator()(complex<double>* lhs_, const complex<double>* rhs_, int32_t m, int32_t n) { auto lhs = reinterpret_cast<hipDoubleComplex*>(lhs_); auto rhs = reinterpret_cast<const hipDoubleComplex*>(rhs_); // TODO: We're not actually calling syrk but calling hekr instead here, // as this task is used only for Cholesky factorization. syrk_template(hipblasZherk, lhs, rhs, m, n, static_cast<double>(0)); } }; /*static*/ void SyrkTask::gpu_variant(TaskContext& context) { syrk_template<VariantKind::GPU>(context); } } // namespace cunumeric
e04c2ee88a32bb64559af7d6c31fc978b3144a50.cu
/* Copyright 2021-2022 NVIDIA Corporation * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * */ #include "cunumeric/matrix/syrk.h" #include "cunumeric/matrix/syrk_template.inl" #include "cunumeric/cuda_help.h" namespace cunumeric { using namespace legate; template <typename Syrk, typename VAL, typename CONS> static inline void syrk_template( Syrk syrk, VAL* lhs, const VAL* rhs, int32_t m, int32_t n, CONS _fake_param_for_type_inference) { auto context = get_cublas(); auto stream = get_cached_stream(); CHECK_CUBLAS(cublasSetStream(context, stream)); auto uplo = CUBLAS_FILL_MODE_LOWER; auto trans = CUBLAS_OP_N; CONS alpha = -1.0; CONS beta = 1.0; CHECK_CUBLAS(syrk(context, uplo, trans, m, n, &alpha, rhs, m, &beta, lhs, m)); CHECK_CUDA_STREAM(stream); } template <> struct SyrkImplBody<VariantKind::GPU, Type::Code::FLOAT32> { void operator()(float* lhs, const float* rhs, int32_t m, int32_t n) { syrk_template(cublasSsyrk, lhs, rhs, m, n, static_cast<float>(0)); } }; template <> struct SyrkImplBody<VariantKind::GPU, Type::Code::FLOAT64> { void operator()(double* lhs, const double* rhs, int32_t m, int32_t n) { syrk_template(cublasDsyrk, lhs, rhs, m, n, static_cast<double>(0)); } }; template <> struct SyrkImplBody<VariantKind::GPU, Type::Code::COMPLEX64> { void operator()(complex<float>* lhs_, const complex<float>* rhs_, int32_t m, int32_t n) { auto lhs = reinterpret_cast<cuComplex*>(lhs_); auto rhs = reinterpret_cast<const cuComplex*>(rhs_); syrk_template(cublasCherk, lhs, rhs, m, n, static_cast<float>(0)); } }; template <> struct SyrkImplBody<VariantKind::GPU, Type::Code::COMPLEX128> { void operator()(complex<double>* lhs_, const complex<double>* rhs_, int32_t m, int32_t n) { auto lhs = reinterpret_cast<cuDoubleComplex*>(lhs_); auto rhs = reinterpret_cast<const cuDoubleComplex*>(rhs_); // TODO: We're not actually calling syrk but calling hekr instead here, // as this task is used only for Cholesky factorization. syrk_template(cublasZherk, lhs, rhs, m, n, static_cast<double>(0)); } }; /*static*/ void SyrkTask::gpu_variant(TaskContext& context) { syrk_template<VariantKind::GPU>(context); } } // namespace cunumeric
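The syrk_template helper above takes the concrete cuBLAS routine as an argument and uses a dummy trailing parameter only to pin down the scalar type of alpha and beta. The host-only sketch below shows the same dispatch idea without cuBLAS; saxpy_like and daxpy_like are stand-ins invented for illustration and are not cuBLAS functions.

// Passing the concrete routine as a callable; the last argument fixes the scalar type.
#include <cstdio>

void saxpy_like(int n, const float* alpha, const float* x, float* y) {
  for (int i = 0; i < n; ++i) y[i] += *alpha * x[i];
}
void daxpy_like(int n, const double* alpha, const double* x, double* y) {
  for (int i = 0; i < n; ++i) y[i] += *alpha * x[i];
}

template <typename Routine, typename VAL, typename CONS>
void run_template(Routine routine, VAL* y, const VAL* x, int n,
                  CONS /*fake param, only used for type inference*/) {
  CONS alpha = -1.0;  // same constant for every instantiation
  routine(n, &alpha, x, y);
}

int main() {
  float  xf[3] = {1, 2, 3}, yf[3] = {1, 1, 1};
  double xd[3] = {1, 2, 3}, yd[3] = {1, 1, 1};
  run_template(saxpy_like, yf, xf, 3, static_cast<float>(0));
  run_template(daxpy_like, yd, xd, 3, static_cast<double>(0));
  std::printf("yf[2] = %.1f, yd[2] = %.1f\n", yf[2], yd[2]);  // -2.0 -2.0
  return 0;
}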
7250dcf8e986f2e94f9b80e27d6e806ed2537cae.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "../utilities.cuh" using torch::Tensor; template <typename scalar_t> __global__ void Smear_CUDA_Forward_Kernel( const scalar_t* __restrict__ spixel_feats, const int* __restrict__ index_map, scalar_t* __restrict__ pixel_feats, const int n_spixels, const int n_spatial_dim, const int n_channels ) { const int nCurrInd= blockIdx.x * blockDim.x + threadIdx.x; // ind=i*w+j const int nCurrBatch = blockIdx.y; const int nBaseInd = nCurrBatch*n_channels; const int nCopyInd = static_cast<int>(index_map[nCurrBatch * n_spatial_dim + nCurrInd]); if (nCurrInd >= n_spatial_dim) return; if (nCopyInd >= n_spixels || nCopyInd < 0) return; // Ignore all invalid cases, could be unsafe here for (int c = 0; c < n_channels; c++) { pixel_feats[(nBaseInd + c) * n_spatial_dim + nCurrInd] = spixel_feats[(nBaseInd + c) * n_spixels + nCopyInd]; } } template <typename scalar_t> __global__ void Smear_CUDA_Backward_Kernel( const scalar_t* __restrict__ grad_output, const int* __restrict__ index_map, scalar_t* __restrict__ grad_spixel_feats, const int n_spixels, const int n_spatial_dim, const int n_channels ) { const int nCurrInd= blockIdx.x * blockDim.x + threadIdx.x; // ind=i*w+j const int nCurrBatch = blockIdx.y; const int nBaseInd = nCurrBatch*n_channels; const int nCopyInd = static_cast<int>(index_map[nCurrBatch * n_spatial_dim + nCurrInd]); if (nCurrInd >= n_spatial_dim) return; if (nCopyInd >= n_spixels || nCopyInd < 0) return; // Ignore all invalid cases, could be unsafe here for (int c = 0; c < n_channels; c++) { AtomicAdd( &grad_spixel_feats[(nBaseInd + c) * n_spixels + nCopyInd], grad_output[(nBaseInd + c) * n_spatial_dim + nCurrInd] ); } } Tensor Smear_CUDA_Forward(Tensor spixel_feats, Tensor index_map, int n_spixels) { const auto nBatchSize = spixel_feats.size(0); const auto nChannels = spixel_feats.size(1); const auto nHeight = index_map.size(2); const auto nWidth = index_map.size(3); Tensor tPixelFeats = torch::zeros({nBatchSize, nChannels, nHeight, nWidth}).type_as(spixel_feats); const int nThreads = 1024; const int nSpatialDim = nHeight * nWidth; const dim3 nBlocks((nSpatialDim + nThreads - 1) / nThreads, nBatchSize); AT_DISPATCH_FLOATING_TYPES(spixel_feats.type(), "smear forward", [&] { hipLaunchKernelGGL(( Smear_CUDA_Forward_Kernel<scalar_t>), dim3(nBlocks), dim3(nThreads), 0, 0, spixel_feats.data<scalar_t>(), index_map.data<int>(), tPixelFeats.data<scalar_t>(), n_spixels, nSpatialDim, nChannels); }); return tPixelFeats; } Tensor Smear_CUDA_Backward(Tensor grad_output, Tensor index_map, int n_spixels) { const auto nBatchSize = grad_output.size(0); const auto nChannels = grad_output.size(1); const auto nHeight = index_map.size(2); const auto nWidth = index_map.size(3); Tensor tGradSpixelFeats = torch::zeros({nBatchSize, nChannels, n_spixels}).type_as(grad_output); const int nThreads = 1024; const int nSpatialDim = nHeight * nWidth; const dim3 nBlocks((nSpatialDim + nThreads - 1) / nThreads, nBatchSize); AT_DISPATCH_FLOATING_TYPES(grad_output.type(), "smear backward", [&] { hipLaunchKernelGGL(( Smear_CUDA_Backward_Kernel<scalar_t>), dim3(nBlocks), dim3(nThreads), 0, 0, grad_output.data<scalar_t>(), index_map.data<int>(), tGradSpixelFeats.data<scalar_t>(), n_spixels, nSpatialDim, nChannels); }); return tGradSpixelFeats; }
7250dcf8e986f2e94f9b80e27d6e806ed2537cae.cu
#include <torch/extension.h> #include <cuda.h> #include <cuda_runtime.h> #include "../utilities.cuh" using torch::Tensor; template <typename scalar_t> __global__ void Smear_CUDA_Forward_Kernel( const scalar_t* __restrict__ spixel_feats, const int* __restrict__ index_map, scalar_t* __restrict__ pixel_feats, const int n_spixels, const int n_spatial_dim, const int n_channels ) { const int nCurrInd= blockIdx.x * blockDim.x + threadIdx.x; // ind=i*w+j const int nCurrBatch = blockIdx.y; const int nBaseInd = nCurrBatch*n_channels; const int nCopyInd = static_cast<int>(index_map[nCurrBatch * n_spatial_dim + nCurrInd]); if (nCurrInd >= n_spatial_dim) return; if (nCopyInd >= n_spixels || nCopyInd < 0) return; // Ignore all invalid cases, could be unsafe here for (int c = 0; c < n_channels; c++) { pixel_feats[(nBaseInd + c) * n_spatial_dim + nCurrInd] = spixel_feats[(nBaseInd + c) * n_spixels + nCopyInd]; } } template <typename scalar_t> __global__ void Smear_CUDA_Backward_Kernel( const scalar_t* __restrict__ grad_output, const int* __restrict__ index_map, scalar_t* __restrict__ grad_spixel_feats, const int n_spixels, const int n_spatial_dim, const int n_channels ) { const int nCurrInd= blockIdx.x * blockDim.x + threadIdx.x; // ind=i*w+j const int nCurrBatch = blockIdx.y; const int nBaseInd = nCurrBatch*n_channels; const int nCopyInd = static_cast<int>(index_map[nCurrBatch * n_spatial_dim + nCurrInd]); if (nCurrInd >= n_spatial_dim) return; if (nCopyInd >= n_spixels || nCopyInd < 0) return; // Ignore all invalid cases, could be unsafe here for (int c = 0; c < n_channels; c++) { AtomicAdd( &grad_spixel_feats[(nBaseInd + c) * n_spixels + nCopyInd], grad_output[(nBaseInd + c) * n_spatial_dim + nCurrInd] ); } } Tensor Smear_CUDA_Forward(Tensor spixel_feats, Tensor index_map, int n_spixels) { const auto nBatchSize = spixel_feats.size(0); const auto nChannels = spixel_feats.size(1); const auto nHeight = index_map.size(2); const auto nWidth = index_map.size(3); Tensor tPixelFeats = torch::zeros({nBatchSize, nChannels, nHeight, nWidth}).type_as(spixel_feats); const int nThreads = 1024; const int nSpatialDim = nHeight * nWidth; const dim3 nBlocks((nSpatialDim + nThreads - 1) / nThreads, nBatchSize); AT_DISPATCH_FLOATING_TYPES(spixel_feats.type(), "smear forward", [&] { Smear_CUDA_Forward_Kernel<scalar_t><<<nBlocks, nThreads>>>( spixel_feats.data<scalar_t>(), index_map.data<int>(), tPixelFeats.data<scalar_t>(), n_spixels, nSpatialDim, nChannels); }); return tPixelFeats; } Tensor Smear_CUDA_Backward(Tensor grad_output, Tensor index_map, int n_spixels) { const auto nBatchSize = grad_output.size(0); const auto nChannels = grad_output.size(1); const auto nHeight = index_map.size(2); const auto nWidth = index_map.size(3); Tensor tGradSpixelFeats = torch::zeros({nBatchSize, nChannels, n_spixels}).type_as(grad_output); const int nThreads = 1024; const int nSpatialDim = nHeight * nWidth; const dim3 nBlocks((nSpatialDim + nThreads - 1) / nThreads, nBatchSize); AT_DISPATCH_FLOATING_TYPES(grad_output.type(), "smear backward", [&] { Smear_CUDA_Backward_Kernel<scalar_t><<<nBlocks, nThreads>>>( grad_output.data<scalar_t>(), index_map.data<int>(), tGradSpixelFeats.data<scalar_t>(), n_spixels, nSpatialDim, nChannels); }); return tGradSpixelFeats; }
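Stripped of the PyTorch extension machinery, the forward pass above is a gather: each pixel copies the feature of the superpixel it is assigned to in index_map, and out-of-range indices are silently skipped. The plain-CUDA sketch below reproduces that indexing for a single channel; the sizes and values are made up.

// Single-channel gather by index map, mirroring the "smear" forward pass.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void smear_forward(const float* spixel_feats, const int* index_map,
                              float* pixel_feats, int n_spixels, int n_pixels) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n_pixels) return;
  int s = index_map[i];
  if (s < 0 || s >= n_spixels) return;  // ignore invalid assignments
  pixel_feats[i] = spixel_feats[s];
}

int main() {
  const int n_pixels = 8, n_spixels = 2;
  float h_sp[n_spixels] = {1.5f, -2.5f};
  int h_idx[n_pixels] = {0, 0, 1, 1, 0, 1, 0, 1};
  float h_px[n_pixels];

  float *d_sp, *d_px; int *d_idx;
  cudaMalloc(&d_sp, sizeof(h_sp));
  cudaMalloc(&d_idx, sizeof(h_idx));
  cudaMalloc(&d_px, sizeof(h_px));
  cudaMemcpy(d_sp, h_sp, sizeof(h_sp), cudaMemcpyHostToDevice);
  cudaMemcpy(d_idx, h_idx, sizeof(h_idx), cudaMemcpyHostToDevice);

  smear_forward<<<1, 32>>>(d_sp, d_idx, d_px, n_spixels, n_pixels);
  cudaMemcpy(h_px, d_px, sizeof(h_px), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n_pixels; ++i) std::printf("%.1f ", h_px[i]);
  std::printf("\n");
  cudaFree(d_sp); cudaFree(d_idx); cudaFree(d_px);
  return 0;
}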
a91a982f5c13f1887b32f7885d3b71a08e171ad8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/sigmoid_cross_entropy_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SigmoidCrossEntropyLossForwardGPU(const int nthreads, const Dtype* input_data, const Dtype* target, Dtype* loss, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(i, nthreads) { const int target_value = static_cast<int>(target[i]); if (has_ignore_label_ && target_value == ignore_label_) { loss[i] = 0; counts[i] = 0; } else { loss[i] = input_data[i] * (target[i] - (input_data[i] >= 0)) - log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); counts[i] = 1; } } } template <typename Dtype> __global__ void SigmoidCrossEntropyLossIgnoreDiffGPU(const int count, const int ignore_label, const Dtype* target, Dtype* diff) { CUDA_KERNEL_LOOP(i, count) { const int target_value = static_cast<int>(target[i]); if (target_value == ignore_label) { diff[i] = 0; } } } template <typename Dtype> void SigmoidCrossEntropyLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top, const bool preforward_flag) { // The forward pass computes the sigmoid outputs. sigmoid_bottom_vec_[0] = bottom[0]; sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_, preforward_flag); // Compute the loss (negative log likelihood) const int count = bottom[0]->count(); // Stable version of loss computation from input data const Dtype* input_data = bottom[0]->gpu_data(); const Dtype* target = bottom[1]->gpu_data(); // Since this memory is not used for anything, we use it here to avoid having // to allocate new GPU memory to accumulate intermediate results. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); Dtype* count_data = bottom[1]->mutable_gpu_diff(); Dtype valid_count; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SigmoidCrossEntropyLossForwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, input_data, target, loss_data, has_ignore_label_, ignore_label_, count_data); // Only launch another CUDA kernel if we actually need the valid count. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(count, count_data, &valid_count); } else { valid_count = count; } Dtype loss; caffe_gpu_asum(count, loss_data, &loss); normalizer_ = get_normalizer(normalization_, valid_count); top[0]->mutable_cpu_data()[0] = loss / normalizer_; // Clear scratch memory to prevent interfering with backward (see #6202). caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff()); caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff()); } template <typename Dtype> void SigmoidCrossEntropyLossLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom, const bool prebackward_flag) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { // First, compute the diff const int count = bottom[0]->count(); const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data(); const Dtype* target = bottom[1]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); caffe_copy(count, sigmoid_output_data, bottom_diff); caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff); // Zero out gradient of ignored targets. 
if (has_ignore_label_) { // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( SigmoidCrossEntropyLossIgnoreDiffGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, count, ignore_label_, target, bottom_diff); } // Scale down gradient Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer_; caffe_gpu_scal(count, loss_weight, bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SigmoidCrossEntropyLossLayer); } // namespace caffe
a91a982f5c13f1887b32f7885d3b71a08e171ad8.cu
#include <vector> #include "caffe/layers/sigmoid_cross_entropy_loss_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void SigmoidCrossEntropyLossForwardGPU(const int nthreads, const Dtype* input_data, const Dtype* target, Dtype* loss, const bool has_ignore_label_, const int ignore_label_, Dtype* counts) { CUDA_KERNEL_LOOP(i, nthreads) { const int target_value = static_cast<int>(target[i]); if (has_ignore_label_ && target_value == ignore_label_) { loss[i] = 0; counts[i] = 0; } else { loss[i] = input_data[i] * (target[i] - (input_data[i] >= 0)) - log(1 + exp(input_data[i] - 2 * input_data[i] * (input_data[i] >= 0))); counts[i] = 1; } } } template <typename Dtype> __global__ void SigmoidCrossEntropyLossIgnoreDiffGPU(const int count, const int ignore_label, const Dtype* target, Dtype* diff) { CUDA_KERNEL_LOOP(i, count) { const int target_value = static_cast<int>(target[i]); if (target_value == ignore_label) { diff[i] = 0; } } } template <typename Dtype> void SigmoidCrossEntropyLossLayer<Dtype>::Forward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top, const bool preforward_flag) { // The forward pass computes the sigmoid outputs. sigmoid_bottom_vec_[0] = bottom[0]; sigmoid_layer_->Forward(sigmoid_bottom_vec_, sigmoid_top_vec_, preforward_flag); // Compute the loss (negative log likelihood) const int count = bottom[0]->count(); // Stable version of loss computation from input data const Dtype* input_data = bottom[0]->gpu_data(); const Dtype* target = bottom[1]->gpu_data(); // Since this memory is not used for anything, we use it here to avoid having // to allocate new GPU memory to accumulate intermediate results. Dtype* loss_data = bottom[0]->mutable_gpu_diff(); Dtype* count_data = bottom[1]->mutable_gpu_diff(); Dtype valid_count; // NOLINT_NEXT_LINE(whitespace/operators) SigmoidCrossEntropyLossForwardGPU<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, input_data, target, loss_data, has_ignore_label_, ignore_label_, count_data); // Only launch another CUDA kernel if we actually need the valid count. if (normalization_ == LossParameter_NormalizationMode_VALID && has_ignore_label_) { caffe_gpu_asum(count, count_data, &valid_count); } else { valid_count = count; } Dtype loss; caffe_gpu_asum(count, loss_data, &loss); normalizer_ = get_normalizer(normalization_, valid_count); top[0]->mutable_cpu_data()[0] = loss / normalizer_; // Clear scratch memory to prevent interfering with backward (see #6202). caffe_gpu_set(bottom[0]->count(), Dtype(0), bottom[0]->mutable_gpu_diff()); caffe_gpu_set(bottom[1]->count(), Dtype(0), bottom[1]->mutable_gpu_diff()); } template <typename Dtype> void SigmoidCrossEntropyLossLayer<Dtype>::Backward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom, const bool prebackward_flag) { if (propagate_down[1]) { LOG(FATAL) << this->type() << " Layer cannot backpropagate to label inputs."; } if (propagate_down[0]) { // First, compute the diff const int count = bottom[0]->count(); const Dtype* sigmoid_output_data = sigmoid_output_->gpu_data(); const Dtype* target = bottom[1]->gpu_data(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); caffe_copy(count, sigmoid_output_data, bottom_diff); caffe_gpu_axpy(count, Dtype(-1), target, bottom_diff); // Zero out gradient of ignored targets. 
if (has_ignore_label_) { // NOLINT_NEXT_LINE(whitespace/operators) SigmoidCrossEntropyLossIgnoreDiffGPU<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(count, ignore_label_, target, bottom_diff); } // Scale down gradient Dtype loss_weight = top[0]->cpu_diff()[0] / normalizer_; caffe_gpu_scal(count, loss_weight, bottom_diff); } } INSTANTIATE_LAYER_GPU_FUNCS(SigmoidCrossEntropyLossLayer); } // namespace caffe
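The forward kernel above uses the numerically stable form of sigmoid cross-entropy, x*(t - (x>=0)) - log(1 + exp(x - 2*x*(x>=0))), which never evaluates exp of a large positive argument. The short host-side check below compares it against the textbook expression t*log(sigmoid(x)) + (1-t)*log(1-sigmoid(x)); the test points are arbitrary, and the naive form visibly breaks down at x = -800.

// Comparing the stable and naive sigmoid cross-entropy terms for t = 1.
#include <cmath>
#include <cstdio>

double stable_term(double x, double t) {
  double s = (x >= 0) ? 1.0 : 0.0;
  return x * (t - s) - std::log(1.0 + std::exp(x - 2.0 * x * s));
}

double naive_term(double x, double t) {
  double p = 1.0 / (1.0 + std::exp(-x));  // overflows to p = 0 for very negative x
  return t * std::log(p) + (1.0 - t) * std::log(1.0 - p);
}

int main() {
  const double xs[] = {-800.0, -30.0, 0.0, 30.0};
  for (double x : xs) {
    std::printf("x = %7.1f  stable = %14.6g  naive = %14.6g\n",
                x, stable_term(x, 1.0), naive_term(x, 1.0));
  }
  return 0;
}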
da5cfca8b13fa08e6d693539abad88747ad2092b.hip
// !!! This is a file automatically generated by hipify!!! #include "stdafx.h" #include "Dispatcher.h" #include "ModelPotential.h" #include "ModelSimulated.h" Dispatcher::Dispatcher(void) { } Dispatcher::~Dispatcher(void) { } bool Dispatcher::CheckFileExist(const char *fname) { _finddata_t data; intptr_t nFind = _findfirst(fname,&data); if (nFind != -1) { // , _findclose(nFind); return true; } return false; } int Dispatcher::parseCommand(const char* fileNameXML, Command& command) { xml_document doc; xml_parse_result result = doc.load_file(fileNameXML); if (result) std::cout << "XML [" << fileNameXML << "] parsed without errors, attr value: [" << doc.child("node").attribute("attr").value() << "]\n\n"; else { std::cout << "XML [" << fileNameXML << "] parsed with errors, attr value: [" << doc.child("node").attribute("attr").value() << "]\n"; std::cout << "Error description: " << result.description() << "\n"; std::cout << "Error offset: " << result.offset << " (error at [..." << (fileNameXML + result.offset) << "]\n\n"; return -1; } char buffer[256]; strcpy(command.fileNameInput, doc.child("action").child("io").child("fileNameInput").child_value()); if( strlen(this->command.fileNameInput) == 0 ) { std::cerr << "Empty file input field!" << std::endl; return -1; } strcpy(command.fileNameOutput, doc.child("action").child("io").child("fileNameOutput").child_value()); if( strlen(command.fileNameOutput) == 0 ) { std::cerr << "Empty file output field!" << std::endl; return -1; } //////////////////////////////////////////////////////////////////////////////////////////////////////////// strcpy(buffer, doc.child("action").child("image").child("nx").child_value() ); if( strlen(buffer) == 0 ) { std::cerr << "Empty nx field!" << std::endl; return -1; } try { command.nx = atoi(buffer); } catch(...) { std::cerr << "Convert nx problems!" << std::endl; return -1; } strcpy(buffer, doc.child("action").child("image").child("ny").child_value() ); if( strlen(buffer) == 0 ) { std::cerr << "Empty ny field!" << std::endl; return -1; } try { command.ny = atoi(buffer); } catch(...) { std::cerr << "Convert ny problems!" << std::endl; return -1; } strcpy(buffer, doc.child("action").child("image").child("dpa").child_value() ); if( strlen(buffer) == 0 ) { std::cerr << "Empty dpa field!" << std::endl; return -1; } try { command.dpa = (float) atof(buffer); } catch(...) { std::cerr << "Convert dpa problems!" << std::endl; return -1; } //////////////////////////////////////////////////////////////////////////////////////////////////////////// strcpy(buffer, doc.child("action").child("slicing").child("radiuc").child_value() ); if( strlen(buffer) == 0 ) { std::cerr << "Empty radiuc field!" << std::endl; return -1; } try { command.radiuc = (float) atof(buffer); } catch(...) { std::cerr << "Convert radiuc problems!" << std::endl; return -1; } strcpy(buffer, doc.child("action").child("slicing").child("numberslices").child_value() ); if( strlen(buffer) == 0 ) { std::cerr << "Empty numberslices field!" << std::endl; return -1; } try { command.numberSlices = atoi(buffer); } catch(...) { std::cerr << "Convert numberslices problems!" << std::endl; return -1; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////// strcpy(buffer, doc.child("action").child("microscope").child("aperture").child_value() ); if( strlen(buffer) == 0 ) { std::cerr << "Empty aperture field!" << std::endl; return -1; } try { command.aperture = (float) atof(buffer); } catch(...) 
{ std::cerr << "Convert aperture problems!" << std::endl; return -1; } strcpy(buffer, doc.child("action").child("microscope").child("cs").child_value() ); if( strlen(buffer) == 0 ) { std::cerr << "Empty cs field!" << std::endl; return -1; } try { command.cs = (float) atof(buffer); } catch(...) { std::cerr << "Convert cs problems!" << std::endl; return -1; } strcpy(buffer, doc.child("action").child("microscope").child("defocus").child_value() ); if( strlen(buffer) == 0 ) { std::cerr << "Empty defocus field!" << std::endl; return -1; } try { command.defocus = (float) atof(buffer); } catch(...) { std::cerr << "Convert defocus problems!" << std::endl; return -1; } strcpy(buffer, doc.child("action").child("microscope").child("keV").child_value() ); if( strlen(buffer) == 0 ) { std::cerr << "Empty keV field!" << std::endl; return -1; } try { command.keV = (float) atof(buffer); } catch(...) { std::cerr << "Convert keV problems!" << std::endl; return -1; } return 0; } int Dispatcher::Run(const char* fileNameXML) { if(!CheckFileExist(fileNameXML)) { std::cerr << "XML File with name [" << fileNameXML << "] doesn't exist." << std::endl; return -1; } else { std::cout << "XML File with name [" << fileNameXML << "] exist." << std::endl; } if( parseCommand(fileNameXML, command) == -1) { return -1; } AModel::Model *model = getModelType(command.fileNameInput); if( model->read(command.fileNameInput) == -1 ) { std::cout << "Can not read file " << command.fileNameInput << "!!!" << std::endl; return -1; } else { std::cout << "Read file model [" << command.fileNameInput << "] successful." << std::endl; } /************************************************************************/ /* Calculating map potentials *****************************************/ /************************************************************************/ std::cout << std::endl; std::cout << "Image size = " << command.nx << "x" << command.ny << std::endl; std::cout << "Number slides = " << command.numberSlices << std::endl; std::cout << "dpa = " << command.dpa << std::endl; /////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////// int deviceCount = 0; hipGetDeviceCount(&deviceCount); printf("\nDetected %d CUDA accelerators:\n", deviceCount); int dev; for (dev=0; dev < deviceCount; dev++) { hipDeviceProp_t deviceProp; hipGetDeviceProperties(&deviceProp, dev); printf(" [%d]: '%s' Clock: %.1f GHz Mem: %dMB Rev: %d.%d\n", dev, deviceProp.name, deviceProp.clockRate / 1000000.0f, deviceProp.totalGlobalMem / (1024*1024), deviceProp.major, deviceProp.minor); } int cudadev = 0; printf(" Single-threaded single-GPU test run.\n"); printf(" Opening CUDA device %d...\n\n", cudadev); hipSetDevice(cudadev); /////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////// ModelPotential *modelPotential = new ModelPotential(model, command.nx, command.ny, command.numberSlices, command.dpa, command.radiuc); if(modelPotential->calculatePotentialGrid() == -1) return -1; modelPotential->savePotential(command.fileNameOutput); ModelSimulated *modelSimulated = new ModelSimulated(modelPotential, command.nx, command.ny, command.numberSlices, command.dpa); Microscope *microscope = new Microscope(command.keV, command.cs, command.aperture, command.defocus); Image *result = new 
Image(command.nx, command.ny, 1, sizeof(double), 2);
	modelSimulated->imageCalculation(result, microscope);
	Image *result_module = result->getModule();
	result_module->saveMRC(command.fileNameOutput, model, command.nx, command.ny, 1, mrc_FLOAT);
	delete result_module;
	delete result;
	delete microscope;
	delete modelSimulated;
	delete modelPotential;
	delete model;
	/************************************************************************/
	/************************************************************************/
	/************************************************************************/
	hipDeviceReset();
	std::cout << "Calculation for [" << fileNameXML << "] finished successfully." << std::endl << std::endl;
	return 0;
}
da5cfca8b13fa08e6d693539abad88747ad2092b.cu
#include "stdafx.h" #include "Dispatcher.h" #include "ModelPotential.h" #include "ModelSimulated.h" Dispatcher::Dispatcher(void) { } Dispatcher::~Dispatcher(void) { } bool Dispatcher::CheckFileExist(const char *fname) { _finddata_t data; intptr_t nFind = _findfirst(fname,&data); if (nFind != -1) { // Если этого не сделать, то произойдет утечка ресурсов _findclose(nFind); return true; } return false; } int Dispatcher::parseCommand(const char* fileNameXML, Command& command) { xml_document doc; xml_parse_result result = doc.load_file(fileNameXML); if (result) std::cout << "XML [" << fileNameXML << "] parsed without errors, attr value: [" << doc.child("node").attribute("attr").value() << "]\n\n"; else { std::cout << "XML [" << fileNameXML << "] parsed with errors, attr value: [" << doc.child("node").attribute("attr").value() << "]\n"; std::cout << "Error description: " << result.description() << "\n"; std::cout << "Error offset: " << result.offset << " (error at [..." << (fileNameXML + result.offset) << "]\n\n"; return -1; } char buffer[256]; strcpy(command.fileNameInput, doc.child("action").child("io").child("fileNameInput").child_value()); if( strlen(this->command.fileNameInput) == 0 ) { std::cerr << "Empty file input field!" << std::endl; return -1; } strcpy(command.fileNameOutput, doc.child("action").child("io").child("fileNameOutput").child_value()); if( strlen(command.fileNameOutput) == 0 ) { std::cerr << "Empty file output field!" << std::endl; return -1; } //////////////////////////////////////////////////////////////////////////////////////////////////////////// strcpy(buffer, doc.child("action").child("image").child("nx").child_value() ); if( strlen(buffer) == 0 ) { std::cerr << "Empty nx field!" << std::endl; return -1; } try { command.nx = atoi(buffer); } catch(...) { std::cerr << "Convert nx problems!" << std::endl; return -1; } strcpy(buffer, doc.child("action").child("image").child("ny").child_value() ); if( strlen(buffer) == 0 ) { std::cerr << "Empty ny field!" << std::endl; return -1; } try { command.ny = atoi(buffer); } catch(...) { std::cerr << "Convert ny problems!" << std::endl; return -1; } strcpy(buffer, doc.child("action").child("image").child("dpa").child_value() ); if( strlen(buffer) == 0 ) { std::cerr << "Empty dpa field!" << std::endl; return -1; } try { command.dpa = (float) atof(buffer); } catch(...) { std::cerr << "Convert dpa problems!" << std::endl; return -1; } //////////////////////////////////////////////////////////////////////////////////////////////////////////// strcpy(buffer, doc.child("action").child("slicing").child("radiuc").child_value() ); if( strlen(buffer) == 0 ) { std::cerr << "Empty radiuc field!" << std::endl; return -1; } try { command.radiuc = (float) atof(buffer); } catch(...) { std::cerr << "Convert radiuc problems!" << std::endl; return -1; } strcpy(buffer, doc.child("action").child("slicing").child("numberslices").child_value() ); if( strlen(buffer) == 0 ) { std::cerr << "Empty numberslices field!" << std::endl; return -1; } try { command.numberSlices = atoi(buffer); } catch(...) { std::cerr << "Convert numberslices problems!" << std::endl; return -1; } ///////////////////////////////////////////////////////////////////////////////////////////////////////////// strcpy(buffer, doc.child("action").child("microscope").child("aperture").child_value() ); if( strlen(buffer) == 0 ) { std::cerr << "Empty aperture field!" << std::endl; return -1; } try { command.aperture = (float) atof(buffer); } catch(...) 
{ std::cerr << "Convert aperture problems!" << std::endl; return -1; } strcpy(buffer, doc.child("action").child("microscope").child("cs").child_value() ); if( strlen(buffer) == 0 ) { std::cerr << "Empty cs field!" << std::endl; return -1; } try { command.cs = (float) atof(buffer); } catch(...) { std::cerr << "Convert cs problems!" << std::endl; return -1; } strcpy(buffer, doc.child("action").child("microscope").child("defocus").child_value() ); if( strlen(buffer) == 0 ) { std::cerr << "Empty defocus field!" << std::endl; return -1; } try { command.defocus = (float) atof(buffer); } catch(...) { std::cerr << "Convert defocus problems!" << std::endl; return -1; } strcpy(buffer, doc.child("action").child("microscope").child("keV").child_value() ); if( strlen(buffer) == 0 ) { std::cerr << "Empty keV field!" << std::endl; return -1; } try { command.keV = (float) atof(buffer); } catch(...) { std::cerr << "Convert keV problems!" << std::endl; return -1; } return 0; } int Dispatcher::Run(const char* fileNameXML) { if(!CheckFileExist(fileNameXML)) { std::cerr << "XML File with name [" << fileNameXML << "] doesn't exist." << std::endl; return -1; } else { std::cout << "XML File with name [" << fileNameXML << "] exist." << std::endl; } if( parseCommand(fileNameXML, command) == -1) { return -1; } AModel::Model *model = getModelType(command.fileNameInput); if( model->read(command.fileNameInput) == -1 ) { std::cout << "Can not read file " << command.fileNameInput << "!!!" << std::endl; return -1; } else { std::cout << "Read file model [" << command.fileNameInput << "] successful." << std::endl; } /************************************************************************/ /* Calculating map potentials *****************************************/ /************************************************************************/ std::cout << std::endl; std::cout << "Image size = " << command.nx << "x" << command.ny << std::endl; std::cout << "Number slides = " << command.numberSlices << std::endl; std::cout << "dpa = " << command.dpa << std::endl; /////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////// int deviceCount = 0; cudaGetDeviceCount(&deviceCount); printf("\nDetected %d CUDA accelerators:\n", deviceCount); int dev; for (dev=0; dev < deviceCount; dev++) { cudaDeviceProp deviceProp; cudaGetDeviceProperties(&deviceProp, dev); printf(" [%d]: '%s' Clock: %.1f GHz Mem: %dMB Rev: %d.%d\n", dev, deviceProp.name, deviceProp.clockRate / 1000000.0f, deviceProp.totalGlobalMem / (1024*1024), deviceProp.major, deviceProp.minor); } int cudadev = 0; printf(" Single-threaded single-GPU test run.\n"); printf(" Opening CUDA device %d...\n\n", cudadev); cudaSetDevice(cudadev); /////////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////////// ModelPotential *modelPotential = new ModelPotential(model, command.nx, command.ny, command.numberSlices, command.dpa, command.radiuc); if(modelPotential->calculatePotentialGrid() == -1) return -1; modelPotential->savePotential(command.fileNameOutput); ModelSimulated *modelSimulated = new ModelSimulated(modelPotential, command.nx, command.ny, command.numberSlices, command.dpa); Microscope *microscope = new Microscope(command.keV, command.cs, command.aperture, command.defocus); Image *result = new 
Image(command.nx, command.ny, 1, sizeof(double), 2);
	modelSimulated->imageCalculation(result, microscope);
	Image *result_module = result->getModule();
	result_module->saveMRC(command.fileNameOutput, model, command.nx, command.ny, 1, mrc_FLOAT);
	delete result_module;
	delete result;
	delete microscope;
	delete modelSimulated;
	delete modelPotential;
	delete model;
	/************************************************************************/
	/************************************************************************/
	/************************************************************************/
	cudaDeviceReset();
	std::cout << "Calculation for [" << fileNameXML << "] finished successfully." << std::endl << std::endl;
	return 0;
}
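Dispatcher::Run above enumerates the available GPUs with cudaGetDeviceCount/cudaGetDeviceProperties before selecting a device. The same block is repeated below as a stand-alone program; the only deliberate difference is that totalGlobalMem, a size_t, is printed with %zu instead of %d.

// Minimal device-enumeration sketch mirroring the block in Dispatcher::Run.
#include <cstdio>
#include <cuda_runtime.h>

int main() {
  int deviceCount = 0;
  cudaGetDeviceCount(&deviceCount);
  std::printf("Detected %d CUDA accelerators:\n", deviceCount);
  for (int dev = 0; dev < deviceCount; ++dev) {
    cudaDeviceProp prop;
    cudaGetDeviceProperties(&prop, dev);
    std::printf("  [%d]: '%s'  Clock: %.1f GHz  Mem: %zuMB  Rev: %d.%d\n",
                dev, prop.name, prop.clockRate / 1000000.0f,
                prop.totalGlobalMem / (1024 * 1024), prop.major, prop.minor);
  }
  if (deviceCount > 0) cudaSetDevice(0);
  return 0;
}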
8033039cdd23bbad332f7ef0d6aacdd9662a6ada.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "pfc_cuda_device_info.h" #include "util.hpp" #include "host.hpp" #include "device_host.hpp" #include <hip/hip_vector_types.h> #include <stdio.h> #include <thread> #include <iostream> #include <complex> using namespace std::literals; auto const cpu_count = std::max<int>(1, std::thread::hardware_concurrency()); auto const run_count = 3; __constant__ auto const g_block_size = 64; __constant__ auto const g_points = 75000; auto const g_grid_size = grid_size( g_block_size, {g_points , 1, 1} ); auto const point_count = 100; int main() { try { int count{0}; mpv_exception::check(hipGetDeviceCount(&count)); if (count > 0) { hipSetDevice(0); auto const deviceInfo{pfc::cuda::get_device_info()}; auto const deviceProps{pfc::cuda::get_device_props()}; std::cout << "Device : " << deviceProps.name << std::endl; std::cout << "Compute capability: " << deviceInfo.cc_major << "." << deviceInfo.cc_minor << std::endl; std::cout << "Arch : " << deviceInfo.uarch << std::endl; std::cout << std::endl; auto const tib{g_block_size}; //threads in block auto const big{(g_points + tib - 1) / tib}; //blocks in grid + round auto hp_points{std::make_unique<float3[]>(g_points)}; auto hp_result{std::make_unique<int[]>(g_points)}; // create data points create_data(g_points, hp_points.get(), 1, 10000); // Allocate on device std::cout << "Allocating memory on device ..." << std::endl; float3* dp_points = CUDA_MALLOC(float3, g_points); int* dp_result = CUDA_MALLOC(int, g_points); std::cout << "Calculating distances on device (block size " << g_block_size << ", " << run_count << " runs) ..." << std::endl << std::endl; auto const duration_gpu = mpv_runtime::run_with_measure(run_count, [&] { CUDA_MEMCPY(dp_points, hp_points.get(), g_points, hipMemcpyHostToDevice); find_all_closest_GPU << <big, tib >> >( g_points, dp_points, dp_result); hipDeviceSynchronize(); mpv_exception::check(hipGetLastError()); CUDA_MEMCPY(hp_result.get(), dp_result, g_points, hipMemcpyDeviceToHost); }); std::cout << "GPU time (average of " << run_count << " runs): " << std::chrono::duration_cast<std::chrono::milliseconds>(duration_gpu).count() << " milliseconds" << std::endl << std::endl; std::cout << "Warming up CPU ..." << std::endl << std::endl; mpv_threading::warm_up_cpu(5s); std::cout << "Calculating distances on host (" << cpu_count << " threads, " << run_count << " runs ) ..." << std:: endl << std::endl; auto chunk{(g_points + cpu_count - 1) / cpu_count}; auto const duration_cpu = mpv_runtime::run_with_measure(run_count, [&] { std::vector<std::future<void>> task_group; for (auto i = 0; i < cpu_count; i++) { auto index{i}; task_group.push_back(std::async(std::launch::async, [&] { find_all_closest_CPU( g_points, hp_points.get(), hp_result.get(), std::make_pair( index * chunk, (index + 1) * chunk)); })); } for (auto& f : task_group) { f.get(); } }); std::cout << "CPU time (average of " << run_count << " runs): " << std::chrono::duration_cast<std::chrono::milliseconds>(duration_cpu).count() << " milliseconds" << std::endl << std::endl; std::cout << "Speedup: " << mpv_runtime::speedup(duration_cpu, duration_gpu) << std::endl; } } catch (std::exception const& x) { std::cerr << x.what() << std::endl; } }
8033039cdd23bbad332f7ef0d6aacdd9662a6ada.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include "pfc_cuda_device_info.h" #include "util.hpp" #include "host.hpp" #include "device_host.hpp" #include <vector_types.h> #include <stdio.h> #include <thread> #include <iostream> #include <complex> using namespace std::literals; auto const cpu_count = std::max<int>(1, std::thread::hardware_concurrency()); auto const run_count = 3; __constant__ auto const g_block_size = 64; __constant__ auto const g_points = 75000; auto const g_grid_size = grid_size( g_block_size, {g_points , 1, 1} ); auto const point_count = 100; int main() { try { int count{0}; mpv_exception::check(cudaGetDeviceCount(&count)); if (count > 0) { cudaSetDevice(0); auto const deviceInfo{pfc::cuda::get_device_info()}; auto const deviceProps{pfc::cuda::get_device_props()}; std::cout << "Device : " << deviceProps.name << std::endl; std::cout << "Compute capability: " << deviceInfo.cc_major << "." << deviceInfo.cc_minor << std::endl; std::cout << "Arch : " << deviceInfo.uarch << std::endl; std::cout << std::endl; auto const tib{g_block_size}; //threads in block auto const big{(g_points + tib - 1) / tib}; //blocks in grid + round auto hp_points{std::make_unique<float3[]>(g_points)}; auto hp_result{std::make_unique<int[]>(g_points)}; // create data points create_data(g_points, hp_points.get(), 1, 10000); // Allocate on device std::cout << "Allocating memory on device ..." << std::endl; float3* dp_points = CUDA_MALLOC(float3, g_points); int* dp_result = CUDA_MALLOC(int, g_points); std::cout << "Calculating distances on device (block size " << g_block_size << ", " << run_count << " runs) ..." << std::endl << std::endl; auto const duration_gpu = mpv_runtime::run_with_measure(run_count, [&] { CUDA_MEMCPY(dp_points, hp_points.get(), g_points, cudaMemcpyHostToDevice); find_all_closest_GPU << <big, tib >> >( g_points, dp_points, dp_result); cudaDeviceSynchronize(); mpv_exception::check(cudaGetLastError()); CUDA_MEMCPY(hp_result.get(), dp_result, g_points, cudaMemcpyDeviceToHost); }); std::cout << "GPU time (average of " << run_count << " runs): " << std::chrono::duration_cast<std::chrono::milliseconds>(duration_gpu).count() << " milliseconds" << std::endl << std::endl; std::cout << "Warming up CPU ..." << std::endl << std::endl; mpv_threading::warm_up_cpu(5s); std::cout << "Calculating distances on host (" << cpu_count << " threads, " << run_count << " runs ) ..." << std:: endl << std::endl; auto chunk{(g_points + cpu_count - 1) / cpu_count}; auto const duration_cpu = mpv_runtime::run_with_measure(run_count, [&] { std::vector<std::future<void>> task_group; for (auto i = 0; i < cpu_count; i++) { auto index{i}; task_group.push_back(std::async(std::launch::async, [&] { find_all_closest_CPU( g_points, hp_points.get(), hp_result.get(), std::make_pair( index * chunk, (index + 1) * chunk)); })); } for (auto& f : task_group) { f.get(); } }); std::cout << "CPU time (average of " << run_count << " runs): " << std::chrono::duration_cast<std::chrono::milliseconds>(duration_cpu).count() << " milliseconds" << std::endl << std::endl; std::cout << "Speedup: " << mpv_runtime::speedup(duration_cpu, duration_gpu) << std::endl; } } catch (std::exception const& x) { std::cerr << x.what() << std::endl; } }
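The benchmark above computes its launch geometry with a rounding-up division, (g_points + tib - 1) / tib, and only reads the clock after the GPU work has completed. The sketch below isolates those two details, using cudaEvent timing in place of the mpv_runtime helper; the scale kernel and the sizes are illustrative.

// Rounded-up block count plus event-based timing of a single kernel launch.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void scale(float* data, int n, float f) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) data[i] *= f;
}

int main() {
  const int n = 75000, tib = 64;          // threads in block
  const int big = (n + tib - 1) / tib;    // blocks in grid, rounded up

  float* d_data = nullptr;
  cudaMalloc(&d_data, n * sizeof(float));
  cudaMemset(d_data, 0, n * sizeof(float));

  cudaEvent_t start, stop;
  cudaEventCreate(&start);
  cudaEventCreate(&stop);
  cudaEventRecord(start);
  scale<<<big, tib>>>(d_data, n, 2.0f);
  cudaEventRecord(stop);
  cudaEventSynchronize(stop);             // wait for the kernel to finish

  float ms = 0.0f;
  cudaEventElapsedTime(&ms, start, stop);
  std::printf("%d blocks x %d threads: %.3f ms\n", big, tib, ms);

  cudaEventDestroy(start);
  cudaEventDestroy(stop);
  cudaFree(d_data);
  return 0;
}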
f087845bf4b5c6e0f57aa972db7cbaf547552f8b.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Parallel reduction This sample shows how to perform a reduction operation on an array of values to produce a single value in a single kernel (as opposed to two or more kernel calls as shown in the "reduction" CUDA Sample). Single-pass reduction requires global atomic instructions (Compute Capability 1.1 or later) and the __threadfence() intrinsic (CUDA 2.2 or later). Reductions are a very common computation in parallel algorithms. Any time an array of values needs to be reduced to a single value using a binary associative operator, a reduction can be used. Example applications include statistics computations such as mean and standard deviation, and image processing applications such as finding the total luminance of an image. This code performs sum reductions, but any associative operator such as min() or max() could also be used. It assumes the input size is a power of 2. COMMAND LINE ARGUMENTS "--shmoo": Test performance for 1 to 32M elements with each of the 7 different kernels "--n=<N>": Specify the number of elements to reduce (default 1048576) "--threads=<N>": Specify the number of threads per block (default 128) "--maxblocks=<N>": Specify the maximum number of thread blocks to launch (kernel 6 only, default 64) "--cpufinal": Read back the per-block results and do final sum of block sums on CPU (default false) "--cputhresh=<N>": The threshold of number of blocks sums below which to perform a CPU final reduction (default 1) "--multipass": Use a multipass reduction instead of a single-pass reduction */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <helper_functions.h> #include <helper_cuda.h> #define VERSION_MAJOR (CUDART_VERSION/1000) #define VERSION_MINOR (CUDART_VERSION%100)/10 const char *sSDKsample = "threadFenceReduction"; #if CUDART_VERSION >= 2020 #include "threadFenceReduction_kernel.cuh" #else #pragma comment(user, "CUDA 2.2 is required to build for threadFenceReduction") #endif //////////////////////////////////////////////////////////////////////////////// // declaration, forward bool runTest(int argc, char **argv); extern "C" { void reduce(int size, int threads, int blocks, float *d_idata, float *d_odata); void reduceSinglePass(int size, int threads, int blocks, float *d_idata, float *d_odata); } #if CUDART_VERSION < 2020 void reduce(int size, int threads, int blocks, float *d_idata, float *d_odata) { printf("reduce(), compiler not supported, aborting tests\n"); } void reduceSinglePass(int size, int threads, int blocks, float *d_idata, float *d_odata) { printf("reduceSinglePass(), compiler not supported, aborting tests\n"); } #endif //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { hipDeviceProp_t deviceProp; deviceProp.major = 0; deviceProp.minor = 0; //int dev; printf("%s Starting...\n\n", sSDKsample); //dev = findCudaDevice(argc, (const char **)argv); 
//checkCudaErrors(hipGetDeviceProperties(&deviceProp, dev)); //printf("GPU Device supports SM %d.%d compute capability\n\n", deviceProp.major, deviceProp.minor); int dev = 0; if(argc == 2) { dev = atoi(argv[1]); } printf("select device : %d\n", dev); hipSetDevice(dev); hipError_t error; //hipDeviceProp_t deviceProp; error = hipGetDeviceProperties(&deviceProp, dev); if (error != hipSuccess) { printf("hipGetDeviceProperties returned error %s (code %d), line(%d)\n", hipGetErrorString(error), error, __LINE__); } else { printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", dev, deviceProp.name, deviceProp.major, deviceProp.minor); } bool bTestResult = false; #if CUDART_VERSION >= 2020 bTestResult = runTest(argc, argv); #else print_NVCC_min_spec(sSDKsample, "2.2", "Version 185"); exit(EXIT_SUCCESS); #endif exit(bTestResult ? EXIT_SUCCESS : EXIT_FAILURE); } //////////////////////////////////////////////////////////////////////////////// //! Compute sum reduction on CPU //! We use Kahan summation for an accurate sum of large arrays. //! http://en.wikipedia.org/wiki/Kahan_summation_algorithm //! //! @param data pointer to input data //! @param size number of input data elements //////////////////////////////////////////////////////////////////////////////// template<class T> T reduceCPU(T *data, int size) { T sum = data[0]; T c = (T)0.0; for (int i = 1; i < size; i++) { T y = data[i] - c; T t = sum + y; c = (t - sum) - y; sum = t; } return sum; } unsigned int nextPow2(unsigned int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } //////////////////////////////////////////////////////////////////////////////// // Compute the number of threads and blocks to use for the reduction // We set threads / block to the minimum of maxThreads and n/2. //////////////////////////////////////////////////////////////////////////////// void getNumBlocksAndThreads(int n, int maxBlocks, int maxThreads, int &blocks, int &threads) { if (n == 1) { threads = 1; blocks = 1; } else { threads = (n < maxThreads*2) ? nextPow2(n / 2) : maxThreads; blocks = max(1, n / (threads * 2)); } blocks = min(maxBlocks, blocks); } //////////////////////////////////////////////////////////////////////////////// // This function performs a reduction of the input data multiple times and // measures the average reduction time. 
//////////////////////////////////////////////////////////////////////////////// float benchmarkReduce(int n, int numThreads, int numBlocks, int maxThreads, int maxBlocks, int testIterations, bool multiPass, bool cpuFinalReduction, int cpuFinalThreshold, StopWatchInterface *timer, float *h_odata, float *d_idata, float *d_odata) { float gpu_result = 0; bool bNeedReadback = true; hipError_t error; for (int i = 0; i < testIterations; ++i) { gpu_result = 0; unsigned int retCnt = 0; error = setRetirementCount(retCnt); checkCudaErrors(error); hipDeviceSynchronize(); sdkStartTimer(&timer); if (multiPass) { // execute the kernel reduce(n, numThreads, numBlocks, d_idata, d_odata); // check if kernel execution generated an error getLastCudaError("Kernel execution failed"); if (cpuFinalReduction) { // sum partial sums from each block on CPU // copy result from device to host error = hipMemcpy(h_odata, d_odata, numBlocks*sizeof(float), hipMemcpyDeviceToHost); checkCudaErrors(error); for (int i=0; i<numBlocks; i++) { gpu_result += h_odata[i]; } bNeedReadback = false; } else { // sum partial block sums on GPU int s=numBlocks; while (s > cpuFinalThreshold) { int threads = 0, blocks = 0; getNumBlocksAndThreads(s, maxBlocks, maxThreads, blocks, threads); reduce(s, threads, blocks, d_odata, d_odata); s = s / (threads*2); } if (s > 1) { // copy result from device to host error = hipMemcpy(h_odata, d_odata, s * sizeof(float), hipMemcpyDeviceToHost); checkCudaErrors(error); for (int i=0; i < s; i++) { gpu_result += h_odata[i]; } bNeedReadback = false; } } } else { getLastCudaError("Kernel execution failed"); // execute the kernel reduceSinglePass(n, numThreads, numBlocks, d_idata, d_odata); // check if kernel execution generated an error getLastCudaError("Kernel execution failed"); } hipDeviceSynchronize(); sdkStopTimer(&timer); } if (bNeedReadback) { // copy final sum from device to host error = hipMemcpy(&gpu_result, d_odata, sizeof(float), hipMemcpyDeviceToHost); checkCudaErrors(error); } return gpu_result; } //////////////////////////////////////////////////////////////////////////////// // This function calls benchmarkReduce multiple times for a range of array sizes // and prints a report in CSV (comma-separated value) format that can be used for // generating a "shmoo" plot showing the performance for each kernel variation // over a wide range of input sizes. 
//////////////////////////////////////////////////////////////////////////////// void shmoo(int minN, int maxN, int maxThreads, int maxBlocks) { // create random input data on CPU unsigned int bytes = maxN * sizeof(float); float *h_idata = (float *) malloc(bytes); for (int i = 0; i < maxN; i++) { // Keep the numbers small so we don't get truncation error in the sum h_idata[i] = (rand() & 0xFF) / (float)RAND_MAX; } int maxNumBlocks = min(65535, maxN / maxThreads); // allocate mem for the result on host side float *h_odata = (float *) malloc(maxNumBlocks*sizeof(float)); // allocate device memory and data float *d_idata = NULL; float *d_odata = NULL; checkCudaErrors(hipMalloc((void **) &d_idata, bytes)); checkCudaErrors(hipMalloc((void **) &d_odata, maxNumBlocks*sizeof(float))); // copy data directly to device memory checkCudaErrors(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_odata, h_idata, maxNumBlocks*sizeof(float), hipMemcpyHostToDevice)); // warm-up reduce(maxN, maxThreads, maxNumBlocks, d_idata, d_odata); int testIterations = 100; StopWatchInterface *timer = NULL; sdkCreateTimer(&timer); // print headers printf("N, %d blocks one pass, %d blocks multipass\n", maxBlocks, maxBlocks); for (int i = minN; i <= maxN; i *= 2) { printf("%d, ", i); for (int multiPass = 0; multiPass <= 1; multiPass++) { sdkResetTimer(&timer); int numBlocks = 0; int numThreads = 0; getNumBlocksAndThreads(i, maxBlocks, maxThreads, numBlocks, numThreads); benchmarkReduce(i, numThreads, numBlocks, maxThreads, maxBlocks, testIterations, multiPass==1, false, 1, timer, h_odata, d_idata, d_odata); float reduceTime = sdkGetAverageTimerValue(&timer); printf("%f%s", reduceTime, multiPass==0 ? ", " : "\n"); } } printf("\n"); // cleanup sdkDeleteTimer(&timer); free(h_idata); free(h_odata); hipFree(d_idata); hipFree(d_odata); } //////////////////////////////////////////////////////////////////////////////// // The main function which runs the reduction test. 
//////////////////////////////////////////////////////////////////////////////// bool runTest(int argc, char **argv) { int size = 1<<20; // number of elements to reduce int maxThreads = 128; // number of threads per block int maxBlocks = 64; bool cpuFinalReduction = false; int cpuFinalThreshold = 1; bool multipass = false; bool bTestResult = false; if (checkCmdLineFlag(argc, (const char **) argv, "b")) { size = getCmdLineArgumentInt(argc, (const char **)argv, "b"); } if (checkCmdLineFlag(argc, (const char **) argv, "threads")) { maxThreads = getCmdLineArgumentInt(argc, (const char **)argv, "threads"); } if (checkCmdLineFlag(argc, (const char **) argv, "maxblocks")) { maxBlocks = getCmdLineArgumentInt(argc, (const char **)argv, "maxblocks"); } printf("%d elements\n", size); printf("%d threads (max)\n", maxThreads); cpuFinalReduction = checkCmdLineFlag(argc, (const char **) argv, "cpufinal"); multipass = checkCmdLineFlag(argc, (const char **) argv, "multipass"); if (checkCmdLineFlag(argc, (const char **) argv, "cputhresh")) { cpuFinalThreshold = getCmdLineArgumentInt(argc, (const char **) argv, "cputhresh"); } bool runShmoo = checkCmdLineFlag(argc, (const char **) argv, "shmoo"); if (runShmoo) { shmoo(1, 33554432, maxThreads, maxBlocks); } else { // create random input data on CPU unsigned int bytes = size * sizeof(float); float *h_idata = (float *) malloc(bytes); for (int i=0; i<size; i++) { // Keep the numbers small so we don't get truncation error in the sum h_idata[i] = (rand() & 0xFF) / (float)RAND_MAX; } int numBlocks = 0; int numThreads = 0; getNumBlocksAndThreads(size, maxBlocks, maxThreads, numBlocks, numThreads); if (numBlocks == 1) { cpuFinalThreshold = 1; } // allocate mem for the result on host side float *h_odata = (float *) malloc(numBlocks*sizeof(float)); printf("%d blocks\n", numBlocks); // allocate device memory and data float *d_idata = NULL; float *d_odata = NULL; checkCudaErrors(hipMalloc((void **) &d_idata, bytes)); checkCudaErrors(hipMalloc((void **) &d_odata, numBlocks*sizeof(float))); // copy data directly to device memory checkCudaErrors(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice)); checkCudaErrors(hipMemcpy(d_odata, h_idata, numBlocks*sizeof(float), hipMemcpyHostToDevice)); // warm-up reduce(size, numThreads, numBlocks, d_idata, d_odata); int testIterations = 100; StopWatchInterface *timer = 0; sdkCreateTimer(&timer); float gpu_result = 0; gpu_result = benchmarkReduce(size, numThreads, numBlocks, maxThreads, maxBlocks, testIterations, multipass, cpuFinalReduction, cpuFinalThreshold, timer, h_odata, d_idata, d_odata); float reduceTime = sdkGetAverageTimerValue(&timer); printf("Average time: %f ms\n", reduceTime); printf("Bandwidth: %f GB/s\n\n", (size * sizeof(int)) / (reduceTime * 1.0e6)); // compute reference solution float cpu_result = reduceCPU<float>(h_idata, size); printf("GPU result = %0.12f\n", gpu_result); printf("CPU result = %0.12f\n", cpu_result); double threshold = 1e-8 * size; double diff = abs((double)gpu_result - (double)cpu_result); bTestResult = (diff < threshold); // cleanup sdkDeleteTimer(&timer); free(h_idata); free(h_odata); hipFree(d_idata); hipFree(d_odata); } return bTestResult; }
f087845bf4b5c6e0f57aa972db7cbaf547552f8b.cu
/** * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Parallel reduction This sample shows how to perform a reduction operation on an array of values to produce a single value in a single kernel (as opposed to two or more kernel calls as shown in the "reduction" CUDA Sample). Single-pass reduction requires global atomic instructions (Compute Capability 1.1 or later) and the __threadfence() intrinsic (CUDA 2.2 or later). Reductions are a very common computation in parallel algorithms. Any time an array of values needs to be reduced to a single value using a binary associative operator, a reduction can be used. Example applications include statistics computations such as mean and standard deviation, and image processing applications such as finding the total luminance of an image. This code performs sum reductions, but any associative operator such as min() or max() could also be used. It assumes the input size is a power of 2. COMMAND LINE ARGUMENTS "--shmoo": Test performance for 1 to 32M elements with each of the 7 different kernels "--n=<N>": Specify the number of elements to reduce (default 1048576) "--threads=<N>": Specify the number of threads per block (default 128) "--maxblocks=<N>": Specify the maximum number of thread blocks to launch (kernel 6 only, default 64) "--cpufinal": Read back the per-block results and do final sum of block sums on CPU (default false) "--cputhresh=<N>": The threshold of number of blocks sums below which to perform a CPU final reduction (default 1) "--multipass": Use a multipass reduction instead of a single-pass reduction */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <helper_functions.h> #include <helper_cuda.h> #define VERSION_MAJOR (CUDART_VERSION/1000) #define VERSION_MINOR (CUDART_VERSION%100)/10 const char *sSDKsample = "threadFenceReduction"; #if CUDART_VERSION >= 2020 #include "threadFenceReduction_kernel.cuh" #else #pragma comment(user, "CUDA 2.2 is required to build for threadFenceReduction") #endif //////////////////////////////////////////////////////////////////////////////// // declaration, forward bool runTest(int argc, char **argv); extern "C" { void reduce(int size, int threads, int blocks, float *d_idata, float *d_odata); void reduceSinglePass(int size, int threads, int blocks, float *d_idata, float *d_odata); } #if CUDART_VERSION < 2020 void reduce(int size, int threads, int blocks, float *d_idata, float *d_odata) { printf("reduce(), compiler not supported, aborting tests\n"); } void reduceSinglePass(int size, int threads, int blocks, float *d_idata, float *d_odata) { printf("reduceSinglePass(), compiler not supported, aborting tests\n"); } #endif //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char **argv) { cudaDeviceProp deviceProp; deviceProp.major = 0; deviceProp.minor = 0; //int dev; printf("%s Starting...\n\n", sSDKsample); //dev = findCudaDevice(argc, (const char **)argv); //checkCudaErrors(cudaGetDeviceProperties(&deviceProp, dev)); //printf("GPU Device supports SM 
%d.%d compute capability\n\n", deviceProp.major, deviceProp.minor); int dev = 0; if(argc == 2) { dev = atoi(argv[1]); } printf("select device : %d\n", dev); cudaSetDevice(dev); cudaError_t error; //cudaDeviceProp deviceProp; error = cudaGetDeviceProperties(&deviceProp, dev); if (error != cudaSuccess) { printf("cudaGetDeviceProperties returned error %s (code %d), line(%d)\n", cudaGetErrorString(error), error, __LINE__); } else { printf("GPU Device %d: \"%s\" with compute capability %d.%d\n\n", dev, deviceProp.name, deviceProp.major, deviceProp.minor); } bool bTestResult = false; #if CUDART_VERSION >= 2020 bTestResult = runTest(argc, argv); #else print_NVCC_min_spec(sSDKsample, "2.2", "Version 185"); exit(EXIT_SUCCESS); #endif exit(bTestResult ? EXIT_SUCCESS : EXIT_FAILURE); } //////////////////////////////////////////////////////////////////////////////// //! Compute sum reduction on CPU //! We use Kahan summation for an accurate sum of large arrays. //! http://en.wikipedia.org/wiki/Kahan_summation_algorithm //! //! @param data pointer to input data //! @param size number of input data elements //////////////////////////////////////////////////////////////////////////////// template<class T> T reduceCPU(T *data, int size) { T sum = data[0]; T c = (T)0.0; for (int i = 1; i < size; i++) { T y = data[i] - c; T t = sum + y; c = (t - sum) - y; sum = t; } return sum; } unsigned int nextPow2(unsigned int x) { --x; x |= x >> 1; x |= x >> 2; x |= x >> 4; x |= x >> 8; x |= x >> 16; return ++x; } //////////////////////////////////////////////////////////////////////////////// // Compute the number of threads and blocks to use for the reduction // We set threads / block to the minimum of maxThreads and n/2. //////////////////////////////////////////////////////////////////////////////// void getNumBlocksAndThreads(int n, int maxBlocks, int maxThreads, int &blocks, int &threads) { if (n == 1) { threads = 1; blocks = 1; } else { threads = (n < maxThreads*2) ? nextPow2(n / 2) : maxThreads; blocks = max(1, n / (threads * 2)); } blocks = min(maxBlocks, blocks); } //////////////////////////////////////////////////////////////////////////////// // This function performs a reduction of the input data multiple times and // measures the average reduction time. 
//////////////////////////////////////////////////////////////////////////////// float benchmarkReduce(int n, int numThreads, int numBlocks, int maxThreads, int maxBlocks, int testIterations, bool multiPass, bool cpuFinalReduction, int cpuFinalThreshold, StopWatchInterface *timer, float *h_odata, float *d_idata, float *d_odata) { float gpu_result = 0; bool bNeedReadback = true; cudaError_t error; for (int i = 0; i < testIterations; ++i) { gpu_result = 0; unsigned int retCnt = 0; error = setRetirementCount(retCnt); checkCudaErrors(error); cudaDeviceSynchronize(); sdkStartTimer(&timer); if (multiPass) { // execute the kernel reduce(n, numThreads, numBlocks, d_idata, d_odata); // check if kernel execution generated an error getLastCudaError("Kernel execution failed"); if (cpuFinalReduction) { // sum partial sums from each block on CPU // copy result from device to host error = cudaMemcpy(h_odata, d_odata, numBlocks*sizeof(float), cudaMemcpyDeviceToHost); checkCudaErrors(error); for (int i=0; i<numBlocks; i++) { gpu_result += h_odata[i]; } bNeedReadback = false; } else { // sum partial block sums on GPU int s=numBlocks; while (s > cpuFinalThreshold) { int threads = 0, blocks = 0; getNumBlocksAndThreads(s, maxBlocks, maxThreads, blocks, threads); reduce(s, threads, blocks, d_odata, d_odata); s = s / (threads*2); } if (s > 1) { // copy result from device to host error = cudaMemcpy(h_odata, d_odata, s * sizeof(float), cudaMemcpyDeviceToHost); checkCudaErrors(error); for (int i=0; i < s; i++) { gpu_result += h_odata[i]; } bNeedReadback = false; } } } else { getLastCudaError("Kernel execution failed"); // execute the kernel reduceSinglePass(n, numThreads, numBlocks, d_idata, d_odata); // check if kernel execution generated an error getLastCudaError("Kernel execution failed"); } cudaDeviceSynchronize(); sdkStopTimer(&timer); } if (bNeedReadback) { // copy final sum from device to host error = cudaMemcpy(&gpu_result, d_odata, sizeof(float), cudaMemcpyDeviceToHost); checkCudaErrors(error); } return gpu_result; } //////////////////////////////////////////////////////////////////////////////// // This function calls benchmarkReduce multiple times for a range of array sizes // and prints a report in CSV (comma-separated value) format that can be used for // generating a "shmoo" plot showing the performance for each kernel variation // over a wide range of input sizes. 
//////////////////////////////////////////////////////////////////////////////// void shmoo(int minN, int maxN, int maxThreads, int maxBlocks) { // create random input data on CPU unsigned int bytes = maxN * sizeof(float); float *h_idata = (float *) malloc(bytes); for (int i = 0; i < maxN; i++) { // Keep the numbers small so we don't get truncation error in the sum h_idata[i] = (rand() & 0xFF) / (float)RAND_MAX; } int maxNumBlocks = min(65535, maxN / maxThreads); // allocate mem for the result on host side float *h_odata = (float *) malloc(maxNumBlocks*sizeof(float)); // allocate device memory and data float *d_idata = NULL; float *d_odata = NULL; checkCudaErrors(cudaMalloc((void **) &d_idata, bytes)); checkCudaErrors(cudaMalloc((void **) &d_odata, maxNumBlocks*sizeof(float))); // copy data directly to device memory checkCudaErrors(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_odata, h_idata, maxNumBlocks*sizeof(float), cudaMemcpyHostToDevice)); // warm-up reduce(maxN, maxThreads, maxNumBlocks, d_idata, d_odata); int testIterations = 100; StopWatchInterface *timer = NULL; sdkCreateTimer(&timer); // print headers printf("N, %d blocks one pass, %d blocks multipass\n", maxBlocks, maxBlocks); for (int i = minN; i <= maxN; i *= 2) { printf("%d, ", i); for (int multiPass = 0; multiPass <= 1; multiPass++) { sdkResetTimer(&timer); int numBlocks = 0; int numThreads = 0; getNumBlocksAndThreads(i, maxBlocks, maxThreads, numBlocks, numThreads); benchmarkReduce(i, numThreads, numBlocks, maxThreads, maxBlocks, testIterations, multiPass==1, false, 1, timer, h_odata, d_idata, d_odata); float reduceTime = sdkGetAverageTimerValue(&timer); printf("%f%s", reduceTime, multiPass==0 ? ", " : "\n"); } } printf("\n"); // cleanup sdkDeleteTimer(&timer); free(h_idata); free(h_odata); cudaFree(d_idata); cudaFree(d_odata); } //////////////////////////////////////////////////////////////////////////////// // The main function which runs the reduction test. 
//////////////////////////////////////////////////////////////////////////////// bool runTest(int argc, char **argv) { int size = 1<<20; // number of elements to reduce int maxThreads = 128; // number of threads per block int maxBlocks = 64; bool cpuFinalReduction = false; int cpuFinalThreshold = 1; bool multipass = false; bool bTestResult = false; if (checkCmdLineFlag(argc, (const char **) argv, "b")) { size = getCmdLineArgumentInt(argc, (const char **)argv, "b"); } if (checkCmdLineFlag(argc, (const char **) argv, "threads")) { maxThreads = getCmdLineArgumentInt(argc, (const char **)argv, "threads"); } if (checkCmdLineFlag(argc, (const char **) argv, "maxblocks")) { maxBlocks = getCmdLineArgumentInt(argc, (const char **)argv, "maxblocks"); } printf("%d elements\n", size); printf("%d threads (max)\n", maxThreads); cpuFinalReduction = checkCmdLineFlag(argc, (const char **) argv, "cpufinal"); multipass = checkCmdLineFlag(argc, (const char **) argv, "multipass"); if (checkCmdLineFlag(argc, (const char **) argv, "cputhresh")) { cpuFinalThreshold = getCmdLineArgumentInt(argc, (const char **) argv, "cputhresh"); } bool runShmoo = checkCmdLineFlag(argc, (const char **) argv, "shmoo"); if (runShmoo) { shmoo(1, 33554432, maxThreads, maxBlocks); } else { // create random input data on CPU unsigned int bytes = size * sizeof(float); float *h_idata = (float *) malloc(bytes); for (int i=0; i<size; i++) { // Keep the numbers small so we don't get truncation error in the sum h_idata[i] = (rand() & 0xFF) / (float)RAND_MAX; } int numBlocks = 0; int numThreads = 0; getNumBlocksAndThreads(size, maxBlocks, maxThreads, numBlocks, numThreads); if (numBlocks == 1) { cpuFinalThreshold = 1; } // allocate mem for the result on host side float *h_odata = (float *) malloc(numBlocks*sizeof(float)); printf("%d blocks\n", numBlocks); // allocate device memory and data float *d_idata = NULL; float *d_odata = NULL; checkCudaErrors(cudaMalloc((void **) &d_idata, bytes)); checkCudaErrors(cudaMalloc((void **) &d_odata, numBlocks*sizeof(float))); // copy data directly to device memory checkCudaErrors(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(d_odata, h_idata, numBlocks*sizeof(float), cudaMemcpyHostToDevice)); // warm-up reduce(size, numThreads, numBlocks, d_idata, d_odata); int testIterations = 100; StopWatchInterface *timer = 0; sdkCreateTimer(&timer); float gpu_result = 0; gpu_result = benchmarkReduce(size, numThreads, numBlocks, maxThreads, maxBlocks, testIterations, multipass, cpuFinalReduction, cpuFinalThreshold, timer, h_odata, d_idata, d_odata); float reduceTime = sdkGetAverageTimerValue(&timer); printf("Average time: %f ms\n", reduceTime); printf("Bandwidth: %f GB/s\n\n", (size * sizeof(int)) / (reduceTime * 1.0e6)); // compute reference solution float cpu_result = reduceCPU<float>(h_idata, size); printf("GPU result = %0.12f\n", gpu_result); printf("CPU result = %0.12f\n", cpu_result); double threshold = 1e-8 * size; double diff = abs((double)gpu_result - (double)cpu_result); bTestResult = (diff < threshold); // cleanup sdkDeleteTimer(&timer); free(h_idata); free(h_odata); cudaFree(d_idata); cudaFree(d_odata); } return bTestResult; }
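The threadFenceReduction pair above depends on a kernel header (threadFenceReduction_kernel.cuh) that is not included in this dump; setRetirementCount() presumably resets a device-side counter before each timing run. The snippet below is a minimal sketch of the single-pass pattern the sample's comments describe, not the sample's actual kernel: each block reduces its slice in shared memory, publishes a per-block partial sum, and the last block to retire (detected with __threadfence() plus an atomic counter) folds the partials into the final result. The names sumSinglePass, partial, and retirementCount are placeholders; the launch is assumed to use a power-of-two block size and blockDim.x * sizeof(float) bytes of dynamic shared memory.

#include <cuda_runtime.h>

__device__ unsigned int retirementCount = 0;

__global__ void sumSinglePass(const float *in, float *partial, float *out, int n)
{
    extern __shared__ float sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i   = blockIdx.x * blockDim.x + tid;

    // per-block tree reduction into shared memory (blockDim.x must be a power of two)
    sdata[tid] = (i < (unsigned int)n) ? in[i] : 0.0f;
    __syncthreads();
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
        if (tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0) partial[blockIdx.x] = sdata[0];

    if (gridDim.x == 1) {
        if (tid == 0) *out = sdata[0];
        return;
    }

    // make this block's partial sum visible to other blocks before announcing completion
    __shared__ bool amLast;
    __threadfence();
    if (tid == 0) {
        unsigned int ticket = atomicInc(&retirementCount, gridDim.x);
        amLast = (ticket == gridDim.x - 1);
    }
    __syncthreads();

    if (amLast) {
        // the last block to finish sums the per-block partials
        float sum = 0.0f;
        for (unsigned int b = tid; b < gridDim.x; b += blockDim.x) sum += partial[b];
        sdata[tid] = sum;
        __syncthreads();
        for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {
            if (tid < s) sdata[tid] += sdata[tid + s];
            __syncthreads();
        }
        if (tid == 0) { *out = sdata[0]; retirementCount = 0; }
    }
}

A launch of this sketch would look like sumSinglePass<<<blocks, threads, threads * sizeof(float)>>>(d_in, d_partial, d_out, n), with d_partial holding at least `blocks` floats.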
77a0b123b12a3ba53ebe5e0468be3b4f07e2b0af.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * Global Memory (Linear Array) using Unified Memory */ #include <stdio.h> #include <stdlib.h> void check_cuda_errors() { hipError_t rc; rc = hipGetLastError(); if (rc != hipSuccess) { printf("Last CUDA error %s\n", hipGetErrorString(rc)); } } __global__ void incrementor(int* numbers) { numbers[threadIdx.x]++; } int main(int argc, char **argv) { int *device_mem; int i, num_elements; // Ask user for number of elements printf("How many elements to increment? "); scanf("%d", &num_elements); // Seed our RNG srand(0); // "Malloc" device memory hipMallocManaged((void **)&device_mem, num_elements * sizeof(int)); printf("Incrementor input:\n"); for (i = 0; i < num_elements; i++) { device_mem[i] = rand() % 100; printf("start[%d] = %d\n", i, device_mem[i]); } hipLaunchKernelGGL(( incrementor), dim3(1), dim3(num_elements), 0, 0, device_mem); check_cuda_errors(); // Ensure that we don't proceed till we get the results! hipDeviceSynchronize(); printf("Incrementor results:\n"); for (i = 0; i < num_elements; i++) { printf("result[%d] = %d\n", i, device_mem[i]); } // Free both host and device memory hipFree(device_mem); return 0; }
77a0b123b12a3ba53ebe5e0468be3b4f07e2b0af.cu
/** * Global Memory (Linear Array) using Unified Memory */ #include <stdio.h> #include <stdlib.h> void check_cuda_errors() { cudaError_t rc; rc = cudaGetLastError(); if (rc != cudaSuccess) { printf("Last CUDA error %s\n", cudaGetErrorString(rc)); } } __global__ void incrementor(int* numbers) { numbers[threadIdx.x]++; } int main(int argc, char **argv) { int *device_mem; int i, num_elements; // Ask user for number of elements printf("How many elements to increment? "); scanf("%d", &num_elements); // Seed our RNG srand(0); // "Malloc" device memory cudaMallocManaged((void **)&device_mem, num_elements * sizeof(int)); printf("Incrementor input:\n"); for (i = 0; i < num_elements; i++) { device_mem[i] = rand() % 100; printf("start[%d] = %d\n", i, device_mem[i]); } incrementor<<<1, num_elements>>>(device_mem); check_cuda_errors(); // Ensure that we don't proceed till we get the results! cudaDeviceSynchronize(); printf("Incrementor results:\n"); for (i = 0; i < num_elements; i++) { printf("result[%d] = %d\n", i, device_mem[i]); } // Free both host and device memory cudaFree(device_mem); return 0; }
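check_cuda_errors() in the incrementor above is called immediately after the launch, so it can only report launch-configuration problems (for example, requesting more than the per-block thread limit, which the single-block <<<1, num_elements>>> launch will hit for large element counts); errors raised while the kernel runs only surface after synchronization. Below is a minimal standalone variant, assuming nothing beyond the CUDA runtime: a grid-stride kernel that handles any element count, with a second error check after cudaDeviceSynchronize(). The name incrementor_strided and the element count are illustrative only.

#include <stdio.h>
#include <cuda_runtime.h>

__global__ void incrementor_strided(int *numbers, int n)
{
    // grid-stride loop: works for any n, not just n <= max threads per block
    for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x)
        numbers[i]++;
}

int main(void)
{
    const int n = 1 << 20;          // hypothetical element count
    int *data = NULL;
    cudaMallocManaged(&data, n * sizeof(int));
    for (int i = 0; i < n; i++) data[i] = i;

    int threads = 256;
    int blocks  = (n + threads - 1) / threads;
    incrementor_strided<<<blocks, threads>>>(data, n);

    // cudaGetLastError() right after the launch only reports launch errors;
    // errors raised while the kernel runs are reported by the sync below.
    cudaError_t rc = cudaGetLastError();
    if (rc != cudaSuccess) printf("launch error: %s\n", cudaGetErrorString(rc));
    rc = cudaDeviceSynchronize();
    if (rc != cudaSuccess) printf("kernel error: %s\n", cudaGetErrorString(rc));

    printf("data[0] = %d, data[%d] = %d\n", data[0], n - 1, data[n - 1]);
    cudaFree(data);
    return 0;
}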
d860a453020de41e564f512fb95a453d9b06a014.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #define BLOCK_SIZE 256 #define STR_SIZE 256 #define DEVICE 0 #define HALO 1 // halo width along one direction when advancing to the next iteration #define BENCH_PRINT void run(int argc, char** argv); int rows, cols; int* data; int** wall; int* result; #define M_SEED 9 int pyramid_height; int num_iter_control; //#define BENCH_PRINT void init(int argc, char** argv) { if(argc==6){ cols = atoi(argv[1]); rows = atoi(argv[2]); pyramid_height=atoi(argv[3]); /////////////////////////////// // Running Control Added By Roy /////////////////////////////// int dev_id = atoi(argv[4]); num_iter_control = atoi(argv[5]); hipDeviceProp_t prop; printf("Device ID is %d, Loop is %d \n",dev_id,num_iter_control); printf("Choosing CUDA Device....\n"); hipError_t set_result = hipSetDevice(dev_id); printf("Set Result is: %s\n",hipGetErrorString(set_result)); hipGetDevice(&dev_id); hipGetDeviceProperties(&prop, dev_id); printf("Name: %s\n", prop.name); /////////////////////////////// // End of Running Control /////////////////////////////// }else{ printf("Usage: dynproc row_len col_len pyramid_height\n"); exit(0); } data = new int[rows*cols]; wall = new int*[rows]; for(int n=0; n<rows; n++) wall[n]=data+cols*n; result = new int[cols]; int seed = M_SEED; srand(seed); for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { wall[i][j] = rand() % 10; } } #ifdef BENCH_PRINT for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { //printf("%d ",wall[i][j]) ; } //printf("\n") ; } #endif } void fatal(char *s) { fprintf(stderr, "error: %s\n", s); } #define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max)) #define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x ) #define MIN(a, b) ((a)<=(b) ? (a) : (b)) __global__ void dynproc_kernel( int iteration, int *gpuWall, int *gpuSrc, int *gpuResults, int cols, int rows, int startStep, int border) { __shared__ int prev[BLOCK_SIZE]; __shared__ int result[BLOCK_SIZE]; int bx = blockIdx.x; int tx=threadIdx.x; // each block finally computes result for a small block // after N iterations. // it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_cols = BLOCK_SIZE-iteration*HALO*2; // calculate the boundary for the block according to // the boundary of its small block int blkX = small_block_cols*bx-border; int blkXmax = blkX+BLOCK_SIZE-1; // calculate the global thread coordination int xidx = blkX+tx; // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1; int W = tx-1; int E = tx+1; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? validXmax : E; bool isValid = IN_RANGE(tx, validXmin, validXmax); if(IN_RANGE(xidx, 0, cols-1)){ prev[tx] = gpuSrc[xidx]; } __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 
14 2012 bool computed; for (int i=0; i<iteration ; i++){ computed = false; if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \ isValid){ computed = true; int left = prev[W]; int up = prev[tx]; int right = prev[E]; int shortest = MIN(left, up); shortest = MIN(shortest, right); int index = cols*(startStep+i)+xidx; result[tx] = shortest + gpuWall[index]; } __syncthreads(); if(i==iteration-1) break; if(computed) //Assign the computation range prev[tx]= result[tx]; __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012 } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed){ gpuResults[xidx]=result[tx]; } } /* compute N time steps */ int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols, \ int pyramid_height, int blockCols, int borderCols) { dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(blockCols); int src = 1, dst = 0; hipEvent_t start, stop; float elapsedTime; hipEventCreate(&start); hipEventRecord(start,0); for(int ii=0;ii<num_iter_control;ii++){ for (int t = 0; t < rows-1; t+=pyramid_height) { int temp = src; src = dst; dst = temp; hipLaunchKernelGGL(( dynproc_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, MIN(pyramid_height, rows-t-1), gpuWall, gpuResult[src], gpuResult[dst], cols,rows, t, borderCols); } } hipDeviceSynchronize(); hipEventCreate(&stop); hipEventRecord(stop,0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start,stop); printf("Kernal Elapsed time : %f ms\n" ,elapsedTime); return dst; } int main(int argc, char** argv) { int num_devices; hipGetDeviceCount(&num_devices); if (num_devices > 1) hipSetDevice(DEVICE); run(argc,argv); return EXIT_SUCCESS; } void run(int argc, char** argv) { init(argc, argv); /* --------------- pyramid parameters --------------- */ int borderCols = (pyramid_height)*HALO; int smallBlockCol = BLOCK_SIZE-(pyramid_height)*HALO*2; int blockCols = cols/smallBlockCol+((cols%smallBlockCol==0)?0:1); printf("pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: %d\nblockGrid:[%d]\ntargetBlock:[%d]\n",\ pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol); int *gpuWall, *gpuResult[2]; int size = rows*cols; hipMalloc((void**)&gpuResult[0], sizeof(int)*cols); hipMalloc((void**)&gpuResult[1], sizeof(int)*cols); hipMemcpy(gpuResult[0], data, sizeof(int)*cols, hipMemcpyHostToDevice); hipMalloc((void**)&gpuWall, sizeof(int)*(size-cols)); hipMemcpy(gpuWall, data+cols, sizeof(int)*(size-cols), hipMemcpyHostToDevice); int final_ret = calc_path(gpuWall, gpuResult, rows, cols, \ pyramid_height, blockCols, borderCols); hipMemcpy(result, gpuResult[final_ret], sizeof(int)*cols, hipMemcpyDeviceToHost); hipFree(gpuWall); hipFree(gpuResult[0]); hipFree(gpuResult[1]); delete [] data; delete [] wall; delete [] result; }
d860a453020de41e564f512fb95a453d9b06a014.cu
#include <stdio.h> #include <stdlib.h> #include <time.h> #include <assert.h> #define BLOCK_SIZE 256 #define STR_SIZE 256 #define DEVICE 0 #define HALO 1 // halo width along one direction when advancing to the next iteration #define BENCH_PRINT void run(int argc, char** argv); int rows, cols; int* data; int** wall; int* result; #define M_SEED 9 int pyramid_height; int num_iter_control; //#define BENCH_PRINT void init(int argc, char** argv) { if(argc==6){ cols = atoi(argv[1]); rows = atoi(argv[2]); pyramid_height=atoi(argv[3]); /////////////////////////////// // Running Control Added By Roy /////////////////////////////// int dev_id = atoi(argv[4]); num_iter_control = atoi(argv[5]); cudaDeviceProp prop; printf("Device ID is %d, Loop is %d \n",dev_id,num_iter_control); printf("Choosing CUDA Device....\n"); cudaError_t set_result = cudaSetDevice(dev_id); printf("Set Result is: %s\n",cudaGetErrorString(set_result)); cudaGetDevice(&dev_id); cudaGetDeviceProperties(&prop, dev_id); printf("Name: %s\n", prop.name); /////////////////////////////// // End of Running Control /////////////////////////////// }else{ printf("Usage: dynproc row_len col_len pyramid_height\n"); exit(0); } data = new int[rows*cols]; wall = new int*[rows]; for(int n=0; n<rows; n++) wall[n]=data+cols*n; result = new int[cols]; int seed = M_SEED; srand(seed); for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { wall[i][j] = rand() % 10; } } #ifdef BENCH_PRINT for (int i = 0; i < rows; i++) { for (int j = 0; j < cols; j++) { //printf("%d ",wall[i][j]) ; } //printf("\n") ; } #endif } void fatal(char *s) { fprintf(stderr, "error: %s\n", s); } #define IN_RANGE(x, min, max) ((x)>=(min) && (x)<=(max)) #define CLAMP_RANGE(x, min, max) x = (x<(min)) ? min : ((x>(max)) ? max : x ) #define MIN(a, b) ((a)<=(b) ? (a) : (b)) __global__ void dynproc_kernel( int iteration, int *gpuWall, int *gpuSrc, int *gpuResults, int cols, int rows, int startStep, int border) { __shared__ int prev[BLOCK_SIZE]; __shared__ int result[BLOCK_SIZE]; int bx = blockIdx.x; int tx=threadIdx.x; // each block finally computes result for a small block // after N iterations. // it is the non-overlapping small blocks that cover // all the input data // calculate the small block size int small_block_cols = BLOCK_SIZE-iteration*HALO*2; // calculate the boundary for the block according to // the boundary of its small block int blkX = small_block_cols*bx-border; int blkXmax = blkX+BLOCK_SIZE-1; // calculate the global thread coordination int xidx = blkX+tx; // effective range within this block that falls within // the valid range of the input data // used to rule out computation outside the boundary. int validXmin = (blkX < 0) ? -blkX : 0; int validXmax = (blkXmax > cols-1) ? BLOCK_SIZE-1-(blkXmax-cols+1) : BLOCK_SIZE-1; int W = tx-1; int E = tx+1; W = (W < validXmin) ? validXmin : W; E = (E > validXmax) ? validXmax : E; bool isValid = IN_RANGE(tx, validXmin, validXmax); if(IN_RANGE(xidx, 0, cols-1)){ prev[tx] = gpuSrc[xidx]; } __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 
14 2012 bool computed; for (int i=0; i<iteration ; i++){ computed = false; if( IN_RANGE(tx, i+1, BLOCK_SIZE-i-2) && \ isValid){ computed = true; int left = prev[W]; int up = prev[tx]; int right = prev[E]; int shortest = MIN(left, up); shortest = MIN(shortest, right); int index = cols*(startStep+i)+xidx; result[tx] = shortest + gpuWall[index]; } __syncthreads(); if(i==iteration-1) break; if(computed) //Assign the computation range prev[tx]= result[tx]; __syncthreads(); // [Ronny] Added sync to avoid race on prev Aug. 14 2012 } // update the global memory // after the last iteration, only threads coordinated within the // small block perform the calculation and switch on ``computed'' if (computed){ gpuResults[xidx]=result[tx]; } } /* compute N time steps */ int calc_path(int *gpuWall, int *gpuResult[2], int rows, int cols, \ int pyramid_height, int blockCols, int borderCols) { dim3 dimBlock(BLOCK_SIZE); dim3 dimGrid(blockCols); int src = 1, dst = 0; cudaEvent_t start, stop; float elapsedTime; cudaEventCreate(&start); cudaEventRecord(start,0); for(int ii=0;ii<num_iter_control;ii++){ for (int t = 0; t < rows-1; t+=pyramid_height) { int temp = src; src = dst; dst = temp; dynproc_kernel<<<dimGrid, dimBlock>>>( MIN(pyramid_height, rows-t-1), gpuWall, gpuResult[src], gpuResult[dst], cols,rows, t, borderCols); } } cudaThreadSynchronize(); cudaEventCreate(&stop); cudaEventRecord(stop,0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start,stop); printf("Kernal Elapsed time : %f ms\n" ,elapsedTime); return dst; } int main(int argc, char** argv) { int num_devices; cudaGetDeviceCount(&num_devices); if (num_devices > 1) cudaSetDevice(DEVICE); run(argc,argv); return EXIT_SUCCESS; } void run(int argc, char** argv) { init(argc, argv); /* --------------- pyramid parameters --------------- */ int borderCols = (pyramid_height)*HALO; int smallBlockCol = BLOCK_SIZE-(pyramid_height)*HALO*2; int blockCols = cols/smallBlockCol+((cols%smallBlockCol==0)?0:1); printf("pyramidHeight: %d\ngridSize: [%d]\nborder:[%d]\nblockSize: %d\nblockGrid:[%d]\ntargetBlock:[%d]\n",\ pyramid_height, cols, borderCols, BLOCK_SIZE, blockCols, smallBlockCol); int *gpuWall, *gpuResult[2]; int size = rows*cols; cudaMalloc((void**)&gpuResult[0], sizeof(int)*cols); cudaMalloc((void**)&gpuResult[1], sizeof(int)*cols); cudaMemcpy(gpuResult[0], data, sizeof(int)*cols, cudaMemcpyHostToDevice); cudaMalloc((void**)&gpuWall, sizeof(int)*(size-cols)); cudaMemcpy(gpuWall, data+cols, sizeof(int)*(size-cols), cudaMemcpyHostToDevice); int final_ret = calc_path(gpuWall, gpuResult, rows, cols, \ pyramid_height, blockCols, borderCols); cudaMemcpy(result, gpuResult[final_ret], sizeof(int)*cols, cudaMemcpyDeviceToHost); cudaFree(gpuWall); cudaFree(gpuResult[0]); cudaFree(gpuResult[1]); delete [] data; delete [] wall; delete [] result; }
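calc_path() in the pathfinder pair above times the ping-pong kernel loop with events: record a start event, swap gpuResult[src] and gpuResult[dst] each step, record a stop event, then read the elapsed time after synchronizing on the stop event (the .cu version still calls the deprecated cudaThreadSynchronize(), which the HIP port maps to hipDeviceSynchronize()). A minimal, self-contained sketch of that timing pattern follows, with a placeholder kernel named work standing in for dynproc_kernel; the buffer size and iteration count are made up.

#include <stdio.h>
#include <cuda_runtime.h>

__global__ void work(float *buf, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) buf[i] = buf[i] * 0.5f + 1.0f;   // stand-in for the real kernel body
}

int main(void)
{
    const int n = 1 << 20;
    float *d_buf = NULL;
    cudaMalloc(&d_buf, n * sizeof(float));
    cudaMemset(d_buf, 0, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // record start, launch the timed region, record stop, then wait on the stop event
    cudaEventRecord(start, 0);
    for (int iter = 0; iter < 100; iter++)
        work<<<(n + 255) / 256, 256>>>(d_buf, n);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);   // sufficient to wait for the timed work; no extra device sync needed

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("Kernel elapsed time: %f ms\n", ms);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_buf);
    return 0;
}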
6b5230f5048fb7154a354bbe030a23f10c896037.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * assign1_1.c * * Contains code for setting up and finishing the simulation. * NOTE: YOU SHOULD IMPLEMENT NOT HAVE TO LOOK HERE, IMPLEMENT YOUR CODE IN * simulate.c. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include "file.h" #include "timer.h" #include "simulate.h" #include <iostream> double c = 0.15; using namespace std; static void checkCudaCall(hipError_t result) { if (result != hipSuccess) { cerr << "cuda error: " << hipGetErrorString(result) << endl; exit(1); } } __global__ void calc(int step, int max, double *old, double *cur, double *next){ unsigned index = blockIdx.x * blockDim.x + threadIdx.x; int i_min = step * index; int i_max = ((index + 1) * step) - 1; // TODO: laatste moet I_MAX == i_max hebben for(int i = i_min; i < i_max; i++){ next[i] = 2.0 * current[i] - old[i] + c * (cur[i - 1] - (2.0 * cur[i] - cur[i + 1])); } } double *simulate(const int i_max, const int t_max, const int num_threads, double *old, double *cur, double *next){ double *old_c, *cur_c, *next_c; // allocate checkCudaCall(hipMalloc(&old_c, i_max * sizeof(double))); checkCudaCall(hipMalloc(&cur_c, i_max * sizeof(double))); checkCudaCall(hipMalloc(&new_c, i_max * sizeof(double))); // fill // copy data to the vectors checkCudaCall(hipMemcpy(old_c, old, i_max * sizeof(double), hipMemcpyHostToDevice)); checkCudaCall(hipMemcpy(cur_c, cur, i_max * sizeof(double), hipMemcpyHostToDevice)); checkCudaCall(hipMemcpy(next, next, i_max * sizeof(double), hipMemcpyHostToDevice)); int step = i_max / num_threads; for(int t = 0; t < t_max; t++){ hipLaunchKernelGGL(( calc), dim3(step), dim3(num_threads), 0, 0, step, i_max - 1, old, cur, next); double *tmp = old; old = cur; cur = next; next = tmp; } // copy cuda array to "normal" array checkCudaCall(hipMemcpy(cur, cur_c, i_max * sizeof(double), hipMemcpyDeviceToHost)); // free cuda mallocs checkCudaCall(hipFree(old)); checkCudaCall(hipFree(cur)); checkCudaCall(hipFree(next)); return current_array; } // ASSIGN COPY: (nothing changed yet) typedef double (*func_t)(double x); double gauss(double x){ return exp((-1 * x * x) / 2); } void fill(double *array, int offset, int range, double sample_start, double sample_end, func_t f) { int i; float dx; dx = (sample_end - sample_start) / range; for (i = 0; i < range; i++) { array[i + offset] = f(sample_start + i * dx); } } int main(int argc, char *argv[]) { double *old, *current, *next; int t_max, i_max, num_threads; double time; /* Parse commandline args: i_max t_max num_threads */ if (argc < 4) { printf("Usage: %s i_max t_max num_threads [initial_data]\n", argv[0]); printf(" - i_max: number of discrete amplitude points, should be >2\n"); printf(" - t_max: number of discrete timesteps, should be >=1\n"); printf(" - num_threads: number of threads to use for simulation, " "should be >=1\n"); printf(" - initial_data: select what data should be used for the first " "two generation.\n"); printf(" Available options are:\n"); printf(" * sin: one period of the sinus function at the start.\n"); printf(" * sinfull: entire data is filled with the sinus.\n"); printf(" * gauss: a single gauss-function at the start.\n"); printf(" * file <2 filenames>: allows you to specify a file with on " "each line a float for both generations.\n"); return EXIT_FAILURE; } i_max = atoi(argv[1]); t_max = atoi(argv[2]); num_threads = atoi(argv[3]); if (i_max < 3) { printf("argument error: i_max should be >2.\n"); return EXIT_FAILURE; } if (t_max 
< 1) { printf("argument error: t_max should be >=1.\n"); return EXIT_FAILURE; } if (num_threads < 1) { printf("argument error: num_threads should be >=1.\n"); return EXIT_FAILURE; } /* Allocate and initialize buffers. */ old = malloc(i_max * sizeof(double)); current = malloc(i_max * sizeof(double)); next = malloc(i_max * sizeof(double)); if (old == NULL || current == NULL || next == NULL) { fprintf(stderr, "Could not allocate enough memory, aborting.\n"); return EXIT_FAILURE; } memset(old, 0, i_max * sizeof(double)); memset(current, 0, i_max * sizeof(double)); memset(next, 0, i_max * sizeof(double)); /* How should we will our first two generations? */ if (argc > 4) { if (strcmp(argv[4], "sin") == 0) { fill(old, 1, i_max/4, 0, 2*3.14, sin); fill(current, 2, i_max/4, 0, 2*3.14, sin); } else if (strcmp(argv[4], "sinfull") == 0) { fill(old, 1, i_max-2, 0, 10*3.14, sin); fill(current, 2, i_max-3, 0, 10*3.14, sin); } else if (strcmp(argv[4], "gauss") == 0) { fill(old, 1, i_max/4, -3, 3, gauss); fill(current, 2, i_max/4, -3, 3, gauss); } else if (strcmp(argv[4], "file") == 0) { if (argc < 7) { printf("No files specified!\n"); return EXIT_FAILURE; } file_read_double_array(argv[5], old, i_max); file_read_double_array(argv[6], current, i_max); } else { printf("Unknown initial mode: %s.\n", argv[4]); return EXIT_FAILURE; } } else { /* Default to sinus. */ fill(old, 1, i_max/4, 0, 2*3.14, sin); fill(current, 2, i_max/4, 0, 2*3.14, sin); } timer_start(); /* Call the actual simulation that should be implemented in simulate.c. */ simulate(i_max, t_max, num_threads, old, current, next); time = timer_end(); printf("Took %g seconds\n", time); printf("Normalized: %g seconds\n", time / (i_max * t_max)); file_write_double_array("result.txt", current, i_max); free(old); free(current); free(next); return EXIT_SUCCESS; }
6b5230f5048fb7154a354bbe030a23f10c896037.cu
/* * assign1_1.c * * Contains code for setting up and finishing the simulation. * NOTE: YOU SHOULD IMPLEMENT NOT HAVE TO LOOK HERE, IMPLEMENT YOUR CODE IN * simulate.c. */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <math.h> #include "file.h" #include "timer.h" #include "simulate.h" #include <iostream> double c = 0.15; using namespace std; static void checkCudaCall(cudaError_t result) { if (result != cudaSuccess) { cerr << "cuda error: " << cudaGetErrorString(result) << endl; exit(1); } } __global__ void calc(int step, int max, double *old, double *cur, double *next){ unsigned index = blockIdx.x * blockDim.x + threadIdx.x; int i_min = step * index; int i_max = ((index + 1) * step) - 1; // TODO: laatste moet I_MAX == i_max hebben for(int i = i_min; i < i_max; i++){ next[i] = 2.0 * current[i] - old[i] + c * (cur[i - 1] - (2.0 * cur[i] - cur[i + 1])); } } double *simulate(const int i_max, const int t_max, const int num_threads, double *old, double *cur, double *next){ double *old_c, *cur_c, *next_c; // allocate checkCudaCall(cudaMalloc(&old_c, i_max * sizeof(double))); checkCudaCall(cudaMalloc(&cur_c, i_max * sizeof(double))); checkCudaCall(cudaMalloc(&new_c, i_max * sizeof(double))); // fill // copy data to the vectors checkCudaCall(cudaMemcpy(old_c, old, i_max * sizeof(double), cudaMemcpyHostToDevice)); checkCudaCall(cudaMemcpy(cur_c, cur, i_max * sizeof(double), cudaMemcpyHostToDevice)); checkCudaCall(cudaMemcpy(next, next, i_max * sizeof(double), cudaMemcpyHostToDevice)); int step = i_max / num_threads; for(int t = 0; t < t_max; t++){ calc<<<step, num_threads>>>(step, i_max - 1, old, cur, next); double *tmp = old; old = cur; cur = next; next = tmp; } // copy cuda array to "normal" array checkCudaCall(cudaMemcpy(cur, cur_c, i_max * sizeof(double), cudaMemcpyDeviceToHost)); // free cuda mallocs checkCudaCall(cudaFree(old)); checkCudaCall(cudaFree(cur)); checkCudaCall(cudaFree(next)); return current_array; } // ASSIGN COPY: (nothing changed yet) typedef double (*func_t)(double x); double gauss(double x){ return exp((-1 * x * x) / 2); } void fill(double *array, int offset, int range, double sample_start, double sample_end, func_t f) { int i; float dx; dx = (sample_end - sample_start) / range; for (i = 0; i < range; i++) { array[i + offset] = f(sample_start + i * dx); } } int main(int argc, char *argv[]) { double *old, *current, *next; int t_max, i_max, num_threads; double time; /* Parse commandline args: i_max t_max num_threads */ if (argc < 4) { printf("Usage: %s i_max t_max num_threads [initial_data]\n", argv[0]); printf(" - i_max: number of discrete amplitude points, should be >2\n"); printf(" - t_max: number of discrete timesteps, should be >=1\n"); printf(" - num_threads: number of threads to use for simulation, " "should be >=1\n"); printf(" - initial_data: select what data should be used for the first " "two generation.\n"); printf(" Available options are:\n"); printf(" * sin: one period of the sinus function at the start.\n"); printf(" * sinfull: entire data is filled with the sinus.\n"); printf(" * gauss: a single gauss-function at the start.\n"); printf(" * file <2 filenames>: allows you to specify a file with on " "each line a float for both generations.\n"); return EXIT_FAILURE; } i_max = atoi(argv[1]); t_max = atoi(argv[2]); num_threads = atoi(argv[3]); if (i_max < 3) { printf("argument error: i_max should be >2.\n"); return EXIT_FAILURE; } if (t_max < 1) { printf("argument error: t_max should be >=1.\n"); return EXIT_FAILURE; } if (num_threads < 1) { 
printf("argument error: num_threads should be >=1.\n"); return EXIT_FAILURE; } /* Allocate and initialize buffers. */ old = malloc(i_max * sizeof(double)); current = malloc(i_max * sizeof(double)); next = malloc(i_max * sizeof(double)); if (old == NULL || current == NULL || next == NULL) { fprintf(stderr, "Could not allocate enough memory, aborting.\n"); return EXIT_FAILURE; } memset(old, 0, i_max * sizeof(double)); memset(current, 0, i_max * sizeof(double)); memset(next, 0, i_max * sizeof(double)); /* How should we will our first two generations? */ if (argc > 4) { if (strcmp(argv[4], "sin") == 0) { fill(old, 1, i_max/4, 0, 2*3.14, sin); fill(current, 2, i_max/4, 0, 2*3.14, sin); } else if (strcmp(argv[4], "sinfull") == 0) { fill(old, 1, i_max-2, 0, 10*3.14, sin); fill(current, 2, i_max-3, 0, 10*3.14, sin); } else if (strcmp(argv[4], "gauss") == 0) { fill(old, 1, i_max/4, -3, 3, gauss); fill(current, 2, i_max/4, -3, 3, gauss); } else if (strcmp(argv[4], "file") == 0) { if (argc < 7) { printf("No files specified!\n"); return EXIT_FAILURE; } file_read_double_array(argv[5], old, i_max); file_read_double_array(argv[6], current, i_max); } else { printf("Unknown initial mode: %s.\n", argv[4]); return EXIT_FAILURE; } } else { /* Default to sinus. */ fill(old, 1, i_max/4, 0, 2*3.14, sin); fill(current, 2, i_max/4, 0, 2*3.14, sin); } timer_start(); /* Call the actual simulation that should be implemented in simulate.c. */ simulate(i_max, t_max, num_threads, old, current, next); time = timer_end(); printf("Took %g seconds\n", time); printf("Normalized: %g seconds\n", time / (i_max * t_max)); file_write_double_array("result.txt", current, i_max); free(old); free(current); free(next); return EXIT_SUCCESS; }
cf97c2fc01e5f988070f4d8f0563d99a2ee31bcd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "utility/src/utils.cuh" DREAMPLACE_BEGIN_NAMESPACE template <typename T> __global__ void updatePinOffset( const T* node_size_x, const T* node_size_y, const int *flat_node2pin_start_map, const int *flat_node2pin_map, const T *node_ratios, const int num_nodes, T *pin_offset_x, T *pin_offset_y ) { const int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < num_nodes) { T ratio = (node_ratios[i] - 1) / 2; T sx = node_size_x[i]; T sy = node_size_y[i]; int start = flat_node2pin_start_map[i]; int end = flat_node2pin_start_map[i + 1]; for (int j = start; j < end; ++j) { int pin_id = flat_node2pin_map[j]; pin_offset_x[pin_id] += ratio * sx; pin_offset_y[pin_id] += ratio * sy; } } } template <typename T> void updatePinOffsetCudaLauncher( const T* node_size_x, const T* node_size_y, const int *flat_node2pin_start_map, const int *flat_node2pin_map, const T *node_ratios, const int num_nodes, T *pin_offset_x, T *pin_offset_y ) { int thread_count = 512; int block_count = ceilDiv(num_nodes, thread_count); hipLaunchKernelGGL(( updatePinOffset), dim3(block_count), dim3(thread_count), 0, 0, node_size_x, node_size_y, flat_node2pin_start_map, flat_node2pin_map, node_ratios, num_nodes, pin_offset_x, pin_offset_y ); } #define REGISTER_KERNEL_LAUNCHER(T) \ template void updatePinOffsetCudaLauncher<T>( \ const T* node_size_x, const T* node_size_y, \ const int *flat_node2pin_start_map, \ const int *flat_node2pin_map, \ const T *node_ratios, \ const int num_nodes, \ T *pin_offset_x, T *pin_offset_y) REGISTER_KERNEL_LAUNCHER(float); REGISTER_KERNEL_LAUNCHER(double); DREAMPLACE_END_NAMESPACE
cf97c2fc01e5f988070f4d8f0563d99a2ee31bcd.cu
#include "utility/src/utils.cuh" DREAMPLACE_BEGIN_NAMESPACE template <typename T> __global__ void updatePinOffset( const T* node_size_x, const T* node_size_y, const int *flat_node2pin_start_map, const int *flat_node2pin_map, const T *node_ratios, const int num_nodes, T *pin_offset_x, T *pin_offset_y ) { const int i = threadIdx.x + blockDim.x * blockIdx.x; if (i < num_nodes) { T ratio = (node_ratios[i] - 1) / 2; T sx = node_size_x[i]; T sy = node_size_y[i]; int start = flat_node2pin_start_map[i]; int end = flat_node2pin_start_map[i + 1]; for (int j = start; j < end; ++j) { int pin_id = flat_node2pin_map[j]; pin_offset_x[pin_id] += ratio * sx; pin_offset_y[pin_id] += ratio * sy; } } } template <typename T> void updatePinOffsetCudaLauncher( const T* node_size_x, const T* node_size_y, const int *flat_node2pin_start_map, const int *flat_node2pin_map, const T *node_ratios, const int num_nodes, T *pin_offset_x, T *pin_offset_y ) { int thread_count = 512; int block_count = ceilDiv(num_nodes, thread_count); updatePinOffset<<<block_count, thread_count>>>( node_size_x, node_size_y, flat_node2pin_start_map, flat_node2pin_map, node_ratios, num_nodes, pin_offset_x, pin_offset_y ); } #define REGISTER_KERNEL_LAUNCHER(T) \ template void updatePinOffsetCudaLauncher<T>( \ const T* node_size_x, const T* node_size_y, \ const int *flat_node2pin_start_map, \ const int *flat_node2pin_map, \ const T *node_ratios, \ const int num_nodes, \ T *pin_offset_x, T *pin_offset_y) REGISTER_KERNEL_LAUNCHER(float); REGISTER_KERNEL_LAUNCHER(double); DREAMPLACE_END_NAMESPACE
5dd8ef93f4a675395a7758cef456670dc0e95054.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_flux_calc_kernelz; int xdim0_flux_calc_kernelz_h = -1; __constant__ int ydim0_flux_calc_kernelz; int ydim0_flux_calc_kernelz_h = -1; __constant__ int xdim1_flux_calc_kernelz; int xdim1_flux_calc_kernelz_h = -1; __constant__ int ydim1_flux_calc_kernelz; int ydim1_flux_calc_kernelz_h = -1; __constant__ int xdim2_flux_calc_kernelz; int xdim2_flux_calc_kernelz_h = -1; __constant__ int ydim2_flux_calc_kernelz; int ydim2_flux_calc_kernelz_h = -1; __constant__ int xdim3_flux_calc_kernelz; int xdim3_flux_calc_kernelz_h = -1; __constant__ int ydim3_flux_calc_kernelz; int ydim3_flux_calc_kernelz_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x,y,z) (x+xdim0_flux_calc_kernelz*(y)+xdim0_flux_calc_kernelz*ydim0_flux_calc_kernelz*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_flux_calc_kernelz*(y)+xdim1_flux_calc_kernelz*ydim1_flux_calc_kernelz*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_flux_calc_kernelz*(y)+xdim2_flux_calc_kernelz*ydim2_flux_calc_kernelz*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_flux_calc_kernelz*(y)+xdim3_flux_calc_kernelz*ydim3_flux_calc_kernelz*(z)) //user function __device__ void flux_calc_kernelz_gpu( double *vol_flux_z, const double *zarea, const double *zvel0, const double *zvel1) { vol_flux_z[OPS_ACC0(0,0,0)] = 0.125 * dt * (zarea[OPS_ACC1(0,0,0)]) * ( zvel0[OPS_ACC2(0,0,0)] + zvel0[OPS_ACC2(1,0,0)] + zvel0[OPS_ACC2(1,0,0)] + zvel0[OPS_ACC2(1,1,0)] + zvel1[OPS_ACC3(0,0,0)] + zvel1[OPS_ACC3(1,0,0)] + zvel1[OPS_ACC3(0,1,0)] + zvel1[OPS_ACC3(1,1,0)]); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_flux_calc_kernelz( double* __restrict arg0, const double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_flux_calc_kernelz + idx_z * 1*1 * xdim0_flux_calc_kernelz * ydim0_flux_calc_kernelz; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_flux_calc_kernelz + idx_z * 1*1 * xdim1_flux_calc_kernelz * ydim1_flux_calc_kernelz; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_flux_calc_kernelz + idx_z * 1*1 * xdim2_flux_calc_kernelz * ydim2_flux_calc_kernelz; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_flux_calc_kernelz + idx_z * 1*1 * xdim3_flux_calc_kernelz * ydim3_flux_calc_kernelz; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { flux_calc_kernelz_gpu(arg0, arg1, arg2, arg3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_flux_calc_kernelz(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_flux_calc_kernelz_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,107)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(107,"flux_calc_kernelz"); OPS_kernels[107].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = 
OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != xdim0_flux_calc_kernelz_h || ydim0 != ydim0_flux_calc_kernelz_h || xdim1 != xdim1_flux_calc_kernelz_h || ydim1 != ydim1_flux_calc_kernelz_h || xdim2 != xdim2_flux_calc_kernelz_h || ydim2 != ydim2_flux_calc_kernelz_h || xdim3 != xdim3_flux_calc_kernelz_h || ydim3 != ydim3_flux_calc_kernelz_h) { hipMemcpyToSymbol( xdim0_flux_calc_kernelz, &xdim0, sizeof(int) ); xdim0_flux_calc_kernelz_h = xdim0; hipMemcpyToSymbol( ydim0_flux_calc_kernelz, &ydim0, sizeof(int) ); ydim0_flux_calc_kernelz_h = ydim0; hipMemcpyToSymbol( xdim1_flux_calc_kernelz, &xdim1, sizeof(int) ); xdim1_flux_calc_kernelz_h = xdim1; hipMemcpyToSymbol( ydim1_flux_calc_kernelz, &ydim1, sizeof(int) ); ydim1_flux_calc_kernelz_h = ydim1; hipMemcpyToSymbol( xdim2_flux_calc_kernelz, &xdim2, sizeof(int) ); xdim2_flux_calc_kernelz_h = xdim2; hipMemcpyToSymbol( ydim2_flux_calc_kernelz, &ydim2, sizeof(int) ); ydim2_flux_calc_kernelz_h = ydim2; hipMemcpyToSymbol( xdim3_flux_calc_kernelz, &xdim3, sizeof(int) ); xdim3_flux_calc_kernelz_h = xdim3; hipMemcpyToSymbol( ydim3_flux_calc_kernelz, &ydim3, sizeof(int) ); ydim3_flux_calc_kernelz_h = ydim3; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? 
args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[107].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_flux_calc_kernelz), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[107].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[107].mpi_time += t2-t1; OPS_kernels[107].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[107].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[107].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[107].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_flux_calc_kernelz(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 107; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 107; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = 
ops_par_loop_flux_calc_kernelz_execute; if (OPS_diags > 1) { ops_timing_realloc(107,"flux_calc_kernelz"); } ops_enqueue_kernel(desc); } #endif
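Editor's note, before the matching .cu source below: apart from mechanical cuda*-to-hip* renames, the visible difference hipify introduced in the file above is the kernel-launch syntax. The following toy program (kernel and variable names invented for illustration, not taken from the generated code) shows the two launch forms side by side.

// Illustrative sketch only: the CUDA triple-chevron launch versus the hipLaunchKernelGGL
// form that hipify emits, as seen in ops_flux_calc_kernelz above.
#include "hip/hip_runtime.h"
#include <cstdio>

__global__ void scale_by_index(double *out, int n) {
  int i = blockDim.x * blockIdx.x + threadIdx.x;
  if (i < n) out[i] = 2.0 * i;           // bounds check, as in ops_flux_calc_kernelz
}

int main() {
  const int n = 1000;
  double *d_out;
  hipMalloc((void **)&d_out, n * sizeof(double));

  dim3 grid((n - 1) / 256 + 1), block(256);
  // CUDA form (see the .cu file below):  scale_by_index<<<grid, block>>>(d_out, n);
  // HIP form (as hipify emits it):       hipLaunchKernelGGL(kernel, grid, block, sharedMemBytes, stream, args...)
  hipLaunchKernelGGL(scale_by_index, grid, block, 0, 0, d_out, n);
  hipDeviceSynchronize();

  double last;
  hipMemcpy(&last, d_out + (n - 1), sizeof(double), hipMemcpyDeviceToHost);
  printf("out[%d] = %.1f\n", n - 1, last); // expect 1998.0
  hipFree(d_out);
  return 0;
}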
5dd8ef93f4a675395a7758cef456670dc0e95054.cu
// // auto-generated by ops.py // __constant__ int xdim0_flux_calc_kernelz; int xdim0_flux_calc_kernelz_h = -1; __constant__ int ydim0_flux_calc_kernelz; int ydim0_flux_calc_kernelz_h = -1; __constant__ int xdim1_flux_calc_kernelz; int xdim1_flux_calc_kernelz_h = -1; __constant__ int ydim1_flux_calc_kernelz; int ydim1_flux_calc_kernelz_h = -1; __constant__ int xdim2_flux_calc_kernelz; int xdim2_flux_calc_kernelz_h = -1; __constant__ int ydim2_flux_calc_kernelz; int ydim2_flux_calc_kernelz_h = -1; __constant__ int xdim3_flux_calc_kernelz; int xdim3_flux_calc_kernelz_h = -1; __constant__ int ydim3_flux_calc_kernelz; int ydim3_flux_calc_kernelz_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #define OPS_ACC0(x,y,z) (x+xdim0_flux_calc_kernelz*(y)+xdim0_flux_calc_kernelz*ydim0_flux_calc_kernelz*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_flux_calc_kernelz*(y)+xdim1_flux_calc_kernelz*ydim1_flux_calc_kernelz*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_flux_calc_kernelz*(y)+xdim2_flux_calc_kernelz*ydim2_flux_calc_kernelz*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_flux_calc_kernelz*(y)+xdim3_flux_calc_kernelz*ydim3_flux_calc_kernelz*(z)) //user function __device__ void flux_calc_kernelz_gpu( double *vol_flux_z, const double *zarea, const double *zvel0, const double *zvel1) { vol_flux_z[OPS_ACC0(0,0,0)] = 0.125 * dt * (zarea[OPS_ACC1(0,0,0)]) * ( zvel0[OPS_ACC2(0,0,0)] + zvel0[OPS_ACC2(1,0,0)] + zvel0[OPS_ACC2(1,0,0)] + zvel0[OPS_ACC2(1,1,0)] + zvel1[OPS_ACC3(0,0,0)] + zvel1[OPS_ACC3(1,0,0)] + zvel1[OPS_ACC3(0,1,0)] + zvel1[OPS_ACC3(1,1,0)]); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 __global__ void ops_flux_calc_kernelz( double* __restrict arg0, const double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_flux_calc_kernelz + idx_z * 1*1 * xdim0_flux_calc_kernelz * ydim0_flux_calc_kernelz; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_flux_calc_kernelz + idx_z * 1*1 * xdim1_flux_calc_kernelz * ydim1_flux_calc_kernelz; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_flux_calc_kernelz + idx_z * 1*1 * xdim2_flux_calc_kernelz * ydim2_flux_calc_kernelz; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_flux_calc_kernelz + idx_z * 1*1 * xdim3_flux_calc_kernelz * ydim3_flux_calc_kernelz; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { flux_calc_kernelz_gpu(arg0, arg1, arg2, arg3); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_flux_calc_kernelz(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { #else void ops_par_loop_flux_calc_kernelz_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; #endif //Timing double t1,t2,c1,c2; ops_arg args[4] = { arg0, arg1, arg2, arg3}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,4,range,107)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(107,"flux_calc_kernelz"); OPS_kernels[107].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = 
sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; if (xdim0 != xdim0_flux_calc_kernelz_h || ydim0 != ydim0_flux_calc_kernelz_h || xdim1 != xdim1_flux_calc_kernelz_h || ydim1 != ydim1_flux_calc_kernelz_h || xdim2 != xdim2_flux_calc_kernelz_h || ydim2 != ydim2_flux_calc_kernelz_h || xdim3 != xdim3_flux_calc_kernelz_h || ydim3 != ydim3_flux_calc_kernelz_h) { cudaMemcpyToSymbol( xdim0_flux_calc_kernelz, &xdim0, sizeof(int) ); xdim0_flux_calc_kernelz_h = xdim0; cudaMemcpyToSymbol( ydim0_flux_calc_kernelz, &ydim0, sizeof(int) ); ydim0_flux_calc_kernelz_h = ydim0; cudaMemcpyToSymbol( xdim1_flux_calc_kernelz, &xdim1, sizeof(int) ); xdim1_flux_calc_kernelz_h = xdim1; cudaMemcpyToSymbol( ydim1_flux_calc_kernelz, &ydim1, sizeof(int) ); ydim1_flux_calc_kernelz_h = ydim1; cudaMemcpyToSymbol( xdim2_flux_calc_kernelz, &xdim2, sizeof(int) ); xdim2_flux_calc_kernelz_h = xdim2; cudaMemcpyToSymbol( ydim2_flux_calc_kernelz, &ydim2, sizeof(int) ); ydim2_flux_calc_kernelz_h = ydim2; cudaMemcpyToSymbol( xdim3_flux_calc_kernelz, &xdim3, sizeof(int) ); xdim3_flux_calc_kernelz_h = xdim3; cudaMemcpyToSymbol( ydim3_flux_calc_kernelz, &ydim3, sizeof(int) ); ydim3_flux_calc_kernelz_h = ydim3; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? 
args[3].dat->type_size : args[3].dat->elem_size); char *p_a[4]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 4); ops_halo_exchanges(args,4,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[107].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_flux_calc_kernelz<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3],x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[107].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 4); ops_set_halo_dirtybit3(&args[0],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[107].mpi_time += t2-t1; OPS_kernels[107].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[107].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[107].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[107].transfer += ops_compute_transfer(dim, start, end, &arg3); } } #ifdef OPS_LAZY void ops_par_loop_flux_calc_kernelz(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 107; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 107; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 4; desc->args = (ops_arg*)malloc(4*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->function = ops_par_loop_flux_calc_kernelz_execute; if (OPS_diags > 1) { 
ops_timing_realloc(107,"flux_calc_kernelz"); } ops_enqueue_kernel(desc); } #endif
1e226a916c06226e86fd9f0a28a8d99377dc2e0c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_advec_cell_kernel2_ydir; int xdim0_advec_cell_kernel2_ydir_h = -1; __constant__ int ydim0_advec_cell_kernel2_ydir; int ydim0_advec_cell_kernel2_ydir_h = -1; __constant__ int xdim1_advec_cell_kernel2_ydir; int xdim1_advec_cell_kernel2_ydir_h = -1; __constant__ int ydim1_advec_cell_kernel2_ydir; int ydim1_advec_cell_kernel2_ydir_h = -1; __constant__ int xdim2_advec_cell_kernel2_ydir; int xdim2_advec_cell_kernel2_ydir_h = -1; __constant__ int ydim2_advec_cell_kernel2_ydir; int ydim2_advec_cell_kernel2_ydir_h = -1; __constant__ int xdim3_advec_cell_kernel2_ydir; int xdim3_advec_cell_kernel2_ydir_h = -1; __constant__ int ydim3_advec_cell_kernel2_ydir; int ydim3_advec_cell_kernel2_ydir_h = -1; __constant__ int xdim4_advec_cell_kernel2_ydir; int xdim4_advec_cell_kernel2_ydir_h = -1; __constant__ int ydim4_advec_cell_kernel2_ydir; int ydim4_advec_cell_kernel2_ydir_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #define OPS_ACC0(x,y,z) (x+xdim0_advec_cell_kernel2_ydir*(y)+xdim0_advec_cell_kernel2_ydir*ydim0_advec_cell_kernel2_ydir*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_advec_cell_kernel2_ydir*(y)+xdim1_advec_cell_kernel2_ydir*ydim1_advec_cell_kernel2_ydir*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_advec_cell_kernel2_ydir*(y)+xdim2_advec_cell_kernel2_ydir*ydim2_advec_cell_kernel2_ydir*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_advec_cell_kernel2_ydir*(y)+xdim3_advec_cell_kernel2_ydir*ydim3_advec_cell_kernel2_ydir*(z)) #define OPS_ACC4(x,y,z) (x+xdim4_advec_cell_kernel2_ydir*(y)+xdim4_advec_cell_kernel2_ydir*ydim4_advec_cell_kernel2_ydir*(z)) //user function __device__ inline void advec_cell_kernel2_ydir_gpu( double *pre_vol, double *post_vol, const double *volume, const double *vol_flux_y, const double *vol_flux_x) { pre_vol[OPS_ACC0(0,0,0)] = volume[OPS_ACC2(0,0,0)] + vol_flux_y[OPS_ACC3(0,1,0)] - vol_flux_y[OPS_ACC3(0,0,0)] + vol_flux_x[OPS_ACC4(1,0,0)] - vol_flux_x[OPS_ACC4(0,0,0)]; post_vol[OPS_ACC1(0,0,0)]= pre_vol[OPS_ACC0(0,0,0)]-(vol_flux_y[OPS_ACC3(0,1,0)] - vol_flux_y[OPS_ACC3(0,0,0)]); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 __global__ void ops_advec_cell_kernel2_ydir( double* __restrict arg0, double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, const double* __restrict arg4, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_advec_cell_kernel2_ydir + idx_z * 1*1 * xdim0_advec_cell_kernel2_ydir * ydim0_advec_cell_kernel2_ydir; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_advec_cell_kernel2_ydir + idx_z * 1*1 * xdim1_advec_cell_kernel2_ydir * ydim1_advec_cell_kernel2_ydir; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_advec_cell_kernel2_ydir + idx_z * 1*1 * xdim2_advec_cell_kernel2_ydir * ydim2_advec_cell_kernel2_ydir; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_advec_cell_kernel2_ydir + idx_z * 1*1 * xdim3_advec_cell_kernel2_ydir * ydim3_advec_cell_kernel2_ydir; arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_advec_cell_kernel2_ydir + idx_z * 1*1 * xdim4_advec_cell_kernel2_ydir * ydim4_advec_cell_kernel2_ydir; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_cell_kernel2_ydir_gpu(arg0, arg1, arg2, arg3, arg4); } } // host stub function #ifndef OPS_LAZY void 
ops_par_loop_advec_cell_kernel2_ydir(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { #else void ops_par_loop_advec_cell_kernel2_ydir_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; #endif //Timing double t1,t2,c1,c2; ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,5,range,114)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(114,"advec_cell_kernel2_ydir"); OPS_kernels[114].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; if (xdim0 != xdim0_advec_cell_kernel2_ydir_h || ydim0 != ydim0_advec_cell_kernel2_ydir_h || xdim1 != xdim1_advec_cell_kernel2_ydir_h || ydim1 != ydim1_advec_cell_kernel2_ydir_h || xdim2 != xdim2_advec_cell_kernel2_ydir_h || ydim2 != ydim2_advec_cell_kernel2_ydir_h || xdim3 != xdim3_advec_cell_kernel2_ydir_h || ydim3 != ydim3_advec_cell_kernel2_ydir_h || xdim4 != xdim4_advec_cell_kernel2_ydir_h || ydim4 != ydim4_advec_cell_kernel2_ydir_h) { hipMemcpyToSymbol( xdim0_advec_cell_kernel2_ydir, &xdim0, sizeof(int) ); xdim0_advec_cell_kernel2_ydir_h = xdim0; hipMemcpyToSymbol( ydim0_advec_cell_kernel2_ydir, &ydim0, sizeof(int) ); ydim0_advec_cell_kernel2_ydir_h = ydim0; hipMemcpyToSymbol( xdim1_advec_cell_kernel2_ydir, &xdim1, sizeof(int) ); xdim1_advec_cell_kernel2_ydir_h = xdim1; hipMemcpyToSymbol( ydim1_advec_cell_kernel2_ydir, &ydim1, sizeof(int) ); ydim1_advec_cell_kernel2_ydir_h = ydim1; hipMemcpyToSymbol( xdim2_advec_cell_kernel2_ydir, &xdim2, sizeof(int) ); xdim2_advec_cell_kernel2_ydir_h = xdim2; hipMemcpyToSymbol( ydim2_advec_cell_kernel2_ydir, &ydim2, sizeof(int) ); ydim2_advec_cell_kernel2_ydir_h = ydim2; hipMemcpyToSymbol( xdim3_advec_cell_kernel2_ydir, &xdim3, sizeof(int) ); xdim3_advec_cell_kernel2_ydir_h = xdim3; hipMemcpyToSymbol( ydim3_advec_cell_kernel2_ydir, &ydim3, sizeof(int) ); ydim3_advec_cell_kernel2_ydir_h = ydim3; hipMemcpyToSymbol( xdim4_advec_cell_kernel2_ydir, &xdim4, sizeof(int) ); xdim4_advec_cell_kernel2_ydir_h = xdim4; hipMemcpyToSymbol( ydim4_advec_cell_kernel2_ydir, &ydim4, sizeof(int) 
); ydim4_advec_cell_kernel2_ydir_h = ydim4; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); char *p_a[5]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 5); ops_halo_exchanges(args,5,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[114].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) hipLaunchKernelGGL(( ops_advec_cell_kernel2_ydir), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4],x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[114].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 5); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[114].mpi_time += t2-t1; OPS_kernels[114].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[114].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[114].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[114].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[114].transfer += ops_compute_transfer(dim, start, end, &arg4); } } #ifdef OPS_LAZY void 
ops_par_loop_advec_cell_kernel2_ydir(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 114; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 114; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 5; desc->args = (ops_arg*)malloc(5*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->function = ops_par_loop_advec_cell_kernel2_ydir_execute; if (OPS_diags > 1) { ops_timing_realloc(114,"advec_cell_kernel2_ydir"); } ops_enqueue_kernel(desc); } #endif
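The host stub above keeps a _h-suffixed shadow of every __constant__ extent and only re-issues hipMemcpyToSymbol when the incoming dat size differs from the cached value, so an unchanged mesh costs no host-to-device traffic per loop invocation. The stripped-down sketch below shows that idiom; the names are invented, and the two extents are packed into one small array here (the generated code uses one scalar __constant__ int per extent).

// Illustrative sketch of the cached __constant__ upload idiom used in the host stub above.
#include "hip/hip_runtime.h"

__constant__ int dims_example[2];          // device-side padded extents (xdim, ydim)
static int dims_example_h[2] = { -1, -1 }; // host-side shadows, -1 meaning "never uploaded"

static void set_dims_if_changed(int xdim, int ydim) {
  if (xdim != dims_example_h[0] || ydim != dims_example_h[1]) {  // upload only on change
    int dims[2] = { xdim, ydim };
    hipMemcpyToSymbol(dims_example, dims, sizeof(dims));
    dims_example_h[0] = xdim;
    dims_example_h[1] = ydim;
  }
}

int main() {
  set_dims_if_changed(128, 64);   // first call uploads
  set_dims_if_changed(128, 64);   // identical extents: no transfer
  set_dims_if_changed(256, 64);   // changed extent: uploads again
  return 0;
}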
1e226a916c06226e86fd9f0a28a8d99377dc2e0c.cu
// // auto-generated by ops.py // __constant__ int xdim0_advec_cell_kernel2_ydir; int xdim0_advec_cell_kernel2_ydir_h = -1; __constant__ int ydim0_advec_cell_kernel2_ydir; int ydim0_advec_cell_kernel2_ydir_h = -1; __constant__ int xdim1_advec_cell_kernel2_ydir; int xdim1_advec_cell_kernel2_ydir_h = -1; __constant__ int ydim1_advec_cell_kernel2_ydir; int ydim1_advec_cell_kernel2_ydir_h = -1; __constant__ int xdim2_advec_cell_kernel2_ydir; int xdim2_advec_cell_kernel2_ydir_h = -1; __constant__ int ydim2_advec_cell_kernel2_ydir; int ydim2_advec_cell_kernel2_ydir_h = -1; __constant__ int xdim3_advec_cell_kernel2_ydir; int xdim3_advec_cell_kernel2_ydir_h = -1; __constant__ int ydim3_advec_cell_kernel2_ydir; int ydim3_advec_cell_kernel2_ydir_h = -1; __constant__ int xdim4_advec_cell_kernel2_ydir; int xdim4_advec_cell_kernel2_ydir_h = -1; __constant__ int ydim4_advec_cell_kernel2_ydir; int ydim4_advec_cell_kernel2_ydir_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #define OPS_ACC0(x,y,z) (x+xdim0_advec_cell_kernel2_ydir*(y)+xdim0_advec_cell_kernel2_ydir*ydim0_advec_cell_kernel2_ydir*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_advec_cell_kernel2_ydir*(y)+xdim1_advec_cell_kernel2_ydir*ydim1_advec_cell_kernel2_ydir*(z)) #define OPS_ACC2(x,y,z) (x+xdim2_advec_cell_kernel2_ydir*(y)+xdim2_advec_cell_kernel2_ydir*ydim2_advec_cell_kernel2_ydir*(z)) #define OPS_ACC3(x,y,z) (x+xdim3_advec_cell_kernel2_ydir*(y)+xdim3_advec_cell_kernel2_ydir*ydim3_advec_cell_kernel2_ydir*(z)) #define OPS_ACC4(x,y,z) (x+xdim4_advec_cell_kernel2_ydir*(y)+xdim4_advec_cell_kernel2_ydir*ydim4_advec_cell_kernel2_ydir*(z)) //user function __device__ inline void advec_cell_kernel2_ydir_gpu( double *pre_vol, double *post_vol, const double *volume, const double *vol_flux_y, const double *vol_flux_x) { pre_vol[OPS_ACC0(0,0,0)] = volume[OPS_ACC2(0,0,0)] + vol_flux_y[OPS_ACC3(0,1,0)] - vol_flux_y[OPS_ACC3(0,0,0)] + vol_flux_x[OPS_ACC4(1,0,0)] - vol_flux_x[OPS_ACC4(0,0,0)]; post_vol[OPS_ACC1(0,0,0)]= pre_vol[OPS_ACC0(0,0,0)]-(vol_flux_y[OPS_ACC3(0,1,0)] - vol_flux_y[OPS_ACC3(0,0,0)]); } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 __global__ void ops_advec_cell_kernel2_ydir( double* __restrict arg0, double* __restrict arg1, const double* __restrict arg2, const double* __restrict arg3, const double* __restrict arg4, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1*1 + idx_y * 1*1 * xdim0_advec_cell_kernel2_ydir + idx_z * 1*1 * xdim0_advec_cell_kernel2_ydir * ydim0_advec_cell_kernel2_ydir; arg1 += idx_x * 1*1 + idx_y * 1*1 * xdim1_advec_cell_kernel2_ydir + idx_z * 1*1 * xdim1_advec_cell_kernel2_ydir * ydim1_advec_cell_kernel2_ydir; arg2 += idx_x * 1*1 + idx_y * 1*1 * xdim2_advec_cell_kernel2_ydir + idx_z * 1*1 * xdim2_advec_cell_kernel2_ydir * ydim2_advec_cell_kernel2_ydir; arg3 += idx_x * 1*1 + idx_y * 1*1 * xdim3_advec_cell_kernel2_ydir + idx_z * 1*1 * xdim3_advec_cell_kernel2_ydir * ydim3_advec_cell_kernel2_ydir; arg4 += idx_x * 1*1 + idx_y * 1*1 * xdim4_advec_cell_kernel2_ydir + idx_z * 1*1 * xdim4_advec_cell_kernel2_ydir * ydim4_advec_cell_kernel2_ydir; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_cell_kernel2_ydir_gpu(arg0, arg1, arg2, arg3, arg4); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_cell_kernel2_ydir(char const *name, ops_block block, int dim, int* range, ops_arg 
arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { #else void ops_par_loop_advec_cell_kernel2_ydir_execute(ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; #endif //Timing double t1,t2,c1,c2; ops_arg args[5] = { arg0, arg1, arg2, arg3, arg4}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args,5,range,114)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(114,"advec_cell_kernel2_ydir"); OPS_kernels[114].count++; ops_timers_core(&c1,&t1); } //compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; if (xdim0 != xdim0_advec_cell_kernel2_ydir_h || ydim0 != ydim0_advec_cell_kernel2_ydir_h || xdim1 != xdim1_advec_cell_kernel2_ydir_h || ydim1 != ydim1_advec_cell_kernel2_ydir_h || xdim2 != xdim2_advec_cell_kernel2_ydir_h || ydim2 != ydim2_advec_cell_kernel2_ydir_h || xdim3 != xdim3_advec_cell_kernel2_ydir_h || ydim3 != ydim3_advec_cell_kernel2_ydir_h || xdim4 != xdim4_advec_cell_kernel2_ydir_h || ydim4 != ydim4_advec_cell_kernel2_ydir_h) { cudaMemcpyToSymbol( xdim0_advec_cell_kernel2_ydir, &xdim0, sizeof(int) ); xdim0_advec_cell_kernel2_ydir_h = xdim0; cudaMemcpyToSymbol( ydim0_advec_cell_kernel2_ydir, &ydim0, sizeof(int) ); ydim0_advec_cell_kernel2_ydir_h = ydim0; cudaMemcpyToSymbol( xdim1_advec_cell_kernel2_ydir, &xdim1, sizeof(int) ); xdim1_advec_cell_kernel2_ydir_h = xdim1; cudaMemcpyToSymbol( ydim1_advec_cell_kernel2_ydir, &ydim1, sizeof(int) ); ydim1_advec_cell_kernel2_ydir_h = ydim1; cudaMemcpyToSymbol( xdim2_advec_cell_kernel2_ydir, &xdim2, sizeof(int) ); xdim2_advec_cell_kernel2_ydir_h = xdim2; cudaMemcpyToSymbol( ydim2_advec_cell_kernel2_ydir, &ydim2, sizeof(int) ); ydim2_advec_cell_kernel2_ydir_h = ydim2; cudaMemcpyToSymbol( xdim3_advec_cell_kernel2_ydir, &xdim3, sizeof(int) ); xdim3_advec_cell_kernel2_ydir_h = xdim3; cudaMemcpyToSymbol( ydim3_advec_cell_kernel2_ydir, &ydim3, sizeof(int) ); ydim3_advec_cell_kernel2_ydir_h = ydim3; cudaMemcpyToSymbol( xdim4_advec_cell_kernel2_ydir, &xdim4, sizeof(int) ); xdim4_advec_cell_kernel2_ydir_h = xdim4; cudaMemcpyToSymbol( ydim4_advec_cell_kernel2_ydir, &ydim4, sizeof(int) ); ydim4_advec_cell_kernel2_ydir_h = ydim4; } dim3 grid( (x_size-1)/OPS_block_size_x+ 1, 
(y_size-1)/OPS_block_size_y + 1, (z_size-1)/OPS_block_size_z +1); dim3 tblock(OPS_block_size_x,OPS_block_size_y,OPS_block_size_z); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size); char *p_a[5]; //set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2+ dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2+ dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3+ dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3+ dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4+ dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4+ dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 5); ops_halo_exchanges(args,5,range); #endif if (OPS_diags > 1) { ops_timers_core(&c2,&t2); OPS_kernels[114].mpi_time += t2-t1; } //call kernel wrapper function, passing in pointers to data if (x_size > 0 && y_size > 0 && z_size > 0) ops_advec_cell_kernel2_ydir<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4],x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1,&t1); OPS_kernels[114].time += t1-t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 5); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); #endif if (OPS_diags > 1) { //Update kernel record ops_timers_core(&c2,&t2); OPS_kernels[114].mpi_time += t2-t1; OPS_kernels[114].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[114].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[114].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[114].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[114].transfer += ops_compute_transfer(dim, start, end, &arg4); } } #ifdef OPS_LAZY void ops_par_loop_advec_cell_kernel2_ydir(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg 
arg3, ops_arg arg4) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 114; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 114; for ( int i=0; i<6; i++ ){ desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } desc->nargs = 5; desc->args = (ops_arg*)malloc(5*sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->function = ops_par_loop_advec_cell_kernel2_ydir_execute; if (OPS_diags > 1) { ops_timing_realloc(114,"advec_cell_kernel2_ydir"); } ops_enqueue_kernel(desc); } #endif
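Both host stubs above size the grid as (size-1)/OPS_block_size + 1, which is integer ceiling division; over-provisioned threads are masked off by the idx < size guard inside the kernel, and the launch itself is skipped unless x_size, y_size and z_size are all positive. A tiny self-check of that arithmetic (the helper name is invented):

// Host-only sketch of the ceiling-division grid sizing used in the host stubs above.
#include <cassert>

static int ceil_div(int size, int block) {
  return (size - 1) / block + 1;       // same expression as the dim3 grid(...) above
}

int main() {
  assert(ceil_div(32, 32) == 1);       // exact multiple: no spare block
  assert(ceil_div(33, 32) == 2);       // one element over: one extra block
  assert(ceil_div(1, 32) == 1);
  // For size == 0 the expression still yields 1, which is why the host stub only launches
  // when x_size > 0 && y_size > 0 && z_size > 0; overhanging threads in the last block
  // fail the in-kernel bounds check and do nothing.
  assert(ceil_div(0, 32) == 1);
  return 0;
}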
6a7de86a6640bc88ddee9169186b6d5769334824.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #ifndef vector #define vector 1 #endif #if (vector==1) #define floatvector float #elif (vector == 2) #define floatvector float2 #elif (vector == 4) #define floatvector float4 #endif #if use_shuffle == 1 #define stop_loop 16 #elif use_shuffle == 0 #define stop_loop 0 #endif __global__ void sum_floats(float *sum_global, floatvector *array, int n) { int ti = threadIdx.x; int x = blockIdx.x * block_size_x + threadIdx.x; int step_size = num_blocks * block_size_x; float sum = 0.0f; //cooperatively iterate over input array with all thread blocks for (int i=x; i<n/vector; i+=step_size) { floatvector v = array[i]; #if vector == 1 sum += v; #elif vector == 2 sum += v.x + v.y; #elif vector == 4 sum += v.x + v.y + v.z + v.w; #endif } //reduce sum to single value (or last 32 in case of use_shuffle) __shared__ float sh_mem[block_size_x]; sh_mem[ti] = sum; __syncthreads(); #pragma unroll for (unsigned int s=block_size_x/2; s>stop_loop; s>>=1) { if (ti < s) { sh_mem[ti] += sh_mem[ti + s]; } __syncthreads(); } //reduce last 32 values to single value using warp shuffle instructions #if use_shuffle == 1 if (ti < 32) { sum = sh_mem[ti]; #pragma unroll for (unsigned int s=16; s>0; s>>=1) { sum += __shfl_down(sum, s); } } #else sum = sh_mem[0]; #endif //write back one value per thread block, run kernel again with one thread block if (ti == 0) { sum_global[blockIdx.x] = sum; } }
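block_size_x, num_blocks, vector and use_shuffle are used in the kernel above but never declared in the file, so they are evidently injected as preprocessor definitions at build time (for instance by an auto-tuning harness passing -D flags). The fallback defines below are an assumption added purely to show what the kernel expects; the concrete values are not taken from the original build.

// Hypothetical build-time parameters for sum_floats, e.g.
//   hipcc -Dblock_size_x=256 -Dnum_blocks=1024 -Dvector=1 -Duse_shuffle=1 -c sum_floats.hip
#ifndef block_size_x
#define block_size_x 256    // threads per block; also sizes the __shared__ sh_mem tile
#endif
#ifndef num_blocks
#define num_blocks 1024     // grid size of the first reduction pass
#endif
#ifndef use_shuffle
#define use_shuffle 1       // 1: finish the last 32 values with warp shuffles
#endif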
6a7de86a6640bc88ddee9169186b6d5769334824.cu
#include <stdio.h> #ifndef vector #define vector 1 #endif #if (vector==1) #define floatvector float #elif (vector == 2) #define floatvector float2 #elif (vector == 4) #define floatvector float4 #endif #if use_shuffle == 1 #define stop_loop 16 #elif use_shuffle == 0 #define stop_loop 0 #endif __global__ void sum_floats(float *sum_global, floatvector *array, int n) { int ti = threadIdx.x; int x = blockIdx.x * block_size_x + threadIdx.x; int step_size = num_blocks * block_size_x; float sum = 0.0f; //cooperatively iterate over input array with all thread blocks for (int i=x; i<n/vector; i+=step_size) { floatvector v = array[i]; #if vector == 1 sum += v; #elif vector == 2 sum += v.x + v.y; #elif vector == 4 sum += v.x + v.y + v.z + v.w; #endif } //reduce sum to single value (or last 32 in case of use_shuffle) __shared__ float sh_mem[block_size_x]; sh_mem[ti] = sum; __syncthreads(); #pragma unroll for (unsigned int s=block_size_x/2; s>stop_loop; s>>=1) { if (ti < s) { sh_mem[ti] += sh_mem[ti + s]; } __syncthreads(); } //reduce last 32 values to single value using warp shuffle instructions #if use_shuffle == 1 if (ti < 32) { sum = sh_mem[ti]; #pragma unroll for (unsigned int s=16; s>0; s>>=1) { sum += __shfl_down(sum, s); } } #else sum = sh_mem[0]; #endif //write back one value per thread block, run kernel again with one thread block if (ti == 0) { sum_global[blockIdx.x] = sum; } }
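The __shfl_down(sum, s) intrinsic used in both versions above is the pre-CUDA-9 warp shuffle; since CUDA 9 the unmasked form has been deprecated in favour of the masked __shfl_down_sync. A minimal sketch of the same last-warp reduction written with the newer intrinsic follows; it is illustrative only, not a drop-in change to the kernel above.

// Illustrative CUDA 9+ equivalent of the final warp reduction in sum_floats.
__device__ float warp_sum_32(float v) {
  // Reduce 32 lanes to lane 0; the 0xffffffff mask says every lane of the warp participates.
  for (unsigned int s = 16; s > 0; s >>= 1)
    v += __shfl_down_sync(0xffffffffu, v, s);
  return v;
}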
d7ee7fb79c20e2fba49d355ff1c8ef6218960dbc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" #define _USE_MATH_DEFINES static void CheckCudaErrorAux(const char *, unsigned, const char *, hipError_t); #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) /** * Check the return value of the CUDA runtime API call and exit * the application if the call has failed. */ __global__ void convolutionNoTiling(float *I, float *P, int channels, int width, int height) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int depth = threadIdx.z; if (col < width && row < height && depth < channels) { // Evaluate convolution float pValue = 0; int startRow = row - maskRowsRadius; int startCol = col - maskColumnsRadius; for (int i = 0; i < maskRows; i++) { for (int j = 0; j < maskColumns; j++) { int currentRow = startRow + i; int currentCol = startCol + j; float iValue; // Check for ghost elements if (currentRow >= 0 && currentRow < height && currentCol >= 0 && currentCol < width) { iValue = I[(currentRow * width + currentCol) * channels + depth]; } else { iValue = 0.0f; } pValue += iValue * deviceMaskData[i * maskRows + j]; } } // Save the result from the register to global memory P[(row * width + col) * channels + depth] = pValue; } }
d7ee7fb79c20e2fba49d355ff1c8ef6218960dbc.cu
#include "includes.h" #define _USE_MATH_DEFINES static void CheckCudaErrorAux(const char *, unsigned, const char *, cudaError_t); #define CUDA_CHECK_RETURN(value) CheckCudaErrorAux(__FILE__,__LINE__, #value, value) /** * Check the return value of the CUDA runtime API call and exit * the application if the call has failed. */ __global__ void convolutionNoTiling(float *I, float *P, int channels, int width, int height) { int col = blockIdx.x * blockDim.x + threadIdx.x; int row = blockIdx.y * blockDim.y + threadIdx.y; int depth = threadIdx.z; if (col < width && row < height && depth < channels) { // Evaluate convolution float pValue = 0; int startRow = row - maskRowsRadius; int startCol = col - maskColumnsRadius; for (int i = 0; i < maskRows; i++) { for (int j = 0; j < maskColumns; j++) { int currentRow = startRow + i; int currentCol = startCol + j; float iValue; // Check for ghost elements if (currentRow >= 0 && currentRow < height && currentCol >= 0 && currentCol < width) { iValue = I[(currentRow * width + currentCol) * channels + depth]; } else { iValue = 0.0f; } pValue += iValue * deviceMaskData[i * maskRows + j]; } } // Salva il risultato dal registro alla global P[(row * width + col) * channels + depth] = pValue; } }
af4cf01795e9d19e58e6eeb66a219faa9bed9b05.hip
// !!! This is a file automatically generated by hipify!!! #include "layer.h" #include "net.h" #include <string.h> static void setup_frcnn_7_1_1(Net* const net) { const char* names[] = { // PVANET: 18 layers "data", "conv1_1", "conv1_2", "conv2_1", "conv2_2", "conv3_1", "conv3_2", "conv3_3", "downsample", "conv4_1", "conv4_2", "conv4_3", "conv5_1", "conv5_2", "conv5_3", "upsample", "concat", "convf", // Multi-scale RPN: 13 layers "rpn_conv1", "rpn_cls_score1", "rpn_bbox_pred1", "rpn_conv3", "rpn_cls_score3", "rpn_bbox_pred3", "rpn_conv5", "rpn_cls_score5", "rpn_bbox_pred5", "rpn_score", "__deleted__", "rpn_bbox", "rpn_roi", // R-CNN: 10 layers "rcnn_roipool", "__deleted__", "fc6_L", "fc6_U", "fc7_L", "fc7_U", "cls_score", "cls_pred", "bbox_pred", "out" }; net->num_layers = 41; for (int i = 0; i < net->num_layers; ++i) { net->layers[i] = (Layer*)malloc(sizeof(Layer)); init_layer(net->layers[i]); strcpy(net->layers[i]->name, names[i]); } net->img_info = (Tensor*)malloc(sizeof(Tensor)); real anchor_scales[9] = { 3.0f, 6.0f, 9.0f, 4.0f, 8.0f, 16.0f, 7.0f, 13.0f, 32.0f }; real anchor_ratios[3] = { 0.5f, 1.0f, 2.0f }; memcpy(net->anchor_scales, anchor_scales, 9 * sizeof(real)); memcpy(net->anchor_ratios, anchor_ratios, 3 * sizeof(real)); net->num_layer_data = 4; { for (int i = 1; i <= 15; ++i) { net->layers[i]->option.num_groups = 1; net->layers[i]->option.kernel_h = 3; net->layers[i]->option.kernel_w = 3; net->layers[i]->option.pad_h = 1; net->layers[i]->option.pad_w = 1; net->layers[i]->option.bias = 1; net->layers[i]->option.stride_h = 2; net->layers[i]->option.stride_w = 2; net->layers[i]->option.negative_slope = 0; #ifdef GPU net->layers[i]->option.handle = (void*)&net->cublas_handle; #endif } { net->layers[8]->option.pad_h = 0; net->layers[8]->option.pad_w = 0; net->layers[8]->option.stride_h = 2; net->layers[8]->option.stride_w = 2; net->layers[15]->option.num_groups = 512; net->layers[15]->option.kernel_h = 4; net->layers[15]->option.kernel_w = 4; net->layers[15]->option.pad_h = 1; net->layers[15]->option.pad_w = 1; net->layers[15]->option.bias = 0; net->layers[15]->option.stride_h = 2; net->layers[15]->option.stride_w = 2; net->layers[2]->option.stride_h = 1; net->layers[2]->option.stride_w = 1; net->layers[4]->option.stride_h = 1; net->layers[4]->option.stride_w = 1; net->layers[6]->option.stride_h = 1; net->layers[6]->option.stride_w = 1; net->layers[7]->option.stride_h = 1; net->layers[7]->option.stride_w = 1; net->layers[10]->option.stride_h = 1; net->layers[10]->option.stride_w = 1; net->layers[11]->option.stride_h = 1; net->layers[11]->option.stride_w = 1; net->layers[13]->option.stride_h = 1; net->layers[13]->option.stride_w = 1; net->layers[14]->option.stride_h = 1; net->layers[14]->option.stride_w = 1; } for (int i = 17; i <= 26; ++i) { net->layers[i]->option.num_groups = 1; net->layers[i]->option.kernel_h = 1; net->layers[i]->option.kernel_w = 1; net->layers[i]->option.pad_h = 0; net->layers[i]->option.pad_w = 0; net->layers[i]->option.bias = 1; net->layers[i]->option.stride_h = 1; net->layers[i]->option.stride_w = 1; net->layers[i]->option.negative_slope = 0; #ifdef GPU net->layers[i]->option.handle = (void*)&net->cublas_handle; #endif } { net->layers[21]->option.kernel_h = 3; net->layers[21]->option.kernel_w = 3; net->layers[21]->option.pad_h = 1; net->layers[21]->option.pad_w = 1; net->layers[24]->option.kernel_h = 5; net->layers[24]->option.kernel_w = 5; net->layers[24]->option.pad_h = 2; net->layers[24]->option.pad_w = 2; } { net->layers[16]->option.num_concats = 3; 
net->layers[27]->option.num_concats = 3; net->layers[29]->option.num_concats = 3; } net->layers[1]->option.out_channels = 32; net->layers[2]->option.out_channels = 32; net->layers[3]->option.out_channels = 64; net->layers[4]->option.out_channels = 64; net->layers[5]->option.out_channels = 96; net->layers[6]->option.out_channels = 64; net->layers[7]->option.out_channels = 128; net->layers[9]->option.out_channels = 192; net->layers[10]->option.out_channels = 128; net->layers[11]->option.out_channels = 256; net->layers[12]->option.out_channels = 384; net->layers[13]->option.out_channels = 256; net->layers[14]->option.out_channels = 512; net->layers[15]->option.out_channels = 512; net->layers[17]->option.out_channels = 512; net->layers[18]->option.out_channels = 128; net->layers[19]->option.out_channels = 18; net->layers[20]->option.out_channels = 36; net->layers[21]->option.out_channels = 256; net->layers[22]->option.out_channels = 18; net->layers[23]->option.out_channels = 36; net->layers[24]->option.out_channels = 128; net->layers[25]->option.out_channels = 18; net->layers[26]->option.out_channels = 36; net->layers[30]->option.scales = &net->anchor_scales[0]; net->layers[30]->option.ratios = &net->anchor_ratios[0]; net->layers[30]->option.num_scales = 9; net->layers[30]->option.num_ratios = 3; net->layers[30]->option.num_concats = 1; net->layers[30]->option.base_size = 16; net->layers[30]->option.feat_stride = 16; net->layers[30]->option.min_size = 16; net->layers[30]->option.pre_nms_topn = 6000; net->layers[30]->option.post_nms_topn = 300; net->layers[30]->option.nms_thresh = 0.7f; net->layers[31]->option.pooled_height = 6; net->layers[31]->option.pooled_width = 6; net->layers[31]->option.spatial_scale = 0.0625; net->layers[31]->option.flatten = 1; for (int i = 33; i <= 39; ++i) { net->layers[i]->option.bias = 1; net->layers[i]->option.negative_slope = 0; net->layers[i]->option.threshold = 0.5f; net->layers[i]->option.test = 1; net->layers[i]->option.scaled = 0; #ifdef GPU net->layers[i]->option.handle = (void*)&net->cublas_handle; #endif } net->layers[33]->option.bias = 0; net->layers[35]->option.bias = 0; net->layers[33]->option.out_channels = 512; net->layers[34]->option.out_channels = 4096; net->layers[35]->option.out_channels = 128; net->layers[36]->option.out_channels = 4096; net->layers[37]->option.out_channels = 22; net->layers[39]->option.out_channels = 88; net->layers[40]->option.min_size = 16; net->layers[40]->option.score_thresh = 0.7f; net->layers[40]->option.nms_thresh = 0.3f; } { net->layers[0]->num_tops = 1; for (int i = 1; i <= 15; ++i) { net->layers[i]->num_bottoms = 1; net->layers[i]->num_tops = 1; net->layers[i]->num_params = 2; } net->layers[8]->num_params = 0; net->layers[15]->num_params = 1; net->layers[16]->num_bottoms = 3; net->layers[16]->num_tops = 1; for (int i = 17; i <= 26; ++i) { net->layers[i]->num_bottoms = 1; net->layers[i]->num_tops = 1; net->layers[i]->num_params = 2; } net->layers[27]->num_bottoms = 3; net->layers[27]->num_tops = 1; net->layers[29]->num_bottoms = 3; net->layers[29]->num_tops = 1; net->layers[30]->num_bottoms = 3; net->layers[30]->num_tops = 1; net->layers[30]->num_aux_data = 1; net->layers[31]->num_bottoms = 2; net->layers[31]->num_tops = 1; for (int i = 33; i <= 39; ++i) { net->layers[i]->num_bottoms = 1; net->layers[i]->num_tops = 1; net->layers[i]->num_params = 2; } net->layers[33]->num_params = 1; net->layers[35]->num_params = 1; net->layers[38]->num_bottoms = 2; net->layers[38]->num_params = 0; net->layers[39]->num_bottoms = 2; 
net->layers[40]->num_bottoms = 4; net->layers[40]->num_tops = 1; } for (int i = 0; i < net->num_layers; ++i) { net->space_cpu += malloc_layer(net->layers[i]); } { Tensor* input = &net->layers[0]->tops[0]; input->num_items = 1; input->ndim = 3; for (int n = 0; n < input->num_items; ++n) { input->shape[n][0] = 3; input->shape[n][1] = 640; input->shape[n][2] = 1024; input->start[n] = n * 3 * 640 * 1024; } } { net->layers[8]->allocate_top_data[0] = 1; net->layers[19]->allocate_top_data[0] = 1; net->layers[20]->allocate_top_data[0] = 1; net->layers[22]->allocate_top_data[0] = 1; net->layers[23]->allocate_top_data[0] = 1; net->layers[25]->allocate_top_data[0] = 1; net->layers[26]->allocate_top_data[0] = 1; net->layers[30]->allocate_top_data[0] = 1; } } static void connect_frcnn_7_1_1(Net* const net) { // PVANET { // 1_1, 1_2, 2_1, 2_2, 3_1, 3_2, 3_3 for (int i = 1; i <= 7; ++i) { net->layers[i]->p_bottoms[0] = &net->layers[i - 1]->tops[0]; net->layers[i]->f_forward[0] = forward_conv_layer; net->layers[i]->f_forward[1] = forward_inplace_relu_layer; net->layers[i]->f_shape[0] = shape_conv_layer; } // downsample net->layers[8]->p_bottoms[0] = &net->layers[7]->tops[0]; net->layers[8]->f_forward[0] = forward_pool_layer; net->layers[8]->f_shape[0] = shape_pool_layer; // 4_1, 4_2, 4_3, 5_1, 5_2, 5_3 for (int i = 9; i <= 14; ++i) { net->layers[i]->p_bottoms[0] = &net->layers[i - 1]->tops[0]; net->layers[i]->f_forward[0] = forward_conv_layer; net->layers[i]->f_forward[1] = forward_inplace_relu_layer; net->layers[i]->f_shape[0] = shape_conv_layer; } net->layers[9]->p_bottoms[0] = &net->layers[7]->tops[0]; // upsample net->layers[15]->p_bottoms[0] = &net->layers[14]->tops[0]; net->layers[15]->f_forward[0] = forward_deconv_layer; net->layers[15]->f_shape[0] = shape_deconv_layer; // concat net->layers[16]->p_bottoms[0] = &net->layers[8]->tops[0]; net->layers[16]->p_bottoms[1] = &net->layers[11]->tops[0]; net->layers[16]->p_bottoms[2] = &net->layers[15]->tops[0]; net->layers[16]->f_forward[0] = forward_concat_layer; net->layers[16]->f_shape[0] = shape_concat_layer; // convf net->layers[17]->p_bottoms[0] = &net->layers[16]->tops[0]; net->layers[17]->f_forward[0] = forward_conv_layer; net->layers[17]->f_forward[1] = forward_inplace_relu_layer; net->layers[17]->f_shape[0] = shape_conv_layer; } // Multi-scale RPN { // rpn_1, 3, 5 for (int i = 18; i <= 26; i += 3) { // rpn_conv1, 3, 5 net->layers[i]->p_bottoms[0] = &net->layers[17]->tops[0]; net->layers[i]->f_forward[0] = forward_conv_layer; net->layers[i]->f_forward[1] = forward_inplace_relu_layer; net->layers[i]->f_shape[0] = shape_conv_layer; // rpn_cls_score1, 3, 5 net->layers[i + 1]->p_bottoms[0] = &net->layers[i]->tops[0]; net->layers[i + 1]->f_forward[0] = forward_conv_layer; net->layers[i + 1]->f_shape[0] = shape_conv_layer; // rpn_bbox_pred1, 3, 5 net->layers[i + 2]->p_bottoms[0] = &net->layers[i]->tops[0]; net->layers[i + 2]->f_forward[0] = forward_conv_layer; net->layers[i + 2]->f_shape[0] = shape_conv_layer; } // rpn_score net->layers[27]->p_bottoms[0] = &net->layers[19]->tops[0]; net->layers[27]->p_bottoms[1] = &net->layers[22]->tops[0]; net->layers[27]->p_bottoms[2] = &net->layers[25]->tops[0]; net->layers[27]->f_forward[0] = forward_concat_layer; net->layers[27]->f_forward[1] = forward_rpn_pred_layer; net->layers[27]->f_shape[0] = shape_concat_layer; net->layers[27]->f_shape[1] = shape_rpn_pred_layer; // rpn_bbox net->layers[29]->p_bottoms[0] = &net->layers[20]->tops[0]; net->layers[29]->p_bottoms[1] = &net->layers[23]->tops[0]; 
net->layers[29]->p_bottoms[2] = &net->layers[26]->tops[0]; net->layers[29]->f_forward[0] = forward_concat_layer; net->layers[29]->f_forward[1] = forward_rpn_bbox_layer; net->layers[29]->f_shape[0] = shape_concat_layer; net->layers[29]->f_shape[1] = shape_rpn_bbox_layer; // proposal net->layers[30]->p_bottoms[0] = &net->layers[27]->tops[0]; net->layers[30]->p_bottoms[1] = &net->layers[29]->tops[0]; net->layers[30]->p_bottoms[2] = net->img_info; net->layers[30]->f_forward[0] = forward_proposal_layer; net->layers[30]->f_shape[0] = shape_proposal_layer; net->layers[30]->f_init[0] = init_proposal_layer; } // R-CNN { // roipool net->layers[31]->p_bottoms[0] = &net->layers[17]->tops[0]; net->layers[31]->p_bottoms[1] = &net->layers[30]->tops[0]; net->layers[31]->f_forward[0] = forward_roipool_layer; net->layers[31]->f_shape[0] = shape_roipool_layer; // fc6_L, 6_U, 7_L, 7_U for (int i = 33; i <= 36; i += 2) { net->layers[i]->p_bottoms[0] = &net->layers[i - 1]->tops[0]; net->layers[i]->f_forward[0] = forward_fc_layer; net->layers[i]->f_shape[0] = shape_fc_layer; net->layers[i + 1]->p_bottoms[0] = &net->layers[i]->tops[0]; net->layers[i + 1]->f_forward[0] = forward_fc_layer; net->layers[i + 1]->f_forward[1] = forward_inplace_relu_layer; net->layers[i + 1]->f_forward[2] = forward_inplace_dropout_layer; net->layers[i + 1]->f_shape[0] = shape_fc_layer; } net->layers[33]->p_bottoms[0] = &net->layers[31]->tops[0]; // score net->layers[37]->p_bottoms[0] = &net->layers[36]->tops[0]; net->layers[37]->f_forward[0] = forward_fc_layer; net->layers[37]->f_shape[0] = shape_fc_layer; // pred net->layers[38]->p_bottoms[0] = &net->layers[37]->tops[0]; net->layers[38]->p_bottoms[1] = &net->layers[30]->tops[0]; net->layers[38]->f_forward[0] = forward_rcnn_pred_layer; //net->layers[38]->f_forward[1] = save_layer_tops; net->layers[38]->f_shape[0] = shape_rcnn_pred_layer; // bbox net->layers[39]->p_bottoms[0] = &net->layers[36]->tops[0]; net->layers[39]->p_bottoms[1] = &net->layers[30]->tops[0]; net->layers[39]->f_forward[0] = forward_fc_layer; net->layers[39]->f_forward[1] = forward_rcnn_bbox_layer; //net->layers[39]->f_forward[2] = save_layer_tops; net->layers[39]->f_shape[0] = shape_fc_layer; net->layers[39]->f_shape[1] = shape_rcnn_bbox_layer; // out net->layers[40]->p_bottoms[0] = &net->layers[38]->tops[0]; net->layers[40]->p_bottoms[1] = &net->layers[39]->tops[0]; net->layers[40]->p_bottoms[2] = &net->layers[30]->tops[0]; net->layers[40]->p_bottoms[3] = net->img_info; net->layers[40]->f_forward[0] = forward_odout_layer; net->layers[40]->f_shape[0] = shape_odout_layer; } } void construct_frcnn_7_1_1(Net* net, int gpu_id) { long int space_cpu = 0; #ifdef GPU hipSetDevice(gpu_id); #endif setup_frcnn_7_1_1(net); connect_frcnn_7_1_1(net); shape_net(net); malloc_net(net); net->space_cpu += space_cpu; { for (int i = 0; i < net->num_layers; ++i) { for (int j = 0; j < net->layers[i]->num_tops; ++j) { if (!net->layers[i]->allocate_top_data[j]) { net->layers[i]->tops[j].data = net->layer_data[j]; } } } net->layers[1]->tops[0].data = net->layer_data[1]; net->layers[3]->tops[0].data = net->layer_data[1]; net->layers[5]->tops[0].data = net->layer_data[1]; net->layers[7]->tops[0].data = net->layer_data[1]; net->layers[10]->tops[0].data = net->layer_data[1]; net->layers[12]->tops[0].data = net->layer_data[1]; net->layers[14]->tops[0].data = net->layer_data[1]; net->layers[11]->tops[0].data = net->layer_data[2]; net->layers[15]->tops[0].data = net->layer_data[3]; net->layers[17]->tops[0].data = net->layer_data[1]; 
net->layers[27]->tops[0].data = net->layer_data[0]; net->layers[29]->tops[0].data = net->layer_data[2]; net->layers[31]->tops[0].data = net->layer_data[2]; net->layers[34]->tops[0].data = net->layer_data[1]; net->layers[36]->tops[0].data = net->layer_data[1]; net->layers[37]->tops[0].data = net->layer_data[0]; net->layers[38]->tops[0].data = net->layers[37]->tops[0].data; net->layers[39]->tops[0].data = net->layer_data[2]; net->layers[40]->tops[0].data = net->layer_data[1]; } init_layers(net); // print total memory size required { #ifdef GPU printf("%ldMB of main memory allocated\n", DIV_THEN_CEIL(net->space_cpu, 1000000)); printf("%ldMB of GPU memory allocated\n", DIV_THEN_CEIL(net->space, 1000000)); #else printf("%ldMB of main memory allocated\n", DIV_THEN_CEIL(net->space_cpu + net->space, 1000000)); #endif } } void get_input_frcnn_7_1_1(Net* net, const char* const filename[], const int num_images) { Tensor* input = &net->layers[0]->tops[0]; input->ndim = 3; input->num_items = 0; input->start[0] = 0; net->img_info->ndim = 1; net->img_info->num_items = 0; for (int i = 0; i < num_images; ++i) { load_image(filename[i], input, net->img_info, net->temp_data); } // network reshape shape_net(net); print_tensor_info("data", input); print_tensor_info("img_info", net->img_info); } void get_output_frcnn_7_1_1(Net* net, const int image_start_index) { // retrieve & print output { const Tensor* const out = &net->layers[40]->tops[0]; const long int output_size = flatten_size(out); #ifdef GPU hipMemcpyAsync(net->output_cpu_data, out->data, output_size * sizeof(real), hipMemcpyDeviceToHost); #else memcpy(net->output_cpu_data, out->data, output_size * sizeof(real)); #endif for (int n = 0; n < out->num_items; ++n) { const int image_index = image_start_index + n; const real* const p_out_item = net->output_cpu_data + out->start[n]; for (int i = 0; i < out->shape[n][0]; ++i) { const int class_index = (int)p_out_item[i * 6 + 0]; printf("Image %d / Box %d: ", image_index, i); printf("class %d, score %f, p1 = (%.2f, %.2f), p2 = (%.2f, %.2f)\n", class_index, p_out_item[i * 6 + 5], p_out_item[i * 6 + 1], p_out_item[i * 6 + 2], p_out_item[i * 6 + 3], p_out_item[i * 6 + 4]); } } } } #include "net.h" static Net* pvanet = NULL; static const char* class_names[] = { "__background", "bicycle", "bird", "bus", "car", "cat", "dog", "horse", "motorbike", "person", "train", "aeroplane", "boat", "bottle", "chair", "cow", "diningtable", "pottedplant", "sheep", "sofa", "tvmonitor", "cake", "vase" }; void pvanet_7_1_1_init(const char* const param_path, int gpu_id) { if (pvanet == NULL) { #ifdef GPU hipSetDevice(0); #endif pvanet = (Net*)malloc(sizeof(Net)); init_net(pvanet); strcpy(pvanet->param_path, param_path); construct_frcnn_7_1_1(pvanet, gpu_id); } } void pvanet_7_1_1_release(void) { if (pvanet) { free_net(pvanet); free(pvanet); pvanet = NULL; } } void pvanet_get_output(std::vector<std::pair<std::string, std::vector<float> > >& boxes) { const Tensor* const out = &pvanet->layers[40]->tops[0]; const long int output_size = flatten_size(out); #ifdef GPU hipMemcpyAsync(pvanet->output_cpu_data, out->data, output_size * sizeof(real), hipMemcpyDeviceToHost); #else memcpy(pvanet->output_cpu_data, out->data, output_size * sizeof(real)); #endif const real* const p_out_item = pvanet->output_cpu_data; for (int i = 0; i < out->shape[0][0]; ++i) { std::pair<std::string, std::vector<float> > box; const int class_index = (int)p_out_item[i * 6 + 0]; box.first = class_names[class_index]; box.second.push_back(p_out_item[i * 6 + 1]); 
box.second.push_back(p_out_item[i * 6 + 2]); box.second.push_back(p_out_item[i * 6 + 3]); box.second.push_back(p_out_item[i * 6 + 4]); box.second.push_back(p_out_item[i * 6 + 5]); boxes.push_back(box); printf("class %d, score %f, p1 = (%.2f, %.2f), p2 = (%.2f, %.2f)\n", class_index, p_out_item[i * 6 + 5], p_out_item[i * 6 + 1], p_out_item[i * 6 + 2], p_out_item[i * 6 + 3], p_out_item[i * 6 + 4]); } } void pvanet_7_1_1_detect(const unsigned char* image_data, int width, int height, int stride, std::vector<std::pair<std::string, std::vector<float> > >& boxes) { boxes.clear(); if (!pvanet) { return; } Tensor* input = &pvanet->layers[0]->tops[0]; input->ndim = 3; input->num_items = 0; input->start[0] = 0; pvanet->img_info->ndim = 1; pvanet->img_info->num_items = 0; img2input(image_data, input, pvanet->img_info, (unsigned char*)pvanet->temp_data, height, width, stride); printf("%d %d\n", input->shape[0][1], input->shape[0][2]); shape_net(pvanet); forward_net(pvanet); pvanet_get_output(boxes); } #ifdef TEST int main(int argc, char* argv[]) { // CUDA initialization #ifdef GPU { printf("set device\n"); hipSetDevice(0); } #endif // PVANET construction Net frcnn; init_net(&frcnn); strcpy(frcnn.param_path, argv[1]); construct_frcnn_7_1_1(&frcnn); // load a text file containing image filenames to be tested { char buf[10240]; char* line[20]; int total_count = 0, count = 0, buf_count = 0; FILE* fp_list = fopen(argv[2], "r"); if (!fp_list) { printf("File not found: %s\n", argv[2]); } while (fgets(&buf[buf_count], 1024, fp_list)) { const Tensor* const input = &frcnn.layers[0]->tops[0]; const int len = strlen(&buf[buf_count]); buf[buf_count + len - 1] = 0; line[count] = &buf[buf_count]; ++count; buf_count += len; if (count == input->num_items) { // input data loading get_input_frcnn_7_1_1(&frcnn, (const char * const *)&line, count); // forward-pass forward_net(&frcnn); // retrieve output & save to file get_output_frcnn_7_1_1(&frcnn, total_count); total_count += count; count = 0; buf_count = 0; } } if (count > 0) { get_input_frcnn_7_1_1(&frcnn, (const char * const *)&line, count); forward_net(&frcnn); get_output_frcnn_7_1_1(&frcnn, total_count); } fclose(fp_list); } // end free_net(&frcnn); return 0; } #endif
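// --- Illustrative usage sketch (assumes only the API defined above) ---
// A minimal driver for pvanet_7_1_1_init / pvanet_7_1_1_detect / pvanet_7_1_1_release.
// The parameter path, the image buffer, and the helper name example_detect are
// placeholders; the box layout {x1, y1, x2, y2, score} keyed by class name follows
// pvanet_get_output() above.
static void example_detect(const unsigned char* image_data, int width, int height, int stride)
{
  pvanet_7_1_1_init("models/frcnn_7_1_1", /*gpu_id=*/0);  // hypothetical parameter path
  std::vector<std::pair<std::string, std::vector<float> > > boxes;
  pvanet_7_1_1_detect(image_data, width, height, stride, boxes);
  for (size_t i = 0; i < boxes.size(); ++i) {
    printf("%s: score %.3f, box (%.1f, %.1f)-(%.1f, %.1f)\n",
           boxes[i].first.c_str(), boxes[i].second[4],
           boxes[i].second[0], boxes[i].second[1],
           boxes[i].second[2], boxes[i].second[3]);
  }
  pvanet_7_1_1_release();
}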
af4cf01795e9d19e58e6eeb66a219faa9bed9b05.cu
#include "layer.h" #include "net.h" #include <string.h> static void setup_frcnn_7_1_1(Net* const net) { const char* names[] = { // PVANET: 18 layers "data", "conv1_1", "conv1_2", "conv2_1", "conv2_2", "conv3_1", "conv3_2", "conv3_3", "downsample", "conv4_1", "conv4_2", "conv4_3", "conv5_1", "conv5_2", "conv5_3", "upsample", "concat", "convf", // Multi-scale RPN: 13 layers "rpn_conv1", "rpn_cls_score1", "rpn_bbox_pred1", "rpn_conv3", "rpn_cls_score3", "rpn_bbox_pred3", "rpn_conv5", "rpn_cls_score5", "rpn_bbox_pred5", "rpn_score", "__deleted__", "rpn_bbox", "rpn_roi", // R-CNN: 10 layers "rcnn_roipool", "__deleted__", "fc6_L", "fc6_U", "fc7_L", "fc7_U", "cls_score", "cls_pred", "bbox_pred", "out" }; net->num_layers = 41; for (int i = 0; i < net->num_layers; ++i) { net->layers[i] = (Layer*)malloc(sizeof(Layer)); init_layer(net->layers[i]); strcpy(net->layers[i]->name, names[i]); } net->img_info = (Tensor*)malloc(sizeof(Tensor)); real anchor_scales[9] = { 3.0f, 6.0f, 9.0f, 4.0f, 8.0f, 16.0f, 7.0f, 13.0f, 32.0f }; real anchor_ratios[3] = { 0.5f, 1.0f, 2.0f }; memcpy(net->anchor_scales, anchor_scales, 9 * sizeof(real)); memcpy(net->anchor_ratios, anchor_ratios, 3 * sizeof(real)); net->num_layer_data = 4; { for (int i = 1; i <= 15; ++i) { net->layers[i]->option.num_groups = 1; net->layers[i]->option.kernel_h = 3; net->layers[i]->option.kernel_w = 3; net->layers[i]->option.pad_h = 1; net->layers[i]->option.pad_w = 1; net->layers[i]->option.bias = 1; net->layers[i]->option.stride_h = 2; net->layers[i]->option.stride_w = 2; net->layers[i]->option.negative_slope = 0; #ifdef GPU net->layers[i]->option.handle = (void*)&net->cublas_handle; #endif } { net->layers[8]->option.pad_h = 0; net->layers[8]->option.pad_w = 0; net->layers[8]->option.stride_h = 2; net->layers[8]->option.stride_w = 2; net->layers[15]->option.num_groups = 512; net->layers[15]->option.kernel_h = 4; net->layers[15]->option.kernel_w = 4; net->layers[15]->option.pad_h = 1; net->layers[15]->option.pad_w = 1; net->layers[15]->option.bias = 0; net->layers[15]->option.stride_h = 2; net->layers[15]->option.stride_w = 2; net->layers[2]->option.stride_h = 1; net->layers[2]->option.stride_w = 1; net->layers[4]->option.stride_h = 1; net->layers[4]->option.stride_w = 1; net->layers[6]->option.stride_h = 1; net->layers[6]->option.stride_w = 1; net->layers[7]->option.stride_h = 1; net->layers[7]->option.stride_w = 1; net->layers[10]->option.stride_h = 1; net->layers[10]->option.stride_w = 1; net->layers[11]->option.stride_h = 1; net->layers[11]->option.stride_w = 1; net->layers[13]->option.stride_h = 1; net->layers[13]->option.stride_w = 1; net->layers[14]->option.stride_h = 1; net->layers[14]->option.stride_w = 1; } for (int i = 17; i <= 26; ++i) { net->layers[i]->option.num_groups = 1; net->layers[i]->option.kernel_h = 1; net->layers[i]->option.kernel_w = 1; net->layers[i]->option.pad_h = 0; net->layers[i]->option.pad_w = 0; net->layers[i]->option.bias = 1; net->layers[i]->option.stride_h = 1; net->layers[i]->option.stride_w = 1; net->layers[i]->option.negative_slope = 0; #ifdef GPU net->layers[i]->option.handle = (void*)&net->cublas_handle; #endif } { net->layers[21]->option.kernel_h = 3; net->layers[21]->option.kernel_w = 3; net->layers[21]->option.pad_h = 1; net->layers[21]->option.pad_w = 1; net->layers[24]->option.kernel_h = 5; net->layers[24]->option.kernel_w = 5; net->layers[24]->option.pad_h = 2; net->layers[24]->option.pad_w = 2; } { net->layers[16]->option.num_concats = 3; net->layers[27]->option.num_concats = 3; 
net->layers[29]->option.num_concats = 3; } net->layers[1]->option.out_channels = 32; net->layers[2]->option.out_channels = 32; net->layers[3]->option.out_channels = 64; net->layers[4]->option.out_channels = 64; net->layers[5]->option.out_channels = 96; net->layers[6]->option.out_channels = 64; net->layers[7]->option.out_channels = 128; net->layers[9]->option.out_channels = 192; net->layers[10]->option.out_channels = 128; net->layers[11]->option.out_channels = 256; net->layers[12]->option.out_channels = 384; net->layers[13]->option.out_channels = 256; net->layers[14]->option.out_channels = 512; net->layers[15]->option.out_channels = 512; net->layers[17]->option.out_channels = 512; net->layers[18]->option.out_channels = 128; net->layers[19]->option.out_channels = 18; net->layers[20]->option.out_channels = 36; net->layers[21]->option.out_channels = 256; net->layers[22]->option.out_channels = 18; net->layers[23]->option.out_channels = 36; net->layers[24]->option.out_channels = 128; net->layers[25]->option.out_channels = 18; net->layers[26]->option.out_channels = 36; net->layers[30]->option.scales = &net->anchor_scales[0]; net->layers[30]->option.ratios = &net->anchor_ratios[0]; net->layers[30]->option.num_scales = 9; net->layers[30]->option.num_ratios = 3; net->layers[30]->option.num_concats = 1; net->layers[30]->option.base_size = 16; net->layers[30]->option.feat_stride = 16; net->layers[30]->option.min_size = 16; net->layers[30]->option.pre_nms_topn = 6000; net->layers[30]->option.post_nms_topn = 300; net->layers[30]->option.nms_thresh = 0.7f; net->layers[31]->option.pooled_height = 6; net->layers[31]->option.pooled_width = 6; net->layers[31]->option.spatial_scale = 0.0625; net->layers[31]->option.flatten = 1; for (int i = 33; i <= 39; ++i) { net->layers[i]->option.bias = 1; net->layers[i]->option.negative_slope = 0; net->layers[i]->option.threshold = 0.5f; net->layers[i]->option.test = 1; net->layers[i]->option.scaled = 0; #ifdef GPU net->layers[i]->option.handle = (void*)&net->cublas_handle; #endif } net->layers[33]->option.bias = 0; net->layers[35]->option.bias = 0; net->layers[33]->option.out_channels = 512; net->layers[34]->option.out_channels = 4096; net->layers[35]->option.out_channels = 128; net->layers[36]->option.out_channels = 4096; net->layers[37]->option.out_channels = 22; net->layers[39]->option.out_channels = 88; net->layers[40]->option.min_size = 16; net->layers[40]->option.score_thresh = 0.7f; net->layers[40]->option.nms_thresh = 0.3f; } { net->layers[0]->num_tops = 1; for (int i = 1; i <= 15; ++i) { net->layers[i]->num_bottoms = 1; net->layers[i]->num_tops = 1; net->layers[i]->num_params = 2; } net->layers[8]->num_params = 0; net->layers[15]->num_params = 1; net->layers[16]->num_bottoms = 3; net->layers[16]->num_tops = 1; for (int i = 17; i <= 26; ++i) { net->layers[i]->num_bottoms = 1; net->layers[i]->num_tops = 1; net->layers[i]->num_params = 2; } net->layers[27]->num_bottoms = 3; net->layers[27]->num_tops = 1; net->layers[29]->num_bottoms = 3; net->layers[29]->num_tops = 1; net->layers[30]->num_bottoms = 3; net->layers[30]->num_tops = 1; net->layers[30]->num_aux_data = 1; net->layers[31]->num_bottoms = 2; net->layers[31]->num_tops = 1; for (int i = 33; i <= 39; ++i) { net->layers[i]->num_bottoms = 1; net->layers[i]->num_tops = 1; net->layers[i]->num_params = 2; } net->layers[33]->num_params = 1; net->layers[35]->num_params = 1; net->layers[38]->num_bottoms = 2; net->layers[38]->num_params = 0; net->layers[39]->num_bottoms = 2; net->layers[40]->num_bottoms = 4; 
net->layers[40]->num_tops = 1; } for (int i = 0; i < net->num_layers; ++i) { net->space_cpu += malloc_layer(net->layers[i]); } { Tensor* input = &net->layers[0]->tops[0]; input->num_items = 1; input->ndim = 3; for (int n = 0; n < input->num_items; ++n) { input->shape[n][0] = 3; input->shape[n][1] = 640; input->shape[n][2] = 1024; input->start[n] = n * 3 * 640 * 1024; } } { net->layers[8]->allocate_top_data[0] = 1; net->layers[19]->allocate_top_data[0] = 1; net->layers[20]->allocate_top_data[0] = 1; net->layers[22]->allocate_top_data[0] = 1; net->layers[23]->allocate_top_data[0] = 1; net->layers[25]->allocate_top_data[0] = 1; net->layers[26]->allocate_top_data[0] = 1; net->layers[30]->allocate_top_data[0] = 1; } } static void connect_frcnn_7_1_1(Net* const net) { // PVANET { // 1_1, 1_2, 2_1, 2_2, 3_1, 3_2, 3_3 for (int i = 1; i <= 7; ++i) { net->layers[i]->p_bottoms[0] = &net->layers[i - 1]->tops[0]; net->layers[i]->f_forward[0] = forward_conv_layer; net->layers[i]->f_forward[1] = forward_inplace_relu_layer; net->layers[i]->f_shape[0] = shape_conv_layer; } // downsample net->layers[8]->p_bottoms[0] = &net->layers[7]->tops[0]; net->layers[8]->f_forward[0] = forward_pool_layer; net->layers[8]->f_shape[0] = shape_pool_layer; // 4_1, 4_2, 4_3, 5_1, 5_2, 5_3 for (int i = 9; i <= 14; ++i) { net->layers[i]->p_bottoms[0] = &net->layers[i - 1]->tops[0]; net->layers[i]->f_forward[0] = forward_conv_layer; net->layers[i]->f_forward[1] = forward_inplace_relu_layer; net->layers[i]->f_shape[0] = shape_conv_layer; } net->layers[9]->p_bottoms[0] = &net->layers[7]->tops[0]; // upsample net->layers[15]->p_bottoms[0] = &net->layers[14]->tops[0]; net->layers[15]->f_forward[0] = forward_deconv_layer; net->layers[15]->f_shape[0] = shape_deconv_layer; // concat net->layers[16]->p_bottoms[0] = &net->layers[8]->tops[0]; net->layers[16]->p_bottoms[1] = &net->layers[11]->tops[0]; net->layers[16]->p_bottoms[2] = &net->layers[15]->tops[0]; net->layers[16]->f_forward[0] = forward_concat_layer; net->layers[16]->f_shape[0] = shape_concat_layer; // convf net->layers[17]->p_bottoms[0] = &net->layers[16]->tops[0]; net->layers[17]->f_forward[0] = forward_conv_layer; net->layers[17]->f_forward[1] = forward_inplace_relu_layer; net->layers[17]->f_shape[0] = shape_conv_layer; } // Multi-scale RPN { // rpn_1, 3, 5 for (int i = 18; i <= 26; i += 3) { // rpn_conv1, 3, 5 net->layers[i]->p_bottoms[0] = &net->layers[17]->tops[0]; net->layers[i]->f_forward[0] = forward_conv_layer; net->layers[i]->f_forward[1] = forward_inplace_relu_layer; net->layers[i]->f_shape[0] = shape_conv_layer; // rpn_cls_score1, 3, 5 net->layers[i + 1]->p_bottoms[0] = &net->layers[i]->tops[0]; net->layers[i + 1]->f_forward[0] = forward_conv_layer; net->layers[i + 1]->f_shape[0] = shape_conv_layer; // rpn_bbox_pred1, 3, 5 net->layers[i + 2]->p_bottoms[0] = &net->layers[i]->tops[0]; net->layers[i + 2]->f_forward[0] = forward_conv_layer; net->layers[i + 2]->f_shape[0] = shape_conv_layer; } // rpn_score net->layers[27]->p_bottoms[0] = &net->layers[19]->tops[0]; net->layers[27]->p_bottoms[1] = &net->layers[22]->tops[0]; net->layers[27]->p_bottoms[2] = &net->layers[25]->tops[0]; net->layers[27]->f_forward[0] = forward_concat_layer; net->layers[27]->f_forward[1] = forward_rpn_pred_layer; net->layers[27]->f_shape[0] = shape_concat_layer; net->layers[27]->f_shape[1] = shape_rpn_pred_layer; // rpn_bbox net->layers[29]->p_bottoms[0] = &net->layers[20]->tops[0]; net->layers[29]->p_bottoms[1] = &net->layers[23]->tops[0]; net->layers[29]->p_bottoms[2] = 
&net->layers[26]->tops[0]; net->layers[29]->f_forward[0] = forward_concat_layer; net->layers[29]->f_forward[1] = forward_rpn_bbox_layer; net->layers[29]->f_shape[0] = shape_concat_layer; net->layers[29]->f_shape[1] = shape_rpn_bbox_layer; // proposal net->layers[30]->p_bottoms[0] = &net->layers[27]->tops[0]; net->layers[30]->p_bottoms[1] = &net->layers[29]->tops[0]; net->layers[30]->p_bottoms[2] = net->img_info; net->layers[30]->f_forward[0] = forward_proposal_layer; net->layers[30]->f_shape[0] = shape_proposal_layer; net->layers[30]->f_init[0] = init_proposal_layer; } // R-CNN { // roipool net->layers[31]->p_bottoms[0] = &net->layers[17]->tops[0]; net->layers[31]->p_bottoms[1] = &net->layers[30]->tops[0]; net->layers[31]->f_forward[0] = forward_roipool_layer; net->layers[31]->f_shape[0] = shape_roipool_layer; // fc6_L, 6_U, 7_L, 7_U for (int i = 33; i <= 36; i += 2) { net->layers[i]->p_bottoms[0] = &net->layers[i - 1]->tops[0]; net->layers[i]->f_forward[0] = forward_fc_layer; net->layers[i]->f_shape[0] = shape_fc_layer; net->layers[i + 1]->p_bottoms[0] = &net->layers[i]->tops[0]; net->layers[i + 1]->f_forward[0] = forward_fc_layer; net->layers[i + 1]->f_forward[1] = forward_inplace_relu_layer; net->layers[i + 1]->f_forward[2] = forward_inplace_dropout_layer; net->layers[i + 1]->f_shape[0] = shape_fc_layer; } net->layers[33]->p_bottoms[0] = &net->layers[31]->tops[0]; // score net->layers[37]->p_bottoms[0] = &net->layers[36]->tops[0]; net->layers[37]->f_forward[0] = forward_fc_layer; net->layers[37]->f_shape[0] = shape_fc_layer; // pred net->layers[38]->p_bottoms[0] = &net->layers[37]->tops[0]; net->layers[38]->p_bottoms[1] = &net->layers[30]->tops[0]; net->layers[38]->f_forward[0] = forward_rcnn_pred_layer; //net->layers[38]->f_forward[1] = save_layer_tops; net->layers[38]->f_shape[0] = shape_rcnn_pred_layer; // bbox net->layers[39]->p_bottoms[0] = &net->layers[36]->tops[0]; net->layers[39]->p_bottoms[1] = &net->layers[30]->tops[0]; net->layers[39]->f_forward[0] = forward_fc_layer; net->layers[39]->f_forward[1] = forward_rcnn_bbox_layer; //net->layers[39]->f_forward[2] = save_layer_tops; net->layers[39]->f_shape[0] = shape_fc_layer; net->layers[39]->f_shape[1] = shape_rcnn_bbox_layer; // out net->layers[40]->p_bottoms[0] = &net->layers[38]->tops[0]; net->layers[40]->p_bottoms[1] = &net->layers[39]->tops[0]; net->layers[40]->p_bottoms[2] = &net->layers[30]->tops[0]; net->layers[40]->p_bottoms[3] = net->img_info; net->layers[40]->f_forward[0] = forward_odout_layer; net->layers[40]->f_shape[0] = shape_odout_layer; } } void construct_frcnn_7_1_1(Net* net, int gpu_id) { long int space_cpu = 0; #ifdef GPU cudaSetDevice(gpu_id); #endif setup_frcnn_7_1_1(net); connect_frcnn_7_1_1(net); shape_net(net); malloc_net(net); net->space_cpu += space_cpu; { for (int i = 0; i < net->num_layers; ++i) { for (int j = 0; j < net->layers[i]->num_tops; ++j) { if (!net->layers[i]->allocate_top_data[j]) { net->layers[i]->tops[j].data = net->layer_data[j]; } } } net->layers[1]->tops[0].data = net->layer_data[1]; net->layers[3]->tops[0].data = net->layer_data[1]; net->layers[5]->tops[0].data = net->layer_data[1]; net->layers[7]->tops[0].data = net->layer_data[1]; net->layers[10]->tops[0].data = net->layer_data[1]; net->layers[12]->tops[0].data = net->layer_data[1]; net->layers[14]->tops[0].data = net->layer_data[1]; net->layers[11]->tops[0].data = net->layer_data[2]; net->layers[15]->tops[0].data = net->layer_data[3]; net->layers[17]->tops[0].data = net->layer_data[1]; net->layers[27]->tops[0].data = 
net->layer_data[0]; net->layers[29]->tops[0].data = net->layer_data[2]; net->layers[31]->tops[0].data = net->layer_data[2]; net->layers[34]->tops[0].data = net->layer_data[1]; net->layers[36]->tops[0].data = net->layer_data[1]; net->layers[37]->tops[0].data = net->layer_data[0]; net->layers[38]->tops[0].data = net->layers[37]->tops[0].data; net->layers[39]->tops[0].data = net->layer_data[2]; net->layers[40]->tops[0].data = net->layer_data[1]; } init_layers(net); // print total memory size required { #ifdef GPU printf("%ldMB of main memory allocated\n", DIV_THEN_CEIL(net->space_cpu, 1000000)); printf("%ldMB of GPU memory allocated\n", DIV_THEN_CEIL(net->space, 1000000)); #else printf("%ldMB of main memory allocated\n", DIV_THEN_CEIL(net->space_cpu + net->space, 1000000)); #endif } } void get_input_frcnn_7_1_1(Net* net, const char* const filename[], const int num_images) { Tensor* input = &net->layers[0]->tops[0]; input->ndim = 3; input->num_items = 0; input->start[0] = 0; net->img_info->ndim = 1; net->img_info->num_items = 0; for (int i = 0; i < num_images; ++i) { load_image(filename[i], input, net->img_info, net->temp_data); } // network reshape shape_net(net); print_tensor_info("data", input); print_tensor_info("img_info", net->img_info); } void get_output_frcnn_7_1_1(Net* net, const int image_start_index) { // retrieve & print output { const Tensor* const out = &net->layers[40]->tops[0]; const long int output_size = flatten_size(out); #ifdef GPU cudaMemcpyAsync(net->output_cpu_data, out->data, output_size * sizeof(real), cudaMemcpyDeviceToHost); #else memcpy(net->output_cpu_data, out->data, output_size * sizeof(real)); #endif for (int n = 0; n < out->num_items; ++n) { const int image_index = image_start_index + n; const real* const p_out_item = net->output_cpu_data + out->start[n]; for (int i = 0; i < out->shape[n][0]; ++i) { const int class_index = (int)p_out_item[i * 6 + 0]; printf("Image %d / Box %d: ", image_index, i); printf("class %d, score %f, p1 = (%.2f, %.2f), p2 = (%.2f, %.2f)\n", class_index, p_out_item[i * 6 + 5], p_out_item[i * 6 + 1], p_out_item[i * 6 + 2], p_out_item[i * 6 + 3], p_out_item[i * 6 + 4]); } } } } #include "net.h" static Net* pvanet = NULL; static const char* class_names[] = { "__background", "bicycle", "bird", "bus", "car", "cat", "dog", "horse", "motorbike", "person", "train", "aeroplane", "boat", "bottle", "chair", "cow", "diningtable", "pottedplant", "sheep", "sofa", "tvmonitor", "cake", "vase" }; void pvanet_7_1_1_init(const char* const param_path, int gpu_id) { if (pvanet == NULL) { #ifdef GPU cudaSetDevice(0); #endif pvanet = (Net*)malloc(sizeof(Net)); init_net(pvanet); strcpy(pvanet->param_path, param_path); construct_frcnn_7_1_1(pvanet, gpu_id); } } void pvanet_7_1_1_release(void) { if (pvanet) { free_net(pvanet); free(pvanet); pvanet = NULL; } } void pvanet_get_output(std::vector<std::pair<std::string, std::vector<float> > >& boxes) { const Tensor* const out = &pvanet->layers[40]->tops[0]; const long int output_size = flatten_size(out); #ifdef GPU cudaMemcpyAsync(pvanet->output_cpu_data, out->data, output_size * sizeof(real), cudaMemcpyDeviceToHost); #else memcpy(pvanet->output_cpu_data, out->data, output_size * sizeof(real)); #endif const real* const p_out_item = pvanet->output_cpu_data; for (int i = 0; i < out->shape[0][0]; ++i) { std::pair<std::string, std::vector<float> > box; const int class_index = (int)p_out_item[i * 6 + 0]; box.first = class_names[class_index]; box.second.push_back(p_out_item[i * 6 + 1]); box.second.push_back(p_out_item[i * 6 
+ 2]); box.second.push_back(p_out_item[i * 6 + 3]); box.second.push_back(p_out_item[i * 6 + 4]); box.second.push_back(p_out_item[i * 6 + 5]); boxes.push_back(box); printf("class %d, score %f, p1 = (%.2f, %.2f), p2 = (%.2f, %.2f)\n", class_index, p_out_item[i * 6 + 5], p_out_item[i * 6 + 1], p_out_item[i * 6 + 2], p_out_item[i * 6 + 3], p_out_item[i * 6 + 4]); } } void pvanet_7_1_1_detect(const unsigned char* image_data, int width, int height, int stride, std::vector<std::pair<std::string, std::vector<float> > >& boxes) { boxes.clear(); if (!pvanet) { return; } Tensor* input = &pvanet->layers[0]->tops[0]; input->ndim = 3; input->num_items = 0; input->start[0] = 0; pvanet->img_info->ndim = 1; pvanet->img_info->num_items = 0; img2input(image_data, input, pvanet->img_info, (unsigned char*)pvanet->temp_data, height, width, stride); printf("%d %d\n", input->shape[0][1], input->shape[0][2]); shape_net(pvanet); forward_net(pvanet); pvanet_get_output(boxes); } #ifdef TEST int main(int argc, char* argv[]) { // CUDA initialization #ifdef GPU { printf("set device\n"); cudaSetDevice(0); } #endif // PVANET construction Net frcnn; init_net(&frcnn); strcpy(frcnn.param_path, argv[1]); construct_frcnn_7_1_1(&frcnn); // load a text file containing image filenames to be tested { char buf[10240]; char* line[20]; int total_count = 0, count = 0, buf_count = 0; FILE* fp_list = fopen(argv[2], "r"); if (!fp_list) { printf("File not found: %s\n", argv[2]); } while (fgets(&buf[buf_count], 1024, fp_list)) { const Tensor* const input = &frcnn.layers[0]->tops[0]; const int len = strlen(&buf[buf_count]); buf[buf_count + len - 1] = 0; line[count] = &buf[buf_count]; ++count; buf_count += len; if (count == input->num_items) { // input data loading get_input_frcnn_7_1_1(&frcnn, (const char * const *)&line, count); // forward-pass forward_net(&frcnn); // retrieve output & save to file get_output_frcnn_7_1_1(&frcnn, total_count); total_count += count; count = 0; buf_count = 0; } } if (count > 0) { get_input_frcnn_7_1_1(&frcnn, (const char * const *)&line, count); forward_net(&frcnn); get_output_frcnn_7_1_1(&frcnn, total_count); } fclose(fp_list); } // end free_net(&frcnn); return 0; } #endif
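// --- Illustrative sketch of the output record format ---
// Each detection written by the "out" layer and read back in get_output_frcnn_7_1_1()
// above occupies 6 values: {class index, x1, y1, x2, y2, score}. The Detection struct
// and parse_detection() helper are illustrative names, not part of the original code;
// the real type comes from the included headers.
struct Detection { int class_index; float x1, y1, x2, y2, score; };

static Detection parse_detection(const real* const p_out_item, int i)
{
  Detection d;
  d.class_index = (int)p_out_item[i * 6 + 0];
  d.x1 = p_out_item[i * 6 + 1];
  d.y1 = p_out_item[i * 6 + 2];
  d.x2 = p_out_item[i * 6 + 3];
  d.y2 = p_out_item[i * 6 + 4];
  d.score = p_out_item[i * 6 + 5];
  return d;
}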
85dea3a1b6c718ec09dcbe29065279e9ea44ef31.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>

#define N 1024*1024*1024
#define KERNELSIZE 9
#define THREADSPERBLOCK 1024
#define BLOCKSPERGRID (N+THREADSPERBLOCK-1)/THREADSPERBLOCK

// 9 wide 1d kernel, no padding so the output is KERNELSIZE-1 elements shorter
// shift by 4 to align with original data
__global__ void conv( float *data, float *kernel, float *output ){
    int tid = blockIdx.x*blockDim.x+threadIdx.x;
    // guard the tail threads: output has only N-KERNELSIZE+1 elements
    if (tid >= N-KERNELSIZE+1) return;
    output[tid] = 0; // memory from hipMalloc is not guaranteed to be zeroed
    for(int i = 0; i < KERNELSIZE; i++){
        output[tid] += data[tid + i] * kernel[i];
    }
}

int main(){
    srand(time(NULL));
    struct timespec t_start, t_end;
    double elapsedTimeCPU;
    int pass = 1;

    // gaussian kernel from: http://dev.theomader.com/gaussian-kernel-calculator/
    float kernel[9] = {0.000229, 0.005977, 0.060598, 0.241732, 0.382928, 0.241732, 0.060598, 0.005977, 0.000229};

    // random numbers from python; in real use this data would come from lidar
    //float data[100] = {7.230, 16.98, 17.99, 1.703, 16.44, 4.484, 7.843, 13.44, 7.815, 11.91, 2.050, 6.138, 3.049, 0.167, 1.756, 10.46, 10.02, 10.48, 13.14, 7.329, 14.93, 7.275, 18.61, 13.82, 15.97, 11.43, 10.27, 5.290, 14.13, 2.671, 3.267, 6.149, 14.56, 13.11, 18.14, 16.47, 17.49, 16.20, 7.835, 5.883, 0.967, 0.237, 4.359, 13.15, 15.92, 16.94, 14.30, 17.47, 5.118, 5.142, 19.41, 5.046, 16.78, 3.944, 12.17, 7.983, 15.35, 7.839, 11.65, 12.56, 9.564, 14.30, 4.670, 1.893, 9.304, 0.173, 3.921, 15.63, 6.561, 16.25, 1.634, 4.870, 15.03, 0.269, 11.92, 0.390, 15.57, 2.918, 8.966, 14.04, 11.23, 7.519, 7.943, 6.570, 18.74, 15.54, 1.303, 14.01, 1.797, 1.526, 12.90, 3.051, 8.602, 7.094, 14.39, 14.13, 11.20, 2.637, 2.644, 2.810};
    float* data = (float*)malloc(N*sizeof(float));
    for(int i = 0; i < N; i++) {
        data[i] = rand()%10 + (float)rand()/RAND_MAX; // cast so the second term contributes a fractional part
    }

    // empty array to store the output
    // float output[N-KERNELSIZE+1];

    // CPU 1d convolutional operation
    clock_gettime( CLOCK_REALTIME, &t_start);
    float* output = (float*)malloc((N-KERNELSIZE+1)*sizeof(float));
    for (int i = 0; i < N-KERNELSIZE+1; i++) {
        output[i] = 0;
        for (int j = 0; j < KERNELSIZE; j++){
            output[i] += kernel[j] * data[i+j];
        }
    }
    clock_gettime( CLOCK_REALTIME, &t_end);
    elapsedTimeCPU = (t_end.tv_sec - t_start.tv_sec) * 1000.0;
    elapsedTimeCPU += (t_end.tv_nsec - t_start.tv_nsec) / 1000000.0;
    printf("CPU elapsedTime: %lf ms\n", elapsedTimeCPU);

    // GPU
    float *d_kernel, *d_data, *d_output;
    // allocate the memory on the GPU
    hipMalloc( (void**)&d_kernel, KERNELSIZE * sizeof(float) );
    hipMalloc( (void**)&d_data, N * sizeof(float) );
    hipMalloc( (void**)&d_output, (N-KERNELSIZE+1) * sizeof(float) );
    float* output_from_device = (float*)malloc((N-KERNELSIZE+1)*sizeof(float));
    hipMemcpy( d_kernel, kernel, KERNELSIZE * sizeof(float), hipMemcpyHostToDevice );
    hipMemcpy( d_data, data, N * sizeof(float), hipMemcpyHostToDevice );

    hipEvent_t start, stop;
    hipEventCreate(&start);
    hipEventCreate(&stop);
    hipEventRecord(start, 0);
    hipLaunchKernelGGL(conv, dim3(BLOCKSPERGRID), dim3(THREADSPERBLOCK), 0, 0, d_data, d_kernel, d_output);
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipMemcpy(output_from_device, d_output, (N-KERNELSIZE+1) * sizeof(float), hipMemcpyDeviceToHost );

    float elapsedTime;
    hipEventElapsedTime(&elapsedTime, start, stop);
    printf("GPU time: %13f msec\n", elapsedTime);
    hipEventDestroy(start);
    hipEventDestroy(stop);
    printf("Speedup: %13f x\n", elapsedTimeCPU/elapsedTime);

    for (int i = 0; i < N-KERNELSIZE+1; i++){
        if(fabs(output_from_device[i]-output[i]) > 0.00001){ // don't use if(output_from_device[i]!=output[i])
            printf("CPU:%lf GPU:%lf\n", output[i], output_from_device[i]);
            pass = 0;
        }
    }
    if(pass == 1)
        printf("Test pass!\n");
    else
        printf("Test fail!\n");
}
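// --- Optional variant sketch (illustrative names, not part of the original benchmark) ---
// The 9 filter taps are reused by every thread, so holding them in __constant__ memory
// avoids one global-memory load per tap. conv_const and c_kernel are illustrative names;
// the host side would copy the taps once, e.g.
//   hipMemcpyToSymbol(HIP_SYMBOL(c_kernel), kernel, KERNELSIZE * sizeof(float));
// and launch with the same grid as above, passing N-KERNELSIZE+1 as out_size.
__constant__ float c_kernel[KERNELSIZE];

__global__ void conv_const(const float* data, float* output, int out_size)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid >= out_size) return;          // guard the tail block
    float acc = 0.0f;
    for (int i = 0; i < KERNELSIZE; i++)
        acc += data[tid + i] * c_kernel[i];
    output[tid] = acc;
}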
85dea3a1b6c718ec09dcbe29065279e9ea44ef31.cu
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include <sys/time.h>
#include <cuda_runtime.h>

#define N 1024*1024*1024
#define KERNELSIZE 9
#define THREADSPERBLOCK 1024
#define BLOCKSPERGRID (N+THREADSPERBLOCK-1)/THREADSPERBLOCK

// 9 wide 1d kernel, no padding so the output is KERNELSIZE-1 elements shorter
// shift by 4 to align with original data
__global__ void conv( float *data, float *kernel, float *output ){
    int tid = blockIdx.x*blockDim.x+threadIdx.x;
    // guard the tail threads: output has only N-KERNELSIZE+1 elements
    if (tid >= N-KERNELSIZE+1) return;
    output[tid] = 0; // memory from cudaMalloc is not guaranteed to be zeroed
    for(int i = 0; i < KERNELSIZE; i++){
        output[tid] += data[tid + i] * kernel[i];
    }
}

int main(){
    srand(time(NULL));
    struct timespec t_start, t_end;
    double elapsedTimeCPU;
    int pass = 1;

    // gaussian kernel from: http://dev.theomader.com/gaussian-kernel-calculator/
    float kernel[9] = {0.000229, 0.005977, 0.060598, 0.241732, 0.382928, 0.241732, 0.060598, 0.005977, 0.000229};

    // random numbers from python; in real use this data would come from lidar
    //float data[100] = {7.230, 16.98, 17.99, 1.703, 16.44, 4.484, 7.843, 13.44, 7.815, 11.91, 2.050, 6.138, 3.049, 0.167, 1.756, 10.46, 10.02, 10.48, 13.14, 7.329, 14.93, 7.275, 18.61, 13.82, 15.97, 11.43, 10.27, 5.290, 14.13, 2.671, 3.267, 6.149, 14.56, 13.11, 18.14, 16.47, 17.49, 16.20, 7.835, 5.883, 0.967, 0.237, 4.359, 13.15, 15.92, 16.94, 14.30, 17.47, 5.118, 5.142, 19.41, 5.046, 16.78, 3.944, 12.17, 7.983, 15.35, 7.839, 11.65, 12.56, 9.564, 14.30, 4.670, 1.893, 9.304, 0.173, 3.921, 15.63, 6.561, 16.25, 1.634, 4.870, 15.03, 0.269, 11.92, 0.390, 15.57, 2.918, 8.966, 14.04, 11.23, 7.519, 7.943, 6.570, 18.74, 15.54, 1.303, 14.01, 1.797, 1.526, 12.90, 3.051, 8.602, 7.094, 14.39, 14.13, 11.20, 2.637, 2.644, 2.810};
    float* data = (float*)malloc(N*sizeof(float));
    for(int i = 0; i < N; i++) {
        data[i] = rand()%10 + (float)rand()/RAND_MAX; // cast so the second term contributes a fractional part
    }

    // empty array to store the output
    // float output[N-KERNELSIZE+1];

    // CPU 1d convolutional operation
    clock_gettime( CLOCK_REALTIME, &t_start);
    float* output = (float*)malloc((N-KERNELSIZE+1)*sizeof(float));
    for (int i = 0; i < N-KERNELSIZE+1; i++) {
        output[i] = 0;
        for (int j = 0; j < KERNELSIZE; j++){
            output[i] += kernel[j] * data[i+j];
        }
    }
    clock_gettime( CLOCK_REALTIME, &t_end);
    elapsedTimeCPU = (t_end.tv_sec - t_start.tv_sec) * 1000.0;
    elapsedTimeCPU += (t_end.tv_nsec - t_start.tv_nsec) / 1000000.0;
    printf("CPU elapsedTime: %lf ms\n", elapsedTimeCPU);

    // GPU
    float *d_kernel, *d_data, *d_output;
    // allocate the memory on the GPU
    cudaMalloc( (void**)&d_kernel, KERNELSIZE * sizeof(float) );
    cudaMalloc( (void**)&d_data, N * sizeof(float) );
    cudaMalloc( (void**)&d_output, (N-KERNELSIZE+1) * sizeof(float) );
    float* output_from_device = (float*)malloc((N-KERNELSIZE+1)*sizeof(float));
    cudaMemcpy( d_kernel, kernel, KERNELSIZE * sizeof(float), cudaMemcpyHostToDevice );
    cudaMemcpy( d_data, data, N * sizeof(float), cudaMemcpyHostToDevice );

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);
    cudaEventRecord(start, 0);
    conv<<<BLOCKSPERGRID, THREADSPERBLOCK>>>(d_data, d_kernel, d_output);
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaMemcpy(output_from_device, d_output, (N-KERNELSIZE+1) * sizeof(float), cudaMemcpyDeviceToHost );

    float elapsedTime;
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("GPU time: %13f msec\n", elapsedTime);
    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    printf("Speedup: %13f x\n", elapsedTimeCPU/elapsedTime);

    for (int i = 0; i < N-KERNELSIZE+1; i++){
        if(fabs(output_from_device[i]-output[i]) > 0.00001){ // don't use if(output_from_device[i]!=output[i])
            printf("CPU:%lf GPU:%lf\n", output[i], output_from_device[i]);
            pass = 0;
        }
    }
    if(pass == 1)
        printf("Test pass!\n");
    else
        printf("Test fail!\n");
}
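// --- Optional timing refinement sketch (not part of the original code) ---
// Averaging over several launches reduces the impact of one-off launch overhead on the
// reported GPU time. It reuses the conv kernel and launch configuration defined above;
// the helper name time_conv_avg_ms and the repetition count are illustrative.
static float time_conv_avg_ms(float* d_data, float* d_kernel, float* d_output, int reps)
{
    cudaEvent_t t0, t1;
    cudaEventCreate(&t0);
    cudaEventCreate(&t1);
    cudaEventRecord(t0, 0);
    for (int r = 0; r < reps; r++)
        conv<<<BLOCKSPERGRID, THREADSPERBLOCK>>>(d_data, d_kernel, d_output);
    cudaEventRecord(t1, 0);
    cudaEventSynchronize(t1);
    float ms = 0.0f;
    cudaEventElapsedTime(&ms, t0, t1);
    cudaEventDestroy(t0);
    cudaEventDestroy(t1);
    return ms / reps;   // mean milliseconds per launch
}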
d07eb0880747fe1df3acb8347f74992037650c37.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/strings/split/split.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/strings/strings_column_view.hpp> #include <thrust/transform.h> #include <vector> namespace cudf { namespace strings { namespace detail { namespace { // align all column size allocations to this boundary so that all output column buffers // start at that alignment. static constexpr size_type split_align = 64; __device__ size_type compute_memory_size(size_type token_count, size_type token_size_sum) { return cudf::detail::round_up_pow2(token_size_sum, split_align) + cudf::detail::round_up_pow2((token_count + 1) * static_cast<size_type>(sizeof(size_type)), split_align); } struct copy_info { size_type idx{}; size_type token_count{}; size_type token_size_sum{}; void* memory_ptr{}; }; enum class Dir { FORWARD, BACKWARD }; /** * @brief Compute the number of tokens, the total byte sizes of the tokens, and * required memory size for the `idx'th` string element of `d_strings`. */ template <Dir dir> struct token_reader_fn { column_device_view const d_strings; // strings to split string_view const d_delimiter; // delimiter for split size_type const max_tokens = std::numeric_limits<size_type>::max(); bool const has_validity = false; template <bool last> __device__ size_type compute_token_char_bytes(string_view const& d_str, size_type start_pos, size_type end_pos, size_type delimiter_pos) const { if (last) { return dir == Dir::FORWARD ? d_str.byte_offset(end_pos) - d_str.byte_offset(start_pos) : d_str.byte_offset(end_pos); } else { return dir == Dir::FORWARD ? d_str.byte_offset(delimiter_pos) - d_str.byte_offset(start_pos) : d_str.byte_offset(end_pos) - d_str.byte_offset(delimiter_pos + d_delimiter.length()); } } // returns a tuple of token count, sum of token sizes in bytes, and required // memory block size __device__ thrust::tuple<size_type, size_type, size_type> operator()(size_type idx) const { if (has_validity && d_strings.is_null(idx)) { return thrust::make_tuple<size_type, size_type, size_type>(0, 0, 0); } auto const d_str = d_strings.element<string_view>(idx); size_type token_count = 0; size_type token_size_sum = 0; size_type start_pos = 0; // updates only if moving forward auto end_pos = d_str.length(); // updates only if moving backward while (token_count < max_tokens - 1) { auto const delimiter_pos = dir == Dir::FORWARD ? 
d_str.find(d_delimiter, start_pos) : d_str.rfind(d_delimiter, start_pos, end_pos); if (delimiter_pos != -1) { token_count++; token_size_sum += compute_token_char_bytes<false>(d_str, start_pos, end_pos, delimiter_pos); if (dir == Dir::FORWARD) { start_pos = delimiter_pos + d_delimiter.length(); } else { end_pos = delimiter_pos; } } else { break; } } token_count++; token_size_sum += compute_token_char_bytes<true>(d_str, start_pos, end_pos, -1); auto const memory_size = compute_memory_size(token_count, token_size_sum); return thrust::make_tuple<size_type, size_type, size_type>( token_count, token_size_sum, memory_size); } }; /** * @brief Copy the tokens from the `idx'th` string element of `d_strings` to * the contiguous memory buffer. */ template <Dir dir> struct token_copier_fn { column_device_view const d_strings; // strings to split string_view const d_delimiter; // delimiter for split bool const has_validity = false; template <bool last> __device__ thrust::pair<size_type, size_type> compute_src_byte_offset_and_token_char_bytes( string_view const& d_str, size_type start_pos, size_type end_pos, size_type delimiter_pos) const { if (last) { auto const src_byte_offset = dir == Dir::FORWARD ? d_str.byte_offset(start_pos) : 0; auto const token_char_bytes = dir == Dir::FORWARD ? d_str.byte_offset(end_pos) - src_byte_offset : d_str.byte_offset(end_pos); return thrust::make_pair<size_type, size_type>(src_byte_offset, token_char_bytes); } else { auto const src_byte_offset = dir == Dir::FORWARD ? d_str.byte_offset(start_pos) : d_str.byte_offset(delimiter_pos + d_delimiter.length()); auto const token_char_bytes = dir == Dir::FORWARD ? d_str.byte_offset(delimiter_pos) - src_byte_offset : d_str.byte_offset(end_pos) - src_byte_offset; return thrust::make_pair<size_type, size_type>(src_byte_offset, token_char_bytes); } } __device__ void operator()(copy_info const info) const { if (info.token_count == 0) { return; } auto memory_ptr = static_cast<char*>(info.memory_ptr); auto const char_buf_size = cudf::detail::round_up_pow2(info.token_size_sum, split_align); auto const char_buf_ptr = memory_ptr; memory_ptr += char_buf_size; auto const offset_buf_ptr = reinterpret_cast<size_type*>(memory_ptr); auto const d_str = d_strings.element<string_view>(info.idx); size_type token_idx = 0; size_type char_bytes_copied = 0; size_type start_pos = 0; // updates only if moving forward auto end_pos = d_str.length(); // updates only if moving backward while (token_idx < info.token_count - 1) { auto const delimiter_pos = dir == Dir::FORWARD ? 
d_str.find(d_delimiter, start_pos) : d_str.rfind(d_delimiter, start_pos, end_pos); if (delimiter_pos != -1) { auto const offset_size_pair = compute_src_byte_offset_and_token_char_bytes<false>( d_str, start_pos, end_pos, delimiter_pos); if (dir == Dir::FORWARD) { thrust::copy(thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_bytes_copied); offset_buf_ptr[token_idx] = char_bytes_copied; } else { auto const char_buf_offset = info.token_size_sum - char_bytes_copied - offset_size_pair.second; thrust::copy(thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_buf_offset); offset_buf_ptr[info.token_count - 1 - token_idx] = char_buf_offset; } token_idx++; char_bytes_copied += offset_size_pair.second; if (dir == Dir::FORWARD) { start_pos = delimiter_pos + d_delimiter.length(); } else { end_pos = delimiter_pos; } } else { break; } } auto const offset_size_pair = compute_src_byte_offset_and_token_char_bytes<true>(d_str, start_pos, end_pos, -1); if (dir == Dir::FORWARD) { thrust::copy(thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_bytes_copied); offset_buf_ptr[token_idx] = char_bytes_copied; } else { thrust::copy(thrust::seq, d_str.data(), d_str.data() + offset_size_pair.second, char_buf_ptr); offset_buf_ptr[0] = 0; } offset_buf_ptr[info.token_count] = info.token_size_sum; } }; /** * @brief Compute the number of tokens, the total byte sizes of the tokens, and * required memory size for the `idx'th` string element of `d_strings`. */ template <Dir dir> struct whitespace_token_reader_fn { column_device_view const d_strings; // strings to split size_type const max_tokens = std::numeric_limits<size_type>::max(); bool const has_validity = false; template <bool last> __device__ size_type compute_token_char_bytes(string_view const& d_str, size_type cur_pos, size_type to_token_pos) const { if (last) { return dir == Dir::FORWARD ? d_str.byte_offset(d_str.length()) - d_str.byte_offset(to_token_pos) : d_str.byte_offset(to_token_pos + 1) - d_str.byte_offset(0); } else { return dir == Dir::FORWARD ? d_str.byte_offset(cur_pos) - d_str.byte_offset(to_token_pos) : d_str.byte_offset(to_token_pos + 1) - d_str.byte_offset(cur_pos + 1); } } __device__ thrust::tuple<size_type, size_type, size_type> operator()(size_type idx) const { if (has_validity && d_strings.is_null(idx)) { return thrust::make_tuple<size_type, size_type, size_type>(0, 0, 0); } auto const d_str = d_strings.element<string_view>(idx); size_type token_count = 0; size_type token_size_sum = 0; auto spaces = true; auto reached_max_tokens = false; size_type to_token_pos = 0; for (size_type i = 0; i < d_str.length(); ++i) { auto const cur_pos = dir == Dir::FORWARD ? i : d_str.length() - 1 - i; auto const ch = d_str[cur_pos]; if (spaces != (ch <= ' ')) { if (spaces) { // from whitespace(s) to a new token to_token_pos = cur_pos; } else { // from a token to whitespace(s) if (token_count < max_tokens - 1) { token_count++; token_size_sum += compute_token_char_bytes<false>(d_str, cur_pos, to_token_pos); } else { reached_max_tokens = true; break; } } spaces = !spaces; } } if (reached_max_tokens || !spaces) { token_count++; token_size_sum += compute_token_char_bytes<true>(d_str, -1, to_token_pos); } if (token_count == 0) { // note that pandas.Series.str.split("", pat=" ") // returns one token (i.e. 
"") while // pandas.Series.str.split("") returns 0 token. return thrust::make_tuple<size_type, size_type, size_type>(0, 0, 0); } auto const memory_size = compute_memory_size(token_count, token_size_sum); return thrust::make_tuple<size_type, size_type, size_type>( token_count, token_size_sum, memory_size); } }; /** * @brief Copy the tokens from the `idx'th` string element of `d_strings` to * the contiguous memory buffer. */ template <Dir dir> struct whitespace_token_copier_fn { column_device_view const d_strings; // strings to split bool const has_validity = false; template <bool last> __device__ thrust::pair<size_type, size_type> compute_src_byte_offset_and_token_char_bytes( string_view const& d_str, size_type cur_pos, size_type to_token_pos, size_type remaining_bytes) const { if (last) { auto const token_char_bytes = remaining_bytes; auto const src_byte_offset = dir == Dir::FORWARD ? d_str.byte_offset(to_token_pos) : d_str.byte_offset(to_token_pos + 1) - token_char_bytes; return thrust::make_pair<size_type, size_type>(src_byte_offset, token_char_bytes); } else { auto const src_byte_offset = dir == Dir::FORWARD ? d_str.byte_offset(to_token_pos) : d_str.byte_offset(cur_pos + 1); auto const token_char_bytes = dir == Dir::FORWARD ? d_str.byte_offset(cur_pos) - src_byte_offset : d_str.byte_offset(to_token_pos + 1) - src_byte_offset; return thrust::make_pair<size_type, size_type>(src_byte_offset, token_char_bytes); } } __device__ void operator()(copy_info const info) const { if (info.token_count == 0) { return; } auto memory_ptr = static_cast<char*>(info.memory_ptr); auto const char_buf_size = cudf::detail::round_up_pow2(info.token_size_sum, split_align); auto const char_buf_ptr = memory_ptr; memory_ptr += char_buf_size; auto const offset_buf_ptr = reinterpret_cast<size_type*>(memory_ptr); auto const d_str = d_strings.element<string_view>(info.idx); size_type token_idx = 0; size_type char_bytes_copied = 0; auto spaces = true; size_type to_token_pos = 0; for (size_type i = 0; i < d_str.length(); ++i) { auto const cur_pos = dir == Dir::FORWARD ? 
i : d_str.length() - 1 - i; auto const ch = d_str[cur_pos]; if (spaces != (ch <= ' ')) { if (spaces) { // from whitespace(s) to a new token to_token_pos = cur_pos; } else { // from a token to whitespace(s) if (token_idx < info.token_count - 1) { auto const offset_size_pair = compute_src_byte_offset_and_token_char_bytes<false>( d_str, cur_pos, to_token_pos, info.token_size_sum - char_bytes_copied); if (dir == Dir::FORWARD) { thrust::copy(thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_bytes_copied); offset_buf_ptr[token_idx] = char_bytes_copied; } else { auto const char_buf_offset = info.token_size_sum - char_bytes_copied - offset_size_pair.second; thrust::copy(thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_buf_offset); offset_buf_ptr[info.token_count - 1 - token_idx] = char_buf_offset; } token_idx++; char_bytes_copied += offset_size_pair.second; } else { break; } } spaces = !spaces; } } if (token_idx < info.token_count) { auto const offset_size_pair = compute_src_byte_offset_and_token_char_bytes<true>( d_str, -1, to_token_pos, info.token_size_sum - char_bytes_copied); if (dir == Dir::FORWARD) { thrust::copy(thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_bytes_copied); offset_buf_ptr[token_idx] = char_bytes_copied; } else { thrust::copy(thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr); offset_buf_ptr[0] = 0; } } offset_buf_ptr[info.token_count] = info.token_size_sum; } }; // Generic split function used by split_record and rsplit_record template <typename TokenReader, typename TokenCopier> contiguous_split_record_result contiguous_split_record_fn(strings_column_view const& strings, TokenReader reader, TokenCopier copier, rmm::mr::device_memory_resource* mr, hipStream_t stream) { // read each string element of the input column to count the number of tokens // and compute the memory offsets auto strings_count = strings.size(); rmm::device_vector<size_type> d_token_counts(strings_count); rmm::device_vector<size_type> d_token_size_sums(strings_count); rmm::device_vector<size_type> d_memory_offsets(strings_count + 1); thrust::transform(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(strings_count), thrust::make_zip_iterator(thrust::make_tuple( d_token_counts.begin(), d_token_size_sums.begin(), d_memory_offsets.begin())), reader); thrust::exclusive_scan(rmm::exec_policy(stream)->on(stream), d_memory_offsets.begin(), d_memory_offsets.end(), d_memory_offsets.begin()); // allocate and copy thrust::host_vector<size_type> h_token_counts = d_token_counts; thrust::host_vector<size_type> h_token_size_sums = d_token_size_sums; thrust::host_vector<size_type> h_memory_offsets = d_memory_offsets; auto memory_size = h_memory_offsets.back(); auto all_data_ptr = std::make_unique<rmm::device_buffer>(memory_size, stream, mr); auto d_all_data_ptr = reinterpret_cast<char*>(all_data_ptr->data()); auto d_token_counts_ptr = d_token_counts.data().get(); auto d_memory_offsets_ptr = d_memory_offsets.data().get(); auto d_token_size_sums_ptr = d_token_size_sums.data().get(); auto copy_info_begin = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [d_all_data_ptr, d_token_counts_ptr, d_memory_offsets_ptr, 
d_token_size_sums_ptr] __device__( auto i) { return copy_info{i, d_token_counts_ptr[i], d_token_size_sums_ptr[i], d_all_data_ptr + d_memory_offsets_ptr[i]}; }); thrust::for_each( rmm::exec_policy(stream)->on(stream), copy_info_begin, copy_info_begin + strings_count, copier); // update column_view objects std::vector<column_view> column_views{}; for (size_type i = 0; i < strings_count; ++i) { if (h_token_counts[i] == 0) { column_views.emplace_back(strings.parent().type(), 0, nullptr); } else { auto memory_ptr = d_all_data_ptr + h_memory_offsets[i]; auto char_buf_size = cudf::util::round_up_safe(h_token_size_sums[i], split_align); auto char_buf_ptr = memory_ptr; memory_ptr += char_buf_size; auto offset_buf_ptr = reinterpret_cast<size_type*>(memory_ptr); column_views.emplace_back( strings.parent().type(), h_token_counts[i], nullptr, nullptr, UNKNOWN_NULL_COUNT, 0, std::vector<column_view>{ column_view(strings.offsets().type(), h_token_counts[i] + 1, offset_buf_ptr), column_view(strings.chars().type(), h_token_size_sums[i], char_buf_ptr)}); } } CUDA_TRY(hipStreamSynchronize(stream)); return contiguous_split_record_result{std::move(column_views), std::move(all_data_ptr)}; } } // namespace template <Dir dir> contiguous_split_record_result contiguous_split_record( strings_column_view const& strings, string_scalar const& delimiter = string_scalar(""), size_type maxsplit = -1, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), hipStream_t stream = 0) { CUDF_EXPECTS(delimiter.is_valid(), "Parameter delimiter must be valid"); // makes consistent with Pandas size_type max_tokens = maxsplit > 0 ? maxsplit + 1 : std::numeric_limits<size_type>::max(); auto has_validity = strings.parent().nullable(); auto d_strings_column_ptr = column_device_view::create(strings.parent(), stream); if (delimiter.size() == 0) { return contiguous_split_record_fn( strings, whitespace_token_reader_fn<dir>{*d_strings_column_ptr, max_tokens, has_validity}, whitespace_token_copier_fn<dir>{*d_strings_column_ptr, has_validity}, mr, stream); } else { string_view d_delimiter(delimiter.data(), delimiter.size()); return contiguous_split_record_fn( strings, token_reader_fn<dir>{*d_strings_column_ptr, d_delimiter, max_tokens, has_validity}, token_copier_fn<dir>{*d_strings_column_ptr, d_delimiter, has_validity}, mr, stream); } } } // namespace detail // external APIs contiguous_split_record_result contiguous_split_record(strings_column_view const& strings, string_scalar const& delimiter, size_type maxsplit, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::contiguous_split_record<detail::Dir::FORWARD>(strings, delimiter, maxsplit, mr, 0); } contiguous_split_record_result contiguous_rsplit_record(strings_column_view const& strings, string_scalar const& delimiter, size_type maxsplit, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::contiguous_split_record<detail::Dir::BACKWARD>( strings, delimiter, maxsplit, mr, 0); } } // namespace strings } // namespace cudf
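// --- Host-side layout sketch (illustrative only, mirrors compute_memory_size above) ---
// Each row's contiguous block packs the token characters first (rounded up to
// split_align) followed by token_count + 1 size_type offsets. The helper names
// split_align_host, round_up_host, and row_memory_size are local to this sketch.
#include <cstddef>
#include <cstdint>

namespace {

constexpr std::size_t split_align_host = 64;  // matches split_align above

constexpr std::size_t round_up_host(std::size_t v, std::size_t align)
{
  return (v + align - 1) / align * align;
}

constexpr std::size_t row_memory_size(std::size_t token_count, std::size_t token_size_sum)
{
  return round_up_host(token_size_sum, split_align_host) +
         round_up_host((token_count + 1) * sizeof(int32_t), split_align_host);  // size_type is int32_t
}

// 3 tokens holding 10 bytes of characters occupy one 64B char slot plus one 64B offset slot.
static_assert(row_memory_size(3, 10) == 128, "two 64-byte slots expected");

}  // namespace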
d07eb0880747fe1df3acb8347f74992037650c37.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/detail/utilities/cuda.cuh> #include <cudf/strings/split/split.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/strings/strings_column_view.hpp> #include <thrust/transform.h> #include <vector> namespace cudf { namespace strings { namespace detail { namespace { // align all column size allocations to this boundary so that all output column buffers // start at that alignment. static constexpr size_type split_align = 64; __device__ size_type compute_memory_size(size_type token_count, size_type token_size_sum) { return cudf::detail::round_up_pow2(token_size_sum, split_align) + cudf::detail::round_up_pow2((token_count + 1) * static_cast<size_type>(sizeof(size_type)), split_align); } struct copy_info { size_type idx{}; size_type token_count{}; size_type token_size_sum{}; void* memory_ptr{}; }; enum class Dir { FORWARD, BACKWARD }; /** * @brief Compute the number of tokens, the total byte sizes of the tokens, and * required memory size for the `idx'th` string element of `d_strings`. */ template <Dir dir> struct token_reader_fn { column_device_view const d_strings; // strings to split string_view const d_delimiter; // delimiter for split size_type const max_tokens = std::numeric_limits<size_type>::max(); bool const has_validity = false; template <bool last> __device__ size_type compute_token_char_bytes(string_view const& d_str, size_type start_pos, size_type end_pos, size_type delimiter_pos) const { if (last) { return dir == Dir::FORWARD ? d_str.byte_offset(end_pos) - d_str.byte_offset(start_pos) : d_str.byte_offset(end_pos); } else { return dir == Dir::FORWARD ? d_str.byte_offset(delimiter_pos) - d_str.byte_offset(start_pos) : d_str.byte_offset(end_pos) - d_str.byte_offset(delimiter_pos + d_delimiter.length()); } } // returns a tuple of token count, sum of token sizes in bytes, and required // memory block size __device__ thrust::tuple<size_type, size_type, size_type> operator()(size_type idx) const { if (has_validity && d_strings.is_null(idx)) { return thrust::make_tuple<size_type, size_type, size_type>(0, 0, 0); } auto const d_str = d_strings.element<string_view>(idx); size_type token_count = 0; size_type token_size_sum = 0; size_type start_pos = 0; // updates only if moving forward auto end_pos = d_str.length(); // updates only if moving backward while (token_count < max_tokens - 1) { auto const delimiter_pos = dir == Dir::FORWARD ? 
d_str.find(d_delimiter, start_pos) : d_str.rfind(d_delimiter, start_pos, end_pos); if (delimiter_pos != -1) { token_count++; token_size_sum += compute_token_char_bytes<false>(d_str, start_pos, end_pos, delimiter_pos); if (dir == Dir::FORWARD) { start_pos = delimiter_pos + d_delimiter.length(); } else { end_pos = delimiter_pos; } } else { break; } } token_count++; token_size_sum += compute_token_char_bytes<true>(d_str, start_pos, end_pos, -1); auto const memory_size = compute_memory_size(token_count, token_size_sum); return thrust::make_tuple<size_type, size_type, size_type>( token_count, token_size_sum, memory_size); } }; /** * @brief Copy the tokens from the `idx'th` string element of `d_strings` to * the contiguous memory buffer. */ template <Dir dir> struct token_copier_fn { column_device_view const d_strings; // strings to split string_view const d_delimiter; // delimiter for split bool const has_validity = false; template <bool last> __device__ thrust::pair<size_type, size_type> compute_src_byte_offset_and_token_char_bytes( string_view const& d_str, size_type start_pos, size_type end_pos, size_type delimiter_pos) const { if (last) { auto const src_byte_offset = dir == Dir::FORWARD ? d_str.byte_offset(start_pos) : 0; auto const token_char_bytes = dir == Dir::FORWARD ? d_str.byte_offset(end_pos) - src_byte_offset : d_str.byte_offset(end_pos); return thrust::make_pair<size_type, size_type>(src_byte_offset, token_char_bytes); } else { auto const src_byte_offset = dir == Dir::FORWARD ? d_str.byte_offset(start_pos) : d_str.byte_offset(delimiter_pos + d_delimiter.length()); auto const token_char_bytes = dir == Dir::FORWARD ? d_str.byte_offset(delimiter_pos) - src_byte_offset : d_str.byte_offset(end_pos) - src_byte_offset; return thrust::make_pair<size_type, size_type>(src_byte_offset, token_char_bytes); } } __device__ void operator()(copy_info const info) const { if (info.token_count == 0) { return; } auto memory_ptr = static_cast<char*>(info.memory_ptr); auto const char_buf_size = cudf::detail::round_up_pow2(info.token_size_sum, split_align); auto const char_buf_ptr = memory_ptr; memory_ptr += char_buf_size; auto const offset_buf_ptr = reinterpret_cast<size_type*>(memory_ptr); auto const d_str = d_strings.element<string_view>(info.idx); size_type token_idx = 0; size_type char_bytes_copied = 0; size_type start_pos = 0; // updates only if moving forward auto end_pos = d_str.length(); // updates only if moving backward while (token_idx < info.token_count - 1) { auto const delimiter_pos = dir == Dir::FORWARD ? 
d_str.find(d_delimiter, start_pos) : d_str.rfind(d_delimiter, start_pos, end_pos); if (delimiter_pos != -1) { auto const offset_size_pair = compute_src_byte_offset_and_token_char_bytes<false>( d_str, start_pos, end_pos, delimiter_pos); if (dir == Dir::FORWARD) { thrust::copy(thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_bytes_copied); offset_buf_ptr[token_idx] = char_bytes_copied; } else { auto const char_buf_offset = info.token_size_sum - char_bytes_copied - offset_size_pair.second; thrust::copy(thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_buf_offset); offset_buf_ptr[info.token_count - 1 - token_idx] = char_buf_offset; } token_idx++; char_bytes_copied += offset_size_pair.second; if (dir == Dir::FORWARD) { start_pos = delimiter_pos + d_delimiter.length(); } else { end_pos = delimiter_pos; } } else { break; } } auto const offset_size_pair = compute_src_byte_offset_and_token_char_bytes<true>(d_str, start_pos, end_pos, -1); if (dir == Dir::FORWARD) { thrust::copy(thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_bytes_copied); offset_buf_ptr[token_idx] = char_bytes_copied; } else { thrust::copy(thrust::seq, d_str.data(), d_str.data() + offset_size_pair.second, char_buf_ptr); offset_buf_ptr[0] = 0; } offset_buf_ptr[info.token_count] = info.token_size_sum; } }; /** * @brief Compute the number of tokens, the total byte sizes of the tokens, and * required memory size for the `idx'th` string element of `d_strings`. */ template <Dir dir> struct whitespace_token_reader_fn { column_device_view const d_strings; // strings to split size_type const max_tokens = std::numeric_limits<size_type>::max(); bool const has_validity = false; template <bool last> __device__ size_type compute_token_char_bytes(string_view const& d_str, size_type cur_pos, size_type to_token_pos) const { if (last) { return dir == Dir::FORWARD ? d_str.byte_offset(d_str.length()) - d_str.byte_offset(to_token_pos) : d_str.byte_offset(to_token_pos + 1) - d_str.byte_offset(0); } else { return dir == Dir::FORWARD ? d_str.byte_offset(cur_pos) - d_str.byte_offset(to_token_pos) : d_str.byte_offset(to_token_pos + 1) - d_str.byte_offset(cur_pos + 1); } } __device__ thrust::tuple<size_type, size_type, size_type> operator()(size_type idx) const { if (has_validity && d_strings.is_null(idx)) { return thrust::make_tuple<size_type, size_type, size_type>(0, 0, 0); } auto const d_str = d_strings.element<string_view>(idx); size_type token_count = 0; size_type token_size_sum = 0; auto spaces = true; auto reached_max_tokens = false; size_type to_token_pos = 0; for (size_type i = 0; i < d_str.length(); ++i) { auto const cur_pos = dir == Dir::FORWARD ? i : d_str.length() - 1 - i; auto const ch = d_str[cur_pos]; if (spaces != (ch <= ' ')) { if (spaces) { // from whitespace(s) to a new token to_token_pos = cur_pos; } else { // from a token to whitespace(s) if (token_count < max_tokens - 1) { token_count++; token_size_sum += compute_token_char_bytes<false>(d_str, cur_pos, to_token_pos); } else { reached_max_tokens = true; break; } } spaces = !spaces; } } if (reached_max_tokens || !spaces) { token_count++; token_size_sum += compute_token_char_bytes<true>(d_str, -1, to_token_pos); } if (token_count == 0) { // note that pandas.Series.str.split("", pat=" ") // returns one token (i.e. 
"") while // pandas.Series.str.split("") returns 0 token. return thrust::make_tuple<size_type, size_type, size_type>(0, 0, 0); } auto const memory_size = compute_memory_size(token_count, token_size_sum); return thrust::make_tuple<size_type, size_type, size_type>( token_count, token_size_sum, memory_size); } }; /** * @brief Copy the tokens from the `idx'th` string element of `d_strings` to * the contiguous memory buffer. */ template <Dir dir> struct whitespace_token_copier_fn { column_device_view const d_strings; // strings to split bool const has_validity = false; template <bool last> __device__ thrust::pair<size_type, size_type> compute_src_byte_offset_and_token_char_bytes( string_view const& d_str, size_type cur_pos, size_type to_token_pos, size_type remaining_bytes) const { if (last) { auto const token_char_bytes = remaining_bytes; auto const src_byte_offset = dir == Dir::FORWARD ? d_str.byte_offset(to_token_pos) : d_str.byte_offset(to_token_pos + 1) - token_char_bytes; return thrust::make_pair<size_type, size_type>(src_byte_offset, token_char_bytes); } else { auto const src_byte_offset = dir == Dir::FORWARD ? d_str.byte_offset(to_token_pos) : d_str.byte_offset(cur_pos + 1); auto const token_char_bytes = dir == Dir::FORWARD ? d_str.byte_offset(cur_pos) - src_byte_offset : d_str.byte_offset(to_token_pos + 1) - src_byte_offset; return thrust::make_pair<size_type, size_type>(src_byte_offset, token_char_bytes); } } __device__ void operator()(copy_info const info) const { if (info.token_count == 0) { return; } auto memory_ptr = static_cast<char*>(info.memory_ptr); auto const char_buf_size = cudf::detail::round_up_pow2(info.token_size_sum, split_align); auto const char_buf_ptr = memory_ptr; memory_ptr += char_buf_size; auto const offset_buf_ptr = reinterpret_cast<size_type*>(memory_ptr); auto const d_str = d_strings.element<string_view>(info.idx); size_type token_idx = 0; size_type char_bytes_copied = 0; auto spaces = true; size_type to_token_pos = 0; for (size_type i = 0; i < d_str.length(); ++i) { auto const cur_pos = dir == Dir::FORWARD ? 
i : d_str.length() - 1 - i; auto const ch = d_str[cur_pos]; if (spaces != (ch <= ' ')) { if (spaces) { // from whitespace(s) to a new token to_token_pos = cur_pos; } else { // from a token to whitespace(s) if (token_idx < info.token_count - 1) { auto const offset_size_pair = compute_src_byte_offset_and_token_char_bytes<false>( d_str, cur_pos, to_token_pos, info.token_size_sum - char_bytes_copied); if (dir == Dir::FORWARD) { thrust::copy(thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_bytes_copied); offset_buf_ptr[token_idx] = char_bytes_copied; } else { auto const char_buf_offset = info.token_size_sum - char_bytes_copied - offset_size_pair.second; thrust::copy(thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_buf_offset); offset_buf_ptr[info.token_count - 1 - token_idx] = char_buf_offset; } token_idx++; char_bytes_copied += offset_size_pair.second; } else { break; } } spaces = !spaces; } } if (token_idx < info.token_count) { auto const offset_size_pair = compute_src_byte_offset_and_token_char_bytes<true>( d_str, -1, to_token_pos, info.token_size_sum - char_bytes_copied); if (dir == Dir::FORWARD) { thrust::copy(thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr + char_bytes_copied); offset_buf_ptr[token_idx] = char_bytes_copied; } else { thrust::copy(thrust::seq, d_str.data() + offset_size_pair.first, d_str.data() + offset_size_pair.first + offset_size_pair.second, char_buf_ptr); offset_buf_ptr[0] = 0; } } offset_buf_ptr[info.token_count] = info.token_size_sum; } }; // Generic split function used by split_record and rsplit_record template <typename TokenReader, typename TokenCopier> contiguous_split_record_result contiguous_split_record_fn(strings_column_view const& strings, TokenReader reader, TokenCopier copier, rmm::mr::device_memory_resource* mr, cudaStream_t stream) { // read each string element of the input column to count the number of tokens // and compute the memory offsets auto strings_count = strings.size(); rmm::device_vector<size_type> d_token_counts(strings_count); rmm::device_vector<size_type> d_token_size_sums(strings_count); rmm::device_vector<size_type> d_memory_offsets(strings_count + 1); thrust::transform(rmm::exec_policy(stream)->on(stream), thrust::make_counting_iterator(0), thrust::make_counting_iterator(strings_count), thrust::make_zip_iterator(thrust::make_tuple( d_token_counts.begin(), d_token_size_sums.begin(), d_memory_offsets.begin())), reader); thrust::exclusive_scan(rmm::exec_policy(stream)->on(stream), d_memory_offsets.begin(), d_memory_offsets.end(), d_memory_offsets.begin()); // allocate and copy thrust::host_vector<size_type> h_token_counts = d_token_counts; thrust::host_vector<size_type> h_token_size_sums = d_token_size_sums; thrust::host_vector<size_type> h_memory_offsets = d_memory_offsets; auto memory_size = h_memory_offsets.back(); auto all_data_ptr = std::make_unique<rmm::device_buffer>(memory_size, stream, mr); auto d_all_data_ptr = reinterpret_cast<char*>(all_data_ptr->data()); auto d_token_counts_ptr = d_token_counts.data().get(); auto d_memory_offsets_ptr = d_memory_offsets.data().get(); auto d_token_size_sums_ptr = d_token_size_sums.data().get(); auto copy_info_begin = thrust::make_transform_iterator( thrust::make_counting_iterator(0), [d_all_data_ptr, d_token_counts_ptr, d_memory_offsets_ptr, 
d_token_size_sums_ptr] __device__( auto i) { return copy_info{i, d_token_counts_ptr[i], d_token_size_sums_ptr[i], d_all_data_ptr + d_memory_offsets_ptr[i]}; }); thrust::for_each( rmm::exec_policy(stream)->on(stream), copy_info_begin, copy_info_begin + strings_count, copier); // update column_view objects std::vector<column_view> column_views{}; for (size_type i = 0; i < strings_count; ++i) { if (h_token_counts[i] == 0) { column_views.emplace_back(strings.parent().type(), 0, nullptr); } else { auto memory_ptr = d_all_data_ptr + h_memory_offsets[i]; auto char_buf_size = cudf::util::round_up_safe(h_token_size_sums[i], split_align); auto char_buf_ptr = memory_ptr; memory_ptr += char_buf_size; auto offset_buf_ptr = reinterpret_cast<size_type*>(memory_ptr); column_views.emplace_back( strings.parent().type(), h_token_counts[i], nullptr, nullptr, UNKNOWN_NULL_COUNT, 0, std::vector<column_view>{ column_view(strings.offsets().type(), h_token_counts[i] + 1, offset_buf_ptr), column_view(strings.chars().type(), h_token_size_sums[i], char_buf_ptr)}); } } CUDA_TRY(cudaStreamSynchronize(stream)); return contiguous_split_record_result{std::move(column_views), std::move(all_data_ptr)}; } } // namespace template <Dir dir> contiguous_split_record_result contiguous_split_record( strings_column_view const& strings, string_scalar const& delimiter = string_scalar(""), size_type maxsplit = -1, rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(), cudaStream_t stream = 0) { CUDF_EXPECTS(delimiter.is_valid(), "Parameter delimiter must be valid"); // makes consistent with Pandas size_type max_tokens = maxsplit > 0 ? maxsplit + 1 : std::numeric_limits<size_type>::max(); auto has_validity = strings.parent().nullable(); auto d_strings_column_ptr = column_device_view::create(strings.parent(), stream); if (delimiter.size() == 0) { return contiguous_split_record_fn( strings, whitespace_token_reader_fn<dir>{*d_strings_column_ptr, max_tokens, has_validity}, whitespace_token_copier_fn<dir>{*d_strings_column_ptr, has_validity}, mr, stream); } else { string_view d_delimiter(delimiter.data(), delimiter.size()); return contiguous_split_record_fn( strings, token_reader_fn<dir>{*d_strings_column_ptr, d_delimiter, max_tokens, has_validity}, token_copier_fn<dir>{*d_strings_column_ptr, d_delimiter, has_validity}, mr, stream); } } } // namespace detail // external APIs contiguous_split_record_result contiguous_split_record(strings_column_view const& strings, string_scalar const& delimiter, size_type maxsplit, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::contiguous_split_record<detail::Dir::FORWARD>(strings, delimiter, maxsplit, mr, 0); } contiguous_split_record_result contiguous_rsplit_record(strings_column_view const& strings, string_scalar const& delimiter, size_type maxsplit, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::contiguous_split_record<detail::Dir::BACKWARD>( strings, delimiter, maxsplit, mr, 0); } } // namespace strings } // namespace cudf
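The generic contiguous_split_record_fn above follows a two-pass scheme: a reader functor computes per-string token counts and memory sizes, an exclusive scan turns those sizes into offsets into one contiguous allocation, and a copier functor then fills each slice. Below is a minimal, self-contained sketch of only the offsets-from-sizes step; it calls Thrust directly rather than going through cudf's rmm::exec_policy wrapper, and the size values are made up for illustration.

#include <thrust/device_vector.h>
#include <thrust/scan.h>
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <cstdio>
#include <vector>

int main() {
  // Per-string memory requirements, as a reader functor would produce them
  // (values here are illustrative only).
  std::vector<int> h_sizes = {64, 128, 0, 32};
  int n = static_cast<int>(h_sizes.size());

  // One extra slot: after an exclusive scan over n + 1 inputs, offsets[n]
  // holds the running total, i.e. the size of the single buffer to allocate.
  thrust::device_vector<int> d_sizes(n + 1, 0);
  thrust::copy(h_sizes.begin(), h_sizes.end(), d_sizes.begin());
  thrust::device_vector<int> d_offsets(n + 1);

  thrust::exclusive_scan(thrust::device, d_sizes.begin(), d_sizes.end(),
                         d_offsets.begin());

  // Each string's tokens are later copied to buffer + offsets[i].
  for (int i = 0; i <= n; ++i)
    std::printf("offset[%d] = %d\n", i, static_cast<int>(d_offsets[i]));
  return 0;
}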
7439ad1a8993ce868c486346b84bdf21018257fc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "traffic.h" static const int kNumBlockSize = 256; static const char kCellTypeNormal = 1; static const char kCellTypeProducer = 2; using IndexT = int; using CellPointerT = IndexT; #include "../dataset.h" __managed__ CellBase **dev_cells; // Need 2 arrays of both, so we can swap. __device__ int *d_Car_active; __device__ int *d_Car_active_2; __managed__ CarBase **dev_cars; __managed__ CarBase **dev_cars_2; // For prefix sum array compaction. __device__ int *d_prefix_sum_temp; __device__ int *d_prefix_sum_output; int *h_prefix_sum_temp; int *h_prefix_sum_output; int *h_Car_active; int *h_Car_active_2; __device__ int d_num_cells; __device__ int d_num_cars; __device__ int d_num_cars_2; int host_num_cells; int host_num_cars; // TODO: Consider migrating to SoaAlloc. TrafficLight *h_traffic_lights; __managed__ TrafficLightBase **d_traffic_lights; // Only for rendering. __device__ int dev_num_cells; __device__ float *dev_Cell_pos_x; __device__ float *dev_Cell_pos_y; __device__ bool *dev_Cell_occupied; float *host_Cell_pos_x; float *host_Cell_pos_y; bool *host_Cell_occupied; float *host_data_Cell_pos_x; float *host_data_Cell_pos_y; bool *host_data_Cell_occupied; __device__ void Car_step_extend_path(IndexT self) { // CONCORD CellBase *cell; CONCORD(cell, dev_cars[self], get_position()); CellBase *next_cell; // CONCORD int vel; CONCORD(vel, dev_cars[self], get_velocity()); for (int i = 0; i < vel; ++i) { // CONCORD bool cond; CONCORD(cond, cell, get_is_target()); ; // CONCORD bool cond2; CONCORD(cond2, cell, is_sink()); if (cond2 || cond) { break; } // CONCORD CONCORD(next_cell, dev_cars[self], next_step(cell)); assert(next_cell != cell); // CONCORD bool cond3; CONCORD(cond3, next_cell, is_free()); if (!cond3) break; cell = next_cell; // CONCORD CONCORD(dev_cars[self], set_path(cell, i)); ; // CONCORD int path_len; CONCORD(path_len, dev_cars[self], get_path_length()); ; // CONCORD CONCORD(dev_cars[self], set_path_length(path_len + 1)); ; } // CONCORD int path_len; CONCORD(path_len, dev_cars[self], get_path_length()); ; // CONCORD CONCORD(dev_cars[self], set_velocity(path_len)); ; } __device__ void Car_step_constraint_velocity(IndexT self) { // This is actually only needed for the very first iteration, because a car // may be positioned on a traffic light cell. // CONCORD int vel; CONCORD(vel, dev_cars[self], get_velocity()); // CONCORD CellBase *cell; CONCORD(cell, dev_cars[self], get_position()); int vel_max; CONCORD(vel_max, cell, get_current_max_velocity()); // CONCORD if (vel > vel_max) { // CONCORD int max_velocity; CONCORD(max_velocity, cell, get_current_max_velocity()); ; // CONCORD CONCORD(dev_cars[self], set_velocity(max_velocity)); ; } int path_index = 0; int distance = 1; // CONCORD int vel3; CONCORD(vel3, dev_cars[self], get_velocity()); while (distance <= vel3) { // Invariant: Movement of up to `distance - 1` many cells at `velocity_` // is allowed. // Now check if next cell can be entered. // CONCORD CellBase *next_cell; CONCORD(next_cell, dev_cars[self], get_path(path_index)); ; // Avoid collision. // CONCORD bool cond4; CONCORD(cond4, next_cell, is_free()); if (!cond4) { // Cannot enter cell. --distance; // CONCORD CONCORD(dev_cars[self], set_velocity(distance)); ; break; } // else: Can enter next cell. 
// CONCORD int curr_vel; CONCORD(curr_vel, dev_cars[self], get_velocity()); ; // CONCORD int cur_max; CONCORD(cur_max, next_cell, get_current_max_velocity()); if (curr_vel > cur_max) { // Car is too fast for this cell. // CONCORD int cur_max2; CONCORD(cur_max2, next_cell, get_current_max_velocity()); if (cur_max2 > distance - 1) { // Even if we slow down, we would still make progress. // CONCORD int max; CONCORD(max, next_cell, get_current_max_velocity()); // CONCORD CONCORD(dev_cars[self], set_velocity(max)); } else { // Do not enter the next cell. --distance; assert(distance >= 0); // CONCORD CONCORD(dev_cars[self], set_velocity(distance)); break; } } ++distance; ++path_index; } --distance; #ifndef NDEBUG // CONCORD int aavel; CONCORD(aavel, dev_cars[self], get_velocity()); for (int i = 0; i < aavel; ++i) { // CONCORD CellBase *path; CellBase *pathi; CellBase *pathi_1; bool cond_free; CONCORD(path, dev_cars[self], get_path(i)); CONCORD(cond_free, path, is_free()); assert(cond_free); CONCORD(pathi, dev_cars[self], get_path(i)); CONCORD(pathi_1, dev_cars[self], get_path(i - 1)); assert(i == 0 || pathi_1 != pathi); } // TODO: Check why the cast is necessary. // CONCORD int ver_; CONCORD(ver_, dev_cars[self], get_velocity()); //assert(distance <= ver_); #endif // NDEBUG } __device__ void Car_step_move(IndexT self) { // CONCORD CellBase *cell; CONCORD(cell, dev_cars[self], get_position()); // CONCORD int vel; CONCORD(vel, dev_cars[self], get_velocity()); for (int i = 0; i < vel; ++i) { // CONCORD CellBase *path; CONCORD(path, dev_cars[self], get_path(i)); assert(path != cell); // CONCORD CONCORD(cell, dev_cars[self], get_path(i)); // CONCORD bool cond21; CONCORD(cond21, cell, is_free()) assert(cond21); // CONCORD CellBase *ptr; CONCORD(ptr, dev_cars[self], get_position()); // CONCORD CONCORD(ptr, release()); // CONCORD CONCORD(cell, occupy(dev_cars[self])); // CONCORD CONCORD(dev_cars[self], set_position(cell)); } // CONCORD CellBase *ptr; CONCORD(ptr, dev_cars[self], get_position()); ; // CONCORD bool cond; CONCORD(cond, ptr, is_sink()); ; // CONCORD bool cond32; CONCORD(cond32, ptr, get_is_target()) if (cond || cond32) { // Remove car from the simulation. Will be added again in the next // iteration. // CONCORD CONCORD(ptr, release()); ; // CONCORD CONCORD(dev_cars[self], set_position(nullptr)); ; d_Car_active[self] = 0; } } __device__ void Car_step_slow_down(IndexT self) { // 20% change of slowdown. 
// CONCORD int vel; CONCORD(vel, dev_cars[self], get_velocity()); ; // CONCORD float rnd; CONCORD(rnd, dev_cars[self], random_uni()); if (rnd < 0.2 && vel > 0) { // CONCORD CONCORD(dev_cars[self], set_velocity(vel - 1)); ; } } __device__ IndexT new_Car(int seed, IndexT cell, int max_velocity) { IndexT idx = atomicAdd(&d_num_cars, 1); assert(idx >= 0 && idx < kMaxNumCars); assert(!d_Car_active[idx]); // CONCORD CONCORD(dev_cars[idx], set_position(dev_cells[cell])); ; // CONCORD CONCORD(dev_cars[idx], set_path_length(0)); ; // CONCORD CONCORD(dev_cars[idx], set_velocity(0)); ; // CONCORD CONCORD(dev_cars[idx], set_max_velocity(max_velocity)); ; d_Car_active[idx] = 1; // CONCORD bool cond; CONCORD(cond, dev_cells[cell], is_free()) assert(cond); // CONCORD CONCORD(dev_cells[cell], occupy(dev_cars[idx])); ; hiprand_init(seed, 0, 0, &dev_cars[idx]->random_state); return idx; } __device__ void ProducerCell_create_car(IndexT self) { assert(dev_cells[self]->type == kCellTypeProducer); // CONCORD bool cond; CONCORD(cond, dev_cells[self], is_free()); if (cond) { float r = hiprand_uniform(&dev_cells[self]->random_state); if (r < kCarAllocationRatio) { IndexT new_car = new_Car( /*seed=*/hiprand(&dev_cells[self]->random_state), /*cell=*/self, /*max_velocity=*/hiprand(&dev_cells[self]->random_state) % (kMaxVelocity / 2) + kMaxVelocity / 2); } } } __device__ IndexT new_Cell(int max_velocity, float x, float y) { IndexT idx = atomicAdd(&d_num_cells, 1); dev_cells[idx]->car = nullptr; dev_cells[idx]->max_velocity = max_velocity; dev_cells[idx]->current_max_velocity = max_velocity; dev_cells[idx]->num_incoming = 0; dev_cells[idx]->num_outgoing = 0; dev_cells[idx]->x = x; dev_cells[idx]->y = y; dev_cells[idx]->is_target = false; dev_cells[idx]->type = kCellTypeNormal; return idx; } __device__ IndexT new_ProducerCell(int max_velocity, float x, float y, int seed) { IndexT idx = new_Cell(max_velocity, x, y); dev_cells[idx]->type = kCellTypeProducer; hiprand_init(seed, 0, 0, &dev_cells[idx]->random_state); return idx; } __global__ void kernel_traffic_light_step() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections; i += blockDim.x * gridDim.x) { // CONCORD int num_c; CONCORD(num_c, d_traffic_lights[i], get_num_cells()); if (num_c > 0) { // CONCORD int timer; CONCORD(timer, d_traffic_lights[i], get_timer()); ; // CONCORD int phase_time; CONCORD(phase_time, d_traffic_lights[i], get_phase_time()); ; // CONCORD CONCORD(d_traffic_lights[i], set_timer((timer + 1) % phase_time)); // CONCORD if (d_traffic_lights[i]->get_timer() == 0) { // CONCORD int phase; CONCORD(phase, d_traffic_lights[i], get_phase()); ; // CONCORD CellBase *ptr22; CONCORD(ptr22, d_traffic_lights[i], get_cell(phase)); assert(ptr22 != nullptr); // CONCORD CONCORD(phase, d_traffic_lights[i], get_phase()); ; // CONCORD CellBase *ptr; CONCORD(ptr, d_traffic_lights[i], get_cell(phase)); ; // CONCORD CONCORD(ptr, set_current_max_velocity(0)); ; // CONCORD int phase_2; CONCORD(phase_2, d_traffic_lights[i], get_phase()); ; // CONCORD int num_cells; CONCORD(num_cells, d_traffic_lights[i], get_num_cells()); ; // CONCORD CONCORD(d_traffic_lights[i], set_phase((phase_2 + 1) % num_cells)); // CONCORD CONCORD(phase_2, d_traffic_lights[i], get_phase()); ; // CONCORD CONCORD(ptr, d_traffic_lights[i], get_cell(phase_2)); ; // CONCORD CONCORD(ptr, remove_speed_limit()); ; } } // d_traffic_lights[i]->step(); } } __global__ void kernel_create_nodes() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections; i += blockDim.x * 
gridDim.x) { hiprandState_t state; hiprand_init(i, 0, 0, &state); assert(d_nodes[i].x >= 0 && d_nodes[i].x <= 1); assert(d_nodes[i].y >= 0 && d_nodes[i].y <= 1); for (int j = 0; j < d_nodes[i].num_outgoing; ++j) { d_nodes[i].cell_out[j] = new_Cell( /*max_velocity=*/hiprand(&state) % (kMaxVelocity / 2) + kMaxVelocity / 2, d_nodes[i].x, d_nodes[i].y); } } } __device__ IndexT connect_intersections(IndexT from, Node *target, int incoming_idx, hiprandState_t &state) { // Create edge. float dx = target->x - dev_cells[from]->x; float dy = target->y - dev_cells[from]->y; float dist = sqrt(dx * dx + dy * dy); int steps = dist / kCellLength; float step_x = dx / steps; float step_y = dy / steps; IndexT prev = from; for (int j = 0; j < steps; ++j) { float new_x = dev_cells[from]->x + j * step_x; float new_y = dev_cells[from]->y + j * step_y; assert(new_x >= 0 && new_x <= 1); assert(new_y >= 0 && new_y <= 1); IndexT next; if (hiprand_uniform(&state) < kProducerRatio) { next = new_ProducerCell(dev_cells[prev]->max_velocity, new_x, new_y, hiprand(&state)); } else { next = new_Cell(dev_cells[prev]->max_velocity, new_x, new_y); } if (hiprand_uniform(&state) < kTargetRatio) { // CONCORD CONCORD(dev_cells[next], set_target()); ; } // CONCORD CONCORD(dev_cells[prev], set_num_outgoing(1)); ; // CONCORD CONCORD(dev_cells[prev], set_outgoing(0, dev_cells[next])); ; // CONCORD CONCORD(dev_cells[next], set_num_incoming(1)); ; // CONCORD CONCORD(dev_cells[next], set_incoming(0, dev_cells[prev])); ; prev = next; } // Connect to all outgoing nodes of target. // CONCORD CONCORD(dev_cells[prev], set_num_outgoing(target->num_outgoing)); ; for (int i = 0; i < target->num_outgoing; ++i) { IndexT next = target->cell_out[i]; // num_incoming set later. // CONCORD CONCORD(dev_cells[prev], set_outgoing(i, dev_cells[next])); ; // CONCORD CONCORD(dev_cells[next], set_incoming(incoming_idx, dev_cells[prev])); ; } return prev; } __global__ void kernel_create_edges() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections; i += blockDim.x * gridDim.x) { hiprandState_t state; hiprand_init(i, 0, 0, &state); for (int k = 0; k < d_nodes[i].num_outgoing; ++k) { int target = d_nodes[i].node_out[k]; int target_pos = d_nodes[i].node_out_pos[k]; IndexT last = connect_intersections( d_nodes[i].cell_out[k], &d_nodes[target], target_pos, state); // CONCORD CONCORD(dev_cells[last], set_current_max_velocity(0)); d_nodes[target].cell_in[target_pos] = last; } } } __global__ void kernel_create_traffic_lights() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections; i += blockDim.x * gridDim.x) { new (d_traffic_lights[i]) TrafficLight( /*num_cells=*/d_nodes[i].num_incoming, /*phase_time=*/5); for (int j = 0; j < d_nodes[i].num_outgoing; ++j) { // CONCORD CONCORD(dev_cells[d_nodes[i].cell_out[j]], set_num_incoming(d_nodes[i].num_incoming)); } for (int j = 0; j < d_nodes[i].num_incoming; ++j) { // CONCORD CONCORD(d_traffic_lights[i], set_cell(j, dev_cells[d_nodes[i].cell_in[j]])); ; // CONCORD CONCORD(dev_cells[d_nodes[i].cell_in[j]], set_current_max_velocity(0)); } } } template <class Type, class TypeBase> __global__ void device_alloc(TypeBase **ptr, int size) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) { ptr[i] = new Type(); assert(ptr[i] != nullptr); } } void create_street_network() { int zero = 0; hipMemcpyToSymbol(dev_num_cells, &zero, sizeof(int), 0, hipMemcpyHostToDevice); hipMalloc(&h_nodes, sizeof(Node) * kNumIntersections); hipMemcpyToSymbol(d_nodes, 
&h_nodes, sizeof(Node *), 0, hipMemcpyHostToDevice); hipMalloc(&d_traffic_lights, sizeof(TrafficLight *) * kNumIntersections); hipLaunchKernelGGL(( device_alloc<TrafficLight, TrafficLightBase>) , dim3((kNumIntersections + kNumBlockSize - 1) / kNumBlockSize), dim3(kNumBlockSize), 0, 0, d_traffic_lights, kNumIntersections); gpuErrchk(hipDeviceSynchronize()); // Create basic structure on host. create_network_structure(); hipLaunchKernelGGL(( kernel_create_nodes), (kNumIntersections + kNumBlockSize - 1) / kNumBlockSize, dim3(kNumBlockSize), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( kernel_create_edges), (kNumIntersections + kNumBlockSize - 1) / kNumBlockSize, dim3(kNumBlockSize), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( kernel_create_traffic_lights), (kNumIntersections + kNumBlockSize - 1) / kNumBlockSize, dim3(kNumBlockSize), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); // Allocate helper data structures for rendering. hipMemcpyFromSymbol(&host_num_cells, d_num_cells, sizeof(int), 0, hipMemcpyDeviceToHost); hipMalloc(&host_Cell_pos_x, sizeof(float) * host_num_cells); hipMemcpyToSymbol(dev_Cell_pos_x, &host_Cell_pos_x, sizeof(float *), 0, hipMemcpyHostToDevice); hipMalloc(&host_Cell_pos_y, sizeof(float) * host_num_cells); hipMemcpyToSymbol(dev_Cell_pos_y, &host_Cell_pos_y, sizeof(float *), 0, hipMemcpyHostToDevice); hipMalloc(&host_Cell_occupied, sizeof(bool) * host_num_cells); hipMemcpyToSymbol(dev_Cell_occupied, &host_Cell_occupied, sizeof(bool *), 0, hipMemcpyHostToDevice); host_data_Cell_pos_x = (float *)malloc(sizeof(float) * host_num_cells); host_data_Cell_pos_y = (float *)malloc(sizeof(float) * host_num_cells); host_data_Cell_occupied = (bool *)malloc(sizeof(bool) * host_num_cells); #ifndef NDEBUG printf("Number of cells: %i\n", host_num_cells); #endif // NDEBUG } void step_traffic_lights() { // TODO: Consider migrating this to SoaAlloc. 
hipLaunchKernelGGL(( kernel_traffic_light_step), (kNumIntersections + kNumBlockSize - 1) / kNumBlockSize, dim3(kNumBlockSize), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); } __device__ void Cell_add_to_rendering_array(IndexT self) { int idx = atomicAdd(&dev_num_cells, 1); dev_Cell_pos_x[idx] = dev_cells[self]->x; dev_Cell_pos_y[idx] = dev_cells[self]->y; // CONCORD CONCORD(dev_Cell_occupied[idx], !dev_cells[self], is_free()); ; } __global__ void kernel_Cell_add_to_rendering_array() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cells; i += blockDim.x * gridDim.x) { Cell_add_to_rendering_array(i); } } void transfer_data() { int zero = 0; hipMemcpyToSymbol(dev_num_cells, &zero, sizeof(int), 0, hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernel_Cell_add_to_rendering_array), (host_num_cells + kNumBlockSize - 1) / kNumBlockSize, dim3(kNumBlockSize), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipMemcpy(host_data_Cell_pos_x, host_Cell_pos_x, sizeof(float) * host_num_cells, hipMemcpyDeviceToHost); hipMemcpy(host_data_Cell_pos_y, host_Cell_pos_y, sizeof(float) * host_num_cells, hipMemcpyDeviceToHost); hipMemcpy(host_data_Cell_occupied, host_Cell_occupied, sizeof(bool) * host_num_cells, hipMemcpyDeviceToHost); gpuErrchk(hipDeviceSynchronize()); } __global__ void kernel_ProducerCell_create_car() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cells; i += blockDim.x * gridDim.x) { if (dev_cells[i]->type == kCellTypeProducer) { ProducerCell_create_car(i); } } } __device__ void Car_step_prepare_path(IndexT self) { // CONCORD CONCORD(dev_cars[self], step_initialize_iteration()); ; // CONCORD CONCORD(dev_cars[self], step_accelerate()); ; Car_step_extend_path(self); Car_step_constraint_velocity(self); Car_step_slow_down(self); } __global__ void kernel_Car_step_prepare_path() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars; i += blockDim.x * gridDim.x) { if (d_Car_active[i]) { Car_step_prepare_path(i); } } } __global__ void kernel_fill_car_indices() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars; i += blockDim.x * gridDim.x) { d_Car_active[i] = 0; d_Car_active_2[i] = 0; } } __global__ void kernel_Car_step_move() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars; i += blockDim.x * gridDim.x) { if (d_Car_active[i]) { Car_step_move(i); } } } __device__ int d_checksum; __global__ void kernel_compute_checksum() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars; i += blockDim.x * gridDim.x) { if (d_Car_active[i]) { atomicAdd(&d_checksum, 1); } } } int checksum() { int zero = 0; hipMemcpyToSymbol(d_checksum, &zero, sizeof(int), 0, hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernel_compute_checksum), dim3(128), dim3(128), 0, 0, ); int result; hipMemcpyFromSymbol(&result, d_checksum, sizeof(int), 0, hipMemcpyDeviceToHost); return result; } void step() { hipLaunchKernelGGL(( kernel_ProducerCell_create_car), (host_num_cells + kNumBlockSize - 1) / kNumBlockSize, dim3(kNumBlockSize), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipMemcpyFromSymbol(&host_num_cars, d_num_cars, sizeof(int), 0, hipMemcpyDeviceToHost); step_traffic_lights(); hipLaunchKernelGGL(( kernel_Car_step_prepare_path), dim3((host_num_cars + kNumBlockSize - 1) / kNumBlockSize), dim3(kNumBlockSize), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( kernel_Car_step_move), dim3((host_num_cars + kNumBlockSize - 1) / kNumBlockSize), dim3(kNumBlockSize), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); } void allocate_memory() { 
hipMalloc(&dev_cells, sizeof(Cell *) * kMaxNumCells); hipLaunchKernelGGL(( device_alloc<Cell, CellBase>) , dim3((kMaxNumCells + kNumBlockSize - 1) / kNumBlockSize), dim3(kNumBlockSize), 0, 0, dev_cells, kMaxNumCells); gpuErrchk(hipDeviceSynchronize()); hipMalloc(&dev_cars, sizeof(Car *) * kMaxNumCars); hipMalloc(&dev_cars_2, sizeof(Car *) * kMaxNumCars); hipLaunchKernelGGL(( device_alloc<Car, CarBase>) , dim3((kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize), dim3(kNumBlockSize), 0, 0, dev_cars, kMaxNumCars); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( device_alloc<Car>) , dim3((kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize), dim3(kNumBlockSize), 0, 0, dev_cars_2, kMaxNumCars); gpuErrchk(hipDeviceSynchronize()); hipMalloc(&h_Car_active, sizeof(int) * kMaxNumCars); hipMemcpyToSymbol(d_Car_active, &h_Car_active, sizeof(int *), 0, hipMemcpyHostToDevice); // Car *h_cars_2; // hipMalloc(&h_cars_2, sizeof(Car) * kMaxNumCars); // hipMemcpyToSymbol(dev_cars_2, &h_cars_2, sizeof(Car *), 0, // hipMemcpyHostToDevice); hipMalloc(&h_Car_active_2, sizeof(int) * kMaxNumCars); hipMemcpyToSymbol(d_Car_active_2, &h_Car_active_2, sizeof(int *), 0, hipMemcpyHostToDevice); hipMalloc(&h_prefix_sum_temp, 3 * sizeof(int) * kMaxNumCars); hipMemcpyToSymbol(d_prefix_sum_temp, &h_prefix_sum_temp, sizeof(int *), 0, hipMemcpyHostToDevice); hipMalloc(&h_prefix_sum_output, sizeof(int) * kMaxNumCars); hipMemcpyToSymbol(d_prefix_sum_output, &h_prefix_sum_output, sizeof(int *), 0, hipMemcpyHostToDevice); hipLaunchKernelGGL(( kernel_fill_car_indices), dim3(128), dim3(128), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); int zero = 0; hipMemcpyToSymbol(d_num_cells, &zero, sizeof(int), 0, hipMemcpyHostToDevice); hipMemcpyToSymbol(d_num_cars, &zero, sizeof(int), 0, hipMemcpyHostToDevice); } __global__ void kernel_compact_initialize() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kMaxNumCars; i += blockDim.x * gridDim.x) { d_Car_active_2[i] = 0; } } __global__ void kernel_compact_cars() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars; i += blockDim.x * gridDim.x) { if (d_Car_active[i]) { int target = d_prefix_sum_output[i]; // Copy i --> target. // dev_cars_2[target] = dev_cars[i]; memcpy(dev_cars_2[target], dev_cars[i], sizeof(Car)); d_Car_active_2[target] = 1; // Update pointer in Cell. dev_cars[i]->position->car = dev_cars[target]; atomicAdd(&d_num_cars_2, 1); } } } __global__ void kernel_compact_swap_pointers() { { auto *tmp = dev_cars; dev_cars = dev_cars_2; dev_cars_2 = tmp; } { auto *tmp = d_Car_active; d_Car_active = d_Car_active_2; d_Car_active_2 = tmp; } d_num_cars = d_num_cars_2; } void compact_car_array() { int zero = 0; hipMemcpyToSymbol(d_num_cars_2, &zero, sizeof(int), 0, hipMemcpyHostToDevice); hipMemcpyFromSymbol(&host_num_cars, d_num_cars, sizeof(int), 0, hipMemcpyDeviceToHost); // TODO: Prefix sum broken for num_objects < 256. auto prefix_sum_size = host_num_cars < 256 ? 
256 : host_num_cars; size_t temp_size = 3 * kMaxNumCars; hipcub::DeviceScan::ExclusiveSum(h_prefix_sum_temp, temp_size, h_Car_active, h_prefix_sum_output, prefix_sum_size); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( kernel_compact_initialize), dim3((kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize), dim3(kNumBlockSize), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( kernel_compact_cars), dim3((kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize), dim3(kNumBlockSize), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); hipLaunchKernelGGL(( kernel_compact_swap_pointers), dim3(1), dim3(1), 0, 0, ); gpuErrchk(hipDeviceSynchronize()); auto *tmp = h_Car_active; h_Car_active = h_Car_active_2; h_Car_active_2 = tmp; } int main(int /*argc*/, char ** /*argv*/) { hipDeviceSetLimit(hipLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024); allocate_memory(); printf("mem alloc done\n"); create_street_network(); auto time_start = std::chrono::system_clock::now(); for (int i = 0; i < kNumIterations; ++i) { step(); compact_car_array(); } auto time_end = std::chrono::system_clock::now(); auto elapsed = time_end - time_start; auto millis = std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count(); #ifndef NDEBUG printf("Checksum: %i\n", checksum()); #endif // NDEBUG printf("%lu\n", millis); }
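The .hip content above and the .cu content under the next filename are the same traffic benchmark before and after hipify: cudaMalloc/cudaMemcpyToSymbol become hipMalloc/hipMemcpyToSymbol, curand becomes hiprand, and each triple-chevron kernel launch becomes a hipLaunchKernelGGL call with explicit dim3 arguments, shared-memory size, and stream. The following is a minimal stand-alone sketch of that launch mapping; the scale kernel is illustrative and not part of the benchmark.

#include <cuda_runtime.h>
#include <cstdio>

__global__ void scale(float *x, float a, int n) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) x[i] *= a;
}

int main() {
  const int n = 1024, block = 256;
  float *d_x = nullptr;
  cudaMalloc(&d_x, n * sizeof(float));    // hipified form: hipMalloc(&d_x, ...)
  cudaMemset(d_x, 0, n * sizeof(float));  // hipified form: hipMemset(...)

  // Triple-chevron launch; hipify rewrites this as
  // hipLaunchKernelGGL(scale, dim3((n + block - 1) / block), dim3(block), 0, 0,
  //                    d_x, 2.0f, n);
  scale<<<(n + block - 1) / block, block>>>(d_x, 2.0f, n);

  cudaDeviceSynchronize();                // hipified form: hipDeviceSynchronize()
  cudaFree(d_x);                          // hipified form: hipFree(d_x)
  std::printf("done\n");
  return 0;
}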
7439ad1a8993ce868c486346b84bdf21018257fc.cu
#include "traffic.h" static const int kNumBlockSize = 256; static const char kCellTypeNormal = 1; static const char kCellTypeProducer = 2; using IndexT = int; using CellPointerT = IndexT; #include "../dataset.h" __managed__ CellBase **dev_cells; // Need 2 arrays of both, so we can swap. __device__ int *d_Car_active; __device__ int *d_Car_active_2; __managed__ CarBase **dev_cars; __managed__ CarBase **dev_cars_2; // For prefix sum array compaction. __device__ int *d_prefix_sum_temp; __device__ int *d_prefix_sum_output; int *h_prefix_sum_temp; int *h_prefix_sum_output; int *h_Car_active; int *h_Car_active_2; __device__ int d_num_cells; __device__ int d_num_cars; __device__ int d_num_cars_2; int host_num_cells; int host_num_cars; // TODO: Consider migrating to SoaAlloc. TrafficLight *h_traffic_lights; __managed__ TrafficLightBase **d_traffic_lights; // Only for rendering. __device__ int dev_num_cells; __device__ float *dev_Cell_pos_x; __device__ float *dev_Cell_pos_y; __device__ bool *dev_Cell_occupied; float *host_Cell_pos_x; float *host_Cell_pos_y; bool *host_Cell_occupied; float *host_data_Cell_pos_x; float *host_data_Cell_pos_y; bool *host_data_Cell_occupied; __device__ void Car_step_extend_path(IndexT self) { // CONCORD CellBase *cell; CONCORD(cell, dev_cars[self], get_position()); CellBase *next_cell; // CONCORD int vel; CONCORD(vel, dev_cars[self], get_velocity()); for (int i = 0; i < vel; ++i) { // CONCORD bool cond; CONCORD(cond, cell, get_is_target()); ; // CONCORD bool cond2; CONCORD(cond2, cell, is_sink()); if (cond2 || cond) { break; } // CONCORD CONCORD(next_cell, dev_cars[self], next_step(cell)); assert(next_cell != cell); // CONCORD bool cond3; CONCORD(cond3, next_cell, is_free()); if (!cond3) break; cell = next_cell; // CONCORD CONCORD(dev_cars[self], set_path(cell, i)); ; // CONCORD int path_len; CONCORD(path_len, dev_cars[self], get_path_length()); ; // CONCORD CONCORD(dev_cars[self], set_path_length(path_len + 1)); ; } // CONCORD int path_len; CONCORD(path_len, dev_cars[self], get_path_length()); ; // CONCORD CONCORD(dev_cars[self], set_velocity(path_len)); ; } __device__ void Car_step_constraint_velocity(IndexT self) { // This is actually only needed for the very first iteration, because a car // may be positioned on a traffic light cell. // CONCORD int vel; CONCORD(vel, dev_cars[self], get_velocity()); // CONCORD CellBase *cell; CONCORD(cell, dev_cars[self], get_position()); int vel_max; CONCORD(vel_max, cell, get_current_max_velocity()); // CONCORD if (vel > vel_max) { // CONCORD int max_velocity; CONCORD(max_velocity, cell, get_current_max_velocity()); ; // CONCORD CONCORD(dev_cars[self], set_velocity(max_velocity)); ; } int path_index = 0; int distance = 1; // CONCORD int vel3; CONCORD(vel3, dev_cars[self], get_velocity()); while (distance <= vel3) { // Invariant: Movement of up to `distance - 1` many cells at `velocity_` // is allowed. // Now check if next cell can be entered. // CONCORD CellBase *next_cell; CONCORD(next_cell, dev_cars[self], get_path(path_index)); ; // Avoid collision. // CONCORD bool cond4; CONCORD(cond4, next_cell, is_free()); if (!cond4) { // Cannot enter cell. --distance; // CONCORD CONCORD(dev_cars[self], set_velocity(distance)); ; break; } // else: Can enter next cell. // CONCORD int curr_vel; CONCORD(curr_vel, dev_cars[self], get_velocity()); ; // CONCORD int cur_max; CONCORD(cur_max, next_cell, get_current_max_velocity()); if (curr_vel > cur_max) { // Car is too fast for this cell. 
// CONCORD int cur_max2; CONCORD(cur_max2, next_cell, get_current_max_velocity()); if (cur_max2 > distance - 1) { // Even if we slow down, we would still make progress. // CONCORD int max; CONCORD(max, next_cell, get_current_max_velocity()); // CONCORD CONCORD(dev_cars[self], set_velocity(max)); } else { // Do not enter the next cell. --distance; assert(distance >= 0); // CONCORD CONCORD(dev_cars[self], set_velocity(distance)); break; } } ++distance; ++path_index; } --distance; #ifndef NDEBUG // CONCORD int aavel; CONCORD(aavel, dev_cars[self], get_velocity()); for (int i = 0; i < aavel; ++i) { // CONCORD CellBase *path; CellBase *pathi; CellBase *pathi_1; bool cond_free; CONCORD(path, dev_cars[self], get_path(i)); CONCORD(cond_free, path, is_free()); assert(cond_free); CONCORD(pathi, dev_cars[self], get_path(i)); CONCORD(pathi_1, dev_cars[self], get_path(i - 1)); assert(i == 0 || pathi_1 != pathi); } // TODO: Check why the cast is necessary. // CONCORD int ver_; CONCORD(ver_, dev_cars[self], get_velocity()); //assert(distance <= ver_); #endif // NDEBUG } __device__ void Car_step_move(IndexT self) { // CONCORD CellBase *cell; CONCORD(cell, dev_cars[self], get_position()); // CONCORD int vel; CONCORD(vel, dev_cars[self], get_velocity()); for (int i = 0; i < vel; ++i) { // CONCORD CellBase *path; CONCORD(path, dev_cars[self], get_path(i)); assert(path != cell); // CONCORD CONCORD(cell, dev_cars[self], get_path(i)); // CONCORD bool cond21; CONCORD(cond21, cell, is_free()) assert(cond21); // CONCORD CellBase *ptr; CONCORD(ptr, dev_cars[self], get_position()); // CONCORD CONCORD(ptr, release()); // CONCORD CONCORD(cell, occupy(dev_cars[self])); // CONCORD CONCORD(dev_cars[self], set_position(cell)); } // CONCORD CellBase *ptr; CONCORD(ptr, dev_cars[self], get_position()); ; // CONCORD bool cond; CONCORD(cond, ptr, is_sink()); ; // CONCORD bool cond32; CONCORD(cond32, ptr, get_is_target()) if (cond || cond32) { // Remove car from the simulation. Will be added again in the next // iteration. // CONCORD CONCORD(ptr, release()); ; // CONCORD CONCORD(dev_cars[self], set_position(nullptr)); ; d_Car_active[self] = 0; } } __device__ void Car_step_slow_down(IndexT self) { // 20% change of slowdown. 
// CONCORD int vel; CONCORD(vel, dev_cars[self], get_velocity()); ; // CONCORD float rnd; CONCORD(rnd, dev_cars[self], random_uni()); if (rnd < 0.2 && vel > 0) { // CONCORD CONCORD(dev_cars[self], set_velocity(vel - 1)); ; } } __device__ IndexT new_Car(int seed, IndexT cell, int max_velocity) { IndexT idx = atomicAdd(&d_num_cars, 1); assert(idx >= 0 && idx < kMaxNumCars); assert(!d_Car_active[idx]); // CONCORD CONCORD(dev_cars[idx], set_position(dev_cells[cell])); ; // CONCORD CONCORD(dev_cars[idx], set_path_length(0)); ; // CONCORD CONCORD(dev_cars[idx], set_velocity(0)); ; // CONCORD CONCORD(dev_cars[idx], set_max_velocity(max_velocity)); ; d_Car_active[idx] = 1; // CONCORD bool cond; CONCORD(cond, dev_cells[cell], is_free()) assert(cond); // CONCORD CONCORD(dev_cells[cell], occupy(dev_cars[idx])); ; curand_init(seed, 0, 0, &dev_cars[idx]->random_state); return idx; } __device__ void ProducerCell_create_car(IndexT self) { assert(dev_cells[self]->type == kCellTypeProducer); // CONCORD bool cond; CONCORD(cond, dev_cells[self], is_free()); if (cond) { float r = curand_uniform(&dev_cells[self]->random_state); if (r < kCarAllocationRatio) { IndexT new_car = new_Car( /*seed=*/curand(&dev_cells[self]->random_state), /*cell=*/self, /*max_velocity=*/curand(&dev_cells[self]->random_state) % (kMaxVelocity / 2) + kMaxVelocity / 2); } } } __device__ IndexT new_Cell(int max_velocity, float x, float y) { IndexT idx = atomicAdd(&d_num_cells, 1); dev_cells[idx]->car = nullptr; dev_cells[idx]->max_velocity = max_velocity; dev_cells[idx]->current_max_velocity = max_velocity; dev_cells[idx]->num_incoming = 0; dev_cells[idx]->num_outgoing = 0; dev_cells[idx]->x = x; dev_cells[idx]->y = y; dev_cells[idx]->is_target = false; dev_cells[idx]->type = kCellTypeNormal; return idx; } __device__ IndexT new_ProducerCell(int max_velocity, float x, float y, int seed) { IndexT idx = new_Cell(max_velocity, x, y); dev_cells[idx]->type = kCellTypeProducer; curand_init(seed, 0, 0, &dev_cells[idx]->random_state); return idx; } __global__ void kernel_traffic_light_step() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections; i += blockDim.x * gridDim.x) { // CONCORD int num_c; CONCORD(num_c, d_traffic_lights[i], get_num_cells()); if (num_c > 0) { // CONCORD int timer; CONCORD(timer, d_traffic_lights[i], get_timer()); ; // CONCORD int phase_time; CONCORD(phase_time, d_traffic_lights[i], get_phase_time()); ; // CONCORD CONCORD(d_traffic_lights[i], set_timer((timer + 1) % phase_time)); // CONCORD if (d_traffic_lights[i]->get_timer() == 0) { // CONCORD int phase; CONCORD(phase, d_traffic_lights[i], get_phase()); ; // CONCORD CellBase *ptr22; CONCORD(ptr22, d_traffic_lights[i], get_cell(phase)); assert(ptr22 != nullptr); // CONCORD CONCORD(phase, d_traffic_lights[i], get_phase()); ; // CONCORD CellBase *ptr; CONCORD(ptr, d_traffic_lights[i], get_cell(phase)); ; // CONCORD CONCORD(ptr, set_current_max_velocity(0)); ; // CONCORD int phase_2; CONCORD(phase_2, d_traffic_lights[i], get_phase()); ; // CONCORD int num_cells; CONCORD(num_cells, d_traffic_lights[i], get_num_cells()); ; // CONCORD CONCORD(d_traffic_lights[i], set_phase((phase_2 + 1) % num_cells)); // CONCORD CONCORD(phase_2, d_traffic_lights[i], get_phase()); ; // CONCORD CONCORD(ptr, d_traffic_lights[i], get_cell(phase_2)); ; // CONCORD CONCORD(ptr, remove_speed_limit()); ; } } // d_traffic_lights[i]->step(); } } __global__ void kernel_create_nodes() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections; i += blockDim.x * 
gridDim.x) { curandState_t state; curand_init(i, 0, 0, &state); assert(d_nodes[i].x >= 0 && d_nodes[i].x <= 1); assert(d_nodes[i].y >= 0 && d_nodes[i].y <= 1); for (int j = 0; j < d_nodes[i].num_outgoing; ++j) { d_nodes[i].cell_out[j] = new_Cell( /*max_velocity=*/curand(&state) % (kMaxVelocity / 2) + kMaxVelocity / 2, d_nodes[i].x, d_nodes[i].y); } } } __device__ IndexT connect_intersections(IndexT from, Node *target, int incoming_idx, curandState_t &state) { // Create edge. float dx = target->x - dev_cells[from]->x; float dy = target->y - dev_cells[from]->y; float dist = sqrt(dx * dx + dy * dy); int steps = dist / kCellLength; float step_x = dx / steps; float step_y = dy / steps; IndexT prev = from; for (int j = 0; j < steps; ++j) { float new_x = dev_cells[from]->x + j * step_x; float new_y = dev_cells[from]->y + j * step_y; assert(new_x >= 0 && new_x <= 1); assert(new_y >= 0 && new_y <= 1); IndexT next; if (curand_uniform(&state) < kProducerRatio) { next = new_ProducerCell(dev_cells[prev]->max_velocity, new_x, new_y, curand(&state)); } else { next = new_Cell(dev_cells[prev]->max_velocity, new_x, new_y); } if (curand_uniform(&state) < kTargetRatio) { // CONCORD CONCORD(dev_cells[next], set_target()); ; } // CONCORD CONCORD(dev_cells[prev], set_num_outgoing(1)); ; // CONCORD CONCORD(dev_cells[prev], set_outgoing(0, dev_cells[next])); ; // CONCORD CONCORD(dev_cells[next], set_num_incoming(1)); ; // CONCORD CONCORD(dev_cells[next], set_incoming(0, dev_cells[prev])); ; prev = next; } // Connect to all outgoing nodes of target. // CONCORD CONCORD(dev_cells[prev], set_num_outgoing(target->num_outgoing)); ; for (int i = 0; i < target->num_outgoing; ++i) { IndexT next = target->cell_out[i]; // num_incoming set later. // CONCORD CONCORD(dev_cells[prev], set_outgoing(i, dev_cells[next])); ; // CONCORD CONCORD(dev_cells[next], set_incoming(incoming_idx, dev_cells[prev])); ; } return prev; } __global__ void kernel_create_edges() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections; i += blockDim.x * gridDim.x) { curandState_t state; curand_init(i, 0, 0, &state); for (int k = 0; k < d_nodes[i].num_outgoing; ++k) { int target = d_nodes[i].node_out[k]; int target_pos = d_nodes[i].node_out_pos[k]; IndexT last = connect_intersections( d_nodes[i].cell_out[k], &d_nodes[target], target_pos, state); // CONCORD CONCORD(dev_cells[last], set_current_max_velocity(0)); d_nodes[target].cell_in[target_pos] = last; } } } __global__ void kernel_create_traffic_lights() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kNumIntersections; i += blockDim.x * gridDim.x) { new (d_traffic_lights[i]) TrafficLight( /*num_cells=*/d_nodes[i].num_incoming, /*phase_time=*/5); for (int j = 0; j < d_nodes[i].num_outgoing; ++j) { // CONCORD CONCORD(dev_cells[d_nodes[i].cell_out[j]], set_num_incoming(d_nodes[i].num_incoming)); } for (int j = 0; j < d_nodes[i].num_incoming; ++j) { // CONCORD CONCORD(d_traffic_lights[i], set_cell(j, dev_cells[d_nodes[i].cell_in[j]])); ; // CONCORD CONCORD(dev_cells[d_nodes[i].cell_in[j]], set_current_max_velocity(0)); } } } template <class Type, class TypeBase> __global__ void device_alloc(TypeBase **ptr, int size) { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < size; i += blockDim.x * gridDim.x) { ptr[i] = new Type(); assert(ptr[i] != nullptr); } } void create_street_network() { int zero = 0; cudaMemcpyToSymbol(dev_num_cells, &zero, sizeof(int), 0, cudaMemcpyHostToDevice); cudaMalloc(&h_nodes, sizeof(Node) * kNumIntersections); cudaMemcpyToSymbol(d_nodes, 
&h_nodes, sizeof(Node *), 0, cudaMemcpyHostToDevice); cudaMalloc(&d_traffic_lights, sizeof(TrafficLight *) * kNumIntersections); device_alloc<TrafficLight, TrafficLightBase> <<<(kNumIntersections + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>(d_traffic_lights, kNumIntersections); gpuErrchk(cudaDeviceSynchronize()); // Create basic structure on host. create_network_structure(); kernel_create_nodes<<<(kNumIntersections + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>(); gpuErrchk(cudaDeviceSynchronize()); kernel_create_edges<<<(kNumIntersections + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>(); gpuErrchk(cudaDeviceSynchronize()); kernel_create_traffic_lights<<<(kNumIntersections + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>(); gpuErrchk(cudaDeviceSynchronize()); // Allocate helper data structures for rendering. cudaMemcpyFromSymbol(&host_num_cells, d_num_cells, sizeof(int), 0, cudaMemcpyDeviceToHost); cudaMalloc(&host_Cell_pos_x, sizeof(float) * host_num_cells); cudaMemcpyToSymbol(dev_Cell_pos_x, &host_Cell_pos_x, sizeof(float *), 0, cudaMemcpyHostToDevice); cudaMalloc(&host_Cell_pos_y, sizeof(float) * host_num_cells); cudaMemcpyToSymbol(dev_Cell_pos_y, &host_Cell_pos_y, sizeof(float *), 0, cudaMemcpyHostToDevice); cudaMalloc(&host_Cell_occupied, sizeof(bool) * host_num_cells); cudaMemcpyToSymbol(dev_Cell_occupied, &host_Cell_occupied, sizeof(bool *), 0, cudaMemcpyHostToDevice); host_data_Cell_pos_x = (float *)malloc(sizeof(float) * host_num_cells); host_data_Cell_pos_y = (float *)malloc(sizeof(float) * host_num_cells); host_data_Cell_occupied = (bool *)malloc(sizeof(bool) * host_num_cells); #ifndef NDEBUG printf("Number of cells: %i\n", host_num_cells); #endif // NDEBUG } void step_traffic_lights() { // TODO: Consider migrating this to SoaAlloc. 
kernel_traffic_light_step<<<(kNumIntersections + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>(); gpuErrchk(cudaDeviceSynchronize()); } __device__ void Cell_add_to_rendering_array(IndexT self) { int idx = atomicAdd(&dev_num_cells, 1); dev_Cell_pos_x[idx] = dev_cells[self]->x; dev_Cell_pos_y[idx] = dev_cells[self]->y; // CONCORD CONCORD(dev_Cell_occupied[idx], !dev_cells[self], is_free()); ; } __global__ void kernel_Cell_add_to_rendering_array() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cells; i += blockDim.x * gridDim.x) { Cell_add_to_rendering_array(i); } } void transfer_data() { int zero = 0; cudaMemcpyToSymbol(dev_num_cells, &zero, sizeof(int), 0, cudaMemcpyHostToDevice); kernel_Cell_add_to_rendering_array<<<(host_num_cells + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>(); gpuErrchk(cudaDeviceSynchronize()); cudaMemcpy(host_data_Cell_pos_x, host_Cell_pos_x, sizeof(float) * host_num_cells, cudaMemcpyDeviceToHost); cudaMemcpy(host_data_Cell_pos_y, host_Cell_pos_y, sizeof(float) * host_num_cells, cudaMemcpyDeviceToHost); cudaMemcpy(host_data_Cell_occupied, host_Cell_occupied, sizeof(bool) * host_num_cells, cudaMemcpyDeviceToHost); gpuErrchk(cudaDeviceSynchronize()); } __global__ void kernel_ProducerCell_create_car() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cells; i += blockDim.x * gridDim.x) { if (dev_cells[i]->type == kCellTypeProducer) { ProducerCell_create_car(i); } } } __device__ void Car_step_prepare_path(IndexT self) { // CONCORD CONCORD(dev_cars[self], step_initialize_iteration()); ; // CONCORD CONCORD(dev_cars[self], step_accelerate()); ; Car_step_extend_path(self); Car_step_constraint_velocity(self); Car_step_slow_down(self); } __global__ void kernel_Car_step_prepare_path() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars; i += blockDim.x * gridDim.x) { if (d_Car_active[i]) { Car_step_prepare_path(i); } } } __global__ void kernel_fill_car_indices() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars; i += blockDim.x * gridDim.x) { d_Car_active[i] = 0; d_Car_active_2[i] = 0; } } __global__ void kernel_Car_step_move() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars; i += blockDim.x * gridDim.x) { if (d_Car_active[i]) { Car_step_move(i); } } } __device__ int d_checksum; __global__ void kernel_compute_checksum() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars; i += blockDim.x * gridDim.x) { if (d_Car_active[i]) { atomicAdd(&d_checksum, 1); } } } int checksum() { int zero = 0; cudaMemcpyToSymbol(d_checksum, &zero, sizeof(int), 0, cudaMemcpyHostToDevice); kernel_compute_checksum<<<128, 128>>>(); int result; cudaMemcpyFromSymbol(&result, d_checksum, sizeof(int), 0, cudaMemcpyDeviceToHost); return result; } void step() { kernel_ProducerCell_create_car<<<(host_num_cells + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>(); gpuErrchk(cudaDeviceSynchronize()); cudaMemcpyFromSymbol(&host_num_cars, d_num_cars, sizeof(int), 0, cudaMemcpyDeviceToHost); step_traffic_lights(); kernel_Car_step_prepare_path<<< (host_num_cars + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>(); gpuErrchk(cudaDeviceSynchronize()); kernel_Car_step_move<<<(host_num_cars + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>(); gpuErrchk(cudaDeviceSynchronize()); } void allocate_memory() { cudaMalloc(&dev_cells, sizeof(Cell *) * kMaxNumCells); device_alloc<Cell, CellBase> <<<(kMaxNumCells + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>( dev_cells, kMaxNumCells); 
gpuErrchk(cudaDeviceSynchronize()); cudaMalloc(&dev_cars, sizeof(Car *) * kMaxNumCars); cudaMalloc(&dev_cars_2, sizeof(Car *) * kMaxNumCars); device_alloc<Car, CarBase> <<<(kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>( dev_cars, kMaxNumCars); gpuErrchk(cudaDeviceSynchronize()); device_alloc<Car> <<<(kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>( dev_cars_2, kMaxNumCars); gpuErrchk(cudaDeviceSynchronize()); cudaMalloc(&h_Car_active, sizeof(int) * kMaxNumCars); cudaMemcpyToSymbol(d_Car_active, &h_Car_active, sizeof(int *), 0, cudaMemcpyHostToDevice); // Car *h_cars_2; // cudaMalloc(&h_cars_2, sizeof(Car) * kMaxNumCars); // cudaMemcpyToSymbol(dev_cars_2, &h_cars_2, sizeof(Car *), 0, // cudaMemcpyHostToDevice); cudaMalloc(&h_Car_active_2, sizeof(int) * kMaxNumCars); cudaMemcpyToSymbol(d_Car_active_2, &h_Car_active_2, sizeof(int *), 0, cudaMemcpyHostToDevice); cudaMalloc(&h_prefix_sum_temp, 3 * sizeof(int) * kMaxNumCars); cudaMemcpyToSymbol(d_prefix_sum_temp, &h_prefix_sum_temp, sizeof(int *), 0, cudaMemcpyHostToDevice); cudaMalloc(&h_prefix_sum_output, sizeof(int) * kMaxNumCars); cudaMemcpyToSymbol(d_prefix_sum_output, &h_prefix_sum_output, sizeof(int *), 0, cudaMemcpyHostToDevice); kernel_fill_car_indices<<<128, 128>>>(); gpuErrchk(cudaDeviceSynchronize()); int zero = 0; cudaMemcpyToSymbol(d_num_cells, &zero, sizeof(int), 0, cudaMemcpyHostToDevice); cudaMemcpyToSymbol(d_num_cars, &zero, sizeof(int), 0, cudaMemcpyHostToDevice); } __global__ void kernel_compact_initialize() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < kMaxNumCars; i += blockDim.x * gridDim.x) { d_Car_active_2[i] = 0; } } __global__ void kernel_compact_cars() { for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < d_num_cars; i += blockDim.x * gridDim.x) { if (d_Car_active[i]) { int target = d_prefix_sum_output[i]; // Copy i --> target. // dev_cars_2[target] = dev_cars[i]; memcpy(dev_cars_2[target], dev_cars[i], sizeof(Car)); d_Car_active_2[target] = 1; // Update pointer in Cell. dev_cars[i]->position->car = dev_cars[target]; atomicAdd(&d_num_cars_2, 1); } } } __global__ void kernel_compact_swap_pointers() { { auto *tmp = dev_cars; dev_cars = dev_cars_2; dev_cars_2 = tmp; } { auto *tmp = d_Car_active; d_Car_active = d_Car_active_2; d_Car_active_2 = tmp; } d_num_cars = d_num_cars_2; } void compact_car_array() { int zero = 0; cudaMemcpyToSymbol(d_num_cars_2, &zero, sizeof(int), 0, cudaMemcpyHostToDevice); cudaMemcpyFromSymbol(&host_num_cars, d_num_cars, sizeof(int), 0, cudaMemcpyDeviceToHost); // TODO: Prefix sum broken for num_objects < 256. auto prefix_sum_size = host_num_cars < 256 ? 
256 : host_num_cars; size_t temp_size = 3 * kMaxNumCars; cub::DeviceScan::ExclusiveSum(h_prefix_sum_temp, temp_size, h_Car_active, h_prefix_sum_output, prefix_sum_size); gpuErrchk(cudaDeviceSynchronize()); kernel_compact_initialize<<< (kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>(); gpuErrchk(cudaDeviceSynchronize()); kernel_compact_cars<<<(kMaxNumCars + kNumBlockSize - 1) / kNumBlockSize, kNumBlockSize>>>(); gpuErrchk(cudaDeviceSynchronize()); kernel_compact_swap_pointers<<<1, 1>>>(); gpuErrchk(cudaDeviceSynchronize()); auto *tmp = h_Car_active; h_Car_active = h_Car_active_2; h_Car_active_2 = tmp; } int main(int /*argc*/, char ** /*argv*/) { cudaDeviceSetLimit(cudaLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024); allocate_memory(); printf("mem alloc done\n"); create_street_network(); auto time_start = std::chrono::system_clock::now(); for (int i = 0; i < kNumIterations; ++i) { step(); compact_car_array(); } auto time_end = std::chrono::system_clock::now(); auto elapsed = time_end - time_start; auto millis = std::chrono::duration_cast<std::chrono::microseconds>(elapsed).count(); #ifndef NDEBUG printf("Checksum: %i\n", checksum()); #endif // NDEBUG printf("%lu\n", millis); }
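compact_car_array above pre-allocates a temporary buffer once up front and passes a fixed size straight to cub::DeviceScan::ExclusiveSum. The canonical CUB pattern instead queries the required temporary-storage size with a first call whose storage pointer is null, allocates exactly that much, and repeats the call; the sketch below shows that two-phase usage with an active-car-style 0/1 mask, and all buffer names are illustrative rather than taken from the benchmark.

#include <cub/cub.cuh>
#include <cuda_runtime.h>
#include <cstdio>

int main() {
  const int n = 8;
  int h_in[n] = {1, 0, 1, 1, 0, 0, 1, 1};  // 0/1 "active" mask
  int *d_in, *d_out;
  cudaMalloc(&d_in, n * sizeof(int));
  cudaMalloc(&d_out, n * sizeof(int));
  cudaMemcpy(d_in, h_in, n * sizeof(int), cudaMemcpyHostToDevice);

  // Phase 1: with a null temp pointer, CUB only reports the required bytes.
  void *d_temp = nullptr;
  size_t temp_bytes = 0;
  cub::DeviceScan::ExclusiveSum(d_temp, temp_bytes, d_in, d_out, n);
  cudaMalloc(&d_temp, temp_bytes);

  // Phase 2: perform the scan; d_out[i] is then the compacted index of
  // element i when d_in is an active mask, as in compact_car_array above.
  cub::DeviceScan::ExclusiveSum(d_temp, temp_bytes, d_in, d_out, n);

  int h_out[n];
  cudaMemcpy(h_out, d_out, n * sizeof(int), cudaMemcpyDeviceToHost);
  for (int i = 0; i < n; ++i) std::printf("%d ", h_out[i]);
  std::printf("\n");

  cudaFree(d_in);
  cudaFree(d_out);
  cudaFree(d_temp);
  return 0;
}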
17ae3552ff6f79c9019fccbbaa878219e0cd78bc.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include<stdio.h> #include<cuda_runtime.h> /* This is a simple vector addtion program. C = A + B */ /* CUDA Kernel Device Code starts with __global__ keyword */ __global__ void vadd( float *A, float *B, float *C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N){ C[i] = A[i] + B[i]; } } /*Host function*/ int main() { //call for error code to check CUDA calls hipError_t err = hipSuccess; //vector length int N = 50000; size_t size = N * sizeof(float); printf("Vector addition of %d elements.\n", N); //Allocate memory for host float *h_A = (float *)malloc(size); float *h_B = (float *)malloc(size); float *h_C = (float *)malloc(size); //Error check //Initialized value for host vectors for(int i=0; i< N; ++i){ h_A[i] = rand() / (float)RAND_MAX; h_B[i] = rand() / (float)RAND_MAX; h_C[i] = 0.0f; //good practice in numerical method } //allocate device vectors float *d_A = NULL; err = hipMalloc( (void **)&d_A, size ); //err to check for error float *d_B = NULL; err = hipMalloc( (void **)&d_B, size ); //err to check for error float *d_C = NULL; err = hipMalloc( (void **)&d_C, size ); //err to check for error //No error check; could be fatal but let skip it for a while //Copy host vectors h_A, h_B to device vectors d_A, d_B printf("Copy input data from host memory to the CUDA device.\n"); err = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice); err = hipMemcpy(d_B, h_B, size, hipMemcpyHostToDevice); //Launch the vector addition kernel int threadsperblock = 256; int blockspergrid = (N + threadsperblock-1) / threadsperblock; printf("Launching CUDA kernel with %d blocks and %d threads.\n", blockspergrid, threadsperblock); hipLaunchKernelGGL(( vadd), dim3(blockspergrid), dim3(threadsperblock) , 0, 0, d_A, d_B, d_C, N); //the kernel call err = hipGetLastError(); //minimal checking; at least need to know kernel success. if(err != hipSuccess) { printf("Failed to launch vadd kernel, error code %s.\n", hipGetErrorString(err) ); exit(EXIT_FAILURE); } //Copy device result vector to host result vector in host memory printf("Copying output data from CUDA device to host memory.\n"); err = hipMemcpy(h_C, d_C, size, hipMemcpyDeviceToHost); if(err != hipSuccess) { printf("Failed to copy data from device, error %s.\n", hipGetErrorString(err) ); exit(EXIT_FAILURE); } //verify result for(int i=0; i<N; ++i){ if( fabs(h_A[i] + h_B[i] - h_C[i] ) > 1.0e-5 ){ printf("Result verification failed at element %d \n", i); exit(EXIT_FAILURE); } } printf("Kernel success!\n"); //Free device global memory err = hipFree(d_A); err = hipFree(d_B); err = hipFree(d_C); //Free host memory free(h_A); free(h_B); free(h_C); return 0; }
17ae3552ff6f79c9019fccbbaa878219e0cd78bc.cu
#include<stdio.h> #include<cuda_runtime.h> /* This is a simple vector addtion program. C = A + B */ /* CUDA Kernel Device Code starts with __global__ keyword */ __global__ void vadd( float *A, float *B, float *C, int N) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < N){ C[i] = A[i] + B[i]; } } /*Host function*/ int main() { //call for error code to check CUDA calls cudaError_t err = cudaSuccess; //vector length int N = 50000; size_t size = N * sizeof(float); printf("Vector addition of %d elements.\n", N); //Allocate memory for host float *h_A = (float *)malloc(size); float *h_B = (float *)malloc(size); float *h_C = (float *)malloc(size); //Error check //Initialized value for host vectors for(int i=0; i< N; ++i){ h_A[i] = rand() / (float)RAND_MAX; h_B[i] = rand() / (float)RAND_MAX; h_C[i] = 0.0f; //good practice in numerical method } //allocate device vectors float *d_A = NULL; err = cudaMalloc( (void **)&d_A, size ); //err to check for error float *d_B = NULL; err = cudaMalloc( (void **)&d_B, size ); //err to check for error float *d_C = NULL; err = cudaMalloc( (void **)&d_C, size ); //err to check for error //No error check; could be fatal but let skip it for a while //Copy host vectors h_A, h_B to device vectors d_A, d_B printf("Copy input data from host memory to the CUDA device.\n"); err = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice); err = cudaMemcpy(d_B, h_B, size, cudaMemcpyHostToDevice); //Launch the vector addition kernel int threadsperblock = 256; int blockspergrid = (N + threadsperblock-1) / threadsperblock; printf("Launching CUDA kernel with %d blocks and %d threads.\n", blockspergrid, threadsperblock); vadd<<<blockspergrid, threadsperblock >>>(d_A, d_B, d_C, N); //the kernel call err = cudaGetLastError(); //minimal checking; at least need to know kernel success. if(err != cudaSuccess) { printf("Failed to launch vadd kernel, error code %s.\n", cudaGetErrorString(err) ); exit(EXIT_FAILURE); } //Copy device result vector to host result vector in host memory printf("Copying output data from CUDA device to host memory.\n"); err = cudaMemcpy(h_C, d_C, size, cudaMemcpyDeviceToHost); if(err != cudaSuccess) { printf("Failed to copy data from device, error %s.\n", cudaGetErrorString(err) ); exit(EXIT_FAILURE); } //verify result for(int i=0; i<N; ++i){ if( fabs(h_A[i] + h_B[i] - h_C[i] ) > 1.0e-5 ){ printf("Result verification failed at element %d \n", i); exit(EXIT_FAILURE); } } printf("Kernel success!\n"); //Free device global memory err = cudaFree(d_A); err = cudaFree(d_B); err = cudaFree(d_C); //Free host memory free(h_A); free(h_B); free(h_C); return 0; }
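Both the .hip and .cu versions of this vector-addition sample capture the return codes of the allocation and copy calls into err but, as their own comments note, never inspect them until the kernel launch. A small checking macro is the usual remedy; the sketch below is a generic example under that assumption, and CUDA_CHECK is an assumed name rather than something the sample defines.

#include <cuda_runtime.h>
#include <cstdio>
#include <cstdlib>

// Wrap every runtime call so a failure aborts with file/line context.
#define CUDA_CHECK(call)                                              \
  do {                                                                \
    cudaError_t err_ = (call);                                        \
    if (err_ != cudaSuccess) {                                        \
      std::fprintf(stderr, "CUDA error %s at %s:%d\n",                \
                   cudaGetErrorString(err_), __FILE__, __LINE__);     \
      std::exit(EXIT_FAILURE);                                        \
    }                                                                 \
  } while (0)

int main() {
  float *d_A = nullptr;
  CUDA_CHECK(cudaMalloc(&d_A, 1024 * sizeof(float)));
  CUDA_CHECK(cudaMemset(d_A, 0, 1024 * sizeof(float)));
  CUDA_CHECK(cudaFree(d_A));
  std::printf("all CUDA calls checked\n");
  return 0;
}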
3b7ad426ec294884cffc9039e12a9447fdc432c8.hip
// !!! This is a file automatically generated by hipify!!! /** * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * Mobvoi Inc. (authors: Fangjun Kuang) * Guoguo Chen * * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include <limits> #include <sstream> #include <utility> #include <vector> #include "k2/csrc/array.h" #include "k2/csrc/context.h" #include "k2/csrc/fsa.h" #include "k2/csrc/fsa_utils.h" #include "k2/csrc/math.h" #include "k2/csrc/ragged.h" namespace k2 { // field separator within a line for a text form FSA static constexpr const char *kDelim = " \t"; // Convert a string to an integer. Abort the program on failure. static int32_t StringToInt(const std::string &s) { NVTX_RANGE(K2_FUNC); K2_CHECK(!s.empty()); bool ok = false; char *p = nullptr; // std::strtol requires a `long` type long n = std::strtol(s.c_str(), &p, 10); // NOLINT if (*p == '\0') ok = true; auto res = static_cast<int32_t>(n); if (n != res) ok = false; // out of range K2_CHECK(ok) << "Failed to convert " << s << " to an integer"; return res; } // Convert a string to a float. Abort the program on failure. // TODO(guoguo): We may run into locale problems, with comma vs. period for // decimals. We have to test if the C code will behave the same // w.r.t. locale as Python does. static float StringToFloat(const std::string &s) { NVTX_RANGE(K2_FUNC); K2_CHECK(!s.empty()); char *p = nullptr; float f = std::strtof(s.c_str(), &p); if (*p != '\0') K2_LOG(FATAL) << "Failed to convert " << s << " to a float"; return f; } // Trim leading and trailing spaces of a string. static void TrimString(std::string *s) { NVTX_RANGE(K2_FUNC); K2_CHECK_NE(s, nullptr); auto not_space = [](int32_t c) -> bool { return std::isspace(c) == 0; }; s->erase(s->begin(), std::find_if(s->begin(), s->end(), not_space)); s->erase(std::find_if(s->rbegin(), s->rend(), not_space).base(), s->end()); } /* Split a string to a vector of strings using a set of delimiters. Example usage: @code std::string in = "1 2 3"; const char *delim = " \t"; std::vector<std::string> out; SplitStringToVector(in, delim, &out); @endcode @param [in] in The input string to be split. @param [in] delim A string of delimiters. @param [out] out It saves the split result. */ static void SplitStringToVector(const std::string &in, const char *delim, std::vector<std::string> *out) { NVTX_RANGE(K2_FUNC); K2_CHECK_NE(delim, nullptr); K2_CHECK_NE(out, nullptr); out->clear(); std::size_t start = 0; while (true) { auto pos = in.find_first_of(delim, start); if (pos == std::string::npos) break; auto sub = in.substr(start, pos - start); start = pos + 1; TrimString(&sub); if (!sub.empty()) out->emplace_back(std::move(sub)); } if (start < in.size()) { auto sub = in.substr(start); TrimString(&sub); if (!sub.empty()) out->emplace_back(std::move(sub)); } } /* Create an acceptor from a stream, assuming the acceptor is in the k2 format: src_state1 dest_state1 label1 score1 src_state2 dest_state2 label2 score2 ... ... final_state The source states will be in non-descending order, and the final state does not bear a cost/score -- we put the cost/score on the arc that connects to the final state and set its label to -1. @param [in] is The input stream that contains the acceptor. @return It returns an Fsa on CPU. */ static Fsa K2AcceptorFromStream(std::istringstream &is) { NVTX_RANGE(K2_FUNC); std::vector<Arc> arcs; std::vector<std::string> splits; std::string line; bool finished = false; // when the final state is read, set it to true. 
while (std::getline(is, line)) { SplitStringToVector(line, kDelim, &splits); // splits is cleared in the function if (splits.empty()) continue; // this is an empty line K2_CHECK_EQ(finished, false); auto num_fields = splits.size(); if (num_fields == 4u) { // 0 1 2 3 // src_state dest_state label score int32_t src_state = StringToInt(splits[0]); int32_t dest_state = StringToInt(splits[1]); int32_t symbol = StringToInt(splits[2]); float score = StringToFloat(splits[3]); arcs.emplace_back(src_state, dest_state, symbol, score); } else if (num_fields == 1u) { // 0 // final_state (void)StringToInt(splits[0]); // this is a final state finished = true; // set finish } else { K2_LOG(FATAL) << "Invalid line: " << line << "\nk2 acceptor expects a line with 1 (final_state) or " "4 (src_state dest_state label score) fields"; } } K2_CHECK_EQ(finished, true) << "The last line should be the final state"; bool error = true; Array1<Arc> array(GetCpuContext(), arcs); auto fsa = FsaFromArray1(array, &error); K2_CHECK_EQ(error, false); return fsa; } /* Create a transducer from a stream, assuming the transducer is in the K2 format: src_state1 dest_state1 label1 aux_label1 score1 src_state2 dest_state2 label2 aux_label2 score2 ... ... final_state The source states will be in non-descending order, and the final state does not bear a cost/score -- we put the cost/score on the arc that connects to the final state and set its label to -1. @param [in] is The input stream that contains the transducer. @return It returns an Fsa on CPU. */ static Fsa K2TransducerFromStream(std::istringstream &is, Array1<int32_t> *aux_labels) { NVTX_RANGE(K2_FUNC); K2_CHECK(aux_labels != nullptr); std::vector<int32_t> aux_labels_internal; std::vector<Arc> arcs; std::vector<std::string> splits; std::string line; bool finished = false; // when the final state is read, set it to true. while (std::getline(is, line)) { SplitStringToVector(line, kDelim, &splits); // splits is cleared in the function if (splits.empty()) continue; // this is an empty line K2_CHECK_EQ(finished, false); auto num_fields = splits.size(); if (num_fields == 5u) { // 0 1 2 3 4 // src_state dest_state label aux_label score int32_t src_state = StringToInt(splits[0]); int32_t dest_state = StringToInt(splits[1]); int32_t symbol = StringToInt(splits[2]); int32_t aux_label = StringToInt(splits[3]); float score = StringToFloat(splits[4]); arcs.emplace_back(src_state, dest_state, symbol, score); aux_labels_internal.push_back(aux_label); } else if (num_fields == 1u) { // 0 // final_state (void)StringToInt(splits[0]); finished = true; // set finish } else { K2_LOG(FATAL) << "Invalid line: " << line << "\nk2 transducer expects a line with 1 (final_state) or " "5 (src_state dest_state label aux_label score) fields"; } } K2_CHECK_EQ(finished, true) << "The last line should be the final state"; auto cpu_context = GetCpuContext(); *aux_labels = Array1<int32_t>(cpu_context, aux_labels_internal); Array1<Arc> array(cpu_context, arcs); bool error = true; auto fsa = FsaFromArray1(array, &error); K2_CHECK_EQ(error, false); return fsa; } /* Create an acceptor from a stream, assuming the acceptor is in the OpenFST format: src_state1 dest_state1 label1 score1 src_state2 dest_state2 label2 score2 ... ... final_state final_score We will negate the cost/score when we read them in. Also note, OpenFST may omit the cost/score if it is 0.0. We always create the super final state. 
If there are final state(s) in the original FSA, then we add arc(s) from the original final state(s) to the super final state, with the (negated) old final state cost/score as its cost/score, and -1 as its label. @param [in] is The input stream that contains the acceptor. @return It returns an Fsa on CPU. */ static Fsa OpenFstAcceptorFromStream(std::istringstream &is) { NVTX_RANGE(K2_FUNC); std::vector<Arc> arcs; std::vector<std::vector<Arc>> state_to_arcs; // indexed by states std::vector<std::string> splits; std::string line; int32_t max_state = -1; int32_t num_arcs = 0; std::vector<int32_t> original_final_states; std::vector<float> original_final_weights; while (std::getline(is, line)) { SplitStringToVector(line, kDelim, &splits); // splits is cleared in the function if (splits.empty()) continue; // this is an empty line auto num_fields = splits.size(); if (num_fields == 3u || num_fields == 4u) { // 0 1 2 // src_state dest_state label // // or // // 0 1 2 3 // src_state dest_state label score int32_t src_state = StringToInt(splits[0]); int32_t dest_state = StringToInt(splits[1]); int32_t symbol = StringToInt(splits[2]); float score = 0.0f; if (num_fields == 4u) score = -1.0f * StringToFloat(splits[3]); // Add the arc to "state_to_arcs". ++num_arcs; max_state = ::max(max_state, ::max(src_state, dest_state)); if (static_cast<int32_t>(state_to_arcs.size()) <= src_state) state_to_arcs.resize(src_state + 1); state_to_arcs[src_state].emplace_back(src_state, dest_state, symbol, score); } else if (num_fields == 1u || num_fields == 2u) { // 0 1 // final_state score float score = 0.0f; if (num_fields == 2u) score = -1.0f * StringToFloat(splits[1]); original_final_states.push_back(StringToInt(splits[0])); original_final_weights.push_back(score); max_state = ::max(max_state, original_final_states.back()); } else { K2_LOG(FATAL) << "Invalid line: " << line << "\nOpenFST acceptor expects a line with 1 (final_state)," " 2 (final_state score), 3 (src_state dest_state label) " "or 4 (src_state dest_state label score) fields."; } } K2_CHECK(is.eof()); // Post processing on final states. If there are final state(s) in the // original FSA, we add the super final state as well as arc(s) from original // final state(s) to the super final state. Otherwise, the super final state // will be added by FsaFromArray1 (since there's no arc with label // kFinalSymbol). if (original_final_states.size() > 0) { K2_CHECK_EQ(original_final_states.size(), original_final_weights.size()); int32_t super_final_state = max_state + 1; state_to_arcs.resize(super_final_state); for (std::size_t i = 0; i != original_final_states.size(); ++i) { state_to_arcs[original_final_states[i]].emplace_back( original_final_states[i], super_final_state, -1, // kFinalSymbol original_final_weights[i]); ++num_arcs; } } // Move arcs from "state_to_arcs" to "arcs". int32_t arc_index = 0; arcs.resize(num_arcs); for (std::size_t s = 0; s < state_to_arcs.size(); ++s) { for (std::size_t a = 0; a < state_to_arcs[s].size(); ++a) { K2_CHECK_GT(num_arcs, arc_index); arcs[arc_index] = state_to_arcs[s][a]; ++arc_index; } } K2_CHECK_EQ(num_arcs, arc_index); bool error = true; Array1<Arc> array(GetCpuContext(), arcs); // FsaFromArray1 will add a super final state if the original FSA doesn't have // a final state. 
auto fsa = FsaFromArray1(array, &error); K2_CHECK_EQ(error, false); return fsa; } /* Create a transducer from a stream, assuming the transducer is in the OpenFST format: src_state1 dest_state1 label1 aux_label1 score1 src_state2 dest_state2 label2 aux_label2 score2 ... ... final_state final_score We will negate the cost/score when we read them in. Also note, OpenFST may omit the cost/score if it is 0.0. We always create the super final state. If there are final state(s) in the original FST, then we add arc(s) from the original final state(s) to the super final state, with the (negated) old final state cost/score as its cost/score, -1 as its label and -1 as its aux_label. @param [in] is The input stream that contains the transducer. @return It returns an Fsa on CPU. */ static Fsa OpenFstTransducerFromStream(std::istringstream &is, Array1<int32_t> *aux_labels) { NVTX_RANGE(K2_FUNC); K2_CHECK(aux_labels != nullptr); std::vector<std::vector<int32_t>> state_to_aux_labels; // indexed by states std::vector<std::vector<Arc>> state_to_arcs; // indexed by states std::vector<int32_t> aux_labels_internal; std::vector<Arc> arcs; std::vector<std::string> splits; std::string line; int32_t max_state = -1; int32_t num_arcs = 0; std::vector<int32_t> original_final_states; std::vector<float> original_final_weights; while (std::getline(is, line)) { SplitStringToVector(line, kDelim, &splits); // splits is cleared in the function if (splits.empty()) continue; // this is an empty line auto num_fields = splits.size(); if (num_fields == 4u || num_fields == 5u) { // 0 1 2 3 // src_state dest_state label aux_label // // or // // 0 1 2 3 4 // src_state dest_state label aux_label score int32_t src_state = StringToInt(splits[0]); int32_t dest_state = StringToInt(splits[1]); int32_t symbol = StringToInt(splits[2]); int32_t aux_label = StringToInt(splits[3]); float score = 0.0f; if (num_fields == 5u) score = -1.0f * StringToFloat(splits[4]); // Add the arc to "state_to_arcs", and aux_label to "state_to_aux_labels" ++num_arcs; max_state = ::max(max_state, ::max(src_state, dest_state)); if (static_cast<int32_t>(state_to_arcs.size()) <= src_state) { state_to_arcs.resize(src_state + 1); state_to_aux_labels.resize(src_state + 1); } state_to_arcs[src_state].emplace_back(src_state, dest_state, symbol, score); state_to_aux_labels[src_state].push_back(aux_label); } else if (num_fields == 1u || num_fields == 2u) { // 0 // final_state // // or // // 0 1 // final_state score // There could be multiple final states, so we first have to collect all // the final states, and then work out the super final state. float score = 0.0f; if (num_fields == 2u) score = -1.0f * StringToFloat(splits[1]); original_final_states.push_back(StringToInt(splits[0])); original_final_weights.push_back(score); max_state = ::max(max_state, original_final_states.back()); } else { K2_LOG(FATAL) << "Invalid line: " << line << "\nOpenFST transducer expects a line with " "1 (final_state), 2 (final_state score), " "4 (src_state dest_state label aux_label) or " "5 (src_state dest_state label aux_label score) fields."; } } K2_CHECK(is.eof()); // Post processing on final states. If there are final state(s) in the // original FST, we add the super final state as well as arc(s) from original // final state(s) to the super final state. Otherwise, the super final state // will be added by FsaFromArray1 (since there's no arc with label // kFinalSymbol). 
if (original_final_states.size() > 0) { K2_CHECK_EQ(original_final_states.size(), original_final_weights.size()); int32_t super_final_state = max_state + 1; state_to_arcs.resize(super_final_state); state_to_aux_labels.resize(super_final_state); for (std::size_t i = 0; i != original_final_states.size(); ++i) { state_to_arcs[original_final_states[i]].emplace_back( original_final_states[i], super_final_state, -1, // kFinalSymbol original_final_weights[i]); state_to_aux_labels[original_final_states[i]].push_back( -1); // kFinalSymbol ++num_arcs; } } // Move arcs from "state_to_arcs" to "arcs", and aux_labels from // "state_to_aux_labels" to "aux_labels_internal" int32_t arc_index = 0; arcs.resize(num_arcs); aux_labels_internal.resize(num_arcs); K2_CHECK_EQ(state_to_arcs.size(), state_to_aux_labels.size()); for (std::size_t s = 0; s < state_to_arcs.size(); ++s) { K2_CHECK_EQ(state_to_arcs[s].size(), state_to_aux_labels[s].size()); for (std::size_t a = 0; a < state_to_arcs[s].size(); ++a) { K2_CHECK_GT(num_arcs, arc_index); arcs[arc_index] = state_to_arcs[s][a]; aux_labels_internal[arc_index] = state_to_aux_labels[s][a]; ++arc_index; } } K2_CHECK_EQ(num_arcs, arc_index); auto cpu_context = GetCpuContext(); *aux_labels = Array1<int32_t>(cpu_context, aux_labels_internal); Array1<Arc> array(cpu_context, arcs); bool error = true; // FsaFromArray1 will add a super final state if the original FSA doesn't have // a final state. auto fsa = FsaFromArray1(array, &error); K2_CHECK_EQ(error, false); return fsa; } Fsa FsaFromString(const std::string &s, bool openfst /*= false*/, Array1<int32_t> *aux_labels /*= nullptr*/) { NVTX_RANGE(K2_FUNC); std::istringstream is(s); K2_CHECK(is); if (openfst == false && aux_labels == nullptr) return K2AcceptorFromStream(is); else if (openfst == false && aux_labels != nullptr) return K2TransducerFromStream(is, aux_labels); else if (openfst == true && aux_labels == nullptr) return OpenFstAcceptorFromStream(is); else if (openfst == true && aux_labels != nullptr) return OpenFstTransducerFromStream(is, aux_labels); return Fsa(); // unreachable code } std::string FsaToString(const Fsa &fsa, bool openfst /*= false*/, const Array1<int32_t> *aux_labels /*= nullptr*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsa.NumAxes(), 2); if (fsa.Context()->GetDeviceType() != kCpu) { Fsa _fsa = fsa.To(GetCpuContext()); Array1<int32_t> _aux_labels; if (aux_labels) _aux_labels = aux_labels->To(_fsa.Context()); return FsaToString(_fsa, openfst, aux_labels ? 
&_aux_labels : nullptr); } K2_CHECK_EQ(fsa.Context()->GetDeviceType(), kCpu); const Array1<int32_t> &row_splits = fsa.shape.RowSplits(1); const Array1<Arc> &arcs = fsa.values; const int32_t *p = nullptr; if (aux_labels != nullptr) { K2_CHECK(IsCompatible(fsa, *aux_labels)); K2_CHECK_EQ(aux_labels->Dim(), arcs.Dim()); p = aux_labels->Data(); } float scale = 1; if (openfst) scale = -1; std::ostringstream os; int32_t n = arcs.Dim(); char sep = ' '; char line_sep = '\n'; for (int32_t i = 0; i != n; ++i) { const auto &arc = arcs[i]; os << arc.src_state << sep << arc.dest_state << sep << arc.label << sep; if (p != nullptr) os << p[i] << sep; os << (scale * arc.score) << line_sep; } os << (fsa.shape.Dim0() - 1) << line_sep; return os.str(); } Array1<int32_t> GetDestStates(FsaVec &fsas, bool as_idx01) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_arcs = fsas.NumElements(); Array1<int32_t> ans(c, num_arcs); const Arc *arcs_data = fsas.values.Data(); int32_t *ans_data = ans.Data(); if (!as_idx01) { K2_EVAL( c, num_arcs, lambda_set_dest_states1, (int32_t arc_idx012) { ans_data[arc_idx012] = arcs_data[arc_idx012].dest_state; }); } else { const int32_t *row_ids2_data = fsas.RowIds(2).Data(); K2_EVAL( c, num_arcs, lambda_set_dest_states01, (int32_t arc_idx012) { int32_t src_state = arcs_data[arc_idx012].src_state, dest_state = arcs_data[arc_idx012].dest_state; // (row_ids2[arc_idx012] - src_state) is the same as // row_splits1[row_ids1[row_ids2[arc_idx012]]]; it's the idx01 of the // 1st state in this FSA. ans_data[arc_idx012] = dest_state + (row_ids2_data[arc_idx012] - src_state); }); } return ans; } Ragged<int32_t> GetStateBatches(FsaVec &fsas, bool transpose) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &c = fsas.Context(); Array1<int32_t> arc_dest_states = GetDestStates(fsas, true); MonotonicLowerBound(arc_dest_states, &arc_dest_states); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); // We can tune `log_power` as a tradeoff between work done and clock time on // GPU. int32_t log_power = (c->GetDeviceType() == kCpu ? 0 : 4); int32_t max_num_states = fsas.shape.MaxSize(1); // the following avoids doing too much extra work accumulating powers // of 'dest_states' for very small problem sizes. while (log_power > 0 && (1 << (1 + log_power)) > max_num_states) log_power--; // Ignoring edge effects: `dest_states_powers[0]` is just an array indexed by // state_idx01, that gives us the dest_state_idx01 that would be the beginning // of the next batch if state_idx01 were the beginning of the current batch. // So if we follow this chain forward from the start of one of the FSAs until // it passes the end of this FSA, we get the beginnings of the batches // we want. The natural algorithm to find the beginnings of the batches // is sequential. 
Array2<int32_t> dest_states_powers(c, log_power + 1, num_states); const int32_t *arc_dest_states_data = arc_dest_states.Data(), *fsas_row_splits2_data = fsas.RowSplits(2).Data(); int32_t *dest_states_power_data = dest_states_powers.Data(); // only process Row[0] below const int32_t int_max = std::numeric_limits<int32_t>::max(); K2_EVAL( c, num_states, lambda_set_dest_states, (int32_t state_idx01)->void { int32_t arc_idx01x = fsas_row_splits2_data[state_idx01]; // If this state has arcs, let its `dest_state` be the smallest // `dest_state` of any of its arcs (which is the first element of those // arcs' dest states in `arc_dest_states_data`); otherwise, take the // `dest_state` from the 1st arc of the next state, which is the largest // value we can take (if the definition is: the highest-numbered state s // for which neither this state nor any later-numbered state has an arc // to a state lower than s). // if this state has arcs, // arc_idx01x is the first arc index of this state, we get the // smallest dest state of this state's arcs using // arc_dest_states_data[arc_idx01x] // else // arc_idx01x is the first arc index of the next state, then // arc_dest_states_data[arc_idx01x] is the largest value we can take, // which is also the smallest dest state in the next state. int32_t dest_state = (arc_idx01x < num_arcs ? arc_dest_states_data[arc_idx01x] : int_max); dest_states_power_data[state_idx01] = dest_state; // if the following fails, it's either a code error or the input FSA had // cycles. K2_CHECK_GT(dest_state, state_idx01); }); // `num_batches_per_fsa` will be set to the number of batches of states that // we'll use for each FSA... it corresponds to the number of times we have // to follow links forward in the dest_states array till we pass the // end of the array for this fSA. Array1<int32_t> num_batches_per_fsa(c, num_fsas + 1, 0); // `batch_starts` will contain the locations of the first state_idx01 for each // batch, but in an 'un-consolidated' format. Specifically, for FSA with // index i, the batch_starts for that FSA begin at element fsa.RowSplits(1)[i] // of `batch_starts`. This is just a convenient layout because we know there // can't be more batches than there are states. We'll later consolidate the // information into a single array. Array1<int32_t> batch_starts(c, num_states + 1); int32_t *num_batches_per_fsa_data = num_batches_per_fsa.Data(), *batch_starts_data = batch_starts.Data(); const int32_t *fsas_row_splits1_data = fsas.RowSplits(1).Data(); #if 0 // This is a simple version of the kernel that demonstrates what we're trying // to do with the more complex code. K2_EVAL( c, num_fsas, lambda_set_batch_info_simple, (int32_t fsa_idx) { int32_t begin_state_idx01 = fsas_row_splits1_data[fsa_idx], end_state_idx01 = fsas_row_splits1_data[fsa_idx + 1]; int32_t i = 0, cur_state_idx01 = begin_state_idx01; while (cur_state_idx01 < end_state_idx01) { batch_starts_data[begin_state_idx01 + i] = cur_state_idx01; cur_state_idx01 = dest_states_power_data[cur_state_idx01]; ++i; } num_batches_per_fsa_data[fsa_idx] = i; }); #else int32_t stride = dest_states_powers.ElemStride0(); for (int32_t power = 1; power <= log_power; power++) { const int32_t *src_data = dest_states_powers.Data() + (power - 1) * stride; int32_t *dest_data = dest_states_powers.Data() + power * stride; K2_EVAL( c, num_states, lambda_square_array, (int32_t state_idx01)->void { int32_t dest_state = src_data[state_idx01], dest_state_sq = (dest_state < num_states ? 
src_data[dest_state] : int_max); dest_data[state_idx01] = dest_state_sq; }); } // jobs_per_fsa tells us how many separate chains of states we'll follow for // each FSA. // jobs_multiple is a kind of trick to ensure any given warp doesn't // issue more memory requests than it can handle at a time (we drop // some threads). int32_t jobs_per_fsa = (1 << log_power), jobs_multiple = (c->GetDeviceType() == kCuda ? 8 : 1); while (jobs_multiple > 1 && jobs_per_fsa * jobs_multiple * num_fsas > 10000) jobs_multiple /= 2; // Likely won't get here. Just reduce multiple if // num-jobs is ridiculous. auto dest_states_powers_acc = dest_states_powers.Accessor(); K2_EVAL2( c, num_fsas, jobs_per_fsa * jobs_multiple, lambda_set_batch_info, (int32_t fsa_idx, int32_t j) { if (j % jobs_multiple != 0) return; // a trick to avoid too much random // memory access for any given warp int32_t task_idx = j / jobs_multiple; // Now 0 <= task_idx < jobs_per_fsa. // The task indexed `task_idx` is responsible for batches numbered // task_idx, task_idx + jobs_per_fsa, task_index + 2 * job_per_fsa and // so on, for the FSA numbered `fsa_idx`. Comparing this code to // `lambda_set_batch_info_simple`, this task is responsible for the // assignment to batch_starts_data for all i such that i % jobs_per_fsas // == task_idx, together with the assignment to // num_batchess_per_fsa_data if // i % jobs_per_fsas == task_idx (here referring to the i value finally // assigned to that location). int32_t begin_state_idx01 = fsas_row_splits1_data[fsa_idx], end_state_idx01 = fsas_row_splits1_data[fsa_idx + 1]; int32_t num_states_this_fsa = end_state_idx01 - begin_state_idx01; int32_t i = 0, cur_state_idx01 = begin_state_idx01; if (task_idx >= num_states_this_fsa) return; // The next loop advances `cur_state_idx01` by // a number of steps equal to `task_idx`. for (int32_t m = 0; m < log_power; ++m) { int32_t n = 1 << m; if ((task_idx & n) != 0) { i += n; int32_t next = dest_states_powers_acc(m, cur_state_idx01); if (next >= end_state_idx01) return; cur_state_idx01 = next; } } K2_CHECK_EQ(i, task_idx); while (1) { if (i >= num_states_this_fsa) return; batch_starts_data[begin_state_idx01 + i] = cur_state_idx01; int32_t next_state_idx01 = dest_states_powers_acc( log_power, cur_state_idx01); // advance jobs_per_fsa = (1 << log_power) // steps if (next_state_idx01 >= end_state_idx01) { // if exactly one step would also be enough to take us past the // boundary... 
if (dest_states_powers_acc(0, cur_state_idx01) >= end_state_idx01) { num_batches_per_fsa_data[fsa_idx] = i + 1; } return; } else { i += jobs_per_fsa; cur_state_idx01 = next_state_idx01; } } }); #endif ExclusiveSum(num_batches_per_fsa, &num_batches_per_fsa); Array1<int32_t> &ans_row_splits1 = num_batches_per_fsa; int32_t num_batches = num_batches_per_fsa[num_fsas]; Array1<int32_t> ans_row_ids1(c, num_batches); RowSplitsToRowIds(ans_row_splits1, &ans_row_ids1); Array1<int32_t> ans_row_splits2(c, num_batches + 1); const int32_t *ans_row_splits1_data = ans_row_splits1.Data(), *ans_row_ids1_data = ans_row_ids1.Data(); int32_t *ans_row_splits2_data = ans_row_splits2.Data(); ans_row_splits2.Range(num_batches, 1) = num_states; // The kernel below won't // set this last element K2_EVAL( c, num_batches, lambda_set_ans_row_splits2, (int32_t idx01)->void { int32_t idx0 = ans_row_ids1_data[idx01], // Fsa index idx0x = ans_row_splits1_data[idx0], idx1 = idx01 - idx0x, fsas_idx0x = fsas_row_splits1_data[idx0]; // 1st state-idx (idx01) // in fsas_, for this FSA int32_t fsas_idx01 = fsas_idx0x + idx1; // the idx1 is actually the // batch-index, this statement // reflects the 'un-consolidated' // format of `batch_starts`. int32_t this_batch_start = batch_starts_data[fsas_idx01]; ans_row_splits2_data[idx01] = this_batch_start; }); RaggedShape ans_shape = RaggedShape3(&ans_row_splits1, &ans_row_ids1, num_batches, &ans_row_splits2, nullptr, num_states); Array1<int32_t> ans_value = Range(c, num_states, 0); if (transpose) { ans_shape = MakeTransposable(ans_shape); Ragged<int32_t> ans(ans_shape, ans_value); return Transpose(ans); } else { return Ragged<int32_t>(ans_shape, ans_value); } } Ragged<int32_t> GetIncomingArcs(FsaVec &fsas, const Array1<int32_t> &dest_states) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK(IsCompatible(fsas, dest_states)); ContextPtr &c = fsas.Context(); Ragged<int32_t> dest_states_tensor(fsas.shape, dest_states); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); Array1<int32_t> incoming_arcs_order = GetTransposeReordering(dest_states_tensor, num_states), ans_row_ids2 = dest_states[incoming_arcs_order]; // Note: incoming_arcs_row_ids2 will be monotonically increasing Array1<int32_t> ans_row_splits2(c, num_states + 1); RowIdsToRowSplits(ans_row_ids2, &ans_row_splits2); // Axis 1 corresponds to FSA states, so the row-ids and row-splits for axis // 1 are the same as for `fsas`. 
Array1<int32_t> ans_row_ids1 = fsas.RowIds(1), ans_row_splits1 = fsas.RowSplits(1); return Ragged<int32_t>( RaggedShape3(&ans_row_splits1, &ans_row_ids1, num_states, &ans_row_splits2, &ans_row_ids2, num_arcs), incoming_arcs_order); } Ragged<int32_t> GetLeavingArcIndexBatches(FsaVec &fsas, Ragged<int32_t> &state_batches) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, state_batches)); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = state_batches.Dim0(); K2_DCHECK(state_batches.TotSize(1) == num_fsas * num_batches); K2_DCHECK_EQ(state_batches.NumElements(), num_states); // get ans_shape Array1<int32_t> ans_row_splits3(c, num_states + 1); int32_t *ans_row_splits3_data = ans_row_splits3.Data(); const int32_t *fsa_states_row_splits_data = fsas.RowSplits(2).Data(); const int32_t *batch_states_data = state_batches.values.Data(); K2_EVAL( c, num_states, lambda_set_ans_row_splits3, (int32_t idx) { int32_t state_idx = batch_states_data[idx]; ans_row_splits3_data[idx] = fsa_states_row_splits_data[state_idx + 1] - fsa_states_row_splits_data[state_idx]; }); ExclusiveSum(ans_row_splits3, &ans_row_splits3); Array1<int32_t> ans_row_ids3(c, num_arcs); RowSplitsToRowIds(ans_row_splits3, &ans_row_ids3); RaggedShape ans_shape = ComposeRaggedShapes( state_batches.shape, RaggedShape2(&ans_row_splits3, &ans_row_ids3, num_arcs)); // get ans_values Array1<int32_t> ans_values(c, num_arcs); int32_t *ans_values_data = ans_values.Data(); const int32_t *ans_row_ids3_data = ans_row_ids3.Data(); K2_EVAL( c, num_arcs, lambda_set_ans_values, (int32_t idx0123) { int32_t ans_idx012 = ans_row_ids3_data[idx0123]; int32_t state_idx = batch_states_data[ans_idx012]; // state_idx is idx01 in fsas int32_t fsa_idx01x = fsa_states_row_splits_data[state_idx]; // ans_idx3 is fsas_idx2, i.e. 
the arc idx in a state int32_t ans_idx3 = idx0123 - ans_row_splits3_data[ans_idx012]; ans_values_data[idx0123] = fsa_idx01x + ans_idx3; }); return Ragged<int32_t>(ans_shape, ans_values); } Ragged<int32_t> GetEnteringArcIndexBatches(FsaVec &fsas, Ragged<int32_t> &incoming_arcs, Ragged<int32_t> &state_batches) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, state_batches)); K2_CHECK(IsCompatible(fsas, incoming_arcs)); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(incoming_arcs.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = state_batches.Dim0(); // just using DCHECK below to save time in production code K2_DCHECK(state_batches.TotSize(1) == num_fsas * num_batches); K2_DCHECK_EQ(state_batches.NumElements(), num_states); K2_DCHECK_EQ(incoming_arcs.Dim0(), num_fsas); K2_DCHECK_EQ(incoming_arcs.TotSize(1), num_states); K2_DCHECK_EQ(incoming_arcs.NumElements(), num_arcs); // get ans_shape Array1<int32_t> ans_row_splits3(c, num_states + 1); int32_t *ans_row_splits3_data = ans_row_splits3.Data(); const int32_t *incoming_arcs_row_splits_data = incoming_arcs.RowSplits(2).Data(); const int32_t *batch_states_data = state_batches.values.Data(); K2_EVAL( c, num_states, lambda_set_ans_row_splits3, (int32_t idx) { int32_t state_idx = batch_states_data[idx]; ans_row_splits3_data[idx] = incoming_arcs_row_splits_data[state_idx + 1] - incoming_arcs_row_splits_data[state_idx]; }); ExclusiveSum(ans_row_splits3, &ans_row_splits3); Array1<int32_t> ans_row_ids3(c, num_arcs); RowSplitsToRowIds(ans_row_splits3, &ans_row_ids3); RaggedShape ans_shape = ComposeRaggedShapes( state_batches.shape, RaggedShape2(&ans_row_splits3, &ans_row_ids3, num_arcs)); // get ans_values Array1<int32_t> ans_values(c, num_arcs); int32_t *ans_values_data = ans_values.Data(); const int32_t *ans_row_ids3_data = ans_row_ids3.Data(); const int32_t *incoming_arcs_data = incoming_arcs.values.Data(); K2_EVAL( c, num_arcs, lambda_set_ans_values, (int32_t idx0123) { int32_t ans_idx012 = ans_row_ids3_data[idx0123]; int32_t state_idx = batch_states_data[ans_idx012]; // state_idx is idx01 in // incoming_arcs int32_t incoming_arcs_idx01x = incoming_arcs_row_splits_data[state_idx]; // ans_idx3 is incoming_arcs_idx2, i.e. the entering arc idx for a state int32_t ans_idx3 = idx0123 - ans_row_splits3_data[ans_idx012]; int32_t incoming_arcs_idx012 = incoming_arcs_idx01x + ans_idx3; ans_values_data[idx0123] = incoming_arcs_data[incoming_arcs_idx012]; }); return Ragged<int32_t>(ans_shape, ans_values); } FsaVec ConvertDenseToFsaVec(DenseFsaVec &src) { NVTX_RANGE(K2_FUNC); ContextPtr &c = src.shape.Context(); // caution: 'num_symbols' is the number of symbols excluding the final-symbol // -1. int32_t num_fsas = src.shape.Dim0(), num_symbols = src.scores.Dim1() - 1; // the "1" is the extra state per FSA we need in the FsaVec format, // for the final-state. RaggedShape fsa2state = ChangeSublistSize(src.shape, 1); // again, the "+num_fsas" below is the extra state per FSA we need in the // FsaVec format, for the final-state. int32_t num_states = src.shape.NumElements() + num_fsas; // The explanation num-arcs below is as follows: // Firstly, all rows of src.scores (==all elements of src.shape) correspond // to states with arcs leaving them. 
Most of them have `num_symbols` arcs, // but the final one for each FSA has 1 arc (with symbol -1) int32_t num_arcs = src.shape.NumElements() * num_symbols - (num_symbols - 1) * num_fsas; Array1<int32_t> row_splits2(c, num_states + 1), row_ids2(c, num_arcs); const int32_t *row_ids1_data = fsa2state.RowIds(1).Data(), *src_row_ids1_data = src.shape.RowIds(1).Data(), *src_row_splits1_data = src.shape.RowSplits(1).Data(); Array1<Arc> arcs(c, num_arcs); Arc *arcs_data = arcs.Data(); auto scores_acc = src.scores.Accessor(); int32_t *row_splits2_data = row_splits2.Data(), *row_ids2_data = row_ids2.Data(); // 0 <= s < num_symbols; note, `num_symbols` excludes the final-symbol (-1). // note: `src` means: w.r.t. the numbering in the original DenseFsaVec. K2_EVAL2( c, src.shape.NumElements(), num_symbols, lambda_set_arcs_etc, (int32_t src_state_idx01, int32_t s)->void { int32_t fsa_idx0 = src_row_ids1_data[src_state_idx01], src_state_idx0x = src_row_splits1_data[fsa_idx0], state_idx1 = src_state_idx01 - src_state_idx0x, src_next_state_idx0x = src_row_splits1_data[fsa_idx0 + 1], src_num_states1 = src_next_state_idx0x - src_state_idx0x, ans_state_idx01 = src_state_idx01 + fsa_idx0; // we add one final-state per FSA.. // "+ fsa_idx0" gives the // difference from old->new // numbering. // arc_idx0xx is the 1st arc-index of the FSA we are creating.. each // source state has `num_symbols` arcs leaving it except the last one of // each FSA, which has 1 arc leaving it (to the final-state). int32_t arc_idx0xx = (src_state_idx0x * num_symbols) - fsa_idx0 * (num_symbols - 1), arc_idx01x = arc_idx0xx + (state_idx1 * num_symbols), arc_idx012 = arc_idx01x + s; int32_t symbol_offset; if (state_idx1 + 1 == src_num_states1) { symbol_offset = -1; if (s > 0) return; // we just need the arc with -1. // if this is the state before the final state of this FSA. it has the // responsibility to write the row_splits2 value for the final state. // It's arc_idx012 + 1; the "+1" corresponds to the single arc with // the final-symbol on it. row_splits2_data[ans_state_idx01 + 1] = arc_idx012 + 1; } else { symbol_offset = 0; } // the "+ 1" is because index 0 in `scores` is for the final-symbol -1, // then 0, 1, etc. int32_t symbol_index_in_scores = s + symbol_offset + 1; arcs_data[arc_idx012] = Arc(state_idx1, state_idx1 + 1, s + symbol_offset, scores_acc(src_state_idx01, symbol_index_in_scores)); row_ids2_data[arc_idx012] = ans_state_idx01; if (s == 0) { // 1st arc for this state. 
row_splits2_data[ans_state_idx01] = arc_idx012; K2_CHECK(row_ids1_data[ans_state_idx01] == fsa_idx0); if (src_state_idx01 == 0) row_splits2_data[num_states] = num_arcs; } }); RaggedShape state2arc = RaggedShape2(&row_splits2, &row_ids2, num_arcs); return Ragged<Arc>(ComposeRaggedShapes(fsa2state, state2arc), arcs); } template <typename FloatType> Array1<FloatType> GetForwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &entering_arc_batches, bool log_semiring, Array1<int32_t> *entering_arcs) { NVTX_RANGE(K2_FUNC); K2_STATIC_ASSERT((std::is_same<float, FloatType>::value || std::is_same<double, FloatType>::value)); K2_CHECK(IsCompatible(fsas, state_batches)); K2_CHECK(IsCompatible(fsas, entering_arc_batches)); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); K2_CHECK_EQ(entering_arc_batches.NumAxes(), 4); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = state_batches.Dim0(); // just using DCHECK below to save time in production code K2_DCHECK_EQ(state_batches.TotSize(1), num_fsas * num_batches); K2_DCHECK_EQ(state_batches.NumElements(), num_states); K2_DCHECK_EQ(entering_arc_batches.Dim0(), num_batches); K2_DCHECK_EQ(entering_arc_batches.TotSize(1), state_batches.TotSize(1)); K2_DCHECK_EQ(entering_arc_batches.TotSize(2), num_states); K2_DCHECK_EQ(entering_arc_batches.NumElements(), num_arcs); const int32_t *fsas_row_ids1_data = fsas.RowIds(1).Data(), *fsas_row_splits1_data = fsas.RowSplits(1).Data(), *fsas_row_ids2_data = fsas.RowIds(2).Data(); const FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity(); Array1<FloatType> state_scores(c, num_states, negative_infinity); FloatType *state_scores_data = state_scores.Data(); // set the score of start state in each fsa to be 0 const int32_t *fsa_row_splits1 = fsas.RowSplits(1).Data(); K2_EVAL( c, num_fsas, lambda_set_start_state_score, (int32_t fsa_idx) { int32_t start_state = fsa_row_splits1[fsa_idx], start_state_next_fsa = fsa_row_splits1[fsa_idx + 1]; if (start_state_next_fsa - start_state > 0) state_scores_data[start_state] = 0; }); const Arc *arcs = fsas.values.Data(); int32_t *entering_arcs_data = nullptr; if (entering_arcs) { K2_CHECK_EQ(log_semiring, false) << " entering_arcs supplied"; *entering_arcs = Array1<int32_t>(c, num_states, -1); entering_arcs_data = entering_arcs->Data(); } RaggedAxis0Splitter<int32_t> arc_batches_splitter(entering_arc_batches); // process batch sequentially. for (int32_t i = 0; i < num_batches; ++i) { // entering_arc_batch is indexed [fsa][state_list][arc_list] int32_t arc_begin; Ragged<int32_t> entering_arc_batch = arc_batches_splitter.GetElement(i, &arc_begin); const int32_t *entering_arc_batch_data = entering_arc_batch.values.Data(); int32_t state_begin = arc_batches_splitter.GetOffset(i, 2), state_end = arc_batches_splitter.GetOffset(i + 1, 2), num_states_this_batch = state_end - state_begin, num_arcs_this_batch = entering_arc_batch.NumElements(); Array1<int32_t> states_batch = state_batches.values.Arange(state_begin, state_end); const int32_t *states_batch_data = states_batch.Data(); Ragged<FloatType> entering_arc_batch_scores(entering_arc_batch.shape); FloatType *entering_arc_batch_scores_data = entering_arc_batch_scores.values.Data(); // get entering arc scores K2_EVAL( c, num_arcs_this_batch, lambda_set_entering_arc_score, (int32_t idx012)->void { // `idx012` is into the batch. 
int32_t fsas_arc_idx012 = entering_arc_batch_data[idx012]; float curr_arc_score = arcs[fsas_arc_idx012].score; int32_t src_state_idx01 = fsas_row_ids2_data[fsas_arc_idx012]; entering_arc_batch_scores_data[idx012] = state_scores_data[src_state_idx01] + curr_arc_score; }); Array1<FloatType> state_batch_scores(c, num_states_this_batch); FloatType *state_batch_scores_data = state_batch_scores.Data(); // get scores per state in this batch if (log_semiring) { LogSumPerSublist(entering_arc_batch_scores, negative_infinity, &state_batch_scores); } else { if (entering_arcs_data == nullptr) { MaxPerSublist(entering_arc_batch_scores, negative_infinity, &state_batch_scores); } else { // entering_arc_idxs will contain indexes into // `entering_arc_batch_scores`, equiv. to indexes into // `entering_arc_batch`. Array1<int32_t> entering_arc_idxs(c, num_states_this_batch); ArgMaxPerSublist(entering_arc_batch_scores, negative_infinity, &entering_arc_idxs); const int32_t *entering_arc_idxs_data = entering_arc_idxs.Data(), *entering_arc_batch_data = entering_arc_batch.values.Data(); // arc_idx01 below is an index into sub_scores, it is also an arc_idx123 // into entering_arc_batches. K2_EVAL( c, num_states_this_batch, lambda_set_entering_arcs_etc, (int32_t state_idx) { // state_idx is into state_batch_scores_data // and entering_arc_idxs. // arc_idx is into entering_arc_batch_data. int32_t arc_idx = entering_arc_idxs_data[state_idx]; FloatType score; int32_t fsas_arc_idx012; if (arc_idx == -1) { score = negative_infinity; fsas_arc_idx012 = -1; } else { fsas_arc_idx012 = entering_arc_batch_data[arc_idx]; score = entering_arc_batch_scores_data[arc_idx]; } // we'll later ignore this score if it was the start state. state_batch_scores_data[state_idx] = score; int32_t fsas_state_idx01 = states_batch_data[state_idx]; entering_arcs_data[fsas_state_idx01] = fsas_arc_idx012; }); } } // Copy those scores to the corresponding state in state_scores. // `state_idx` is an index into `states_batch_data.values`. K2_EVAL( c, num_states_this_batch, lambda_copy_state_scores, (int32_t state_idx) { int32_t fsas_state_idx01 = states_batch_data[state_idx]; FloatType score = state_batch_scores_data[state_idx]; // The if-statement below is to prevent it overriding the zero score // for the start-states. We only bother checking whether it's a start // state if the score is -infinity, to save memory bandwidth. (It // would always be -infinity for start states because they have no // entering arcs; these FSAs are acyclic. 
if (score != negative_infinity || fsas_state_idx01 != fsas_row_splits1_data[fsas_row_ids1_data[fsas_state_idx01]]) { state_scores_data[fsas_state_idx01] = score; } }); } return state_scores; } template <typename FloatType> void BackpropGetArcPost(FsaVec &fsas, Ragged<int32_t> &incoming_arcs, const Array1<FloatType> &arc_post_deriv, Array1<FloatType> *forward_scores_deriv, Array1<FloatType> *backward_scores_deriv) { NVTX_RANGE(K2_FUNC); K2_STATIC_ASSERT((std::is_same<float, FloatType>::value || std::is_same<double, FloatType>::value)); K2_CHECK(forward_scores_deriv != nullptr && backward_scores_deriv != nullptr); ContextPtr c = GetContext(fsas, incoming_arcs, arc_post_deriv); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(incoming_arcs.NumAxes(), 3); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); K2_CHECK_EQ(arc_post_deriv.Dim(), num_arcs); K2_DCHECK_EQ(incoming_arcs.Dim0(), num_fsas); K2_DCHECK_EQ(incoming_arcs.TotSize(1), num_states); K2_DCHECK_EQ(incoming_arcs.TotSize(2), num_arcs); *forward_scores_deriv = Array1<FloatType>(c, num_states); *backward_scores_deriv = Array1<FloatType>(c, num_states); // compute forward_scores_deriv Ragged<FloatType> ragged_forward_scores_deriv(fsas.shape, arc_post_deriv); SumPerSublist<FloatType>(ragged_forward_scores_deriv, FloatType(0), forward_scores_deriv); // compute backward_scores_deriv Array1<FloatType> incoming_arc_post_deriv = arc_post_deriv[incoming_arcs.values]; Ragged<FloatType> ragged_backward_scores_deriv(incoming_arcs.shape, incoming_arc_post_deriv); SumPerSublist<FloatType>(ragged_backward_scores_deriv, FloatType(0), backward_scores_deriv); // set the forward_scores_deriv for the final state and backward_scores_deriv // for the start state. Ragged<FloatType> arc_post_deriv_per_fsa = ragged_forward_scores_deriv.RemoveAxis(1); Array1<FloatType> tot_arc_post_deriv(c, num_fsas); SumPerSublist<FloatType>(arc_post_deriv_per_fsa, FloatType(0), &tot_arc_post_deriv); FloatType *tot_arc_post_deriv_data = tot_arc_post_deriv.Data(), *forward_scores_deriv_data = forward_scores_deriv->Data(), *backward_scores_deriv_data = backward_scores_deriv->Data(); const int32_t *fsa_row_splits1_data = fsas.RowSplits(1).Data(); K2_EVAL( c, num_fsas, lambda_set_deriv_for_start_and_final_state, (int32_t fsa_idx) { int32_t start_state = fsa_row_splits1_data[fsa_idx], start_state_next_fsa = fsa_row_splits1_data[fsa_idx + 1]; if (start_state_next_fsa - start_state > 0) { FloatType deriv = FloatType(-0.5) * tot_arc_post_deriv_data[fsa_idx]; forward_scores_deriv_data[start_state_next_fsa - 1] = deriv; backward_scores_deriv_data[start_state] = deriv; } }); } template void BackpropGetArcPost(FsaVec &fsas, Ragged<int32_t> &incoming_arcs, const Array1<float> &arc_post_deriv, Array1<float> *forward_scores_deriv, Array1<float> *backward_scores_deriv); template void BackpropGetArcPost(FsaVec &fsas, Ragged<int32_t> &incoming_arcs, const Array1<double> &arc_post_deriv, Array1<double> *forward_scores_deriv, Array1<double> *backward_scores_deriv); template <typename FloatType> Array1<FloatType> GetBackwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &leaving_arc_batches, bool log_semiring /*= true*/) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, state_batches)); K2_CHECK(IsCompatible(fsas, leaving_arc_batches)); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); K2_CHECK_EQ(leaving_arc_batches.NumAxes(), 4); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = 
fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = state_batches.Dim0(); // just using DCHECK below to save time in production code K2_DCHECK_EQ(state_batches.NumElements(), num_states); K2_DCHECK_EQ(leaving_arc_batches.Dim0(), num_batches); K2_DCHECK_EQ(leaving_arc_batches.TotSize(1), state_batches.TotSize(1)); K2_DCHECK_EQ(leaving_arc_batches.TotSize(2), num_states); K2_DCHECK_EQ(leaving_arc_batches.NumElements(), num_arcs); const FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity(); Array1<FloatType> state_scores(c, num_states, negative_infinity); FloatType *state_scores_data = state_scores.Data(); const int32_t *fsas_row_splits1_data = fsas.RowSplits(1).Data(), *fsas_row_ids1_data = fsas.RowIds(1).Data(), *fsas_row_ids2_data = fsas.RowIds(2).Data(); // set the score of final state in each fsa to be 0 K2_EVAL( c, num_fsas, lambda_set_final_state_score, (int32_t fsa_idx) { int32_t start_state = fsas_row_splits1_data[fsa_idx], start_state_next_fsa = fsas_row_splits1_data[fsa_idx + 1]; if (start_state_next_fsa - start_state > 0) state_scores_data[start_state_next_fsa - 1] = 0; }); RaggedAxis0Splitter<int32_t> arc_batches_splitter(leaving_arc_batches); const Arc *arcs = fsas.values.Data(); // process batch sequentially. for (int32_t i = num_batches - 1; i >= 0; --i) { int32_t arc_begin; Ragged<int32_t> this_arc_batch = arc_batches_splitter.GetElement(i, &arc_begin); int32_t state_begin = arc_batches_splitter.GetOffset(i, 2), state_end = arc_batches_splitter.GetOffset(i + 1, 2), num_states_this_batch = state_end - state_begin, num_arcs_this_batch = this_arc_batch.NumElements(); Ragged<FloatType> this_arc_batch_scores(this_arc_batch.shape); const int32_t *this_arc_batch_data = this_arc_batch.values.Data(); FloatType *this_arc_batch_scores_data = this_arc_batch_scores.values.Data(); // Get arc backward scores at the beginning of arcs in this batch K2_EVAL( c, num_arcs_this_batch, lambda_set_leaving_arc_score, (int32_t arc_idx) { int32_t fsa_arc_idx012 = this_arc_batch_data[arc_idx]; float curr_arc_score = arcs[fsa_arc_idx012].score; int32_t dest_state_idx1 = arcs[fsa_arc_idx012].dest_state, src_state_idx1 = arcs[fsa_arc_idx012].src_state, src_state_idx01 = fsas_row_ids2_data[fsa_arc_idx012], idx0x = src_state_idx01 - src_state_idx1, dest_state_idx01 = idx0x + dest_state_idx1; this_arc_batch_scores_data[arc_idx] = state_scores_data[dest_state_idx01] + curr_arc_score; }); Array1<FloatType> this_batch_state_scores(c, num_states_this_batch); // get scores per state in this batch if (log_semiring) { LogSumPerSublist(this_arc_batch_scores, negative_infinity, &this_batch_state_scores); } else { MaxPerSublist(this_arc_batch_scores, negative_infinity, &this_batch_state_scores); } Array1<int32_t> this_batch_state_ids = state_batches.values.Arange(state_begin, state_end); const int32_t *this_batch_state_ids_data = this_batch_state_ids.Data(); const FloatType *this_batch_state_scores_data = this_batch_state_scores.Data(); // copy those scores to the corresponding states in state_scores (they are // in a different order). K2_EVAL( c, num_states_this_batch, lambda_copy_state_scores, (int32_t state_idx) { int32_t fsas_state_idx01 = this_batch_state_ids_data[state_idx]; FloatType score = this_batch_state_scores_data[state_idx]; if (score != negative_infinity || fsas_state_idx01 + 1 != fsas_row_splits1_data[fsas_row_ids1_data[fsas_state_idx01] + 1]) { // The if-block is to ensure we don't overwrite the final-states' // backward-probs (0) with -infinity. 
We check the score first to // avoid unnecessary memory traffic. state_scores_data[fsas_state_idx01] = score; } }); } return state_scores; } template <typename FloatType> Array1<FloatType> BackpropGetBackwardScores( FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &entering_arc_batches, bool log_semiring, const Array1<FloatType> &backward_scores, const Array1<FloatType> &backward_scores_deriv_in) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetContext(fsas, state_batches, entering_arc_batches, backward_scores, backward_scores_deriv_in); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); K2_CHECK_EQ(entering_arc_batches.NumAxes(), 4); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = entering_arc_batches.Dim0(); K2_DCHECK_EQ(state_batches.TotSize(1), num_fsas * num_batches); K2_DCHECK_EQ(state_batches.NumElements(), num_states); K2_DCHECK_EQ(entering_arc_batches.Dim0(), num_batches); K2_DCHECK_EQ(entering_arc_batches.TotSize(1), state_batches.TotSize(1)); K2_DCHECK_EQ(entering_arc_batches.TotSize(2), num_states); K2_DCHECK_EQ(entering_arc_batches.NumElements(), num_arcs); K2_DCHECK_EQ(backward_scores.Dim(), num_states); K2_DCHECK_EQ(backward_scores_deriv_in.Dim(), num_states); // We will be adding to the elements of `backward_scores_deriv`. // `backward_scores_deriv_in` was just the derivative w.r.t. the output of // GetBackwardScores(), but because GetBackwardScores() is recursive, // the derivatives for earlier states contribute to those of later ones. Array1<FloatType> backward_scores_deriv(backward_scores_deriv_in.Clone()); FloatType *backward_scores_deriv_data = backward_scores_deriv.Data(); const int32_t *fsas_row_splits1_data = fsas.RowSplits(1).Data(), *fsas_row_ids1_data = fsas.RowIds(1).Data(), *fsas_row_ids2_data = fsas.RowIds(2).Data(); const FloatType *backward_scores_data = backward_scores.Data(); const Arc *arcs = fsas.values.Data(); Array1<FloatType> arc_scores_deriv(c, num_arcs); // will return this. FloatType *arc_scores_deriv_data = arc_scores_deriv.Data(); RaggedAxis0Splitter<int32_t> arc_batches_splitter(entering_arc_batches); const FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity(); if (log_semiring) { // For each batch of states, from start to end (opposite direction to // GetBackwardScores())... 
for (int32_t b = 0; b < num_batches; ++b) { int32_t arc_begin; Ragged<int32_t> entering_arc_batch = arc_batches_splitter.GetElement(b, &arc_begin); const int32_t *entering_arc_batch_data = entering_arc_batch.values.Data(); Ragged<FloatType> entering_arc_deriv(entering_arc_batch.shape); FloatType *entering_arc_deriv_data = entering_arc_deriv.values.Data(); K2_EVAL( c, entering_arc_batch.NumElements(), lambda_set_arc_deriv_etc, (int32_t arc_idx) { int32_t fsas_arc_idx012 = entering_arc_batch_data[arc_idx]; const Arc &arc = arcs[fsas_arc_idx012]; int32_t dest_state_idx1 = arc.dest_state, src_state_idx1 = arc.src_state, src_state_idx01 = fsas_row_ids2_data[fsas_arc_idx012], state_idx0x = src_state_idx01 - src_state_idx1, dest_state_idx01 = state_idx0x + dest_state_idx1; FloatType dest_score = backward_scores_data[dest_state_idx01], arc_begin_score = dest_score + arc.score, src_score = backward_scores_data[src_state_idx01]; // so that arc_begin_score - src_score will never be nan if (src_score == negative_infinity) src_score = -negative_infinity; // alpha = d(src_score) / d(arc_begin_score) FloatType alpha = exp(arc_begin_score - src_score), arc_deriv = alpha * backward_scores_deriv_data[src_state_idx01]; K2_CHECK_LT(alpha, 1.1); arc_scores_deriv_data[fsas_arc_idx012] = arc_deriv; entering_arc_deriv_data[arc_idx] = arc_deriv; }); int32_t state_begin = arc_batches_splitter.GetOffset(b, 2), state_end = arc_batches_splitter.GetOffset(b + 1, 2), this_num_states = state_end - state_begin; // `state_score_derivs` is the extra part contributed to // `backward_scores_deriv` by the recursion, for the batch of states we're // currently processing. Array1<FloatType> state_score_derivs(c, this_num_states); SumPerSublist<FloatType>(entering_arc_deriv, 0, &state_score_derivs); const FloatType *state_score_derivs_data = state_score_derivs.Data(); const int32_t *state_ids_batch_data = state_batches.values.Data() + state_begin; K2_EVAL( c, this_num_states, lambda_modify_state_score_derivs, (int32_t state_idx) { int32_t fsas_state_idx01 = state_ids_batch_data[state_idx]; FloatType state_score_extra_deriv = state_score_derivs_data[state_idx]; backward_scores_deriv_data[fsas_state_idx01] += state_score_extra_deriv; }); } } else { // in a single kernel, figure out the contribution of each arc to its // source-state's backward prob by seeing which outgoing arc contributes the // max loglike; this uses the shape of the fsas. Note, it's arbitrary in // case of ties, we pick one. 
Ragged<FloatType> arc_begin_scores(fsas.shape); FloatType *arc_begin_scores_data = arc_begin_scores.values.Data(); K2_EVAL( c, num_arcs, lambda_set_arc_begin_scores, (int32_t arc_idx012) { const Arc &arc = arcs[arc_idx012]; int32_t dest_state_idx1 = arc.dest_state, src_state_idx1 = arc.src_state, src_state_idx01 = fsas_row_ids2_data[arc_idx012], state_idx0x = src_state_idx01 - src_state_idx1, dest_state_idx01 = state_idx0x + dest_state_idx1; FloatType dest_score = backward_scores_data[dest_state_idx01], arc_begin_score = dest_score + arc.score; arc_begin_scores_data[arc_idx012] = arc_begin_score; }); Array1<int32_t> best_leaving_arc_idx(c, num_states); ArgMaxPerSublist(arc_begin_scores, negative_infinity, &best_leaving_arc_idx); const int32_t *best_leaving_arc_idx_data = best_leaving_arc_idx.Data(); for (int32_t b = 0; b < num_batches; ++b) { int32_t arc_begin; Ragged<int32_t> entering_arc_batch = arc_batches_splitter.GetElement(b, &arc_begin); const int32_t *entering_arc_batch_data = entering_arc_batch.values.Data(); Ragged<FloatType> entering_arc_deriv(entering_arc_batch.shape); FloatType *entering_arc_deriv_data = entering_arc_deriv.values.Data(); K2_EVAL( c, entering_arc_batch.NumElements(), lambda_set_arc_deriv_etc, (int32_t arc_idx)->void { int32_t fsas_arc_idx012 = entering_arc_batch_data[arc_idx]; int32_t src_state_idx01 = fsas_row_ids2_data[fsas_arc_idx012]; FloatType arc_deriv = FloatType(0); if (best_leaving_arc_idx_data[src_state_idx01] == fsas_arc_idx012) { arc_deriv = backward_scores_deriv_data[src_state_idx01]; } // otherwise arc_deriv is 0.0, the arc's score has no effect arc_scores_deriv_data[fsas_arc_idx012] = arc_deriv; entering_arc_deriv_data[arc_idx] = arc_deriv; }); int32_t state_begin = arc_batches_splitter.GetOffset(b, 2), state_end = arc_batches_splitter.GetOffset(b + 1, 2), this_num_states = state_end - state_begin; // `state_score_derivs` is the extra part contributed to // `backward_scores_deriv` by the recursion, for the batch of states we're // currently processing. 
Array1<FloatType> state_score_derivs(c, this_num_states); SumPerSublist<FloatType>(entering_arc_deriv, 0, &state_score_derivs); const FloatType *state_score_derivs_data = state_score_derivs.Data(); const int32_t *state_ids_batch_data = state_batches.values.Data() + state_begin; K2_EVAL( c, this_num_states, lambda_modify_state_score_derivs, (int32_t state_idx)->void { int32_t fsas_state_idx01 = state_ids_batch_data[state_idx]; FloatType state_score_extra_deriv = state_score_derivs_data[state_idx]; backward_scores_deriv_data[fsas_state_idx01] += state_score_extra_deriv; }); } } return arc_scores_deriv; } template Array1<float> BackpropGetBackwardScores( FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &entering_arc_batches, bool log_semiring, const Array1<float> &backward_scores, const Array1<float> &backward_scores_deriv_in); template Array1<double> BackpropGetBackwardScores( FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &entering_arc_batches, bool log_semiring, const Array1<double> &backward_scores, const Array1<double> &backward_scores_deriv_in); template <typename FloatType> Array1<FloatType> BackpropGetForwardScores( FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &leaving_arc_batches, bool log_semiring, const Array1<int32_t> *entering_arcs, const Array1<FloatType> &forward_scores, const Array1<FloatType> &forward_scores_deriv_in) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetContext(fsas, state_batches, leaving_arc_batches, forward_scores, forward_scores_deriv_in); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); K2_CHECK_EQ(leaving_arc_batches.NumAxes(), 4); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = leaving_arc_batches.Dim0(); K2_DCHECK_EQ(state_batches.TotSize(1), num_fsas * num_batches); K2_DCHECK_EQ(state_batches.NumElements(), num_states); K2_DCHECK_EQ(leaving_arc_batches.Dim0(), num_batches); K2_DCHECK_EQ(leaving_arc_batches.TotSize(1), state_batches.TotSize(1)); K2_DCHECK_EQ(leaving_arc_batches.TotSize(2), num_states); K2_DCHECK_EQ(leaving_arc_batches.NumElements(), num_arcs); K2_DCHECK_EQ(forward_scores.Dim(), num_states); K2_DCHECK_EQ(forward_scores_deriv_in.Dim(), num_states); // We will be adding to the elements of `forward_scores_deriv`. // `forward_scores_deriv_in` was just the derivative w.r.t. the output of // GetForwardScores(), but because GetForwardScores() is recursive, // the derivatives for later states contribute to those of earlier ones. Array1<FloatType> forward_scores_deriv(forward_scores_deriv_in.Clone()); FloatType *forward_scores_deriv_data = forward_scores_deriv.Data(); const int32_t *fsas_row_splits1_data = fsas.RowSplits(1).Data(), *fsas_row_ids1_data = fsas.RowIds(1).Data(), *fsas_row_ids2_data = fsas.RowIds(2).Data(); const FloatType *forward_scores_data = forward_scores.Data(); const Arc *arcs = fsas.values.Data(); Array1<FloatType> arc_scores_deriv(c, num_arcs); // will return this. FloatType *arc_scores_deriv_data = arc_scores_deriv.Data(); RaggedAxis0Splitter<int32_t> arc_batches_splitter(leaving_arc_batches); const FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity(); if (log_semiring) { // For each batch of states, from end to start (opposite direction to // GetForwardScores())... 
for (int32_t b = num_batches - 1; b >= 0; --b) { int32_t arc_begin; Ragged<int32_t> leaving_arc_batch = arc_batches_splitter.GetElement(b, &arc_begin); int32_t *leaving_arc_batch_data = leaving_arc_batch.values.Data(); Ragged<FloatType> leaving_arc_deriv(leaving_arc_batch.shape); FloatType *leaving_arc_deriv_data = leaving_arc_deriv.values.Data(); K2_EVAL( c, leaving_arc_batch.NumElements(), lambda_set_arc_deriv_etc, (int32_t arc_idx) { int32_t fsas_arc_idx012 = leaving_arc_batch_data[arc_idx]; const Arc &arc = arcs[fsas_arc_idx012]; int32_t dest_state_idx1 = arc.dest_state, src_state_idx1 = arc.src_state, src_state_idx01 = fsas_row_ids2_data[fsas_arc_idx012], state_idx0x = src_state_idx01 - src_state_idx1, dest_state_idx01 = state_idx0x + dest_state_idx1; FloatType src_score = forward_scores_data[src_state_idx01], arc_end_score = src_score + arc.score, dest_score = forward_scores_data[dest_state_idx01]; // so that arc_end_score - dest_score will never be nan if (dest_score == negative_infinity) dest_score = -negative_infinity; // alpha = d(dest_score) / d(arc_end_score) FloatType alpha = exp(arc_end_score - dest_score), arc_deriv = alpha * forward_scores_deriv_data[dest_state_idx01]; K2_CHECK_LT(alpha, 1.1); arc_scores_deriv_data[fsas_arc_idx012] = arc_deriv; leaving_arc_deriv_data[arc_idx] = arc_deriv; }); int32_t state_begin = arc_batches_splitter.GetOffset(b, 2), state_end = arc_batches_splitter.GetOffset(b + 1, 2), this_num_states = state_end - state_begin; // `state_score_derivs` is the extra part contributed to // `forward_scores_deriv` by the recursion, for the batch of states we're // currently processing. Array1<FloatType> state_score_derivs(c, this_num_states); SumPerSublist<FloatType>(leaving_arc_deriv, 0, &state_score_derivs); const FloatType *state_score_derivs_data = state_score_derivs.Data(); const int32_t *state_ids_batch_data = state_batches.values.Data() + state_begin; K2_EVAL( c, this_num_states, lambda_modify_state_score_derivs, (int32_t state_idx) { int32_t fsas_state_idx01 = state_ids_batch_data[state_idx]; FloatType state_score_extra_deriv = state_score_derivs_data[state_idx]; forward_scores_deriv_data[fsas_state_idx01] += state_score_extra_deriv; }); } } else { K2_CHECK_NE(entering_arcs, nullptr); K2_CHECK_EQ(entering_arcs->Dim(), num_states); K2_CHECK(entering_arcs->Context()->IsCompatible(*c)); const int32_t *entering_arcs_data = entering_arcs->Data(); for (int32_t b = num_batches - 1; b >= 0; --b) { int32_t arc_begin; Ragged<int32_t> leaving_arc_batch = arc_batches_splitter.GetElement(b, &arc_begin); const int32_t *leaving_arc_batch_data = leaving_arc_batch.values.Data(); Ragged<FloatType> leaving_arc_deriv(leaving_arc_batch.shape); FloatType *leaving_arc_deriv_data = leaving_arc_deriv.values.Data(); K2_EVAL( c, leaving_arc_batch.NumElements(), lambda_set_arc_deriv_etc, (int32_t arc_idx)->void { int32_t fsas_arc_idx012 = leaving_arc_batch_data[arc_idx]; const Arc &arc = arcs[fsas_arc_idx012]; int32_t dest_state_idx1 = arc.dest_state, src_state_idx1 = arc.src_state, src_state_idx01 = fsas_row_ids2_data[fsas_arc_idx012], state_idx0x = src_state_idx01 - src_state_idx1, dest_state_idx01 = state_idx0x + dest_state_idx1; FloatType arc_deriv = FloatType(0); if (entering_arcs_data[dest_state_idx01] == fsas_arc_idx012) { arc_deriv = forward_scores_deriv_data[dest_state_idx01]; } // otherwise arc_deriv is 0.0, the arc's score has no effect arc_scores_deriv_data[fsas_arc_idx012] = arc_deriv; leaving_arc_deriv_data[arc_idx] = arc_deriv; }); int32_t state_begin = 
arc_batches_splitter.GetOffset(b, 2), state_end = arc_batches_splitter.GetOffset(b + 1, 2), this_num_states = state_end - state_begin; // `state_score_derivs` is the extra part contributed to // `forward_scores_deriv` by the recursion, for the batch of states we're // currently processing. Array1<FloatType> state_score_derivs(c, this_num_states); SumPerSublist<FloatType>(leaving_arc_deriv, 0, &state_score_derivs); const FloatType *state_score_derivs_data = state_score_derivs.Data(); const int32_t *state_ids_batch_data = state_batches.values.Data() + state_begin; K2_EVAL( c, this_num_states, lambda_modify_state_score_derivs, (int32_t state_idx)->void { int32_t fsas_state_idx01 = state_ids_batch_data[state_idx]; FloatType state_score_extra_deriv = state_score_derivs_data[state_idx]; forward_scores_deriv_data[fsas_state_idx01] += state_score_extra_deriv; }); } } return arc_scores_deriv; } template Array1<float> BackpropGetForwardScores( FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &leaving_arc_batches, bool log_semiring, const Array1<int32_t> *entering_arcs, const Array1<float> &forward_scores, const Array1<float> &forward_scores_deriv_in); template Array1<double> BackpropGetForwardScores( FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &leaving_arc_batches, bool log_semiring, const Array1<int32_t> *entering_arcs, const Array1<double> &forward_scores, const Array1<double> &forward_scores_deriv_in); template <typename FloatType> Array1<FloatType> GetTotScores(FsaVec &fsas, const Array1<FloatType> &forward_scores) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, forward_scores)); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1); K2_CHECK_EQ(num_states, forward_scores.Dim()); const FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity(); Array1<FloatType> tot_scores(c, num_fsas, negative_infinity); FloatType *tot_scores_data = tot_scores.Data(); const int32_t *fsa_row_splits1_data = fsas.RowSplits(1).Data(); const FloatType *forward_scores_data = forward_scores.Data(); K2_EVAL( c, num_fsas, lambda_copy_tot_scores, (int32_t fsa_idx) { int32_t start_state = fsa_row_splits1_data[fsa_idx], start_state_next_fsa = fsa_row_splits1_data[fsa_idx + 1]; if (start_state_next_fsa > start_state) { // non-empty fsa int32_t final_state_idx = start_state_next_fsa - 1; tot_scores_data[fsa_idx] = forward_scores_data[final_state_idx]; } }); return tot_scores; } template <typename FloatType> Array1<FloatType> GetArcPost(FsaVec &fsas, const Array1<FloatType> &forward_scores, const Array1<FloatType> &backward_scores) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, forward_scores)); K2_CHECK(IsCompatible(fsas, backward_scores)); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); K2_CHECK_EQ(num_states, forward_scores.Dim()); K2_CHECK_EQ(num_states, backward_scores.Dim()); Array1<FloatType> arc_scores(c, num_arcs), fsa_neg_tot_scores(c, num_fsas); // minus the tot scores per FSA. 
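  // What the two kernels below compute: for each FSA, tot_score is taken as
  // 0.5 * (forward[final] + backward[start]), and each arc's posterior is
  // arc.score + forward[src] + backward[dest] - tot_score.  E.g. (hypothetical
  // numbers) forward[src] = 1.0, backward[dest] = 2.0, arc.score = 0.5 and
  // tot_score = 3.0 give an arc posterior of 0.5.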
FloatType *arc_scores_data = arc_scores.Data(), *fsa_neg_tot_scores_data = fsa_neg_tot_scores.Data(); const int32_t *fsa_row_splits1 = fsas.RowSplits(1).Data(); const int32_t *fsa_row_ids1 = fsas.RowIds(1).Data(); const int32_t *fsa_row_ids2 = fsas.RowIds(2).Data(); const Arc *arcs = fsas.values.Data(); const FloatType *forward_scores_data = forward_scores.Data(); const FloatType *backward_scores_data = backward_scores.Data(); const FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity(); K2_EVAL( c, num_fsas, lambda_set_fsa_scores, (int32_t fsa_idx0)->void { int32_t begin = fsa_row_splits1[fsa_idx0], end = fsa_row_splits1[fsa_idx0 + 1]; FloatType tot_score = FloatType(0); if (begin != end) { tot_score = FloatType(0.5) * (forward_scores_data[end - 1] + backward_scores_data[begin]); } // We never set the score of a state to positive_infinity, otherwise // we may get NaN when add it with negative_infinity below. But this // usually would not happen for a connected FSA. fsa_neg_tot_scores_data[fsa_idx0] = tot_score != negative_infinity ? -tot_score : negative_infinity; }); K2_EVAL( c, num_arcs, lambda_get_arc_scores, (int32_t arc_idx012) { int32_t src_state_idx1 = arcs[arc_idx012].src_state; int32_t dest_state_idx1 = arcs[arc_idx012].dest_state; float arc_score = arcs[arc_idx012].score; int32_t idx01 = fsa_row_ids2[arc_idx012]; int32_t idx0 = fsa_row_ids1[idx01]; int32_t idx0x = fsa_row_splits1[idx0]; int32_t src_state_idx01 = idx0x + src_state_idx1; int32_t dest_state_idx01 = idx0x + dest_state_idx1; arc_scores_data[arc_idx012] = arc_score + forward_scores_data[src_state_idx01] + backward_scores_data[dest_state_idx01] + fsa_neg_tot_scores_data[idx0]; }); return arc_scores; } // explicit instantiation for those score computation functions above template Array1<float> GetForwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &entering_arc_batches, bool log_semiring, Array1<int32_t> *entering_arcs); template Array1<double> GetForwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &entering_arc_batches, bool log_semiring, Array1<int32_t> *entering_arcs); template Array1<float> GetBackwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &leaving_arc_batches, bool log_semiring); template Array1<double> GetBackwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &leaving_arc_batches, bool log_semiring); template Array1<float> GetArcPost(FsaVec &fsas, const Array1<float> &forward_scores, const Array1<float> &backward_scores); template Array1<double> GetArcPost(FsaVec &fsas, const Array1<double> &forward_scores, const Array1<double> &backward_scores); template Array1<float> GetTotScores(FsaVec &fsas, const Array1<float> &forward_scores); template Array1<double> GetTotScores(FsaVec &fsas, const Array1<double> &forward_scores); Fsa RandomFsa(bool acyclic /*=true*/, int32_t max_symbol /*=50*/, int32_t min_num_arcs /*=0*/, int32_t max_num_arcs /*=1000*/) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetCpuContext(); K2_CHECK_GE(min_num_arcs, 0); K2_CHECK_GE(max_num_arcs, min_num_arcs); K2_CHECK_GE(max_symbol, 0); RaggedShape shape = RandomRaggedShape(false, 2, 2, min_num_arcs, max_num_arcs); int32_t dim0 = shape.Dim0(); // empty Fsa if (dim0 == 0) return Fsa(shape, Array1<Arc>(c, std::vector<Arc>{})); // as there should be no arcs leaving the final_state, we always push back an // empty row here. 
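  // E.g. (hypothetical) if shape.RowSplits(1) == [0, 2, 5], then below
  // ans_row_splits1 becomes [0, 2, 5, 5]; the duplicated last entry is the
  // final state, which gets no leaving arcs.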
Array1<int32_t> ans_row_splits1(c, dim0 + 2); Array1<int32_t> sub_range = ans_row_splits1.Range(0, dim0 + 1); sub_range.CopyFrom(shape.RowSplits(1)); int32_t *ans_row_splits1_data = ans_row_splits1.Data(); ans_row_splits1_data[dim0 + 1] = ans_row_splits1_data[dim0]; // create returned shape RaggedShapeLayer ans_shape_dim; ans_shape_dim.row_splits = ans_row_splits1; ans_shape_dim.cached_tot_size = shape.TotSize(1); RaggedShape ans_shape(std::vector<RaggedShapeLayer>{ans_shape_dim}, true); ans_shape.Populate(); // will be used to generate scores on arcs. std::random_device rd; std::mt19937 gen(rd()); // TODO(haowen): let the users set the range of scores? it's fine to use it // for now as we just use it to test. std::uniform_real_distribution<float> dis_score(0, 10); // create arcs int32_t *row_ids1 = ans_shape.RowIds(1).Data(); int32_t num_states = ans_shape.Dim0(), num_arcs = ans_shape.TotSize(1); int32_t start_state = 0, final_state = num_states - 1; std::vector<Arc> arcs(num_arcs); for (int32_t i = 0; i != num_arcs; ++i) { int32_t curr_state = row_ids1[i]; int32_t dest_state = acyclic ? RandInt(curr_state + 1, final_state) : RandInt(start_state, final_state); int32_t symbol = dest_state == final_state ? -1 : RandInt(0, max_symbol); float score = dis_score(gen); arcs[i] = Arc(curr_state, dest_state, symbol, score); } return Fsa(ans_shape, Array1<Arc>(c, arcs)); } FsaVec RandomFsaVec(int32_t min_num_fsas /*=1*/, int32_t max_num_fsas /*=1000*/, bool acyclic /*=true*/, int32_t max_symbol /*=50*/, int32_t min_num_arcs /*=0*/, int32_t max_num_arcs /*=1000*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(min_num_fsas, 0); K2_CHECK_GE(max_num_fsas, min_num_fsas); int32_t num_fsas = RandInt(min_num_fsas, max_num_fsas); std::vector<Fsa> fsas(num_fsas); for (int32_t i = 0; i != num_fsas; ++i) { fsas[i] = RandomFsa(acyclic, max_symbol, min_num_arcs, max_num_arcs); } return Stack(0, num_fsas, fsas.data()); } DenseFsaVec RandomDenseFsaVec(int32_t min_num_fsas, int32_t max_num_fsas, int32_t min_frames, int32_t max_frames, int32_t min_symbols, int32_t max_symbols, float scores_scale) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetCpuContext(); int32_t num_fsas = RandInt(min_num_fsas, max_num_fsas); // num_symbols includes epsilon but not final-symbol -1. int32_t num_symbols = RandInt(min_symbols, max_symbols); // `num_frames` includes the extra 1 frame for the final-symbol. std::vector<int32_t> num_frames(num_fsas + 1); int32_t tot_frames = 0; for (int32_t i = 0; i < num_fsas; ++i) { num_frames[i] = RandInt(min_frames, max_frames) + 1; tot_frames += num_frames[i]; } Array2<float> scores(c, tot_frames, num_symbols + 1); auto scores_acc = scores.Accessor(); std::vector<int32_t> row_splits_vec(num_fsas + 1); row_splits_vec[0] = 0; int32_t cur_start_frame = 0; RandIntGenerator gen; for (int32_t i = 0; i < num_fsas; ++i) { int32_t this_num_frames = num_frames[i], end_frame = cur_start_frame + this_num_frames; for (int32_t f = cur_start_frame; f + 1 < end_frame; f++) { scores_acc(f, 0) = -std::numeric_limits<float>::infinity(); for (int32_t j = 0; j < num_symbols; j++) scores_acc(f, j + 1) = scores_scale * gen(-50, 50) * 0.01; } // on the last frame the placement of infinity vs. finite is reversed: // -1 gets finite value, others get infinity. 
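    // E.g. (hypothetical, num_symbols == 2): a non-final frame row of `scores`
    // looks like [-inf, s1, s2] (column 0 is reserved for the final-symbol
    // -1), while the last row of each segment looks like [s0, -inf, -inf].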
int32_t f = end_frame - 1; scores_acc(f, 0) = scores_scale * gen(-50, 50) * 0.01; for (int32_t j = 0; j < num_symbols; j++) scores_acc(f, j + 1) = -std::numeric_limits<float>::infinity(); row_splits_vec[i + 1] = cur_start_frame = end_frame; } Array1<int32_t> row_splits(c, row_splits_vec); return DenseFsaVec(RaggedShape2(&row_splits, nullptr, tot_frames), scores); } Ragged<int32_t> GetStartStates(FsaVec &src) { NVTX_RANGE(K2_FUNC); ContextPtr &c = src.Context(); K2_CHECK_EQ(src.NumAxes(), 3); int32_t num_fsas = src.Dim0(); const int32_t *src_row_splits1_data = src.RowSplits(1).Data(); Array1<int32_t> ans_row_splits(c, num_fsas + 1); // will first set the elements of ans_row_splits to the number of states kept // from this FSA (either 0 or 1). int32_t *num_states_data = ans_row_splits.Data(); K2_EVAL( c, num_fsas, lambda_set_num_states, (int32_t fsa_idx0)->void { // 1 if the FSA is not empty, 0 if empty. num_states_data[fsa_idx0] = (src_row_splits1_data[fsa_idx0 + 1] > src_row_splits1_data[fsa_idx0]); }); ExclusiveSum(ans_row_splits, &ans_row_splits); int32_t ans_dim = ans_row_splits.Back(); Ragged<int32_t> ans(RaggedShape2(&ans_row_splits, nullptr, ans_dim), Array1<int32_t>(c, ans_dim)); const int32_t *ans_row_ids1_data = ans.shape.RowIds(1).Data(); int32_t *ans_values_data = ans.values.Data(); K2_EVAL( c, ans_dim, lambda_set_ans_values, (int32_t ans_idx01)->void { int32_t idx0 = ans_row_ids1_data[ans_idx01]; int32_t src_start_state_idx01 = src_row_splits1_data[idx0]; K2_DCHECK_GT(src_row_splits1_data[idx0 + 1], src_row_splits1_data[idx0]); ans_values_data[ans_idx01] = src_start_state_idx01; }); return ans; } FsaVec FsaVecFromArcIndexes(FsaVec &fsas, Ragged<int32_t> &best_arc_indexes) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(best_arc_indexes.NumAxes(), 2); K2_CHECK(IsCompatible(fsas, best_arc_indexes)); K2_CHECK_EQ(fsas.Dim0(), best_arc_indexes.Dim0()); // if there are n arcs (for n > 0), there are n + 1 states; if there are 0 // arcs, there are 0 states (that FSA will have no arcs or states). 
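  // E.g. (hypothetical) if best_arc_indexes has sublist sizes [3, 0, 2], the
  // call below produces states_shape with sublist sizes [4, 0, 3], i.e. each
  // non-empty sublist grows by one and empty sublists stay empty.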
RaggedShape states_shape = ChangeSublistSizePinned(best_arc_indexes.shape, 1); const int32_t *states_shape_row_splits1_data = states_shape.RowSplits(1).Data(); int32_t num_fsas = fsas.Dim0(); int32_t num_states = states_shape.NumElements(); int32_t num_arcs = best_arc_indexes.shape.NumElements(); ContextPtr &context = fsas.Context(); if (num_arcs == 0) { RaggedShape shape_a = RegularRaggedShape(context, num_fsas, 0), shape_b = RegularRaggedShape(context, 0, 0); return FsaVec(ComposeRaggedShapes(shape_a, shape_b), Array1<Arc>(context, 0)); } Array1<int32_t> row_splits2(context, num_states + 1); Array1<int32_t> row_ids2(context, num_arcs); int32_t *row_splits2_data = row_splits2.Data(); int32_t *row_ids2_data = row_ids2.Data(); Array1<Arc> arcs(context, num_arcs); Arc *arcs_data = arcs.Data(); const int32_t *best_arc_indexes_row_splits1_data = best_arc_indexes.RowSplits(1).Data(); const int32_t *best_arc_indexes_row_ids1_data = best_arc_indexes.RowIds(1).Data(); const int32_t *best_arc_indexes_data = best_arc_indexes.values.Data(); const Arc *fsas_values_data = fsas.values.Data(); K2_EVAL( context, num_arcs, lambda_set_arcs, (int32_t best_arc_idx01) { int32_t fsas_idx0 = best_arc_indexes_row_ids1_data[best_arc_idx01]; int32_t best_arc_idx0x = best_arc_indexes_row_splits1_data[fsas_idx0]; int32_t best_arc_idx0x_next = best_arc_indexes_row_splits1_data[fsas_idx0 + 1]; int32_t num_best_arcs = best_arc_idx0x_next - best_arc_idx0x; int32_t best_arc_idx1 = best_arc_idx01 - best_arc_idx0x; int32_t state_offset = states_shape_row_splits1_data[fsas_idx0]; const Arc &arc = fsas_values_data[best_arc_indexes_data[best_arc_idx01]]; int32_t src_state = best_arc_idx1; int32_t dest_state = src_state + 1; int32_t label = arc.label; float score = arc.score; arcs_data[best_arc_idx01] = Arc(src_state, dest_state, label, score); int32_t state_idx01 = state_offset + src_state; row_ids2_data[best_arc_idx01] = state_idx01; row_splits2_data[state_idx01 + 1] = best_arc_idx01 + 1; if (best_arc_idx01 == 0) row_splits2_data[0] = 0; if (best_arc_idx1 + 1 == num_best_arcs) row_splits2_data[state_idx01 + 2] = best_arc_idx01 + 1; }); RaggedShape shape = RaggedShape3(&states_shape.RowSplits(1), &states_shape.RowIds(1), num_states, &row_splits2, &row_ids2, num_arcs); Ragged<Arc> ans(shape, arcs); return ans; } FsaVec GetIncomingFsaVec(FsaVec &fsas) { Array1<int32_t> dest_states = GetDestStates(fsas, true); Ragged<int32_t> arc_indexes = GetIncomingArcs(fsas, dest_states); return FsaVec(arc_indexes.shape, fsas.values[arc_indexes.values]); } Ragged<int32_t> ComposeArcMaps(Ragged<int32_t> &step1_arc_map, Ragged<int32_t> &step2_arc_map) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(step1_arc_map.NumAxes(), 2); K2_CHECK_EQ(step2_arc_map.NumAxes(), 2); return Index(step1_arc_map, step2_arc_map, true); } void FixNumStates(FsaVec *fsas) { K2_CHECK_EQ(fsas->NumAxes(), 3); ContextPtr c = fsas->Context(); int32_t num_fsas = fsas->Dim0(), num_states = fsas->TotSize(1); Array1<int32_t> changed(c, 1, 0); Renumbering renumber_states(c, num_states); renumber_states.Keep() = static_cast<char>(1); // by default keep all states. int32_t *changed_data = changed.Data(); char *keep_data = renumber_states.Keep().Data(); const int32_t *row_splits1_data = fsas->RowSplits(1).Data(); K2_EVAL( c, num_fsas, lambda_set_must_remove, (int32_t i)->void { int32_t num_states = (row_splits1_data[i + 1] - row_splits1_data[i]); if (num_states == 1) keep_data[row_splits1_data[i]] = 0; changed_data[0] = 1; }); if (changed[0] == 0) return; // an optimization.. 
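  // A one-state FSA cannot be valid (a non-empty FSA needs at least a start
  // state and a final state), so the lone states marked above are removed
  // here, leaving those FSAs with zero states.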
fsas->shape = RemoveSomeEmptyLists(fsas->shape, 1, renumber_states); } } // namespace k2
3b7ad426ec294884cffc9039e12a9447fdc432c8.cu
/** * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * Mobvoi Inc. (authors: Fangjun Kuang) * Guoguo Chen * * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include <limits> #include <sstream> #include <utility> #include <vector> #include "k2/csrc/array.h" #include "k2/csrc/context.h" #include "k2/csrc/fsa.h" #include "k2/csrc/fsa_utils.h" #include "k2/csrc/math.h" #include "k2/csrc/ragged.h" namespace k2 { // field separator within a line for a text form FSA static constexpr const char *kDelim = " \t"; // Convert a string to an integer. Abort the program on failure. static int32_t StringToInt(const std::string &s) { NVTX_RANGE(K2_FUNC); K2_CHECK(!s.empty()); bool ok = false; char *p = nullptr; // std::strtol requires a `long` type long n = std::strtol(s.c_str(), &p, 10); // NOLINT if (*p == '\0') ok = true; auto res = static_cast<int32_t>(n); if (n != res) ok = false; // out of range K2_CHECK(ok) << "Failed to convert " << s << " to an integer"; return res; } // Convert a string to a float. Abort the program on failure. // TODO(guoguo): We may run into locale problems, with comma vs. period for // decimals. We have to test if the C code will behave the same // w.r.t. locale as Python does. static float StringToFloat(const std::string &s) { NVTX_RANGE(K2_FUNC); K2_CHECK(!s.empty()); char *p = nullptr; float f = std::strtof(s.c_str(), &p); if (*p != '\0') K2_LOG(FATAL) << "Failed to convert " << s << " to a float"; return f; } // Trim leading and trailing spaces of a string. static void TrimString(std::string *s) { NVTX_RANGE(K2_FUNC); K2_CHECK_NE(s, nullptr); auto not_space = [](int32_t c) -> bool { return std::isspace(c) == 0; }; s->erase(s->begin(), std::find_if(s->begin(), s->end(), not_space)); s->erase(std::find_if(s->rbegin(), s->rend(), not_space).base(), s->end()); } /* Split a string to a vector of strings using a set of delimiters. Example usage: @code std::string in = "1 2 3"; const char *delim = " \t"; std::vector<std::string> out; SplitStringToVector(in, delim, &out); @endcode @param [in] in The input string to be split. @param [in] delim A string of delimiters. @param [out] out It saves the split result. */ static void SplitStringToVector(const std::string &in, const char *delim, std::vector<std::string> *out) { NVTX_RANGE(K2_FUNC); K2_CHECK_NE(delim, nullptr); K2_CHECK_NE(out, nullptr); out->clear(); std::size_t start = 0; while (true) { auto pos = in.find_first_of(delim, start); if (pos == std::string::npos) break; auto sub = in.substr(start, pos - start); start = pos + 1; TrimString(&sub); if (!sub.empty()) out->emplace_back(std::move(sub)); } if (start < in.size()) { auto sub = in.substr(start); TrimString(&sub); if (!sub.empty()) out->emplace_back(std::move(sub)); } } /* Create an acceptor from a stream, assuming the acceptor is in the k2 format: src_state1 dest_state1 label1 score1 src_state2 dest_state2 label2 score2 ... ... final_state The source states will be in non-descending order, and the final state does not bear a cost/score -- we put the cost/score on the arc that connects to the final state and set its label to -1. @param [in] is The input stream that contains the acceptor. @return It returns an Fsa on CPU. */ static Fsa K2AcceptorFromStream(std::istringstream &is) { NVTX_RANGE(K2_FUNC); std::vector<Arc> arcs; std::vector<std::string> splits; std::string line; bool finished = false; // when the final state is read, set it to true. 
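  // Example input in the k2 acceptor format documented above (hypothetical):
  //   0 1 2 0.1
  //   0 2 3 0.2
  //   2 3 -1 0.3
  //   3
  // i.e. "src_state dest_state label score" lines followed by one line
  // holding the final state.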
while (std::getline(is, line)) { SplitStringToVector(line, kDelim, &splits); // splits is cleared in the function if (splits.empty()) continue; // this is an empty line K2_CHECK_EQ(finished, false); auto num_fields = splits.size(); if (num_fields == 4u) { // 0 1 2 3 // src_state dest_state label score int32_t src_state = StringToInt(splits[0]); int32_t dest_state = StringToInt(splits[1]); int32_t symbol = StringToInt(splits[2]); float score = StringToFloat(splits[3]); arcs.emplace_back(src_state, dest_state, symbol, score); } else if (num_fields == 1u) { // 0 // final_state (void)StringToInt(splits[0]); // this is a final state finished = true; // set finish } else { K2_LOG(FATAL) << "Invalid line: " << line << "\nk2 acceptor expects a line with 1 (final_state) or " "4 (src_state dest_state label score) fields"; } } K2_CHECK_EQ(finished, true) << "The last line should be the final state"; bool error = true; Array1<Arc> array(GetCpuContext(), arcs); auto fsa = FsaFromArray1(array, &error); K2_CHECK_EQ(error, false); return fsa; } /* Create a transducer from a stream, assuming the transducer is in the K2 format: src_state1 dest_state1 label1 aux_label1 score1 src_state2 dest_state2 label2 aux_label2 score2 ... ... final_state The source states will be in non-descending order, and the final state does not bear a cost/score -- we put the cost/score on the arc that connects to the final state and set its label to -1. @param [in] is The input stream that contains the transducer. @return It returns an Fsa on CPU. */ static Fsa K2TransducerFromStream(std::istringstream &is, Array1<int32_t> *aux_labels) { NVTX_RANGE(K2_FUNC); K2_CHECK(aux_labels != nullptr); std::vector<int32_t> aux_labels_internal; std::vector<Arc> arcs; std::vector<std::string> splits; std::string line; bool finished = false; // when the final state is read, set it to true. while (std::getline(is, line)) { SplitStringToVector(line, kDelim, &splits); // splits is cleared in the function if (splits.empty()) continue; // this is an empty line K2_CHECK_EQ(finished, false); auto num_fields = splits.size(); if (num_fields == 5u) { // 0 1 2 3 4 // src_state dest_state label aux_label score int32_t src_state = StringToInt(splits[0]); int32_t dest_state = StringToInt(splits[1]); int32_t symbol = StringToInt(splits[2]); int32_t aux_label = StringToInt(splits[3]); float score = StringToFloat(splits[4]); arcs.emplace_back(src_state, dest_state, symbol, score); aux_labels_internal.push_back(aux_label); } else if (num_fields == 1u) { // 0 // final_state (void)StringToInt(splits[0]); finished = true; // set finish } else { K2_LOG(FATAL) << "Invalid line: " << line << "\nk2 transducer expects a line with 1 (final_state) or " "5 (src_state dest_state label aux_label score) fields"; } } K2_CHECK_EQ(finished, true) << "The last line should be the final state"; auto cpu_context = GetCpuContext(); *aux_labels = Array1<int32_t>(cpu_context, aux_labels_internal); Array1<Arc> array(cpu_context, arcs); bool error = true; auto fsa = FsaFromArray1(array, &error); K2_CHECK_EQ(error, false); return fsa; } /* Create an acceptor from a stream, assuming the acceptor is in the OpenFST format: src_state1 dest_state1 label1 score1 src_state2 dest_state2 label2 score2 ... ... final_state final_score We will negate the cost/score when we read them in. Also note, OpenFST may omit the cost/score if it is 0.0. We always create the super final state. 
If there are final state(s) in the original FSA, then we add arc(s) from the original final state(s) to the super final state, with the (negated) old final state cost/score as its cost/score, and -1 as its label. @param [in] is The input stream that contains the acceptor. @return It returns an Fsa on CPU. */ static Fsa OpenFstAcceptorFromStream(std::istringstream &is) { NVTX_RANGE(K2_FUNC); std::vector<Arc> arcs; std::vector<std::vector<Arc>> state_to_arcs; // indexed by states std::vector<std::string> splits; std::string line; int32_t max_state = -1; int32_t num_arcs = 0; std::vector<int32_t> original_final_states; std::vector<float> original_final_weights; while (std::getline(is, line)) { SplitStringToVector(line, kDelim, &splits); // splits is cleared in the function if (splits.empty()) continue; // this is an empty line auto num_fields = splits.size(); if (num_fields == 3u || num_fields == 4u) { // 0 1 2 // src_state dest_state label // // or // // 0 1 2 3 // src_state dest_state label score int32_t src_state = StringToInt(splits[0]); int32_t dest_state = StringToInt(splits[1]); int32_t symbol = StringToInt(splits[2]); float score = 0.0f; if (num_fields == 4u) score = -1.0f * StringToFloat(splits[3]); // Add the arc to "state_to_arcs". ++num_arcs; max_state = std::max(max_state, std::max(src_state, dest_state)); if (static_cast<int32_t>(state_to_arcs.size()) <= src_state) state_to_arcs.resize(src_state + 1); state_to_arcs[src_state].emplace_back(src_state, dest_state, symbol, score); } else if (num_fields == 1u || num_fields == 2u) { // 0 1 // final_state score float score = 0.0f; if (num_fields == 2u) score = -1.0f * StringToFloat(splits[1]); original_final_states.push_back(StringToInt(splits[0])); original_final_weights.push_back(score); max_state = std::max(max_state, original_final_states.back()); } else { K2_LOG(FATAL) << "Invalid line: " << line << "\nOpenFST acceptor expects a line with 1 (final_state)," " 2 (final_state score), 3 (src_state dest_state label) " "or 4 (src_state dest_state label score) fields."; } } K2_CHECK(is.eof()); // Post processing on final states. If there are final state(s) in the // original FSA, we add the super final state as well as arc(s) from original // final state(s) to the super final state. Otherwise, the super final state // will be added by FsaFromArray1 (since there's no arc with label // kFinalSymbol). if (original_final_states.size() > 0) { K2_CHECK_EQ(original_final_states.size(), original_final_weights.size()); int32_t super_final_state = max_state + 1; state_to_arcs.resize(super_final_state); for (std::size_t i = 0; i != original_final_states.size(); ++i) { state_to_arcs[original_final_states[i]].emplace_back( original_final_states[i], super_final_state, -1, // kFinalSymbol original_final_weights[i]); ++num_arcs; } } // Move arcs from "state_to_arcs" to "arcs". int32_t arc_index = 0; arcs.resize(num_arcs); for (std::size_t s = 0; s < state_to_arcs.size(); ++s) { for (std::size_t a = 0; a < state_to_arcs[s].size(); ++a) { K2_CHECK_GT(num_arcs, arc_index); arcs[arc_index] = state_to_arcs[s][a]; ++arc_index; } } K2_CHECK_EQ(num_arcs, arc_index); bool error = true; Array1<Arc> array(GetCpuContext(), arcs); // FsaFromArray1 will add a super final state if the original FSA doesn't have // a final state. 
auto fsa = FsaFromArray1(array, &error); K2_CHECK_EQ(error, false); return fsa; } /* Create a transducer from a stream, assuming the transducer is in the OpenFST format: src_state1 dest_state1 label1 aux_label1 score1 src_state2 dest_state2 label2 aux_label2 score2 ... ... final_state final_score We will negate the cost/score when we read them in. Also note, OpenFST may omit the cost/score if it is 0.0. We always create the super final state. If there are final state(s) in the original FST, then we add arc(s) from the original final state(s) to the super final state, with the (negated) old final state cost/score as its cost/score, -1 as its label and -1 as its aux_label. @param [in] is The input stream that contains the transducer. @return It returns an Fsa on CPU. */ static Fsa OpenFstTransducerFromStream(std::istringstream &is, Array1<int32_t> *aux_labels) { NVTX_RANGE(K2_FUNC); K2_CHECK(aux_labels != nullptr); std::vector<std::vector<int32_t>> state_to_aux_labels; // indexed by states std::vector<std::vector<Arc>> state_to_arcs; // indexed by states std::vector<int32_t> aux_labels_internal; std::vector<Arc> arcs; std::vector<std::string> splits; std::string line; int32_t max_state = -1; int32_t num_arcs = 0; std::vector<int32_t> original_final_states; std::vector<float> original_final_weights; while (std::getline(is, line)) { SplitStringToVector(line, kDelim, &splits); // splits is cleared in the function if (splits.empty()) continue; // this is an empty line auto num_fields = splits.size(); if (num_fields == 4u || num_fields == 5u) { // 0 1 2 3 // src_state dest_state label aux_label // // or // // 0 1 2 3 4 // src_state dest_state label aux_label score int32_t src_state = StringToInt(splits[0]); int32_t dest_state = StringToInt(splits[1]); int32_t symbol = StringToInt(splits[2]); int32_t aux_label = StringToInt(splits[3]); float score = 0.0f; if (num_fields == 5u) score = -1.0f * StringToFloat(splits[4]); // Add the arc to "state_to_arcs", and aux_label to "state_to_aux_labels" ++num_arcs; max_state = std::max(max_state, std::max(src_state, dest_state)); if (static_cast<int32_t>(state_to_arcs.size()) <= src_state) { state_to_arcs.resize(src_state + 1); state_to_aux_labels.resize(src_state + 1); } state_to_arcs[src_state].emplace_back(src_state, dest_state, symbol, score); state_to_aux_labels[src_state].push_back(aux_label); } else if (num_fields == 1u || num_fields == 2u) { // 0 // final_state // // or // // 0 1 // final_state score // There could be multiple final states, so we first have to collect all // the final states, and then work out the super final state. float score = 0.0f; if (num_fields == 2u) score = -1.0f * StringToFloat(splits[1]); original_final_states.push_back(StringToInt(splits[0])); original_final_weights.push_back(score); max_state = std::max(max_state, original_final_states.back()); } else { K2_LOG(FATAL) << "Invalid line: " << line << "\nOpenFST transducer expects a line with " "1 (final_state), 2 (final_state score), " "4 (src_state dest_state label aux_label) or " "5 (src_state dest_state label aux_label score) fields."; } } K2_CHECK(is.eof()); // Post processing on final states. If there are final state(s) in the // original FST, we add the super final state as well as arc(s) from original // final state(s) to the super final state. Otherwise, the super final state // will be added by FsaFromArray1 (since there's no arc with label // kFinalSymbol). 
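  // E.g. (hypothetical): with max_state == 3 and final-state lines "2 0.5" and
  // "3", the code below adds super_final_state = 4 plus arcs 2->4 and 3->4,
  // each with label -1 and aux_label -1, and scores -0.5 and 0.0 (the weights
  // were already negated when read).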
if (original_final_states.size() > 0) { K2_CHECK_EQ(original_final_states.size(), original_final_weights.size()); int32_t super_final_state = max_state + 1; state_to_arcs.resize(super_final_state); state_to_aux_labels.resize(super_final_state); for (std::size_t i = 0; i != original_final_states.size(); ++i) { state_to_arcs[original_final_states[i]].emplace_back( original_final_states[i], super_final_state, -1, // kFinalSymbol original_final_weights[i]); state_to_aux_labels[original_final_states[i]].push_back( -1); // kFinalSymbol ++num_arcs; } } // Move arcs from "state_to_arcs" to "arcs", and aux_labels from // "state_to_aux_labels" to "aux_labels_internal" int32_t arc_index = 0; arcs.resize(num_arcs); aux_labels_internal.resize(num_arcs); K2_CHECK_EQ(state_to_arcs.size(), state_to_aux_labels.size()); for (std::size_t s = 0; s < state_to_arcs.size(); ++s) { K2_CHECK_EQ(state_to_arcs[s].size(), state_to_aux_labels[s].size()); for (std::size_t a = 0; a < state_to_arcs[s].size(); ++a) { K2_CHECK_GT(num_arcs, arc_index); arcs[arc_index] = state_to_arcs[s][a]; aux_labels_internal[arc_index] = state_to_aux_labels[s][a]; ++arc_index; } } K2_CHECK_EQ(num_arcs, arc_index); auto cpu_context = GetCpuContext(); *aux_labels = Array1<int32_t>(cpu_context, aux_labels_internal); Array1<Arc> array(cpu_context, arcs); bool error = true; // FsaFromArray1 will add a super final state if the original FSA doesn't have // a final state. auto fsa = FsaFromArray1(array, &error); K2_CHECK_EQ(error, false); return fsa; } Fsa FsaFromString(const std::string &s, bool openfst /*= false*/, Array1<int32_t> *aux_labels /*= nullptr*/) { NVTX_RANGE(K2_FUNC); std::istringstream is(s); K2_CHECK(is); if (openfst == false && aux_labels == nullptr) return K2AcceptorFromStream(is); else if (openfst == false && aux_labels != nullptr) return K2TransducerFromStream(is, aux_labels); else if (openfst == true && aux_labels == nullptr) return OpenFstAcceptorFromStream(is); else if (openfst == true && aux_labels != nullptr) return OpenFstTransducerFromStream(is, aux_labels); return Fsa(); // unreachable code } std::string FsaToString(const Fsa &fsa, bool openfst /*= false*/, const Array1<int32_t> *aux_labels /*= nullptr*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsa.NumAxes(), 2); if (fsa.Context()->GetDeviceType() != kCpu) { Fsa _fsa = fsa.To(GetCpuContext()); Array1<int32_t> _aux_labels; if (aux_labels) _aux_labels = aux_labels->To(_fsa.Context()); return FsaToString(_fsa, openfst, aux_labels ? 
&_aux_labels : nullptr); } K2_CHECK_EQ(fsa.Context()->GetDeviceType(), kCpu); const Array1<int32_t> &row_splits = fsa.shape.RowSplits(1); const Array1<Arc> &arcs = fsa.values; const int32_t *p = nullptr; if (aux_labels != nullptr) { K2_CHECK(IsCompatible(fsa, *aux_labels)); K2_CHECK_EQ(aux_labels->Dim(), arcs.Dim()); p = aux_labels->Data(); } float scale = 1; if (openfst) scale = -1; std::ostringstream os; int32_t n = arcs.Dim(); char sep = ' '; char line_sep = '\n'; for (int32_t i = 0; i != n; ++i) { const auto &arc = arcs[i]; os << arc.src_state << sep << arc.dest_state << sep << arc.label << sep; if (p != nullptr) os << p[i] << sep; os << (scale * arc.score) << line_sep; } os << (fsa.shape.Dim0() - 1) << line_sep; return os.str(); } Array1<int32_t> GetDestStates(FsaVec &fsas, bool as_idx01) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_arcs = fsas.NumElements(); Array1<int32_t> ans(c, num_arcs); const Arc *arcs_data = fsas.values.Data(); int32_t *ans_data = ans.Data(); if (!as_idx01) { K2_EVAL( c, num_arcs, lambda_set_dest_states1, (int32_t arc_idx012) { ans_data[arc_idx012] = arcs_data[arc_idx012].dest_state; }); } else { const int32_t *row_ids2_data = fsas.RowIds(2).Data(); K2_EVAL( c, num_arcs, lambda_set_dest_states01, (int32_t arc_idx012) { int32_t src_state = arcs_data[arc_idx012].src_state, dest_state = arcs_data[arc_idx012].dest_state; // (row_ids2[arc_idx012] - src_state) is the same as // row_splits1[row_ids1[row_ids2[arc_idx012]]]; it's the idx01 of the // 1st state in this FSA. ans_data[arc_idx012] = dest_state + (row_ids2_data[arc_idx012] - src_state); }); } return ans; } Ragged<int32_t> GetStateBatches(FsaVec &fsas, bool transpose) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &c = fsas.Context(); Array1<int32_t> arc_dest_states = GetDestStates(fsas, true); MonotonicLowerBound(arc_dest_states, &arc_dest_states); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); // We can tune `log_power` as a tradeoff between work done and clock time on // GPU. int32_t log_power = (c->GetDeviceType() == kCpu ? 0 : 4); int32_t max_num_states = fsas.shape.MaxSize(1); // the following avoids doing too much extra work accumulating powers // of 'dest_states' for very small problem sizes. while (log_power > 0 && (1 << (1 + log_power)) > max_num_states) log_power--; // Ignoring edge effects: `dest_states_powers[0]` is just an array indexed by // state_idx01, that gives us the dest_state_idx01 that would be the beginning // of the next batch if state_idx01 were the beginning of the current batch. // So if we follow this chain forward from the start of one of the FSAs until // it passes the end of this FSA, we get the beginnings of the batches // we want. The natural algorithm to find the beginnings of the batches // is sequential. 
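  // The rows of `dest_states_powers` computed below implement pointer
  // doubling: row 0 holds the one-step "start of next batch" pointer described
  // above, and row p holds the 2^p-step pointer, so a chain of k batches can
  // be followed in O(log k) lookups instead of k sequential steps.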
Array2<int32_t> dest_states_powers(c, log_power + 1, num_states); const int32_t *arc_dest_states_data = arc_dest_states.Data(), *fsas_row_splits2_data = fsas.RowSplits(2).Data(); int32_t *dest_states_power_data = dest_states_powers.Data(); // only process Row[0] below const int32_t int_max = std::numeric_limits<int32_t>::max(); K2_EVAL( c, num_states, lambda_set_dest_states, (int32_t state_idx01)->void { int32_t arc_idx01x = fsas_row_splits2_data[state_idx01]; // If this state has arcs, let its `dest_state` be the smallest // `dest_state` of any of its arcs (which is the first element of those // arcs' dest states in `arc_dest_states_data`); otherwise, take the // `dest_state` from the 1st arc of the next state, which is the largest // value we can take (if the definition is: the highest-numbered state s // for which neither this state nor any later-numbered state has an arc // to a state lower than s). // if this state has arcs, // arc_idx01x is the first arc index of this state, we get the // smallest dest state of this state's arcs using // arc_dest_states_data[arc_idx01x] // else // arc_idx01x is the first arc index of the next state, then // arc_dest_states_data[arc_idx01x] is the largest value we can take, // which is also the smallest dest state in the next state. int32_t dest_state = (arc_idx01x < num_arcs ? arc_dest_states_data[arc_idx01x] : int_max); dest_states_power_data[state_idx01] = dest_state; // if the following fails, it's either a code error or the input FSA had // cycles. K2_CHECK_GT(dest_state, state_idx01); }); // `num_batches_per_fsa` will be set to the number of batches of states that // we'll use for each FSA... it corresponds to the number of times we have // to follow links forward in the dest_states array till we pass the // end of the array for this fSA. Array1<int32_t> num_batches_per_fsa(c, num_fsas + 1, 0); // `batch_starts` will contain the locations of the first state_idx01 for each // batch, but in an 'un-consolidated' format. Specifically, for FSA with // index i, the batch_starts for that FSA begin at element fsa.RowSplits(1)[i] // of `batch_starts`. This is just a convenient layout because we know there // can't be more batches than there are states. We'll later consolidate the // information into a single array. Array1<int32_t> batch_starts(c, num_states + 1); int32_t *num_batches_per_fsa_data = num_batches_per_fsa.Data(), *batch_starts_data = batch_starts.Data(); const int32_t *fsas_row_splits1_data = fsas.RowSplits(1).Data(); #if 0 // This is a simple version of the kernel that demonstrates what we're trying // to do with the more complex code. K2_EVAL( c, num_fsas, lambda_set_batch_info_simple, (int32_t fsa_idx) { int32_t begin_state_idx01 = fsas_row_splits1_data[fsa_idx], end_state_idx01 = fsas_row_splits1_data[fsa_idx + 1]; int32_t i = 0, cur_state_idx01 = begin_state_idx01; while (cur_state_idx01 < end_state_idx01) { batch_starts_data[begin_state_idx01 + i] = cur_state_idx01; cur_state_idx01 = dest_states_power_data[cur_state_idx01]; ++i; } num_batches_per_fsa_data[fsa_idx] = i; }); #else int32_t stride = dest_states_powers.ElemStride0(); for (int32_t power = 1; power <= log_power; power++) { const int32_t *src_data = dest_states_powers.Data() + (power - 1) * stride; int32_t *dest_data = dest_states_powers.Data() + power * stride; K2_EVAL( c, num_states, lambda_square_array, (int32_t state_idx01)->void { int32_t dest_state = src_data[state_idx01], dest_state_sq = (dest_state < num_states ? 
                                      src_data[dest_state] : int_max);
          dest_data[state_idx01] = dest_state_sq;
        });
  }
  // jobs_per_fsa tells us how many separate chains of states we'll follow for
  // each FSA.
  // jobs_multiple is a kind of trick to ensure any given warp doesn't
  // issue more memory requests than it can handle at a time (we drop
  // some threads).
  int32_t jobs_per_fsa = (1 << log_power),
          jobs_multiple = (c->GetDeviceType() == kCuda ? 8 : 1);
  while (jobs_multiple > 1 && jobs_per_fsa * jobs_multiple * num_fsas > 10000)
    jobs_multiple /= 2;  // Likely won't get here.  Just reduce multiple if
                         // num-jobs is ridiculous.
  auto dest_states_powers_acc = dest_states_powers.Accessor();
  K2_EVAL2(
      c, num_fsas, jobs_per_fsa * jobs_multiple, lambda_set_batch_info,
      (int32_t fsa_idx, int32_t j) {
        if (j % jobs_multiple != 0)
          return;  // a trick to avoid too much random
                   // memory access for any given warp
        int32_t task_idx = j / jobs_multiple;  // Now 0 <= task_idx < jobs_per_fsa.
        // The task indexed `task_idx` is responsible for batches numbered
        // task_idx, task_idx + jobs_per_fsa, task_idx + 2 * jobs_per_fsa and
        // so on, for the FSA numbered `fsa_idx`.  Comparing this code to
        // `lambda_set_batch_info_simple`, this task is responsible for the
        // assignment to batch_starts_data for all i such that i % jobs_per_fsa
        // == task_idx, together with the assignment to
        // num_batches_per_fsa_data if
        // i % jobs_per_fsa == task_idx (here referring to the i value finally
        // assigned to that location).
        int32_t begin_state_idx01 = fsas_row_splits1_data[fsa_idx],
                end_state_idx01 = fsas_row_splits1_data[fsa_idx + 1];
        int32_t num_states_this_fsa = end_state_idx01 - begin_state_idx01;
        int32_t i = 0, cur_state_idx01 = begin_state_idx01;
        if (task_idx >= num_states_this_fsa) return;
        // The next loop advances `cur_state_idx01` by
        // a number of steps equal to `task_idx`.
        for (int32_t m = 0; m < log_power; ++m) {
          int32_t n = 1 << m;
          if ((task_idx & n) != 0) {
            i += n;
            int32_t next = dest_states_powers_acc(m, cur_state_idx01);
            if (next >= end_state_idx01) return;
            cur_state_idx01 = next;
          }
        }
        K2_CHECK_EQ(i, task_idx);
        while (1) {
          if (i >= num_states_this_fsa) return;
          batch_starts_data[begin_state_idx01 + i] = cur_state_idx01;
          int32_t next_state_idx01 = dest_states_powers_acc(
              log_power,
              cur_state_idx01);  // advance jobs_per_fsa = (1 << log_power)
                                 // steps
          if (next_state_idx01 >= end_state_idx01) {
            // if exactly one step would also be enough to take us past the
            // boundary...
if (dest_states_powers_acc(0, cur_state_idx01) >= end_state_idx01) { num_batches_per_fsa_data[fsa_idx] = i + 1; } return; } else { i += jobs_per_fsa; cur_state_idx01 = next_state_idx01; } } }); #endif ExclusiveSum(num_batches_per_fsa, &num_batches_per_fsa); Array1<int32_t> &ans_row_splits1 = num_batches_per_fsa; int32_t num_batches = num_batches_per_fsa[num_fsas]; Array1<int32_t> ans_row_ids1(c, num_batches); RowSplitsToRowIds(ans_row_splits1, &ans_row_ids1); Array1<int32_t> ans_row_splits2(c, num_batches + 1); const int32_t *ans_row_splits1_data = ans_row_splits1.Data(), *ans_row_ids1_data = ans_row_ids1.Data(); int32_t *ans_row_splits2_data = ans_row_splits2.Data(); ans_row_splits2.Range(num_batches, 1) = num_states; // The kernel below won't // set this last element K2_EVAL( c, num_batches, lambda_set_ans_row_splits2, (int32_t idx01)->void { int32_t idx0 = ans_row_ids1_data[idx01], // Fsa index idx0x = ans_row_splits1_data[idx0], idx1 = idx01 - idx0x, fsas_idx0x = fsas_row_splits1_data[idx0]; // 1st state-idx (idx01) // in fsas_, for this FSA int32_t fsas_idx01 = fsas_idx0x + idx1; // the idx1 is actually the // batch-index, this statement // reflects the 'un-consolidated' // format of `batch_starts`. int32_t this_batch_start = batch_starts_data[fsas_idx01]; ans_row_splits2_data[idx01] = this_batch_start; }); RaggedShape ans_shape = RaggedShape3(&ans_row_splits1, &ans_row_ids1, num_batches, &ans_row_splits2, nullptr, num_states); Array1<int32_t> ans_value = Range(c, num_states, 0); if (transpose) { ans_shape = MakeTransposable(ans_shape); Ragged<int32_t> ans(ans_shape, ans_value); return Transpose(ans); } else { return Ragged<int32_t>(ans_shape, ans_value); } } Ragged<int32_t> GetIncomingArcs(FsaVec &fsas, const Array1<int32_t> &dest_states) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK(IsCompatible(fsas, dest_states)); ContextPtr &c = fsas.Context(); Ragged<int32_t> dest_states_tensor(fsas.shape, dest_states); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); Array1<int32_t> incoming_arcs_order = GetTransposeReordering(dest_states_tensor, num_states), ans_row_ids2 = dest_states[incoming_arcs_order]; // Note: incoming_arcs_row_ids2 will be monotonically increasing Array1<int32_t> ans_row_splits2(c, num_states + 1); RowIdsToRowSplits(ans_row_ids2, &ans_row_splits2); // Axis 1 corresponds to FSA states, so the row-ids and row-splits for axis // 1 are the same as for `fsas`. 
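  // Worked example (hypothetical single FSA with 3 states): if arcs 0, 1, 2
  // have dest states 2, 1, 2, then incoming_arcs_order == [1, 0, 2] and
  // ans_row_ids2 == [1, 2, 2], so the returned ragged array lists, for each
  // state, the indexes of the arcs entering it.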
Array1<int32_t> ans_row_ids1 = fsas.RowIds(1), ans_row_splits1 = fsas.RowSplits(1); return Ragged<int32_t>( RaggedShape3(&ans_row_splits1, &ans_row_ids1, num_states, &ans_row_splits2, &ans_row_ids2, num_arcs), incoming_arcs_order); } Ragged<int32_t> GetLeavingArcIndexBatches(FsaVec &fsas, Ragged<int32_t> &state_batches) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, state_batches)); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = state_batches.Dim0(); K2_DCHECK(state_batches.TotSize(1) == num_fsas * num_batches); K2_DCHECK_EQ(state_batches.NumElements(), num_states); // get ans_shape Array1<int32_t> ans_row_splits3(c, num_states + 1); int32_t *ans_row_splits3_data = ans_row_splits3.Data(); const int32_t *fsa_states_row_splits_data = fsas.RowSplits(2).Data(); const int32_t *batch_states_data = state_batches.values.Data(); K2_EVAL( c, num_states, lambda_set_ans_row_splits3, (int32_t idx) { int32_t state_idx = batch_states_data[idx]; ans_row_splits3_data[idx] = fsa_states_row_splits_data[state_idx + 1] - fsa_states_row_splits_data[state_idx]; }); ExclusiveSum(ans_row_splits3, &ans_row_splits3); Array1<int32_t> ans_row_ids3(c, num_arcs); RowSplitsToRowIds(ans_row_splits3, &ans_row_ids3); RaggedShape ans_shape = ComposeRaggedShapes( state_batches.shape, RaggedShape2(&ans_row_splits3, &ans_row_ids3, num_arcs)); // get ans_values Array1<int32_t> ans_values(c, num_arcs); int32_t *ans_values_data = ans_values.Data(); const int32_t *ans_row_ids3_data = ans_row_ids3.Data(); K2_EVAL( c, num_arcs, lambda_set_ans_values, (int32_t idx0123) { int32_t ans_idx012 = ans_row_ids3_data[idx0123]; int32_t state_idx = batch_states_data[ans_idx012]; // state_idx is idx01 in fsas int32_t fsa_idx01x = fsa_states_row_splits_data[state_idx]; // ans_idx3 is fsas_idx2, i.e. 
the arc idx in a state int32_t ans_idx3 = idx0123 - ans_row_splits3_data[ans_idx012]; ans_values_data[idx0123] = fsa_idx01x + ans_idx3; }); return Ragged<int32_t>(ans_shape, ans_values); } Ragged<int32_t> GetEnteringArcIndexBatches(FsaVec &fsas, Ragged<int32_t> &incoming_arcs, Ragged<int32_t> &state_batches) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, state_batches)); K2_CHECK(IsCompatible(fsas, incoming_arcs)); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(incoming_arcs.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = state_batches.Dim0(); // just using DCHECK below to save time in production code K2_DCHECK(state_batches.TotSize(1) == num_fsas * num_batches); K2_DCHECK_EQ(state_batches.NumElements(), num_states); K2_DCHECK_EQ(incoming_arcs.Dim0(), num_fsas); K2_DCHECK_EQ(incoming_arcs.TotSize(1), num_states); K2_DCHECK_EQ(incoming_arcs.NumElements(), num_arcs); // get ans_shape Array1<int32_t> ans_row_splits3(c, num_states + 1); int32_t *ans_row_splits3_data = ans_row_splits3.Data(); const int32_t *incoming_arcs_row_splits_data = incoming_arcs.RowSplits(2).Data(); const int32_t *batch_states_data = state_batches.values.Data(); K2_EVAL( c, num_states, lambda_set_ans_row_splits3, (int32_t idx) { int32_t state_idx = batch_states_data[idx]; ans_row_splits3_data[idx] = incoming_arcs_row_splits_data[state_idx + 1] - incoming_arcs_row_splits_data[state_idx]; }); ExclusiveSum(ans_row_splits3, &ans_row_splits3); Array1<int32_t> ans_row_ids3(c, num_arcs); RowSplitsToRowIds(ans_row_splits3, &ans_row_ids3); RaggedShape ans_shape = ComposeRaggedShapes( state_batches.shape, RaggedShape2(&ans_row_splits3, &ans_row_ids3, num_arcs)); // get ans_values Array1<int32_t> ans_values(c, num_arcs); int32_t *ans_values_data = ans_values.Data(); const int32_t *ans_row_ids3_data = ans_row_ids3.Data(); const int32_t *incoming_arcs_data = incoming_arcs.values.Data(); K2_EVAL( c, num_arcs, lambda_set_ans_values, (int32_t idx0123) { int32_t ans_idx012 = ans_row_ids3_data[idx0123]; int32_t state_idx = batch_states_data[ans_idx012]; // state_idx is idx01 in // incoming_arcs int32_t incoming_arcs_idx01x = incoming_arcs_row_splits_data[state_idx]; // ans_idx3 is incoming_arcs_idx2, i.e. the entering arc idx for a state int32_t ans_idx3 = idx0123 - ans_row_splits3_data[ans_idx012]; int32_t incoming_arcs_idx012 = incoming_arcs_idx01x + ans_idx3; ans_values_data[idx0123] = incoming_arcs_data[incoming_arcs_idx012]; }); return Ragged<int32_t>(ans_shape, ans_values); } FsaVec ConvertDenseToFsaVec(DenseFsaVec &src) { NVTX_RANGE(K2_FUNC); ContextPtr &c = src.shape.Context(); // caution: 'num_symbols' is the number of symbols excluding the final-symbol // -1. int32_t num_fsas = src.shape.Dim0(), num_symbols = src.scores.Dim1() - 1; // the "1" is the extra state per FSA we need in the FsaVec format, // for the final-state. RaggedShape fsa2state = ChangeSublistSize(src.shape, 1); // again, the "+num_fsas" below is the extra state per FSA we need in the // FsaVec format, for the final-state. int32_t num_states = src.shape.NumElements() + num_fsas; // The explanation num-arcs below is as follows: // Firstly, all rows of src.scores (==all elements of src.shape) correspond // to states with arcs leaving them. 
Most of them have `num_symbols` arcs, // but the final one for each FSA has 1 arc (with symbol -1) int32_t num_arcs = src.shape.NumElements() * num_symbols - (num_symbols - 1) * num_fsas; Array1<int32_t> row_splits2(c, num_states + 1), row_ids2(c, num_arcs); const int32_t *row_ids1_data = fsa2state.RowIds(1).Data(), *src_row_ids1_data = src.shape.RowIds(1).Data(), *src_row_splits1_data = src.shape.RowSplits(1).Data(); Array1<Arc> arcs(c, num_arcs); Arc *arcs_data = arcs.Data(); auto scores_acc = src.scores.Accessor(); int32_t *row_splits2_data = row_splits2.Data(), *row_ids2_data = row_ids2.Data(); // 0 <= s < num_symbols; note, `num_symbols` excludes the final-symbol (-1). // note: `src` means: w.r.t. the numbering in the original DenseFsaVec. K2_EVAL2( c, src.shape.NumElements(), num_symbols, lambda_set_arcs_etc, (int32_t src_state_idx01, int32_t s)->void { int32_t fsa_idx0 = src_row_ids1_data[src_state_idx01], src_state_idx0x = src_row_splits1_data[fsa_idx0], state_idx1 = src_state_idx01 - src_state_idx0x, src_next_state_idx0x = src_row_splits1_data[fsa_idx0 + 1], src_num_states1 = src_next_state_idx0x - src_state_idx0x, ans_state_idx01 = src_state_idx01 + fsa_idx0; // we add one final-state per FSA.. // "+ fsa_idx0" gives the // difference from old->new // numbering. // arc_idx0xx is the 1st arc-index of the FSA we are creating.. each // source state has `num_symbols` arcs leaving it except the last one of // each FSA, which has 1 arc leaving it (to the final-state). int32_t arc_idx0xx = (src_state_idx0x * num_symbols) - fsa_idx0 * (num_symbols - 1), arc_idx01x = arc_idx0xx + (state_idx1 * num_symbols), arc_idx012 = arc_idx01x + s; int32_t symbol_offset; if (state_idx1 + 1 == src_num_states1) { symbol_offset = -1; if (s > 0) return; // we just need the arc with -1. // if this is the state before the final state of this FSA. it has the // responsibility to write the row_splits2 value for the final state. // It's arc_idx012 + 1; the "+1" corresponds to the single arc with // the final-symbol on it. row_splits2_data[ans_state_idx01 + 1] = arc_idx012 + 1; } else { symbol_offset = 0; } // the "+ 1" is because index 0 in `scores` is for the final-symbol -1, // then 0, 1, etc. int32_t symbol_index_in_scores = s + symbol_offset + 1; arcs_data[arc_idx012] = Arc(state_idx1, state_idx1 + 1, s + symbol_offset, scores_acc(src_state_idx01, symbol_index_in_scores)); row_ids2_data[arc_idx012] = ans_state_idx01; if (s == 0) { // 1st arc for this state. 
row_splits2_data[ans_state_idx01] = arc_idx012; K2_CHECK(row_ids1_data[ans_state_idx01] == fsa_idx0); if (src_state_idx01 == 0) row_splits2_data[num_states] = num_arcs; } }); RaggedShape state2arc = RaggedShape2(&row_splits2, &row_ids2, num_arcs); return Ragged<Arc>(ComposeRaggedShapes(fsa2state, state2arc), arcs); } template <typename FloatType> Array1<FloatType> GetForwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &entering_arc_batches, bool log_semiring, Array1<int32_t> *entering_arcs) { NVTX_RANGE(K2_FUNC); K2_STATIC_ASSERT((std::is_same<float, FloatType>::value || std::is_same<double, FloatType>::value)); K2_CHECK(IsCompatible(fsas, state_batches)); K2_CHECK(IsCompatible(fsas, entering_arc_batches)); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); K2_CHECK_EQ(entering_arc_batches.NumAxes(), 4); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = state_batches.Dim0(); // just using DCHECK below to save time in production code K2_DCHECK_EQ(state_batches.TotSize(1), num_fsas * num_batches); K2_DCHECK_EQ(state_batches.NumElements(), num_states); K2_DCHECK_EQ(entering_arc_batches.Dim0(), num_batches); K2_DCHECK_EQ(entering_arc_batches.TotSize(1), state_batches.TotSize(1)); K2_DCHECK_EQ(entering_arc_batches.TotSize(2), num_states); K2_DCHECK_EQ(entering_arc_batches.NumElements(), num_arcs); const int32_t *fsas_row_ids1_data = fsas.RowIds(1).Data(), *fsas_row_splits1_data = fsas.RowSplits(1).Data(), *fsas_row_ids2_data = fsas.RowIds(2).Data(); const FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity(); Array1<FloatType> state_scores(c, num_states, negative_infinity); FloatType *state_scores_data = state_scores.Data(); // set the score of start state in each fsa to be 0 const int32_t *fsa_row_splits1 = fsas.RowSplits(1).Data(); K2_EVAL( c, num_fsas, lambda_set_start_state_score, (int32_t fsa_idx) { int32_t start_state = fsa_row_splits1[fsa_idx], start_state_next_fsa = fsa_row_splits1[fsa_idx + 1]; if (start_state_next_fsa - start_state > 0) state_scores_data[start_state] = 0; }); const Arc *arcs = fsas.values.Data(); int32_t *entering_arcs_data = nullptr; if (entering_arcs) { K2_CHECK_EQ(log_semiring, false) << " entering_arcs supplied"; *entering_arcs = Array1<int32_t>(c, num_states, -1); entering_arcs_data = entering_arcs->Data(); } RaggedAxis0Splitter<int32_t> arc_batches_splitter(entering_arc_batches); // process batch sequentially. for (int32_t i = 0; i < num_batches; ++i) { // entering_arc_batch is indexed [fsa][state_list][arc_list] int32_t arc_begin; Ragged<int32_t> entering_arc_batch = arc_batches_splitter.GetElement(i, &arc_begin); const int32_t *entering_arc_batch_data = entering_arc_batch.values.Data(); int32_t state_begin = arc_batches_splitter.GetOffset(i, 2), state_end = arc_batches_splitter.GetOffset(i + 1, 2), num_states_this_batch = state_end - state_begin, num_arcs_this_batch = entering_arc_batch.NumElements(); Array1<int32_t> states_batch = state_batches.values.Arange(state_begin, state_end); const int32_t *states_batch_data = states_batch.Data(); Ragged<FloatType> entering_arc_batch_scores(entering_arc_batch.shape); FloatType *entering_arc_batch_scores_data = entering_arc_batch_scores.values.Data(); // get entering arc scores K2_EVAL( c, num_arcs_this_batch, lambda_set_entering_arc_score, (int32_t idx012)->void { // `idx012` is into the batch. 
int32_t fsas_arc_idx012 = entering_arc_batch_data[idx012]; float curr_arc_score = arcs[fsas_arc_idx012].score; int32_t src_state_idx01 = fsas_row_ids2_data[fsas_arc_idx012]; entering_arc_batch_scores_data[idx012] = state_scores_data[src_state_idx01] + curr_arc_score; }); Array1<FloatType> state_batch_scores(c, num_states_this_batch); FloatType *state_batch_scores_data = state_batch_scores.Data(); // get scores per state in this batch if (log_semiring) { LogSumPerSublist(entering_arc_batch_scores, negative_infinity, &state_batch_scores); } else { if (entering_arcs_data == nullptr) { MaxPerSublist(entering_arc_batch_scores, negative_infinity, &state_batch_scores); } else { // entering_arc_idxs will contain indexes into // `entering_arc_batch_scores`, equiv. to indexes into // `entering_arc_batch`. Array1<int32_t> entering_arc_idxs(c, num_states_this_batch); ArgMaxPerSublist(entering_arc_batch_scores, negative_infinity, &entering_arc_idxs); const int32_t *entering_arc_idxs_data = entering_arc_idxs.Data(), *entering_arc_batch_data = entering_arc_batch.values.Data(); // arc_idx01 below is an index into sub_scores, it is also an arc_idx123 // into entering_arc_batches. K2_EVAL( c, num_states_this_batch, lambda_set_entering_arcs_etc, (int32_t state_idx) { // state_idx is into state_batch_scores_data // and entering_arc_idxs. // arc_idx is into entering_arc_batch_data. int32_t arc_idx = entering_arc_idxs_data[state_idx]; FloatType score; int32_t fsas_arc_idx012; if (arc_idx == -1) { score = negative_infinity; fsas_arc_idx012 = -1; } else { fsas_arc_idx012 = entering_arc_batch_data[arc_idx]; score = entering_arc_batch_scores_data[arc_idx]; } // we'll later ignore this score if it was the start state. state_batch_scores_data[state_idx] = score; int32_t fsas_state_idx01 = states_batch_data[state_idx]; entering_arcs_data[fsas_state_idx01] = fsas_arc_idx012; }); } } // Copy those scores to the corresponding state in state_scores. // `state_idx` is an index into `states_batch_data.values`. K2_EVAL( c, num_states_this_batch, lambda_copy_state_scores, (int32_t state_idx) { int32_t fsas_state_idx01 = states_batch_data[state_idx]; FloatType score = state_batch_scores_data[state_idx]; // The if-statement below is to prevent it overriding the zero score // for the start-states. We only bother checking whether it's a start // state if the score is -infinity, to save memory bandwidth. (It // would always be -infinity for start states because they have no // entering arcs; these FSAs are acyclic. 
if (score != negative_infinity || fsas_state_idx01 != fsas_row_splits1_data[fsas_row_ids1_data[fsas_state_idx01]]) { state_scores_data[fsas_state_idx01] = score; } }); } return state_scores; } template <typename FloatType> void BackpropGetArcPost(FsaVec &fsas, Ragged<int32_t> &incoming_arcs, const Array1<FloatType> &arc_post_deriv, Array1<FloatType> *forward_scores_deriv, Array1<FloatType> *backward_scores_deriv) { NVTX_RANGE(K2_FUNC); K2_STATIC_ASSERT((std::is_same<float, FloatType>::value || std::is_same<double, FloatType>::value)); K2_CHECK(forward_scores_deriv != nullptr && backward_scores_deriv != nullptr); ContextPtr c = GetContext(fsas, incoming_arcs, arc_post_deriv); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(incoming_arcs.NumAxes(), 3); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); K2_CHECK_EQ(arc_post_deriv.Dim(), num_arcs); K2_DCHECK_EQ(incoming_arcs.Dim0(), num_fsas); K2_DCHECK_EQ(incoming_arcs.TotSize(1), num_states); K2_DCHECK_EQ(incoming_arcs.TotSize(2), num_arcs); *forward_scores_deriv = Array1<FloatType>(c, num_states); *backward_scores_deriv = Array1<FloatType>(c, num_states); // compute forward_scores_deriv Ragged<FloatType> ragged_forward_scores_deriv(fsas.shape, arc_post_deriv); SumPerSublist<FloatType>(ragged_forward_scores_deriv, FloatType(0), forward_scores_deriv); // compute backward_scores_deriv Array1<FloatType> incoming_arc_post_deriv = arc_post_deriv[incoming_arcs.values]; Ragged<FloatType> ragged_backward_scores_deriv(incoming_arcs.shape, incoming_arc_post_deriv); SumPerSublist<FloatType>(ragged_backward_scores_deriv, FloatType(0), backward_scores_deriv); // set the forward_scores_deriv for the final state and backward_scores_deriv // for the start state. Ragged<FloatType> arc_post_deriv_per_fsa = ragged_forward_scores_deriv.RemoveAxis(1); Array1<FloatType> tot_arc_post_deriv(c, num_fsas); SumPerSublist<FloatType>(arc_post_deriv_per_fsa, FloatType(0), &tot_arc_post_deriv); FloatType *tot_arc_post_deriv_data = tot_arc_post_deriv.Data(), *forward_scores_deriv_data = forward_scores_deriv->Data(), *backward_scores_deriv_data = backward_scores_deriv->Data(); const int32_t *fsa_row_splits1_data = fsas.RowSplits(1).Data(); K2_EVAL( c, num_fsas, lambda_set_deriv_for_start_and_final_state, (int32_t fsa_idx) { int32_t start_state = fsa_row_splits1_data[fsa_idx], start_state_next_fsa = fsa_row_splits1_data[fsa_idx + 1]; if (start_state_next_fsa - start_state > 0) { FloatType deriv = FloatType(-0.5) * tot_arc_post_deriv_data[fsa_idx]; forward_scores_deriv_data[start_state_next_fsa - 1] = deriv; backward_scores_deriv_data[start_state] = deriv; } }); } template void BackpropGetArcPost(FsaVec &fsas, Ragged<int32_t> &incoming_arcs, const Array1<float> &arc_post_deriv, Array1<float> *forward_scores_deriv, Array1<float> *backward_scores_deriv); template void BackpropGetArcPost(FsaVec &fsas, Ragged<int32_t> &incoming_arcs, const Array1<double> &arc_post_deriv, Array1<double> *forward_scores_deriv, Array1<double> *backward_scores_deriv); template <typename FloatType> Array1<FloatType> GetBackwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &leaving_arc_batches, bool log_semiring /*= true*/) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, state_batches)); K2_CHECK(IsCompatible(fsas, leaving_arc_batches)); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); K2_CHECK_EQ(leaving_arc_batches.NumAxes(), 4); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = 
fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = state_batches.Dim0(); // just using DCHECK below to save time in production code K2_DCHECK_EQ(state_batches.NumElements(), num_states); K2_DCHECK_EQ(leaving_arc_batches.Dim0(), num_batches); K2_DCHECK_EQ(leaving_arc_batches.TotSize(1), state_batches.TotSize(1)); K2_DCHECK_EQ(leaving_arc_batches.TotSize(2), num_states); K2_DCHECK_EQ(leaving_arc_batches.NumElements(), num_arcs); const FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity(); Array1<FloatType> state_scores(c, num_states, negative_infinity); FloatType *state_scores_data = state_scores.Data(); const int32_t *fsas_row_splits1_data = fsas.RowSplits(1).Data(), *fsas_row_ids1_data = fsas.RowIds(1).Data(), *fsas_row_ids2_data = fsas.RowIds(2).Data(); // set the score of final state in each fsa to be 0 K2_EVAL( c, num_fsas, lambda_set_final_state_score, (int32_t fsa_idx) { int32_t start_state = fsas_row_splits1_data[fsa_idx], start_state_next_fsa = fsas_row_splits1_data[fsa_idx + 1]; if (start_state_next_fsa - start_state > 0) state_scores_data[start_state_next_fsa - 1] = 0; }); RaggedAxis0Splitter<int32_t> arc_batches_splitter(leaving_arc_batches); const Arc *arcs = fsas.values.Data(); // process batch sequentially. for (int32_t i = num_batches - 1; i >= 0; --i) { int32_t arc_begin; Ragged<int32_t> this_arc_batch = arc_batches_splitter.GetElement(i, &arc_begin); int32_t state_begin = arc_batches_splitter.GetOffset(i, 2), state_end = arc_batches_splitter.GetOffset(i + 1, 2), num_states_this_batch = state_end - state_begin, num_arcs_this_batch = this_arc_batch.NumElements(); Ragged<FloatType> this_arc_batch_scores(this_arc_batch.shape); const int32_t *this_arc_batch_data = this_arc_batch.values.Data(); FloatType *this_arc_batch_scores_data = this_arc_batch_scores.values.Data(); // Get arc backward scores at the beginning of arcs in this batch K2_EVAL( c, num_arcs_this_batch, lambda_set_leaving_arc_score, (int32_t arc_idx) { int32_t fsa_arc_idx012 = this_arc_batch_data[arc_idx]; float curr_arc_score = arcs[fsa_arc_idx012].score; int32_t dest_state_idx1 = arcs[fsa_arc_idx012].dest_state, src_state_idx1 = arcs[fsa_arc_idx012].src_state, src_state_idx01 = fsas_row_ids2_data[fsa_arc_idx012], idx0x = src_state_idx01 - src_state_idx1, dest_state_idx01 = idx0x + dest_state_idx1; this_arc_batch_scores_data[arc_idx] = state_scores_data[dest_state_idx01] + curr_arc_score; }); Array1<FloatType> this_batch_state_scores(c, num_states_this_batch); // get scores per state in this batch if (log_semiring) { LogSumPerSublist(this_arc_batch_scores, negative_infinity, &this_batch_state_scores); } else { MaxPerSublist(this_arc_batch_scores, negative_infinity, &this_batch_state_scores); } Array1<int32_t> this_batch_state_ids = state_batches.values.Arange(state_begin, state_end); const int32_t *this_batch_state_ids_data = this_batch_state_ids.Data(); const FloatType *this_batch_state_scores_data = this_batch_state_scores.Data(); // copy those scores to the corresponding states in state_scores (they are // in a different order). K2_EVAL( c, num_states_this_batch, lambda_copy_state_scores, (int32_t state_idx) { int32_t fsas_state_idx01 = this_batch_state_ids_data[state_idx]; FloatType score = this_batch_state_scores_data[state_idx]; if (score != negative_infinity || fsas_state_idx01 + 1 != fsas_row_splits1_data[fsas_row_ids1_data[fsas_state_idx01] + 1]) { // The if-block is to ensure we don't overwrite the final-states' // backward-probs (0) with -infinity. 
We check the score first to // avoid unnecessary memory traffic. state_scores_data[fsas_state_idx01] = score; } }); } return state_scores; } template <typename FloatType> Array1<FloatType> BackpropGetBackwardScores( FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &entering_arc_batches, bool log_semiring, const Array1<FloatType> &backward_scores, const Array1<FloatType> &backward_scores_deriv_in) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetContext(fsas, state_batches, entering_arc_batches, backward_scores, backward_scores_deriv_in); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); K2_CHECK_EQ(entering_arc_batches.NumAxes(), 4); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = entering_arc_batches.Dim0(); K2_DCHECK_EQ(state_batches.TotSize(1), num_fsas * num_batches); K2_DCHECK_EQ(state_batches.NumElements(), num_states); K2_DCHECK_EQ(entering_arc_batches.Dim0(), num_batches); K2_DCHECK_EQ(entering_arc_batches.TotSize(1), state_batches.TotSize(1)); K2_DCHECK_EQ(entering_arc_batches.TotSize(2), num_states); K2_DCHECK_EQ(entering_arc_batches.NumElements(), num_arcs); K2_DCHECK_EQ(backward_scores.Dim(), num_states); K2_DCHECK_EQ(backward_scores_deriv_in.Dim(), num_states); // We will be adding to the elements of `backward_scores_deriv`. // `backward_scores_deriv_in` was just the derivative w.r.t. the output of // GetBackwardScores(), but because GetBackwardScores() is recursive, // the derivatives for earlier states contribute to those of later ones. Array1<FloatType> backward_scores_deriv(backward_scores_deriv_in.Clone()); FloatType *backward_scores_deriv_data = backward_scores_deriv.Data(); const int32_t *fsas_row_splits1_data = fsas.RowSplits(1).Data(), *fsas_row_ids1_data = fsas.RowIds(1).Data(), *fsas_row_ids2_data = fsas.RowIds(2).Data(); const FloatType *backward_scores_data = backward_scores.Data(); const Arc *arcs = fsas.values.Data(); Array1<FloatType> arc_scores_deriv(c, num_arcs); // will return this. FloatType *arc_scores_deriv_data = arc_scores_deriv.Data(); RaggedAxis0Splitter<int32_t> arc_batches_splitter(entering_arc_batches); const FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity(); if (log_semiring) { // For each batch of states, from start to end (opposite direction to // GetBackwardScores())... 
for (int32_t b = 0; b < num_batches; ++b) { int32_t arc_begin; Ragged<int32_t> entering_arc_batch = arc_batches_splitter.GetElement(b, &arc_begin); const int32_t *entering_arc_batch_data = entering_arc_batch.values.Data(); Ragged<FloatType> entering_arc_deriv(entering_arc_batch.shape); FloatType *entering_arc_deriv_data = entering_arc_deriv.values.Data(); K2_EVAL( c, entering_arc_batch.NumElements(), lambda_set_arc_deriv_etc, (int32_t arc_idx) { int32_t fsas_arc_idx012 = entering_arc_batch_data[arc_idx]; const Arc &arc = arcs[fsas_arc_idx012]; int32_t dest_state_idx1 = arc.dest_state, src_state_idx1 = arc.src_state, src_state_idx01 = fsas_row_ids2_data[fsas_arc_idx012], state_idx0x = src_state_idx01 - src_state_idx1, dest_state_idx01 = state_idx0x + dest_state_idx1; FloatType dest_score = backward_scores_data[dest_state_idx01], arc_begin_score = dest_score + arc.score, src_score = backward_scores_data[src_state_idx01]; // so that arc_begin_score - src_score will never be nan if (src_score == negative_infinity) src_score = -negative_infinity; // alpha = d(src_score) / d(arc_begin_score) FloatType alpha = exp(arc_begin_score - src_score), arc_deriv = alpha * backward_scores_deriv_data[src_state_idx01]; K2_CHECK_LT(alpha, 1.1); arc_scores_deriv_data[fsas_arc_idx012] = arc_deriv; entering_arc_deriv_data[arc_idx] = arc_deriv; }); int32_t state_begin = arc_batches_splitter.GetOffset(b, 2), state_end = arc_batches_splitter.GetOffset(b + 1, 2), this_num_states = state_end - state_begin; // `state_score_derivs` is the extra part contributed to // `backward_scores_deriv` by the recursion, for the batch of states we're // currently processing. Array1<FloatType> state_score_derivs(c, this_num_states); SumPerSublist<FloatType>(entering_arc_deriv, 0, &state_score_derivs); const FloatType *state_score_derivs_data = state_score_derivs.Data(); const int32_t *state_ids_batch_data = state_batches.values.Data() + state_begin; K2_EVAL( c, this_num_states, lambda_modify_state_score_derivs, (int32_t state_idx) { int32_t fsas_state_idx01 = state_ids_batch_data[state_idx]; FloatType state_score_extra_deriv = state_score_derivs_data[state_idx]; backward_scores_deriv_data[fsas_state_idx01] += state_score_extra_deriv; }); } } else { // in a single kernel, figure out the contribution of each arc to its // source-state's backward prob by seeing which outgoing arc contributes the // max loglike; this uses the shape of the fsas. Note, it's arbitrary in // case of ties, we pick one. 
Ragged<FloatType> arc_begin_scores(fsas.shape); FloatType *arc_begin_scores_data = arc_begin_scores.values.Data(); K2_EVAL( c, num_arcs, lambda_set_arc_begin_scores, (int32_t arc_idx012) { const Arc &arc = arcs[arc_idx012]; int32_t dest_state_idx1 = arc.dest_state, src_state_idx1 = arc.src_state, src_state_idx01 = fsas_row_ids2_data[arc_idx012], state_idx0x = src_state_idx01 - src_state_idx1, dest_state_idx01 = state_idx0x + dest_state_idx1; FloatType dest_score = backward_scores_data[dest_state_idx01], arc_begin_score = dest_score + arc.score; arc_begin_scores_data[arc_idx012] = arc_begin_score; }); Array1<int32_t> best_leaving_arc_idx(c, num_states); ArgMaxPerSublist(arc_begin_scores, negative_infinity, &best_leaving_arc_idx); const int32_t *best_leaving_arc_idx_data = best_leaving_arc_idx.Data(); for (int32_t b = 0; b < num_batches; ++b) { int32_t arc_begin; Ragged<int32_t> entering_arc_batch = arc_batches_splitter.GetElement(b, &arc_begin); const int32_t *entering_arc_batch_data = entering_arc_batch.values.Data(); Ragged<FloatType> entering_arc_deriv(entering_arc_batch.shape); FloatType *entering_arc_deriv_data = entering_arc_deriv.values.Data(); K2_EVAL( c, entering_arc_batch.NumElements(), lambda_set_arc_deriv_etc, (int32_t arc_idx)->void { int32_t fsas_arc_idx012 = entering_arc_batch_data[arc_idx]; int32_t src_state_idx01 = fsas_row_ids2_data[fsas_arc_idx012]; FloatType arc_deriv = FloatType(0); if (best_leaving_arc_idx_data[src_state_idx01] == fsas_arc_idx012) { arc_deriv = backward_scores_deriv_data[src_state_idx01]; } // otherwise arc_deriv is 0.0, the arc's score has no effect arc_scores_deriv_data[fsas_arc_idx012] = arc_deriv; entering_arc_deriv_data[arc_idx] = arc_deriv; }); int32_t state_begin = arc_batches_splitter.GetOffset(b, 2), state_end = arc_batches_splitter.GetOffset(b + 1, 2), this_num_states = state_end - state_begin; // `state_score_derivs` is the extra part contributed to // `backward_scores_deriv` by the recursion, for the batch of states we're // currently processing. 
Array1<FloatType> state_score_derivs(c, this_num_states); SumPerSublist<FloatType>(entering_arc_deriv, 0, &state_score_derivs); const FloatType *state_score_derivs_data = state_score_derivs.Data(); const int32_t *state_ids_batch_data = state_batches.values.Data() + state_begin; K2_EVAL( c, this_num_states, lambda_modify_state_score_derivs, (int32_t state_idx)->void { int32_t fsas_state_idx01 = state_ids_batch_data[state_idx]; FloatType state_score_extra_deriv = state_score_derivs_data[state_idx]; backward_scores_deriv_data[fsas_state_idx01] += state_score_extra_deriv; }); } } return arc_scores_deriv; } template Array1<float> BackpropGetBackwardScores( FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &entering_arc_batches, bool log_semiring, const Array1<float> &backward_scores, const Array1<float> &backward_scores_deriv_in); template Array1<double> BackpropGetBackwardScores( FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &entering_arc_batches, bool log_semiring, const Array1<double> &backward_scores, const Array1<double> &backward_scores_deriv_in); template <typename FloatType> Array1<FloatType> BackpropGetForwardScores( FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &leaving_arc_batches, bool log_semiring, const Array1<int32_t> *entering_arcs, const Array1<FloatType> &forward_scores, const Array1<FloatType> &forward_scores_deriv_in) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetContext(fsas, state_batches, leaving_arc_batches, forward_scores, forward_scores_deriv_in); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(state_batches.NumAxes(), 3); K2_CHECK_EQ(leaving_arc_batches.NumAxes(), 4); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); int32_t num_batches = leaving_arc_batches.Dim0(); K2_DCHECK_EQ(state_batches.TotSize(1), num_fsas * num_batches); K2_DCHECK_EQ(state_batches.NumElements(), num_states); K2_DCHECK_EQ(leaving_arc_batches.Dim0(), num_batches); K2_DCHECK_EQ(leaving_arc_batches.TotSize(1), state_batches.TotSize(1)); K2_DCHECK_EQ(leaving_arc_batches.TotSize(2), num_states); K2_DCHECK_EQ(leaving_arc_batches.NumElements(), num_arcs); K2_DCHECK_EQ(forward_scores.Dim(), num_states); K2_DCHECK_EQ(forward_scores_deriv_in.Dim(), num_states); // We will be adding to the elements of `forward_scores_deriv`. // `forward_scores_deriv_in` was just the derivative w.r.t. the output of // GetForwardScores(), but because GetForwardScores() is recursive, // the derivatives for later states contribute to those of earlier ones. Array1<FloatType> forward_scores_deriv(forward_scores_deriv_in.Clone()); FloatType *forward_scores_deriv_data = forward_scores_deriv.Data(); const int32_t *fsas_row_splits1_data = fsas.RowSplits(1).Data(), *fsas_row_ids1_data = fsas.RowIds(1).Data(), *fsas_row_ids2_data = fsas.RowIds(2).Data(); const FloatType *forward_scores_data = forward_scores.Data(); const Arc *arcs = fsas.values.Data(); Array1<FloatType> arc_scores_deriv(c, num_arcs); // will return this. FloatType *arc_scores_deriv_data = arc_scores_deriv.Data(); RaggedAxis0Splitter<int32_t> arc_batches_splitter(leaving_arc_batches); const FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity(); if (log_semiring) { // For each batch of states, from end to start (opposite direction to // GetForwardScores())... 
for (int32_t b = num_batches - 1; b >= 0; --b) { int32_t arc_begin; Ragged<int32_t> leaving_arc_batch = arc_batches_splitter.GetElement(b, &arc_begin); int32_t *leaving_arc_batch_data = leaving_arc_batch.values.Data(); Ragged<FloatType> leaving_arc_deriv(leaving_arc_batch.shape); FloatType *leaving_arc_deriv_data = leaving_arc_deriv.values.Data(); K2_EVAL( c, leaving_arc_batch.NumElements(), lambda_set_arc_deriv_etc, (int32_t arc_idx) { int32_t fsas_arc_idx012 = leaving_arc_batch_data[arc_idx]; const Arc &arc = arcs[fsas_arc_idx012]; int32_t dest_state_idx1 = arc.dest_state, src_state_idx1 = arc.src_state, src_state_idx01 = fsas_row_ids2_data[fsas_arc_idx012], state_idx0x = src_state_idx01 - src_state_idx1, dest_state_idx01 = state_idx0x + dest_state_idx1; FloatType src_score = forward_scores_data[src_state_idx01], arc_end_score = src_score + arc.score, dest_score = forward_scores_data[dest_state_idx01]; // so that arc_end_score - dest_score will never be nan if (dest_score == negative_infinity) dest_score = -negative_infinity; // alpha = d(dest_score) / d(arc_end_score) FloatType alpha = exp(arc_end_score - dest_score), arc_deriv = alpha * forward_scores_deriv_data[dest_state_idx01]; K2_CHECK_LT(alpha, 1.1); arc_scores_deriv_data[fsas_arc_idx012] = arc_deriv; leaving_arc_deriv_data[arc_idx] = arc_deriv; }); int32_t state_begin = arc_batches_splitter.GetOffset(b, 2), state_end = arc_batches_splitter.GetOffset(b + 1, 2), this_num_states = state_end - state_begin; // `state_score_derivs` is the extra part contributed to // `forward_scores_deriv` by the recursion, for the batch of states we're // currently processing. Array1<FloatType> state_score_derivs(c, this_num_states); SumPerSublist<FloatType>(leaving_arc_deriv, 0, &state_score_derivs); const FloatType *state_score_derivs_data = state_score_derivs.Data(); const int32_t *state_ids_batch_data = state_batches.values.Data() + state_begin; K2_EVAL( c, this_num_states, lambda_modify_state_score_derivs, (int32_t state_idx) { int32_t fsas_state_idx01 = state_ids_batch_data[state_idx]; FloatType state_score_extra_deriv = state_score_derivs_data[state_idx]; forward_scores_deriv_data[fsas_state_idx01] += state_score_extra_deriv; }); } } else { K2_CHECK_NE(entering_arcs, nullptr); K2_CHECK_EQ(entering_arcs->Dim(), num_states); K2_CHECK(entering_arcs->Context()->IsCompatible(*c)); const int32_t *entering_arcs_data = entering_arcs->Data(); for (int32_t b = num_batches - 1; b >= 0; --b) { int32_t arc_begin; Ragged<int32_t> leaving_arc_batch = arc_batches_splitter.GetElement(b, &arc_begin); const int32_t *leaving_arc_batch_data = leaving_arc_batch.values.Data(); Ragged<FloatType> leaving_arc_deriv(leaving_arc_batch.shape); FloatType *leaving_arc_deriv_data = leaving_arc_deriv.values.Data(); K2_EVAL( c, leaving_arc_batch.NumElements(), lambda_set_arc_deriv_etc, (int32_t arc_idx)->void { int32_t fsas_arc_idx012 = leaving_arc_batch_data[arc_idx]; const Arc &arc = arcs[fsas_arc_idx012]; int32_t dest_state_idx1 = arc.dest_state, src_state_idx1 = arc.src_state, src_state_idx01 = fsas_row_ids2_data[fsas_arc_idx012], state_idx0x = src_state_idx01 - src_state_idx1, dest_state_idx01 = state_idx0x + dest_state_idx1; FloatType arc_deriv = FloatType(0); if (entering_arcs_data[dest_state_idx01] == fsas_arc_idx012) { arc_deriv = forward_scores_deriv_data[dest_state_idx01]; } // otherwise arc_deriv is 0.0, the arc's score has no effect arc_scores_deriv_data[fsas_arc_idx012] = arc_deriv; leaving_arc_deriv_data[arc_idx] = arc_deriv; }); int32_t state_begin = 
arc_batches_splitter.GetOffset(b, 2), state_end = arc_batches_splitter.GetOffset(b + 1, 2), this_num_states = state_end - state_begin; // `state_score_derivs` is the extra part contributed to // `forward_scores_deriv` by the recursion, for the batch of states we're // currently processing. Array1<FloatType> state_score_derivs(c, this_num_states); SumPerSublist<FloatType>(leaving_arc_deriv, 0, &state_score_derivs); const FloatType *state_score_derivs_data = state_score_derivs.Data(); const int32_t *state_ids_batch_data = state_batches.values.Data() + state_begin; K2_EVAL( c, this_num_states, lambda_modify_state_score_derivs, (int32_t state_idx)->void { int32_t fsas_state_idx01 = state_ids_batch_data[state_idx]; FloatType state_score_extra_deriv = state_score_derivs_data[state_idx]; forward_scores_deriv_data[fsas_state_idx01] += state_score_extra_deriv; }); } } return arc_scores_deriv; } template Array1<float> BackpropGetForwardScores( FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &leaving_arc_batches, bool log_semiring, const Array1<int32_t> *entering_arcs, const Array1<float> &forward_scores, const Array1<float> &forward_scores_deriv_in); template Array1<double> BackpropGetForwardScores( FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &leaving_arc_batches, bool log_semiring, const Array1<int32_t> *entering_arcs, const Array1<double> &forward_scores, const Array1<double> &forward_scores_deriv_in); template <typename FloatType> Array1<FloatType> GetTotScores(FsaVec &fsas, const Array1<FloatType> &forward_scores) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, forward_scores)); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1); K2_CHECK_EQ(num_states, forward_scores.Dim()); const FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity(); Array1<FloatType> tot_scores(c, num_fsas, negative_infinity); FloatType *tot_scores_data = tot_scores.Data(); const int32_t *fsa_row_splits1_data = fsas.RowSplits(1).Data(); const FloatType *forward_scores_data = forward_scores.Data(); K2_EVAL( c, num_fsas, lambda_copy_tot_scores, (int32_t fsa_idx) { int32_t start_state = fsa_row_splits1_data[fsa_idx], start_state_next_fsa = fsa_row_splits1_data[fsa_idx + 1]; if (start_state_next_fsa > start_state) { // non-empty fsa int32_t final_state_idx = start_state_next_fsa - 1; tot_scores_data[fsa_idx] = forward_scores_data[final_state_idx]; } }); return tot_scores; } template <typename FloatType> Array1<FloatType> GetArcPost(FsaVec &fsas, const Array1<FloatType> &forward_scores, const Array1<FloatType> &backward_scores) { NVTX_RANGE(K2_FUNC); K2_CHECK(IsCompatible(fsas, forward_scores)); K2_CHECK(IsCompatible(fsas, backward_scores)); K2_CHECK_EQ(fsas.NumAxes(), 3); ContextPtr &c = fsas.Context(); int32_t num_fsas = fsas.Dim0(), num_states = fsas.TotSize(1), num_arcs = fsas.TotSize(2); K2_CHECK_EQ(num_states, forward_scores.Dim()); K2_CHECK_EQ(num_states, backward_scores.Dim()); Array1<FloatType> arc_scores(c, num_arcs), fsa_neg_tot_scores(c, num_fsas); // minus the tot scores per FSA. 
FloatType *arc_scores_data = arc_scores.Data(), *fsa_neg_tot_scores_data = fsa_neg_tot_scores.Data(); const int32_t *fsa_row_splits1 = fsas.RowSplits(1).Data(); const int32_t *fsa_row_ids1 = fsas.RowIds(1).Data(); const int32_t *fsa_row_ids2 = fsas.RowIds(2).Data(); const Arc *arcs = fsas.values.Data(); const FloatType *forward_scores_data = forward_scores.Data(); const FloatType *backward_scores_data = backward_scores.Data(); const FloatType negative_infinity = -std::numeric_limits<FloatType>::infinity(); K2_EVAL( c, num_fsas, lambda_set_fsa_scores, (int32_t fsa_idx0)->void { int32_t begin = fsa_row_splits1[fsa_idx0], end = fsa_row_splits1[fsa_idx0 + 1]; FloatType tot_score = FloatType(0); if (begin != end) { tot_score = FloatType(0.5) * (forward_scores_data[end - 1] + backward_scores_data[begin]); } // We never set the score of a state to positive_infinity, otherwise // we may get NaN when add it with negative_infinity below. But this // usually would not happen for a connected FSA. fsa_neg_tot_scores_data[fsa_idx0] = tot_score != negative_infinity ? -tot_score : negative_infinity; }); K2_EVAL( c, num_arcs, lambda_get_arc_scores, (int32_t arc_idx012) { int32_t src_state_idx1 = arcs[arc_idx012].src_state; int32_t dest_state_idx1 = arcs[arc_idx012].dest_state; float arc_score = arcs[arc_idx012].score; int32_t idx01 = fsa_row_ids2[arc_idx012]; int32_t idx0 = fsa_row_ids1[idx01]; int32_t idx0x = fsa_row_splits1[idx0]; int32_t src_state_idx01 = idx0x + src_state_idx1; int32_t dest_state_idx01 = idx0x + dest_state_idx1; arc_scores_data[arc_idx012] = arc_score + forward_scores_data[src_state_idx01] + backward_scores_data[dest_state_idx01] + fsa_neg_tot_scores_data[idx0]; }); return arc_scores; } // explicit instantiation for those score computation functions above template Array1<float> GetForwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &entering_arc_batches, bool log_semiring, Array1<int32_t> *entering_arcs); template Array1<double> GetForwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &entering_arc_batches, bool log_semiring, Array1<int32_t> *entering_arcs); template Array1<float> GetBackwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &leaving_arc_batches, bool log_semiring); template Array1<double> GetBackwardScores(FsaVec &fsas, Ragged<int32_t> &state_batches, Ragged<int32_t> &leaving_arc_batches, bool log_semiring); template Array1<float> GetArcPost(FsaVec &fsas, const Array1<float> &forward_scores, const Array1<float> &backward_scores); template Array1<double> GetArcPost(FsaVec &fsas, const Array1<double> &forward_scores, const Array1<double> &backward_scores); template Array1<float> GetTotScores(FsaVec &fsas, const Array1<float> &forward_scores); template Array1<double> GetTotScores(FsaVec &fsas, const Array1<double> &forward_scores); Fsa RandomFsa(bool acyclic /*=true*/, int32_t max_symbol /*=50*/, int32_t min_num_arcs /*=0*/, int32_t max_num_arcs /*=1000*/) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetCpuContext(); K2_CHECK_GE(min_num_arcs, 0); K2_CHECK_GE(max_num_arcs, min_num_arcs); K2_CHECK_GE(max_symbol, 0); RaggedShape shape = RandomRaggedShape(false, 2, 2, min_num_arcs, max_num_arcs); int32_t dim0 = shape.Dim0(); // empty Fsa if (dim0 == 0) return Fsa(shape, Array1<Arc>(c, std::vector<Arc>{})); // as there should be no arcs leaving the final_state, we always push back an // empty row here. 
Array1<int32_t> ans_row_splits1(c, dim0 + 2); Array1<int32_t> sub_range = ans_row_splits1.Range(0, dim0 + 1); sub_range.CopyFrom(shape.RowSplits(1)); int32_t *ans_row_splits1_data = ans_row_splits1.Data(); ans_row_splits1_data[dim0 + 1] = ans_row_splits1_data[dim0]; // create returned shape RaggedShapeLayer ans_shape_dim; ans_shape_dim.row_splits = ans_row_splits1; ans_shape_dim.cached_tot_size = shape.TotSize(1); RaggedShape ans_shape(std::vector<RaggedShapeLayer>{ans_shape_dim}, true); ans_shape.Populate(); // will be used to generate scores on arcs. std::random_device rd; std::mt19937 gen(rd()); // TODO(haowen): let the users set the range of scores? it's fine to use it // for now as we just use it to test. std::uniform_real_distribution<float> dis_score(0, 10); // create arcs int32_t *row_ids1 = ans_shape.RowIds(1).Data(); int32_t num_states = ans_shape.Dim0(), num_arcs = ans_shape.TotSize(1); int32_t start_state = 0, final_state = num_states - 1; std::vector<Arc> arcs(num_arcs); for (int32_t i = 0; i != num_arcs; ++i) { int32_t curr_state = row_ids1[i]; int32_t dest_state = acyclic ? RandInt(curr_state + 1, final_state) : RandInt(start_state, final_state); int32_t symbol = dest_state == final_state ? -1 : RandInt(0, max_symbol); float score = dis_score(gen); arcs[i] = Arc(curr_state, dest_state, symbol, score); } return Fsa(ans_shape, Array1<Arc>(c, arcs)); } FsaVec RandomFsaVec(int32_t min_num_fsas /*=1*/, int32_t max_num_fsas /*=1000*/, bool acyclic /*=true*/, int32_t max_symbol /*=50*/, int32_t min_num_arcs /*=0*/, int32_t max_num_arcs /*=1000*/) { NVTX_RANGE(K2_FUNC); K2_CHECK_GE(min_num_fsas, 0); K2_CHECK_GE(max_num_fsas, min_num_fsas); int32_t num_fsas = RandInt(min_num_fsas, max_num_fsas); std::vector<Fsa> fsas(num_fsas); for (int32_t i = 0; i != num_fsas; ++i) { fsas[i] = RandomFsa(acyclic, max_symbol, min_num_arcs, max_num_arcs); } return Stack(0, num_fsas, fsas.data()); } DenseFsaVec RandomDenseFsaVec(int32_t min_num_fsas, int32_t max_num_fsas, int32_t min_frames, int32_t max_frames, int32_t min_symbols, int32_t max_symbols, float scores_scale) { NVTX_RANGE(K2_FUNC); ContextPtr c = GetCpuContext(); int32_t num_fsas = RandInt(min_num_fsas, max_num_fsas); // num_symbols includes epsilon but not final-symbol -1. int32_t num_symbols = RandInt(min_symbols, max_symbols); // `num_frames` includes the extra 1 frame for the final-symbol. std::vector<int32_t> num_frames(num_fsas + 1); int32_t tot_frames = 0; for (int32_t i = 0; i < num_fsas; ++i) { num_frames[i] = RandInt(min_frames, max_frames) + 1; tot_frames += num_frames[i]; } Array2<float> scores(c, tot_frames, num_symbols + 1); auto scores_acc = scores.Accessor(); std::vector<int32_t> row_splits_vec(num_fsas + 1); row_splits_vec[0] = 0; int32_t cur_start_frame = 0; RandIntGenerator gen; for (int32_t i = 0; i < num_fsas; ++i) { int32_t this_num_frames = num_frames[i], end_frame = cur_start_frame + this_num_frames; for (int32_t f = cur_start_frame; f + 1 < end_frame; f++) { scores_acc(f, 0) = -std::numeric_limits<float>::infinity(); for (int32_t j = 0; j < num_symbols; j++) scores_acc(f, j + 1) = scores_scale * gen(-50, 50) * 0.01; } // on the last frame the placement of infinity vs. finite is reversed: // -1 gets finite value, others get infinity. 
int32_t f = end_frame - 1; scores_acc(f, 0) = scores_scale * gen(-50, 50) * 0.01; for (int32_t j = 0; j < num_symbols; j++) scores_acc(f, j + 1) = -std::numeric_limits<float>::infinity(); row_splits_vec[i + 1] = cur_start_frame = end_frame; } Array1<int32_t> row_splits(c, row_splits_vec); return DenseFsaVec(RaggedShape2(&row_splits, nullptr, tot_frames), scores); } Ragged<int32_t> GetStartStates(FsaVec &src) { NVTX_RANGE(K2_FUNC); ContextPtr &c = src.Context(); K2_CHECK_EQ(src.NumAxes(), 3); int32_t num_fsas = src.Dim0(); const int32_t *src_row_splits1_data = src.RowSplits(1).Data(); Array1<int32_t> ans_row_splits(c, num_fsas + 1); // will first set the elements of ans_row_splits to the number of states kept // from this FSA (either 0 or 1). int32_t *num_states_data = ans_row_splits.Data(); K2_EVAL( c, num_fsas, lambda_set_num_states, (int32_t fsa_idx0)->void { // 1 if the FSA is not empty, 0 if empty. num_states_data[fsa_idx0] = (src_row_splits1_data[fsa_idx0 + 1] > src_row_splits1_data[fsa_idx0]); }); ExclusiveSum(ans_row_splits, &ans_row_splits); int32_t ans_dim = ans_row_splits.Back(); Ragged<int32_t> ans(RaggedShape2(&ans_row_splits, nullptr, ans_dim), Array1<int32_t>(c, ans_dim)); const int32_t *ans_row_ids1_data = ans.shape.RowIds(1).Data(); int32_t *ans_values_data = ans.values.Data(); K2_EVAL( c, ans_dim, lambda_set_ans_values, (int32_t ans_idx01)->void { int32_t idx0 = ans_row_ids1_data[ans_idx01]; int32_t src_start_state_idx01 = src_row_splits1_data[idx0]; K2_DCHECK_GT(src_row_splits1_data[idx0 + 1], src_row_splits1_data[idx0]); ans_values_data[ans_idx01] = src_start_state_idx01; }); return ans; } FsaVec FsaVecFromArcIndexes(FsaVec &fsas, Ragged<int32_t> &best_arc_indexes) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(fsas.NumAxes(), 3); K2_CHECK_EQ(best_arc_indexes.NumAxes(), 2); K2_CHECK(IsCompatible(fsas, best_arc_indexes)); K2_CHECK_EQ(fsas.Dim0(), best_arc_indexes.Dim0()); // if there are n arcs (for n > 0), there are n + 1 states; if there are 0 // arcs, there are 0 states (that FSA will have no arcs or states). 
RaggedShape states_shape = ChangeSublistSizePinned(best_arc_indexes.shape, 1); const int32_t *states_shape_row_splits1_data = states_shape.RowSplits(1).Data(); int32_t num_fsas = fsas.Dim0(); int32_t num_states = states_shape.NumElements(); int32_t num_arcs = best_arc_indexes.shape.NumElements(); ContextPtr &context = fsas.Context(); if (num_arcs == 0) { RaggedShape shape_a = RegularRaggedShape(context, num_fsas, 0), shape_b = RegularRaggedShape(context, 0, 0); return FsaVec(ComposeRaggedShapes(shape_a, shape_b), Array1<Arc>(context, 0)); } Array1<int32_t> row_splits2(context, num_states + 1); Array1<int32_t> row_ids2(context, num_arcs); int32_t *row_splits2_data = row_splits2.Data(); int32_t *row_ids2_data = row_ids2.Data(); Array1<Arc> arcs(context, num_arcs); Arc *arcs_data = arcs.Data(); const int32_t *best_arc_indexes_row_splits1_data = best_arc_indexes.RowSplits(1).Data(); const int32_t *best_arc_indexes_row_ids1_data = best_arc_indexes.RowIds(1).Data(); const int32_t *best_arc_indexes_data = best_arc_indexes.values.Data(); const Arc *fsas_values_data = fsas.values.Data(); K2_EVAL( context, num_arcs, lambda_set_arcs, (int32_t best_arc_idx01) { int32_t fsas_idx0 = best_arc_indexes_row_ids1_data[best_arc_idx01]; int32_t best_arc_idx0x = best_arc_indexes_row_splits1_data[fsas_idx0]; int32_t best_arc_idx0x_next = best_arc_indexes_row_splits1_data[fsas_idx0 + 1]; int32_t num_best_arcs = best_arc_idx0x_next - best_arc_idx0x; int32_t best_arc_idx1 = best_arc_idx01 - best_arc_idx0x; int32_t state_offset = states_shape_row_splits1_data[fsas_idx0]; const Arc &arc = fsas_values_data[best_arc_indexes_data[best_arc_idx01]]; int32_t src_state = best_arc_idx1; int32_t dest_state = src_state + 1; int32_t label = arc.label; float score = arc.score; arcs_data[best_arc_idx01] = Arc(src_state, dest_state, label, score); int32_t state_idx01 = state_offset + src_state; row_ids2_data[best_arc_idx01] = state_idx01; row_splits2_data[state_idx01 + 1] = best_arc_idx01 + 1; if (best_arc_idx01 == 0) row_splits2_data[0] = 0; if (best_arc_idx1 + 1 == num_best_arcs) row_splits2_data[state_idx01 + 2] = best_arc_idx01 + 1; }); RaggedShape shape = RaggedShape3(&states_shape.RowSplits(1), &states_shape.RowIds(1), num_states, &row_splits2, &row_ids2, num_arcs); Ragged<Arc> ans(shape, arcs); return ans; } FsaVec GetIncomingFsaVec(FsaVec &fsas) { Array1<int32_t> dest_states = GetDestStates(fsas, true); Ragged<int32_t> arc_indexes = GetIncomingArcs(fsas, dest_states); return FsaVec(arc_indexes.shape, fsas.values[arc_indexes.values]); } Ragged<int32_t> ComposeArcMaps(Ragged<int32_t> &step1_arc_map, Ragged<int32_t> &step2_arc_map) { NVTX_RANGE(K2_FUNC); K2_CHECK_EQ(step1_arc_map.NumAxes(), 2); K2_CHECK_EQ(step2_arc_map.NumAxes(), 2); return Index(step1_arc_map, step2_arc_map, true); } void FixNumStates(FsaVec *fsas) { K2_CHECK_EQ(fsas->NumAxes(), 3); ContextPtr c = fsas->Context(); int32_t num_fsas = fsas->Dim0(), num_states = fsas->TotSize(1); Array1<int32_t> changed(c, 1, 0); Renumbering renumber_states(c, num_states); renumber_states.Keep() = static_cast<char>(1); // by default keep all states. int32_t *changed_data = changed.Data(); char *keep_data = renumber_states.Keep().Data(); const int32_t *row_splits1_data = fsas->RowSplits(1).Data(); K2_EVAL( c, num_fsas, lambda_set_must_remove, (int32_t i)->void { int32_t num_states = (row_splits1_data[i + 1] - row_splits1_data[i]); if (num_states == 1) keep_data[row_splits1_data[i]] = 0; changed_data[0] = 1; }); if (changed[0] == 0) return; // an optimization.. 
fsas->shape = RemoveSomeEmptyLists(fsas->shape, 1, renumber_states); } } // namespace k2
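Note on the GetForwardScores / GetBackwardScores code above: the batched kernels evaluate, per batch of states, the same recursion a serial implementation would run state by state. Below is a minimal CPU sketch for a single topologically sorted FSA (start state 0, arcs sorted by src_state); SimpleArc is a hypothetical stand-in for the Arc struct used above, and this is not the k2 API, only an illustration of the tropical (max) versus log-semiring (log-sum-exp) update that the kernels apply per batch.

#include <algorithm>
#include <cmath>
#include <limits>
#include <vector>

struct SimpleArc { int src_state; int dest_state; float score; };  // hypothetical stand-in for k2::Arc

// Forward scores for one topologically sorted FSA whose start state is 0.
// log_semiring = true  -> log-sum-exp over entering arcs (total score)
// log_semiring = false -> max over entering arcs (tropical / best path)
std::vector<double> ForwardScoresRef(int num_states,
                                     const std::vector<SimpleArc> &arcs,
                                     bool log_semiring) {
  const double neg_inf = -std::numeric_limits<double>::infinity();
  std::vector<double> score(num_states, neg_inf);
  if (num_states > 0) score[0] = 0.0;  // start state gets score 0
  // Arcs are assumed sorted by src_state (k2's natural arc order); since the
  // FSA is topologically sorted, score[a.src_state] is final before it is read.
  // This single pass plays the role of the per-batch kernels above.
  for (const SimpleArc &a : arcs) {
    const double cand = score[a.src_state] + a.score;
    if (log_semiring) {
      const double m = std::max(score[a.dest_state], cand);
      score[a.dest_state] =
          (m == neg_inf)
              ? neg_inf
              : m + std::log1p(std::exp(-std::fabs(score[a.dest_state] - cand)));
    } else {
      score[a.dest_state] = std::max(score[a.dest_state], cand);
    }
  }
  return score;
}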
13afbd651ba46bbb76b86bde1241b26fdf720206.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <cstdio> #include <cmath> #include <chrono> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <hip/hip_runtime.h> #include <strings.h> #include "common.h" #define MS 5; using namespace std; using namespace cv; __global__ void Blur_Kernel(unsigned char* input, unsigned char* output, int width, int height, int colorWidthStep, int ref){ // 2D Index of current thread const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; if ((xIndex < width) && (yIndex < height)) { int blue = 0; int green = 0; int red = 0; int contador=0; const int color_tid = (yIndex) * colorWidthStep + (3 * xIndex); for(int i = -ref ; i <= ref ; i++){ for (int j = -ref; j<=ref ; j++){ //Location of colored pixel in input const int color_tid = (yIndex+i) * colorWidthStep + (3 * (xIndex+j)); if(xIndex+j>0 && yIndex+i>0 && xIndex+j<width && yIndex+i<height ){ contador++; blue += input[color_tid]; green += input[color_tid+1]; red += input[color_tid+2]; } } } output[color_tid] = static_cast<unsigned char>(blue/contador); output[color_tid+1] = static_cast<unsigned char>(green/contador); output[color_tid+2] = static_cast<unsigned char>(red/contador); } } void Blur(string file){ int ms = MS; int ref = floor(ms/2); // Set up device int dev = 0; hipDeviceProp_t deviceProp; SAFE_CALL(hipGetDeviceProperties(&deviceProp, dev), "Error device prop"); printf("Using Device %d: %s\n", dev, deviceProp.name); SAFE_CALL(hipSetDevice(dev), "Error setting device"); Mat input = cv::imread(file, CV_LOAD_IMAGE_COLOR); cout << "Input image step: " << input.step << " cols: " << input.cols << " rows: " << input.rows << endl; //Create output image Mat output(input.rows, input.cols, CV_8UC3); // Calculate total number of bytes of input and output image // Step = cols * number of colors size_t colorBytes = input.step * input.rows; size_t grayBytes = output.step * output.rows; unsigned char *d_input, *d_output; // Allocate device memory SAFE_CALL(hipMalloc<unsigned char>(&d_input, colorBytes), "CUDA Malloc Failed"); SAFE_CALL(hipMalloc<unsigned char>(&d_output, grayBytes), "CUDA Malloc Failed"); // Copy data from OpenCV input image to device memory SAFE_CALL(hipMemcpy(d_input, input.ptr(), colorBytes, hipMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed"); SAFE_CALL(hipMemcpy(d_output, output.ptr(), colorBytes, hipMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed"); // Specify a reasonable block size const dim3 block(16, 16); // Calculate grid size to cover the whole image const dim3 grid((int)ceil((float)input.cols / block.x), (int)ceil((float)input.rows/ block.y)); printf("Blur_Kernel<<<(%d, %d) , (%d, %d)>>>\n", grid.x, grid.y, block.x, block.y); // Launch the color conversion kernel hipLaunchKernelGGL(( Blur_Kernel) , dim3(grid), dim3(block) , 0, 0, d_input, d_output, input.cols, input.rows, static_cast<int>(input.step), ref); // Synchronize to check for any kernel launch errors SAFE_CALL(hipDeviceSynchronize(), "Kernel Launch Failed"); // Copy back data from destination device meory to OpenCV output image SAFE_CALL(hipMemcpy(output.ptr(), d_output, grayBytes, hipMemcpyDeviceToHost), "CUDA Memcpy Host To Device Failed"); // Free the device memory SAFE_CALL(hipFree(d_input), "CUDA Free Failed"); SAFE_CALL(hipFree(d_output), "CUDA Free Failed"); //Allow the windows to resize namedWindow("Input", cv::WINDOW_NORMAL); namedWindow("Output", cv::WINDOW_NORMAL); //Show the 
input and output imshow("Input", input); imshow("Output", output); } int main(int argc, char *argv[]){ if (argc < 2){ cout << "Not enough arguments" << endl; }else{ auto startTime = chrono::high_resolution_clock::now(); Blur(argv[1]); auto endTime = chrono::high_resolution_clock::now(); chrono::duration<float, std::milli> duration_ms = endTime - startTime; printf("Blur elapsed %f ms\n", duration_ms.count()); waitKey(0); } }
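Note on Blur_Kernel in the entry above: the neighborhood test `xIndex+j>0 && yIndex+i>0` excludes column 0 and row 0 from the average, so border pixels are blurred with a biased window (pixel (0,0) does not even include itself). The kernel below is a sketch of the same box blur with `>= 0` bounds, keeping the interleaved BGR layout and colorWidthStep row pitch of the kernel above; it is an illustration, not a verified drop-in replacement.

// Same box blur as above, but with ">= 0" neighborhood bounds so that
// row 0 and column 0 pixels contribute to the average.
__global__ void BlurKernelInclusive(const unsigned char* input,
                                    unsigned char* output,
                                    int width, int height,
                                    int colorWidthStep, int ref) {
  const int x = blockIdx.x * blockDim.x + threadIdx.x;
  const int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x >= width || y >= height) return;

  int blue = 0, green = 0, red = 0, count = 0;
  for (int i = -ref; i <= ref; ++i) {
    for (int j = -ref; j <= ref; ++j) {
      const int nx = x + j, ny = y + i;
      if (nx >= 0 && ny >= 0 && nx < width && ny < height) {  // ">= 0", not "> 0"
        const int tid = ny * colorWidthStep + 3 * nx;
        blue  += input[tid];
        green += input[tid + 1];
        red   += input[tid + 2];
        ++count;  // always >= 1: the center pixel itself passes the test
      }
    }
  }
  const int out = y * colorWidthStep + 3 * x;
  output[out]     = static_cast<unsigned char>(blue / count);
  output[out + 1] = static_cast<unsigned char>(green / count);
  output[out + 2] = static_cast<unsigned char>(red / count);
}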
13afbd651ba46bbb76b86bde1241b26fdf720206.cu
#include <iostream> #include <cstdio> #include <cmath> #include <chrono> #include <opencv2/core/core.hpp> #include <opencv2/highgui/highgui.hpp> #include <cuda_runtime.h> #include <strings.h> #include "common.h" #define MS 5; using namespace std; using namespace cv; __global__ void Blur_Kernel(unsigned char* input, unsigned char* output, int width, int height, int colorWidthStep, int ref){ // 2D Index of current thread const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; const int yIndex = blockIdx.y * blockDim.y + threadIdx.y; if ((xIndex < width) && (yIndex < height)) { int blue = 0; int green = 0; int red = 0; int contador=0; const int color_tid = (yIndex) * colorWidthStep + (3 * xIndex); for(int i = -ref ; i <= ref ; i++){ for (int j = -ref; j<=ref ; j++){ //Location of colored pixel in input const int color_tid = (yIndex+i) * colorWidthStep + (3 * (xIndex+j)); if(xIndex+j>0 && yIndex+i>0 && xIndex+j<width && yIndex+i<height ){ contador++; blue += input[color_tid]; green += input[color_tid+1]; red += input[color_tid+2]; } } } output[color_tid] = static_cast<unsigned char>(blue/contador); output[color_tid+1] = static_cast<unsigned char>(green/contador); output[color_tid+2] = static_cast<unsigned char>(red/contador); } } void Blur(string file){ int ms = MS; int ref = floor(ms/2); // Set up device int dev = 0; cudaDeviceProp deviceProp; SAFE_CALL(cudaGetDeviceProperties(&deviceProp, dev), "Error device prop"); printf("Using Device %d: %s\n", dev, deviceProp.name); SAFE_CALL(cudaSetDevice(dev), "Error setting device"); Mat input = cv::imread(file, CV_LOAD_IMAGE_COLOR); cout << "Input image step: " << input.step << " cols: " << input.cols << " rows: " << input.rows << endl; //Create output image Mat output(input.rows, input.cols, CV_8UC3); // Calculate total number of bytes of input and output image // Step = cols * number of colors size_t colorBytes = input.step * input.rows; size_t grayBytes = output.step * output.rows; unsigned char *d_input, *d_output; // Allocate device memory SAFE_CALL(cudaMalloc<unsigned char>(&d_input, colorBytes), "CUDA Malloc Failed"); SAFE_CALL(cudaMalloc<unsigned char>(&d_output, grayBytes), "CUDA Malloc Failed"); // Copy data from OpenCV input image to device memory SAFE_CALL(cudaMemcpy(d_input, input.ptr(), colorBytes, cudaMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed"); SAFE_CALL(cudaMemcpy(d_output, output.ptr(), colorBytes, cudaMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed"); // Specify a reasonable block size const dim3 block(16, 16); // Calculate grid size to cover the whole image const dim3 grid((int)ceil((float)input.cols / block.x), (int)ceil((float)input.rows/ block.y)); printf("Blur_Kernel<<<(%d, %d) , (%d, %d)>>>\n", grid.x, grid.y, block.x, block.y); // Launch the color conversion kernel Blur_Kernel <<<grid, block >>>(d_input, d_output, input.cols, input.rows, static_cast<int>(input.step), ref); // Synchronize to check for any kernel launch errors SAFE_CALL(cudaDeviceSynchronize(), "Kernel Launch Failed"); // Copy back data from destination device meory to OpenCV output image SAFE_CALL(cudaMemcpy(output.ptr(), d_output, grayBytes, cudaMemcpyDeviceToHost), "CUDA Memcpy Host To Device Failed"); // Free the device memory SAFE_CALL(cudaFree(d_input), "CUDA Free Failed"); SAFE_CALL(cudaFree(d_output), "CUDA Free Failed"); //Allow the windows to resize namedWindow("Input", cv::WINDOW_NORMAL); namedWindow("Output", cv::WINDOW_NORMAL); //Show the input and output imshow("Input", input); imshow("Output", output); } int main(int 
argc, char *argv[]){ if (argc < 2){ cout << "Not enough arguments" << endl; }else{ auto startTime = chrono::high_resolution_clock::now(); Blur(argv[1]); auto endTime = chrono::high_resolution_clock::now(); chrono::duration<float, std::milli> duration_ms = endTime - startTime; printf("Blur elapsed %f ms\n", duration_ms.count()); waitKey(0); } }
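The .cu entry above and the .hip entry before it differ only in the runtime prefixes (cuda* versus hip*) and in the kernel-launch syntax. The sketch below shows the two launch forms side by side for the same Blur_Kernel; LaunchBlur is a hypothetical helper assumed to sit in the same translation unit as the kernel, and __HIPCC__ is used here as the compiler-defined macro distinguishing a hipcc build from an nvcc build (a reasonable choice, not the only one).

// Sketch: the same launch written both ways. The argument lists are identical;
// the HIP macro simply makes the dynamic-shared-memory size (0 bytes) and the
// stream (0 = default) explicit.
static void LaunchBlur(unsigned char* d_in, unsigned char* d_out,
                       int cols, int rows, int step, int ref,
                       dim3 grid, dim3 block) {
#ifdef __HIPCC__
  // HIP form produced by hipify, as in the .hip entry.
  hipLaunchKernelGGL(Blur_Kernel, grid, block, 0, 0,
                     d_in, d_out, cols, rows, step, ref);
#else
  // CUDA triple-chevron form, as in the .cu entry.
  Blur_Kernel<<<grid, block>>>(d_in, d_out, cols, rows, step, ref);
#endif
}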
89d1b4d6452851054d93bb848c2cba4906591c65.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <vector> #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include "dali/tensor/Mat.h" #include "dali/math/TensorInternal.h" #include "dali/math/KernelizedSoftmax.h" #include "dali/utils/core_utils.h" #include "dali/math/memory_bank/MemoryBank.h" using std::vector; typedef float R; template<int x_bits, typename R, typename DstPlan, typename SrcPlan> __global__ void SoftmaxKernel(DstPlan dst, SrcPlan src, mshadow::index_t num_cols, R temperature) { const unsigned buffer_size = 1 << x_bits; const int row = blockIdx.x; const int thread_idx = threadIdx.x; __shared__ R buffer[buffer_size]; // step 1: get max if (thread_idx < num_cols) { buffer[thread_idx] = src.Eval(row, thread_idx); } for (unsigned x = buffer_size; x < num_cols; x += buffer_size) { const int col = x + thread_idx; if (col < num_cols) { R a = src.Eval(row, col); buffer[thread_idx] = max(a, buffer[thread_idx]); } } __syncthreads(); // if number of rows is smaller than buffer, // fill buffer with copy of buffer[0] - this // makes sure reduction does not use uninitialized // values in the buffer and returns correct max. if (thread_idx >= num_cols) { buffer[thread_idx] = buffer[0]; } __syncthreads(); mshadow::cuda::ReduceX<mshadow::red::maximum, x_bits>(buffer, thread_idx); __syncthreads(); // every thread memorizes max value in column, // so that we can reuse the buffer, for next // task R max_in_row = buffer[0]; __syncthreads(); // clear buffer (so that sum works out later) buffer[thread_idx] = 0.0f; __syncthreads(); // calculate normalizer, with writeback for (unsigned x = 0; x < num_cols; x += buffer_size) { const int col = x + thread_idx; if (col < num_cols) { R p = expf((src.Eval(row, col) - max_in_row) / temperature); // add sum to buffer, so that we can later reduce it to // column-wise sum of exps and use as normalizer. buffer[thread_idx] += p; // save exped value to the corresponding idx in destination. dst.REval(row, col) = p; } } // calculate normalizer by reducing partial sums __syncthreads(); mshadow::cuda::ReduceX<mshadow::red::sum, x_bits>(buffer, thread_idx); __syncthreads(); R sum_in_row = buffer[0]; for (unsigned x = 0; x < num_cols; x += buffer_size) { const int col = x + thread_idx; if (col < num_cols) { dst.REval(row, col) /= sum_in_row; } } } template<int x_bits, typename R, typename DstPlan, typename SrcPlan> __global__ void SoftmaxKernelCached(DstPlan dst, SrcPlan src, mshadow::index_t num_cols, R temperature) { const unsigned buffer_size = 1 << x_bits; const int num_offsets = num_cols/buffer_size + 1; const int row = blockIdx.x; const int thread_idx = threadIdx.x; __shared__ R buffer[buffer_size]; R row_cache[20]; // step 0: copy the memory to cache. for (unsigned offset = 0; offset < num_offsets; ++offset) { const int col = offset * buffer_size + thread_idx; if (col < num_cols) { row_cache[offset] = src.Eval(row, col); } } // step 1: get max if (thread_idx < num_cols) { buffer[thread_idx] = row_cache[0]; } for (unsigned offset = 0; offset < num_offsets; ++offset) { const int col = offset * buffer_size + thread_idx; if (col < num_cols) { buffer[thread_idx] = max(row_cache[offset], buffer[thread_idx]); } } __syncthreads(); // if number of rows is smaller than buffer, // fill buffer with copy of buffer[0] - this // makes sure reduction does not use uninitialized // values in the buffer and returns correct max. 
if (thread_idx >= num_cols) { buffer[thread_idx] = buffer[0]; } __syncthreads(); mshadow::cuda::ReduceX<mshadow::red::maximum, x_bits>(buffer, thread_idx); __syncthreads(); // every thread memorizes max value in column, // so that we can reuse the buffer, for next // task R max_in_row = buffer[0]; __syncthreads(); // clear buffer (so that sum works out later) buffer[thread_idx] = 0.0f; __syncthreads(); // calculate normalizer, with writeback for (unsigned offset = 0; offset < num_offsets; ++offset) { const int col = offset * buffer_size + thread_idx; if (col < num_cols) { const R p = expf((row_cache[offset] - max_in_row) / temperature); // add sum to buffer, so that we can later reduce it to // column-wise sum of exps and use as normalizer. buffer[thread_idx] += p; // save exped value to the corresponding idx in destination. row_cache[offset] = p; } } // calculate normalizer by reducing partial sums __syncthreads(); mshadow::cuda::ReduceX<mshadow::red::sum, x_bits>(buffer, thread_idx); __syncthreads(); R sum_in_row = buffer[0]; for (unsigned offset = 0; offset < num_offsets; ++offset) { const int col = offset * buffer_size + thread_idx; if (col < num_cols) { dst.REval(row, col) = row_cache[offset] / sum_in_row; } } } // Note: in a dim3 (width, height, depth) // every uninitialized dimension defaults to 1. static const int MAX_ROW_SIZE_FOR_CACHED = 1000; // Note: <<<Dg, Db, Ns, S>>> CUDA Language Extension is explained here: // http://docs.nvidia.com/cuda/cuda-c-programming-guide/#execution-configuration template<typename R> void softmax(mshadow::Tensor<mshadow::gpu, 2, R> dst, const mshadow::Tensor<mshadow::gpu, 2, R> src, R temperature = 1.0) { const int num_threads = mshadow::cuda::kBaseThreadNum; const int thread_bits = mshadow::cuda::kBaseThreadBits; dim3 tiles(dst.size(0)); // block size is a matrix column dim3 within_tile(num_threads); mshadow::utils::Check(dst.shape_ == src.shape_, "Softmax: shape mismatch"); mshadow::cuda::CheckLaunchParam(tiles, within_tile, "Softmax"); hipStream_t stream = mshadow::Stream<mshadow::gpu>::GetStream(dst.stream_); if (dst.size(1) <= MAX_ROW_SIZE_FOR_CACHED) { std::cout << "siema" << std::endl; hipLaunchKernelGGL(( SoftmaxKernelCached<thread_bits, R>) , dim3(tiles), dim3(within_tile), 0, stream, mshadow::expr::MakePlan(dst), mshadow::expr::MakePlan(src), dst.size(1), temperature); } else { hipLaunchKernelGGL(( SoftmaxKernel<thread_bits, R>) , dim3(tiles), dim3(within_tile), 0, stream, mshadow::expr::MakePlan(dst), mshadow::expr::MakePlan(src), dst.size(1), temperature); } } template<typename R> void softmax_rowwise(mshadow::Tensor<mshadow::gpu, 2, R> dst, const mshadow::Tensor<mshadow::gpu, 2, R> src, R temperature = 1.0) { const int num_threads = mshadow::cuda::kBaseThreadNum; const int thread_bits = mshadow::cuda::kBaseThreadBits; dim3 tiles(dst.size(1)); // block size is a matrix column dim3 within_tile(num_threads); mshadow::utils::Check(dst.shape_ == src.shape_, "Softmax: shape mismatch"); mshadow::cuda::CheckLaunchParam(tiles, within_tile, "Softmax"); hipStream_t stream = mshadow::Stream<mshadow::gpu>::GetStream(dst.stream_); if (dst.size(0) <= MAX_ROW_SIZE_FOR_CACHED) { hipLaunchKernelGGL(( SoftmaxKernelCached<thread_bits, R>) , dim3(tiles), dim3(within_tile), 0, stream, mshadow::expr::MakePlan(dst.T()), mshadow::expr::MakePlan(src.T()), dst.size(0), temperature); } else { hipLaunchKernelGGL(( SoftmaxKernel<thread_bits, R>) , dim3(tiles), dim3(within_tile), 0, stream, mshadow::expr::MakePlan(dst.T()), mshadow::expr::MakePlan(src.T()), 
dst.size(0), temperature); } } int main() { dali_init(); int N = 5; Mat<R> bob(N, N, weights<R>::uniform(20)); Mat<R> bob_col_softmax(N, N); // set the computing streams softmax(bob_col_softmax.w().mutable_gpu_data(), bob.w().gpu_data()); softmax_rowwise(bob_col_softmax.w().mutable_gpu_data(), bob.w().gpu_data()); int iter = 1; bob.print(); for (int i = 0; i < iter; i++) { //bob_col_softmax.w().clear(); { utils::Timer t1("Softmax row-wise (Dali)"); // our softmax softmax(bob_col_softmax.w().mutable_gpu_data(), bob.w().gpu_data()); hipDeviceSynchronize(); } bob_col_softmax.print(); // bob_col_softmax.w().clear(); // { // utils::Timer t1("Softmax col-wise (Dali)"); // // our softmax // softmax_rowwise(bob_col_softmax.w().mutable_gpu_data(), bob.w().gpu_data()); // hipDeviceSynchronize(); // } // bob_col_softmax.print(); } utils::Timer::report(); }
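Note on SoftmaxKernel / SoftmaxKernelCached above: per row they compute a temperature-scaled softmax, subtracting the row maximum before exponentiating so the exponentials cannot overflow. Below is a minimal CPU reference for one (non-empty) row; it is not part of the Dali or mshadow API, just the serial version of what the max- and sum-reductions in shared memory compute in parallel.

#include <algorithm>
#include <cmath>
#include <vector>

// y[j] = exp((x[j] - max(x)) / T) / sum_k exp((x[k] - max(x)) / T),
// which equals softmax(x / T); x is assumed non-empty.
std::vector<float> SoftmaxRowRef(const std::vector<float> &x, float temperature) {
  const float m = *std::max_element(x.begin(), x.end());  // row max (step 1 in the kernel)
  std::vector<float> y(x.size());
  float sum = 0.f;
  for (size_t j = 0; j < x.size(); ++j) {
    y[j] = std::exp((x[j] - m) / temperature);  // exponentiate with writeback
    sum += y[j];                                // accumulate the normalizer
  }
  for (float &v : y) v /= sum;                  // normalize
  return y;
}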
89d1b4d6452851054d93bb848c2cba4906591c65.cu
#include <iostream> #include <vector> #include <mshadow/tensor.h> #include <mshadow/cuda/reduce.cuh> #include "dali/tensor/Mat.h" #include "dali/math/TensorInternal.h" #include "dali/math/KernelizedSoftmax.h" #include "dali/utils/core_utils.h" #include "dali/math/memory_bank/MemoryBank.h" using std::vector; typedef float R; template<int x_bits, typename R, typename DstPlan, typename SrcPlan> __global__ void SoftmaxKernel(DstPlan dst, SrcPlan src, mshadow::index_t num_cols, R temperature) { const unsigned buffer_size = 1 << x_bits; const int row = blockIdx.x; const int thread_idx = threadIdx.x; __shared__ R buffer[buffer_size]; // step 1: get max if (thread_idx < num_cols) { buffer[thread_idx] = src.Eval(row, thread_idx); } for (unsigned x = buffer_size; x < num_cols; x += buffer_size) { const int col = x + thread_idx; if (col < num_cols) { R a = src.Eval(row, col); buffer[thread_idx] = max(a, buffer[thread_idx]); } } __syncthreads(); // if number of rows is smaller than buffer, // fill buffer with copy of buffer[0] - this // makes sure reduction does not use uninitialized // values in the buffer and returns correct max. if (thread_idx >= num_cols) { buffer[thread_idx] = buffer[0]; } __syncthreads(); mshadow::cuda::ReduceX<mshadow::red::maximum, x_bits>(buffer, thread_idx); __syncthreads(); // every thread memorizes max value in column, // so that we can reuse the buffer, for next // task R max_in_row = buffer[0]; __syncthreads(); // clear buffer (so that sum works out later) buffer[thread_idx] = 0.0f; __syncthreads(); // calculate normalizer, with writeback for (unsigned x = 0; x < num_cols; x += buffer_size) { const int col = x + thread_idx; if (col < num_cols) { R p = expf((src.Eval(row, col) - max_in_row) / temperature); // add sum to buffer, so that we can later reduce it to // column-wise sum of exps and use as normalizer. buffer[thread_idx] += p; // save exped value to the corresponding idx in destination. dst.REval(row, col) = p; } } // calculate normalizer by reducing partial sums __syncthreads(); mshadow::cuda::ReduceX<mshadow::red::sum, x_bits>(buffer, thread_idx); __syncthreads(); R sum_in_row = buffer[0]; for (unsigned x = 0; x < num_cols; x += buffer_size) { const int col = x + thread_idx; if (col < num_cols) { dst.REval(row, col) /= sum_in_row; } } } template<int x_bits, typename R, typename DstPlan, typename SrcPlan> __global__ void SoftmaxKernelCached(DstPlan dst, SrcPlan src, mshadow::index_t num_cols, R temperature) { const unsigned buffer_size = 1 << x_bits; const int num_offsets = num_cols/buffer_size + 1; const int row = blockIdx.x; const int thread_idx = threadIdx.x; __shared__ R buffer[buffer_size]; R row_cache[20]; // step 0: copy the memory to cache. for (unsigned offset = 0; offset < num_offsets; ++offset) { const int col = offset * buffer_size + thread_idx; if (col < num_cols) { row_cache[offset] = src.Eval(row, col); } } // step 1: get max if (thread_idx < num_cols) { buffer[thread_idx] = row_cache[0]; } for (unsigned offset = 0; offset < num_offsets; ++offset) { const int col = offset * buffer_size + thread_idx; if (col < num_cols) { buffer[thread_idx] = max(row_cache[offset], buffer[thread_idx]); } } __syncthreads(); // if number of rows is smaller than buffer, // fill buffer with copy of buffer[0] - this // makes sure reduction does not use uninitialized // values in the buffer and returns correct max. 
if (thread_idx >= num_cols) { buffer[thread_idx] = buffer[0]; } __syncthreads(); mshadow::cuda::ReduceX<mshadow::red::maximum, x_bits>(buffer, thread_idx); __syncthreads(); // every thread memorizes max value in column, // so that we can reuse the buffer, for next // task R max_in_row = buffer[0]; __syncthreads(); // clear buffer (so that sum works out later) buffer[thread_idx] = 0.0f; __syncthreads(); // calculate normalizer, with writeback for (unsigned offset = 0; offset < num_offsets; ++offset) { const int col = offset * buffer_size + thread_idx; if (col < num_cols) { const R p = expf((row_cache[offset] - max_in_row) / temperature); // add sum to buffer, so that we can later reduce it to // column-wise sum of exps and use as normalizer. buffer[thread_idx] += p; // save exped value to the corresponding idx in destination. row_cache[offset] = p; } } // calculate normalizer by reducing partial sums __syncthreads(); mshadow::cuda::ReduceX<mshadow::red::sum, x_bits>(buffer, thread_idx); __syncthreads(); R sum_in_row = buffer[0]; for (unsigned offset = 0; offset < num_offsets; ++offset) { const int col = offset * buffer_size + thread_idx; if (col < num_cols) { dst.REval(row, col) = row_cache[offset] / sum_in_row; } } } // Note: in a dim3 (width, height, depth) // every uninitialized dimension defaults to 1. static const int MAX_ROW_SIZE_FOR_CACHED = 1000; // Note: <<<Dg, Db, Ns, S>>> CUDA Language Extension is explained here: // http://docs.nvidia.com/cuda/cuda-c-programming-guide/#execution-configuration template<typename R> void softmax(mshadow::Tensor<mshadow::gpu, 2, R> dst, const mshadow::Tensor<mshadow::gpu, 2, R> src, R temperature = 1.0) { const int num_threads = mshadow::cuda::kBaseThreadNum; const int thread_bits = mshadow::cuda::kBaseThreadBits; dim3 tiles(dst.size(0)); // block size is a matrix column dim3 within_tile(num_threads); mshadow::utils::Check(dst.shape_ == src.shape_, "Softmax: shape mismatch"); mshadow::cuda::CheckLaunchParam(tiles, within_tile, "Softmax"); cudaStream_t stream = mshadow::Stream<mshadow::gpu>::GetStream(dst.stream_); if (dst.size(1) <= MAX_ROW_SIZE_FOR_CACHED) { std::cout << "siema" << std::endl; SoftmaxKernelCached<thread_bits, R> <<<tiles, within_tile, 0, stream>>> (mshadow::expr::MakePlan(dst), mshadow::expr::MakePlan(src), dst.size(1), temperature); } else { SoftmaxKernel<thread_bits, R> <<<tiles, within_tile, 0, stream>>> (mshadow::expr::MakePlan(dst), mshadow::expr::MakePlan(src), dst.size(1), temperature); } } template<typename R> void softmax_rowwise(mshadow::Tensor<mshadow::gpu, 2, R> dst, const mshadow::Tensor<mshadow::gpu, 2, R> src, R temperature = 1.0) { const int num_threads = mshadow::cuda::kBaseThreadNum; const int thread_bits = mshadow::cuda::kBaseThreadBits; dim3 tiles(dst.size(1)); // block size is a matrix column dim3 within_tile(num_threads); mshadow::utils::Check(dst.shape_ == src.shape_, "Softmax: shape mismatch"); mshadow::cuda::CheckLaunchParam(tiles, within_tile, "Softmax"); cudaStream_t stream = mshadow::Stream<mshadow::gpu>::GetStream(dst.stream_); if (dst.size(0) <= MAX_ROW_SIZE_FOR_CACHED) { SoftmaxKernelCached<thread_bits, R> <<<tiles, within_tile, 0, stream>>> (mshadow::expr::MakePlan(dst.T()), mshadow::expr::MakePlan(src.T()), dst.size(0), temperature); } else { SoftmaxKernel<thread_bits, R> <<<tiles, within_tile, 0, stream>>> (mshadow::expr::MakePlan(dst.T()), mshadow::expr::MakePlan(src.T()), dst.size(0), temperature); } } int main() { dali_init(); int N = 5; Mat<R> bob(N, N, weights<R>::uniform(20)); Mat<R> 
bob_col_softmax(N, N); // set the computing streams softmax(bob_col_softmax.w().mutable_gpu_data(), bob.w().gpu_data()); softmax_rowwise(bob_col_softmax.w().mutable_gpu_data(), bob.w().gpu_data()); int iter = 1; bob.print(); for (int i = 0; i < iter; i++) { //bob_col_softmax.w().clear(); { utils::Timer t1("Softmax row-wise (Dali)"); // our softmax softmax(bob_col_softmax.w().mutable_gpu_data(), bob.w().gpu_data()); cudaDeviceSynchronize(); } bob_col_softmax.print(); // bob_col_softmax.w().clear(); // { // utils::Timer t1("Softmax col-wise (Dali)"); // // our softmax // softmax_rowwise(bob_col_softmax.w().mutable_gpu_data(), bob.w().gpu_data()); // cudaDeviceSynchronize(); // } // bob_col_softmax.print(); } utils::Timer::report(); }
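// A minimal, self-contained sketch of the row-wise softmax pattern used in the pair above
// (subtract the row max for numerical stability, exponentiate with a temperature, then
// normalize), written without the mshadow Plan/ReduceX machinery. The kernel name
// softmax_rows and the fixed BLOCK of 256 threads are illustrative choices, not part of the
// original files; one block handles one row and strides over its columns.
#include <cuda_runtime.h>
#include <cfloat>

#define BLOCK 256

__global__ void softmax_rows(const float* in, float* out, int row_len, float temperature) {
    __shared__ float buf[BLOCK];
    const int row = blockIdx.x;
    const int tid = threadIdx.x;
    const float* src = in + row * row_len;
    float* dst = out + row * row_len;

    // 1) per-thread partial max, then tree reduction in shared memory
    float m = -FLT_MAX;
    for (int c = tid; c < row_len; c += BLOCK) m = fmaxf(m, src[c]);
    buf[tid] = m;
    __syncthreads();
    for (int s = BLOCK / 2; s > 0; s >>= 1) {
        if (tid < s) buf[tid] = fmaxf(buf[tid], buf[tid + s]);
        __syncthreads();
    }
    const float row_max = buf[0];
    __syncthreads();

    // 2) exponentiate and accumulate the normalizer
    float sum = 0.0f;
    for (int c = tid; c < row_len; c += BLOCK) {
        const float p = expf((src[c] - row_max) / temperature);
        dst[c] = p;
        sum += p;
    }
    buf[tid] = sum;
    __syncthreads();
    for (int s = BLOCK / 2; s > 0; s >>= 1) {
        if (tid < s) buf[tid] += buf[tid + s];
        __syncthreads();
    }
    const float row_sum = buf[0];

    // 3) normalize in place
    for (int c = tid; c < row_len; c += BLOCK) dst[c] /= row_sum;
}
// Launch sketch, one block per row: softmax_rows<<<num_rows, BLOCK>>>(d_in, d_out, row_len, 1.0f);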
3914284029fcd1eeb1bd01897a8fe6348018bfa0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/dropout_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void DropoutForward(const int n, const Dtype* in, const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] * (mask[index] > threshold) * scale; } } template <typename Dtype> void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); if (this->phase_ == TRAIN) { unsigned int* mask = static_cast<unsigned int*>(rand_vec_.mutable_gpu_data()); caffe_gpu_rng_uniform(count, mask); // set thresholds // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( DropoutForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS),0,stream_[0], count, bottom_data, mask, uint_thres_, scale_, top_data); CUDA_POST_KERNEL_CHECK; } else { caffe_copy(count, bottom_data, top_data); } } template <typename Dtype> __global__ void DropoutBackward(const int n, const Dtype* in_diff, const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * scale * (mask[index] > threshold); } } template <typename Dtype> void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); if (this->phase_ == TRAIN) { const unsigned int* mask = static_cast<const unsigned int*>(rand_vec_.gpu_data()); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( DropoutBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS),0,stream_[0], count, top_diff, mask, uint_thres_, scale_, bottom_diff); CUDA_POST_KERNEL_CHECK; } else { caffe_copy(top[0]->count(), top_diff, bottom_diff); } } } INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer); } // namespace caffe
3914284029fcd1eeb1bd01897a8fe6348018bfa0.cu
#include <vector> #include "caffe/layers/dropout_layer.hpp" #include "caffe/util/math_functions.hpp" namespace caffe { template <typename Dtype> __global__ void DropoutForward(const int n, const Dtype* in, const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out) { CUDA_KERNEL_LOOP(index, n) { out[index] = in[index] * (mask[index] > threshold) * scale; } } template <typename Dtype> void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); const int count = bottom[0]->count(); if (this->phase_ == TRAIN) { unsigned int* mask = static_cast<unsigned int*>(rand_vec_.mutable_gpu_data()); caffe_gpu_rng_uniform(count, mask); // set thresholds // NOLINT_NEXT_LINE(whitespace/operators) DropoutForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS,0,stream_[0]>>>( count, bottom_data, mask, uint_thres_, scale_, top_data); CUDA_POST_KERNEL_CHECK; } else { caffe_copy(count, bottom_data, top_data); } } template <typename Dtype> __global__ void DropoutBackward(const int n, const Dtype* in_diff, const unsigned int* mask, const unsigned int threshold, const float scale, Dtype* out_diff) { CUDA_KERNEL_LOOP(index, n) { out_diff[index] = in_diff[index] * scale * (mask[index] > threshold); } } template <typename Dtype> void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { if (propagate_down[0]) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); if (this->phase_ == TRAIN) { const unsigned int* mask = static_cast<const unsigned int*>(rand_vec_.gpu_data()); const int count = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) DropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS,0,stream_[0]>>> ( count, top_diff, mask, uint_thres_, scale_, bottom_diff); CUDA_POST_KERNEL_CHECK; } else { caffe_copy(top[0]->count(), top_diff, bottom_diff); } } } INSTANTIATE_LAYER_GPU_FUNCS(DropoutLayer); } // namespace caffe
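// A standalone sketch of the inverted-dropout idea behind DropoutForward/DropoutBackward
// above: keep an element only if its random draw clears the dropout probability, and scale
// the survivors by 1/(1-p) so the expected activation is unchanged. This illustration uses a
// plain float mask filled with the cuRAND host API instead of Caffe's unsigned-int mask and
// uint_thres_ encoding; dropout_forward_sketch is a hypothetical name.
#include <cuda_runtime.h>
#include <curand.h>

__global__ void dropout_forward_sketch(int n, const float* in, const float* uniform01,
                                       float p, float scale, float* out) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) out[i] = in[i] * (uniform01[i] > p ? scale : 0.0f);
}

// Host-side usage sketch (d_in, d_mask, d_out are device buffers of n floats):
//   curandGenerator_t gen;
//   curandCreateGenerator(&gen, CURAND_RNG_PSEUDO_DEFAULT);
//   curandGenerateUniform(gen, d_mask, n);                       // fills (0, 1]
//   float p = 0.5f, scale = 1.0f / (1.0f - p);
//   dropout_forward_sketch<<<(n + 255) / 256, 256>>>(n, d_in, d_mask, p, scale, d_out);
//   curandDestroyGenerator(gen);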
9b552b32406eae1c919c326c84ca5ceea385f759.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void branch(int *A) { int tid = threadIdx.x; if ((tid % 2) == 1) A[tid] += A[tid + 2]; }
9b552b32406eae1c919c326c84ca5ceea385f759.cu
__global__ void branch(int *A) { int tid = threadIdx.x; if ((tid % 2) == 1) A[tid] += A[tid + 2]; }
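// In the branch kernel above only the odd lanes of each warp do work, so every warp
// diverges (half of its lanes sit idle through the addition). A common sketch of the
// remedy is to remap indices so that consecutive threads all handle odd elements;
// branch_remapped is a hypothetical name and the explicit bound n is an addition, not
// part of the original file.
__global__ void branch_remapped(int *A, int n) {
    int i = 2 * (blockIdx.x * blockDim.x + threadIdx.x) + 1; // odd indices only
    if (i + 2 < n)
        A[i] += A[i + 2];
}
// Launched with roughly half as many threads as the original branch kernel.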
c28b990fb0766cf55899851a375f2f4303b9d134.hip
// !!! This is a file automatically generated by hipify!!!
#include "funset.hpp"
#include <iostream>
#include <algorithm>
#include <memory>
#include <hip/hip_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_")
#include <device_launch_parameters.h>
#include "common.hpp"

/* __global__: function type qualifier; the function runs on the device and is called from the
   host (from compute capability 3.2 on it may also be called from the device); it must return
   void; the call is asynchronous and must specify an execution configuration, i.e. the grid and
   block dimensions and the associated stream, via the <<< >>> operator;
   "a kernel" marks this as a kernel function (a CUDA parallel function running on the GPU),
   which must be defined with the __global__ qualifier. */
__global__ static void green_ball(unsigned char* ptr, int width, int height)
{
	/* gridDim: built-in variable, the dimensions of the grid (number of blocks per dimension);
	   constant for all blocks; of type dim3.
	   blockDim: built-in variable, the dimensions of each block (number of threads per
	   dimension); constant for all blocks; of type dim3.
	   blockIdx: built-in variable, the index of the current block within the grid; blockIdx.x is
	   in [0, gridDim.x-1], blockIdx.y in [0, gridDim.y-1]; of type uint3.
	   threadIdx: built-in variable, the index of the current thread within its block;
	   threadIdx.x/.y/.z are available depending on the block's dimensionality; of type uint3. */
	// map from threadIdx/BlockIdx to pixel position
	int x = threadIdx.x + blockIdx.x * blockDim.x;
	int y = threadIdx.y + blockIdx.y * blockDim.y;
	int offset = x + y * blockDim.x * gridDim.x;

	/* __shared__: variable type qualifier (may be combined with __device__); the variable lives
	   in the block's shared memory, has the block's lifetime and is accessible only to the
	   threads of that block; __shared__ and __constant__ variables have static storage by
	   default; extern __shared__ declares an array whose size is given by the execution
	   configuration; a __shared__ variable cannot be initialized at declaration; the CUDA C
	   compiler treats shared-memory variables differently from ordinary variables. */
	__shared__ float shared[16][16]; // == threads_block
	// now calculate the value at that position
	const float period = 128.0f;
	shared[threadIdx.x][threadIdx.y] = 255 * (sinf(x*2.0f*PI / period) + 1.0f) *(sinf(y*2.0f*PI / period) + 1.0f) / 4.0f;
	/* __syncthreads: synchronizes the threads of a block; no thread executes an instruction
	   after __syncthreads() until every thread of the block has reached it; threads of the same
	   block exchange data through shared memory and rely on this barrier to share it correctly;
	   when timing a kernel section with clock(), all threads of a block take the same time after
	   the barrier, so recording one time per block is enough. */
	// removing this syncthreads shows graphically what happens
	// when it doesn't exist. This is an example of why we need it.
	__syncthreads();

	ptr[offset * 4 + 0] = 0;
	ptr[offset * 4 + 1] = shared[/*15 - */threadIdx.x][/*15 - */threadIdx.y];
	ptr[offset * 4 + 2] = 0;
	ptr[offset * 4 + 3] = 255;
}

int green_ball_gpu(unsigned char* ptr, int width, int height, float* elapsed_time)
{
	/* hipEvent_t: CUDA event type (struct); an event is essentially a GPU timestamp used to
	   measure the time the GPU spends on a task; because events are implemented on the GPU they
	   are not suitable for timing mixed host/device code. */
	hipEvent_t start, stop;
	// hipEventCreate: create an event object, asynchronous
	hipEventCreate(&start);
	hipEventCreate(&stop);
	// hipEventRecord: record an event, asynchronous; start records the begin time
	hipEventRecord(start, 0);

	const size_t length{ width * height * 4 * sizeof(unsigned char) };
	unsigned char* dev{ nullptr };
	// hipMalloc: allocate memory on the device
	hipMalloc(&dev, length);

	const int threads_block{ 16 };
	dim3 blocks(width / threads_block, height / threads_block);
	dim3 threads(threads_block, threads_block);

	/* <<< >>>: operator introduced by CUDA to pass the execution configuration (grid and block
	   dimensions, etc.) to the compiler and runtime; these arguments are not passed to the
	   device code, they tell the runtime how to launch it; the kernel's own arguments go in the
	   parentheses as in a standard call; with the runtime API the configuration is
	   <<<Dg,Db,Ns,S>>>: Dg (dim3) gives the grid size, Dg.x*Dg.y*Dg.z blocks; Db (dim3) gives
	   the block size, Db.x*Db.y*Db.z threads per block; Ns (size_t) is the dynamically allocated
	   shared memory per block for extern __shared__ arrays, optional, default 0; S
	   (cudaStream_t) is the associated stream, optional, default 0. */
	green_ball << <blocks, threads >> >(dev, width, height);

	/* hipMemcpy: copy data between host and device; the fourth argument must be one of:
	   (1). hipMemcpyHostToHost: host to host
	   (2). hipMemcpyHostToDevice: host to device
	   (3). hipMemcpyDeviceToHost: device to host
	   (4). hipMemcpyDeviceToDevice: device to device
	   (5). hipMemcpyDefault: infer the direction from the pointer values, requires unified
	   virtual addressing (CUDA 6.0 and above)
	   cudaMemcpy is synchronous with respect to the host. */
	hipMemcpy(ptr, dev, length, hipMemcpyDeviceToHost);
	// hipFree: free device memory allocated with cudaMalloc
	hipFree(dev);

	// hipEventRecord: record an event, asynchronous; stop records the end time
	hipEventRecord(stop, 0);
	// hipEventSynchronize: wait for the event to complete
	hipEventSynchronize(stop);
	// cudaEventElapseTime: compute the time elapsed between two events, in milliseconds
	hipEventElapsedTime(elapsed_time, start, stop);
	// hipEventDestroy: destroy the event object, asynchronous
	hipEventDestroy(start);
	hipEventDestroy(stop);

	return 0;
}
c28b990fb0766cf55899851a375f2f4303b9d134.cu
#include "funset.hpp" #include <iostream> #include <algorithm> #include <memory> #include <cuda_runtime.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <device_launch_parameters.h> #include "common.hpp" /* __global__: 函数类型限定符;在设备上运行;在主机端调用,计算能力3.2及以上可以在 设备端调用;声明的函数的返回值必须是void类型;对此类型函数的调用是异步的,即在 设备完全完成它的运行之前就返回了;对此类型函数的调用必须指定执行配置,即用于在 设备上执行函数时的grid和block的维度,以及相关的流(即插入<<< >>>运算符); a kernel,表示此函数为内核函数(运行在GPU上的CUDA并行计算函数称为kernel(内核函 数),内核函数必须通过__global__函数类型限定符定义); */ __global__ static void green_ball(unsigned char* ptr, int width, int height) { /* gridDim: 内置变量,用于描述线程网格的维度,对于所有线程块来说,这个 变量是一个常数,用来保存线程格每一维的大小,即每个线程格中线程块的数量. 为dim3类型; blockDim: 内置变量,用于说明每个block的维度与尺寸.为dim3类型,包含 了block在三个维度上的尺寸信息;对于所有线程块来说,这个变量是一个常数, 保存的是线程块中每一维的线程数量; blockIdx: 内置变量,变量中包含的值就是当前执行设备代码的线程块的索引;用 于说明当前thread所在的block在整个grid中的位置,blockIdx.x取值范围是 [0,gridDim.x-1],blockIdx.y取值范围是[0, gridDim.y-1].为uint3类型, 包含了一个block在grid中各个维度上的索引信息; threadIdx: 内置变量,变量中包含的值就是当前执行设备代码的线程索引;用于 说明当前thread在block中的位置;如果线程是一维的可获取threadIdx.x,如果 是二维的还可获取threadIdx.y,如果是三维的还可获取threadIdx.z;为uint3类 型,包含了一个thread在block中各个维度的索引信息 */ // map from threadIdx/BlockIdx to pixel position int x = threadIdx.x + blockIdx.x * blockDim.x; int y = threadIdx.y + blockIdx.y * blockDim.y; int offset = x + y * blockDim.x * gridDim.x; /* __shared__: 变量类型限定符;使用__shared__限定符,或者与__device__限 定符连用,此时声明的变量位于block中的共享存储器空间中,与block具有相同 的生命周期,仅可通过block内的所有线程访问;__shared__和__constant__变量 默认为是静态存储;在__shared__前可以加extern关键字,但表示的是变量大小 由执行参数确定;__shared__变量在声明时不能初始化;可以将CUDA C的关键字 __shared__添加到变量声明中,这将使这个变量驻留在共享内存中;CUDA C编译 器对共享内存中的变量与普通变量将分别采取不同的处理方式 */ __shared__ float shared[16][16]; // == threads_block // now calculate the value at that position const float period = 128.0f; shared[threadIdx.x][threadIdx.y] = 255 * (sinf(x*2.0f*PI / period) + 1.0f) *(sinf(y*2.0f*PI / period) + 1.0f) / 4.0f; /* __syncthreads: 对线程块中的线程进行同步;CUDA架构将确保,除非线程块 中的每个线程都执行了__syncthreads(),否则没有任何线程能执行 __syncthreads()之后的指令;在同一个block中的线程通过共享存储器(shared memory)交换数据,并通过栅栏同步(可以在kernel函数中需要同步的位置调用 __syncthreads()函数)保证线程间能够正确地共享数据;使用clock()函数计时, 在内核函数中要测量的一段代码的开始和结束的位置分别调用一次clock()函数, 并将结果记录下来。由于调用__syncthreads()函数后,一个block中的所有 thread需要的时间是相同的,因此只需要记录每个block执行需要的时间就行了, 而不需要记录每个thread的时间 */ // removing this syncthreads shows graphically what happens // when it doesn't exist.this is an example of why we need it. 
__syncthreads(); ptr[offset * 4 + 0] = 0; ptr[offset * 4 + 1] = shared[/*15 - */threadIdx.x][/*15 - */threadIdx.y]; ptr[offset * 4 + 2] = 0; ptr[offset * 4 + 3] = 255; } int green_ball_gpu(unsigned char* ptr, int width, int height, float* elapsed_time) { /* cudaEvent_t: CUDA event types,结构体类型, CUDA事件,用于测量GPU在某 个任务上花费的时间,CUDA中的事件本质上是一个GPU时间戳,由于CUDA事件是在 GPU上实现的,因此它们不适于对同时包含设备代码和主机代码的混合代码计时 */ cudaEvent_t start, stop; // cudaEventCreate: 创建一个事件对象,异步启动 cudaEventCreate(&start); cudaEventCreate(&stop); // cudaEventRecord: 记录一个事件,异步启动,start记录起始时间 cudaEventRecord(start, 0); const size_t length{ width * height * 4 * sizeof(unsigned char) }; unsigned char* dev{ nullptr }; // cudaMalloc: 在设备端分配内存 cudaMalloc(&dev, length); const int threads_block{ 16 }; dim3 blocks(width / threads_block, height / threads_block); dim3 threads(threads_block, threads_block); /* <<< >>>: 为CUDA引入的运算符,指定线程网格和线程块维度等,传递执行参 数给CUDA编译器和运行时系统,用于说明内核函数中的线程数量,以及线程是如何 组织的;尖括号中这些参数并不是传递给设备代码的参数,而是告诉运行时如何 启动设备代码,传递给设备代码本身的参数是放在圆括号中传递的,就像标准的函 数调用一样;不同计算能力的设备对线程的总数和组织方式有不同的约束;必须 先为kernel中用到的数组或变量分配好足够的空间,再调用kernel函数,否则在 GPU计算时会发生错误,例如越界等; 使用运行时API时,需要在调用的内核函数名与参数列表直接以<<<Dg,Db,Ns,S>>> 的形式设置执行配置,其中:Dg是一个dim3型变量,用于设置grid的维度和各个 维度上的尺寸.设置好Dg后,grid中将有Dg.x*Dg.y*Dg.z个block;Db是 一个dim3型变量,用于设置block的维度和各个维度上的尺寸.设置好Db后,每个 block中将有Db.x*Db.y*Db.z个thread;Ns是一个size_t型变量,指定各块为此调 用动态分配的共享存储器大小,这些动态分配的存储器可供声明为外部数组 (extern __shared__)的其他任何变量使用;Ns是一个可选参数,默认值为0;S为 cudaStream_t类型,用于设置与内核函数关联的流.S是一个可选参数,默认值0. */ green_ball << <blocks, threads >> >(dev, width, height); /* cudaMemcpy: 在主机端和设备端拷贝数据,此函数第四个参数仅能是下面之一: (1). cudaMemcpyHostToHost: 拷贝数据从主机端到主机端 (2). cudaMemcpyHostToDevice: 拷贝数据从主机端到设备端 (3). cudaMemcpyDeviceToHost: 拷贝数据从设备端到主机端 (4). cudaMemcpyDeviceToDevice: 拷贝数据从设备端到设备端 (5). cudaMemcpyDefault: 从指针值自动推断拷贝数据方向,需要支持 统一虚拟寻址(CUDA6.0及以上版本) cudaMemcpy函数对于主机是同步的 */ cudaMemcpy(ptr, dev, length, cudaMemcpyDeviceToHost); // cudaFree: 释放设备上由cudaMalloc函数分配的内存 cudaFree(dev); // cudaEventRecord: 记录一个事件,异步启动,stop记录结束时间 cudaEventRecord(stop, 0); // cudaEventSynchronize: 事件同步,等待一个事件完成,异步启动 cudaEventSynchronize(stop); // cudaEventElapseTime: 计算两个事件之间经历的时间,单位为毫秒,异步启动 cudaEventElapsedTime(elapsed_time, start, stop); // cudaEventDestroy: 销毁事件对象,异步启动 cudaEventDestroy(start); cudaEventDestroy(stop); return 0; }
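// The <<<Dg,Db,Ns,S>>> comment above describes the Ns parameter (dynamically sized shared
// memory), which green_ball itself never needs because its tile is a fixed 16x16 array. A
// minimal standalone sketch of the extern __shared__ pattern that Ns exists for; row_reverse
// is a hypothetical kernel, not part of the file above.
#include <cuda_runtime.h>

__global__ void row_reverse(const float* in, float* out, int row_len) {
    extern __shared__ float tile[];                      // sized by Ns at launch time
    const int row = blockIdx.x;
    for (int c = threadIdx.x; c < row_len; c += blockDim.x)
        tile[c] = in[row * row_len + c];                 // stage one row in shared memory
    __syncthreads();
    for (int c = threadIdx.x; c < row_len; c += blockDim.x)
        out[row * row_len + c] = tile[row_len - 1 - c];  // write it back reversed
}
// Launch sketch: Ns = row_len * sizeof(float) bytes of dynamic shared memory per block:
//   row_reverse<<<num_rows, 128, row_len * sizeof(float)>>>(d_in, d_out, row_len);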
ed818dff9b898bf97825887b56009a09d9c71495.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* This is machine problem 2, part 2: brute force k nearest neighbors * You are given a large number of particles, and are asked * to find the k particles that are nearest to each one. * Look at the example in /tutorials/thread_local_variables.cu * for how you can use per thread arrays for sorting. * Using that example, port the cpu reference code to the gpu in a first step. * In a second step, modify your code so that the per-thread arrays are in * shared memory. You should submit this second version of your code. */ #include <cassert> #include "mp2-util.h" #define BLOCK_SIZE 256 #define NUM_NEIGHBORS 5 // TODO enable this to print debugging information //const bool print_debug = true; const bool print_debug = false; event_pair timer; inline __device__ __host__ float3 operator -(float3 a, float3 b) { return make_float3(a.x-b.x, a.y-b.y, a.z-b.z); } __host__ __device__ float dist2(float3 a, float3 b) { float3 d = a - b; float d2 = d.x*d.x + d.y*d.y + d.z*d.z; return d2; } template <typename T> __host__ __device__ void init_list(T *base_ptr, unsigned int size, T val) { for(int i=0;i<size;i++) { base_ptr[i] = val; } } __host__ __device__ void insert_list(float *dist_list, int *id_list, int size, float dist, int id) { int k; for (k=0; k < size; k++) { if (dist < dist_list[k]) { // we should insert it in here, so push back and make it happen for (int j = size - 1; j > k ; j--) { dist_list[j] = dist_list[j-1]; id_list[j] = id_list[j-1]; } dist_list[k] = dist; id_list[k] = id; break; } } } __global__ void device_find_knn( float3 *particles, int *knn, int num_particles, int num_neighbors ) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < num_particles) { float3 p = particles[i]; float neigh_dist[NUM_NEIGHBORS]; int neigh_ids[NUM_NEIGHBORS]; init_list(&neigh_dist[0],num_neighbors,2.0f); init_list(&neigh_ids[0],num_neighbors,-1); for(int j=0;j<num_particles;j++) { if(i != j) { float rsq = dist2(p,particles[j]); insert_list(&neigh_dist[0], &neigh_ids[0], num_neighbors, rsq, j); } } for(int j=0;j<num_neighbors;j++) { knn[num_neighbors*i + j] = neigh_ids[j]; } } } __global__ void device_find_knn_shared_mem( float3 *particles, int *knn, int num_particles, int num_neighbors ) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < num_particles) { float3 p = particles[i]; __shared__ float neigh_dist[BLOCK_SIZE * NUM_NEIGHBORS]; __shared__ int neigh_ids[BLOCK_SIZE * NUM_NEIGHBORS]; init_list(&neigh_dist[threadIdx.x * num_neighbors],num_neighbors,2.0f); init_list(&neigh_ids[threadIdx.x * num_neighbors],num_neighbors,-1); for(int j=0;j<num_particles;j++) { if(i != j) { float rsq = dist2(p,particles[j]); insert_list( &neigh_dist[threadIdx.x * num_neighbors], &neigh_ids[threadIdx.x * num_neighbors], num_neighbors, rsq, j ); } } for(int j=0;j<num_neighbors;j++) { knn[num_neighbors * i + j] = neigh_ids[threadIdx.x * num_neighbors + j]; } } } template <int num_neighbors> void host_find_knn(float3 *particles, int *knn, int array_length) { for(int i=0;i<array_length;i++) { float3 p = particles[i]; float neigh_dist[num_neighbors]; int neigh_ids[num_neighbors]; init_list(&neigh_dist[0],num_neighbors,2.0f); init_list(&neigh_ids[0],num_neighbors,-1); for(int j=0;j<array_length;j++) { if(i != j) { float rsq = dist2(p,particles[j]); insert_list(&neigh_dist[0], &neigh_ids[0], num_neighbors, rsq, j); } } for(int j=0;j<num_neighbors;j++) { knn[num_neighbors*i + j] = neigh_ids[j]; } } } void allocate_host_memory( int 
num_particles, int num_neighbors, float3 *&h_particles, int *&h_knn, int *&h_knn_checker ) { // malloc host array h_particles = (float3*)malloc(num_particles * sizeof(float3)); h_knn = (int*)malloc(num_particles * num_neighbors * sizeof(int)); h_knn_checker = (int*)malloc(num_particles * num_neighbors * sizeof(int)); // if either memory allocation failed, report an error message if(h_particles == 0 || h_knn == 0 || h_knn_checker == 0) { printf("couldn't allocate host memory\n"); exit(1); } } void allocate_device_memory(int num_particles, int num_neighbors, float3 *&d_particles, int *&d_knn) { hipMalloc((void**)&d_particles, num_particles * sizeof(float3)); hipMalloc((void**)&d_knn, num_particles * num_neighbors * sizeof(int)); if (d_particles == 0 || d_knn == 0) { printf("Failed to allocate memory on device\n"); exit(1); } } void deallocate_host_memory(float3 *h_particles, int *h_knn, int *h_knn_checker) { free(h_particles); free(h_knn); free(h_knn_checker); } void deallocate_device_memory(float3 *d_particles, int *d_knn) { hipFree(d_particles); hipFree(d_knn); } bool cross_check_results( int * reference_knn, int * knn, int num_particles, int num_neighbors ) { int error = 0; for(int i=0;i<num_particles;i++) { for(int j=0;j<num_neighbors;j++) { if(reference_knn[i*num_neighbors + j] != knn[i*num_neighbors + j]) { if(print_debug) { printf( "particle %d, neighbor %d is %d on cpu, %d on gpu\n", i, j, reference_knn[i*num_neighbors + j], knn[i*num_neighbors + j] ); } error = 1; } } } if(error) { printf("Output of CUDA version and normal version didn't match! \n"); } else { printf("Worked! CUDA and reference output match. \n"); } return error; } int main(void) { // create arrays of 8K elements int num_particles = 8*1024; const int num_neighbors = 5; // pointers to host arrays float3 *h_particles = 0; int *h_knn = 0; int *h_knn_checker = 0; // pointers to device arrays float3 *d_particles = 0; int *d_knn = 0; allocate_host_memory( num_particles, num_neighbors, h_particles, h_knn, h_knn_checker ); allocate_device_memory(num_particles, num_neighbors, d_particles, d_knn); // generate random input // initialize srand(13); for(int i=0;i< num_particles;i++) { h_particles[i] = make_float3( (float)rand() / (float)RAND_MAX, (float)rand() / (float)RAND_MAX, (float)rand()/(float)RAND_MAX ); } // copy input to GPU start_timer(&timer); hipMemcpy( d_particles, h_particles, num_particles * sizeof(float3), hipMemcpyHostToDevice ); stop_timer(&timer,"Copy memory from host to device: "); start_timer(&timer); hipLaunchKernelGGL(( device_find_knn), dim3(num_particles / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, d_particles, d_knn, num_particles, num_neighbors ); check_cuda_error("brute force knn"); stop_timer(&timer,"brute force knn"); start_timer(&timer); hipLaunchKernelGGL(( device_find_knn_shared_mem), dim3(num_particles / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, d_particles, d_knn, num_particles, num_neighbors ); check_cuda_error("shared mem knn"); stop_timer(&timer,"shared mem knn"); // download and inspect the result on the host start_timer(&timer); hipMemcpy( h_knn, d_knn, num_particles * num_neighbors * sizeof(int), hipMemcpyDeviceToHost ); check_cuda_error("copy from gpu"); stop_timer(&timer,"copy back from gpu memory"); // generate reference output start_timer(&timer); host_find_knn<num_neighbors>(h_particles, h_knn_checker, num_particles); stop_timer(&timer,"cpu brute force knn"); // check CUDA output versus reference output cross_check_results(h_knn_checker, h_knn, num_particles, num_neighbors); 
deallocate_host_memory(h_particles, h_knn, h_knn_checker); deallocate_device_memory(d_particles, d_knn); return 0; }
ed818dff9b898bf97825887b56009a09d9c71495.cu
/* This is machine problem 2, part 2: brute force k nearest neighbors * You are given a large number of particles, and are asked * to find the k particles that are nearest to each one. * Look at the example in /tutorials/thread_local_variables.cu * for how you can use per thread arrays for sorting. * Using that example, port the cpu reference code to the gpu in a first step. * In a second step, modify your code so that the per-thread arrays are in * shared memory. You should submit this second version of your code. */ #include <cassert> #include "mp2-util.h" #define BLOCK_SIZE 256 #define NUM_NEIGHBORS 5 // TODO enable this to print debugging information //const bool print_debug = true; const bool print_debug = false; event_pair timer; inline __device__ __host__ float3 operator -(float3 a, float3 b) { return make_float3(a.x-b.x, a.y-b.y, a.z-b.z); } __host__ __device__ float dist2(float3 a, float3 b) { float3 d = a - b; float d2 = d.x*d.x + d.y*d.y + d.z*d.z; return d2; } template <typename T> __host__ __device__ void init_list(T *base_ptr, unsigned int size, T val) { for(int i=0;i<size;i++) { base_ptr[i] = val; } } __host__ __device__ void insert_list(float *dist_list, int *id_list, int size, float dist, int id) { int k; for (k=0; k < size; k++) { if (dist < dist_list[k]) { // we should insert it in here, so push back and make it happen for (int j = size - 1; j > k ; j--) { dist_list[j] = dist_list[j-1]; id_list[j] = id_list[j-1]; } dist_list[k] = dist; id_list[k] = id; break; } } } __global__ void device_find_knn( float3 *particles, int *knn, int num_particles, int num_neighbors ) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < num_particles) { float3 p = particles[i]; float neigh_dist[NUM_NEIGHBORS]; int neigh_ids[NUM_NEIGHBORS]; init_list(&neigh_dist[0],num_neighbors,2.0f); init_list(&neigh_ids[0],num_neighbors,-1); for(int j=0;j<num_particles;j++) { if(i != j) { float rsq = dist2(p,particles[j]); insert_list(&neigh_dist[0], &neigh_ids[0], num_neighbors, rsq, j); } } for(int j=0;j<num_neighbors;j++) { knn[num_neighbors*i + j] = neigh_ids[j]; } } } __global__ void device_find_knn_shared_mem( float3 *particles, int *knn, int num_particles, int num_neighbors ) { int i = blockDim.x * blockIdx.x + threadIdx.x; if (i < num_particles) { float3 p = particles[i]; __shared__ float neigh_dist[BLOCK_SIZE * NUM_NEIGHBORS]; __shared__ int neigh_ids[BLOCK_SIZE * NUM_NEIGHBORS]; init_list(&neigh_dist[threadIdx.x * num_neighbors],num_neighbors,2.0f); init_list(&neigh_ids[threadIdx.x * num_neighbors],num_neighbors,-1); for(int j=0;j<num_particles;j++) { if(i != j) { float rsq = dist2(p,particles[j]); insert_list( &neigh_dist[threadIdx.x * num_neighbors], &neigh_ids[threadIdx.x * num_neighbors], num_neighbors, rsq, j ); } } for(int j=0;j<num_neighbors;j++) { knn[num_neighbors * i + j] = neigh_ids[threadIdx.x * num_neighbors + j]; } } } template <int num_neighbors> void host_find_knn(float3 *particles, int *knn, int array_length) { for(int i=0;i<array_length;i++) { float3 p = particles[i]; float neigh_dist[num_neighbors]; int neigh_ids[num_neighbors]; init_list(&neigh_dist[0],num_neighbors,2.0f); init_list(&neigh_ids[0],num_neighbors,-1); for(int j=0;j<array_length;j++) { if(i != j) { float rsq = dist2(p,particles[j]); insert_list(&neigh_dist[0], &neigh_ids[0], num_neighbors, rsq, j); } } for(int j=0;j<num_neighbors;j++) { knn[num_neighbors*i + j] = neigh_ids[j]; } } } void allocate_host_memory( int num_particles, int num_neighbors, float3 *&h_particles, int *&h_knn, int *&h_knn_checker ) { // 
malloc host array h_particles = (float3*)malloc(num_particles * sizeof(float3)); h_knn = (int*)malloc(num_particles * num_neighbors * sizeof(int)); h_knn_checker = (int*)malloc(num_particles * num_neighbors * sizeof(int)); // if either memory allocation failed, report an error message if(h_particles == 0 || h_knn == 0 || h_knn_checker == 0) { printf("couldn't allocate host memory\n"); exit(1); } } void allocate_device_memory(int num_particles, int num_neighbors, float3 *&d_particles, int *&d_knn) { cudaMalloc((void**)&d_particles, num_particles * sizeof(float3)); cudaMalloc((void**)&d_knn, num_particles * num_neighbors * sizeof(int)); if (d_particles == 0 || d_knn == 0) { printf("Failed to allocate memory on device\n"); exit(1); } } void deallocate_host_memory(float3 *h_particles, int *h_knn, int *h_knn_checker) { free(h_particles); free(h_knn); free(h_knn_checker); } void deallocate_device_memory(float3 *d_particles, int *d_knn) { cudaFree(d_particles); cudaFree(d_knn); } bool cross_check_results( int * reference_knn, int * knn, int num_particles, int num_neighbors ) { int error = 0; for(int i=0;i<num_particles;i++) { for(int j=0;j<num_neighbors;j++) { if(reference_knn[i*num_neighbors + j] != knn[i*num_neighbors + j]) { if(print_debug) { printf( "particle %d, neighbor %d is %d on cpu, %d on gpu\n", i, j, reference_knn[i*num_neighbors + j], knn[i*num_neighbors + j] ); } error = 1; } } } if(error) { printf("Output of CUDA version and normal version didn't match! \n"); } else { printf("Worked! CUDA and reference output match. \n"); } return error; } int main(void) { // create arrays of 8K elements int num_particles = 8*1024; const int num_neighbors = 5; // pointers to host arrays float3 *h_particles = 0; int *h_knn = 0; int *h_knn_checker = 0; // pointers to device arrays float3 *d_particles = 0; int *d_knn = 0; allocate_host_memory( num_particles, num_neighbors, h_particles, h_knn, h_knn_checker ); allocate_device_memory(num_particles, num_neighbors, d_particles, d_knn); // generate random input // initialize srand(13); for(int i=0;i< num_particles;i++) { h_particles[i] = make_float3( (float)rand() / (float)RAND_MAX, (float)rand() / (float)RAND_MAX, (float)rand()/(float)RAND_MAX ); } // copy input to GPU start_timer(&timer); cudaMemcpy( d_particles, h_particles, num_particles * sizeof(float3), cudaMemcpyHostToDevice ); stop_timer(&timer,"Copy memory from host to device: "); start_timer(&timer); device_find_knn<<<num_particles / BLOCK_SIZE, BLOCK_SIZE>>>( d_particles, d_knn, num_particles, num_neighbors ); check_cuda_error("brute force knn"); stop_timer(&timer,"brute force knn"); start_timer(&timer); device_find_knn_shared_mem<<<num_particles / BLOCK_SIZE, BLOCK_SIZE>>>( d_particles, d_knn, num_particles, num_neighbors ); check_cuda_error("shared mem knn"); stop_timer(&timer,"shared mem knn"); // download and inspect the result on the host start_timer(&timer); cudaMemcpy( h_knn, d_knn, num_particles * num_neighbors * sizeof(int), cudaMemcpyDeviceToHost ); check_cuda_error("copy from gpu"); stop_timer(&timer,"copy back from gpu memory"); // generate reference output start_timer(&timer); host_find_knn<num_neighbors>(h_particles, h_knn_checker, num_particles); stop_timer(&timer,"cpu brute force knn"); // check CUDA output versus reference output cross_check_results(h_knn_checker, h_knn, num_particles, num_neighbors); deallocate_host_memory(h_particles, h_knn, h_knn_checker); deallocate_device_memory(d_particles, d_knn); return 0; }
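// A small host-side sketch of what insert_list above does: keep a fixed-size list of the k
// smallest squared distances (and their particle ids), sorted ascending, by shifting larger
// entries back one slot before writing the new candidate. insert_knn and the sample values
// are illustrative only; 2.0f mirrors the sentinel the kernels use for "no neighbor yet".
#include <cstdio>

static void insert_knn(float* dist, int* id, int k, float d, int candidate) {
    for (int i = 0; i < k; ++i) {
        if (d < dist[i]) {
            for (int j = k - 1; j > i; --j) {   // push everything from slot i onwards back
                dist[j] = dist[j - 1];
                id[j]   = id[j - 1];
            }
            dist[i] = d;
            id[i]   = candidate;
            break;
        }
    }
}

int main() {
    const int k = 3;
    float dist[k] = { 2.0f, 2.0f, 2.0f };
    int   id[k]   = { -1, -1, -1 };
    const float d2[] = { 0.9f, 0.1f, 0.5f, 0.05f, 0.7f };   // squared distances to candidates 0..4
    for (int j = 0; j < 5; ++j) insert_knn(dist, id, k, d2[j], j);
    for (int i = 0; i < k; ++i) printf("#%d: id=%d dist2=%.2f\n", i, id[i], dist[i]);
    return 0;   // prints ids 3, 1, 2 with dist2 0.05, 0.10, 0.50
}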
601afda78d4c029dfbf5ab74490c11ec05888e91.hip
// !!! This is a file automatically generated by hipify!!! #if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 4, 16, false, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, hipStream_t stream); #pragma GCC diagnostic pop #endif
601afda78d4c029dfbf5ab74490c11ec05888e91.cu
#if !MEGDNN_TEGRA_X1 // generated by gen_cuda_conv_bias_kern_impls.py // ignore warning of cutlass #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wstrict-aliasing" #include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl" using LayoutSrc = cutlass::layout::TensorNCxHWx<4>; using LayoutFilter = cutlass::layout::TensorCxRSKx<4>; using LayoutDst = cutlass::layout::TensorNCxHWx<32>; using ThreadBlockShape = cutlass::gemm::GemmShape<128, 128, 32>; using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>; using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>; using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp< int8_t, 4, int32_t, int32_t, float>; using Convolution = cutlass::convolution::device::Convolution< int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t, LayoutDst, int32_t, cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp, cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle< cutlass::convolution::ConvType::kConvolution>, 2, 4, 16, false, cutlass::arch::OpMultiplyAddSaturate>; template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>( const typename Convolution::ElementSrc* d_src, const typename Convolution::ElementFilter* d_filter, const typename Convolution::ElementBias* d_bias, const typename Convolution::ElementDst* d_z, typename Convolution::ElementDst* d_dst, int* workspace, typename Convolution::ConvolutionParameter const& conv_param, typename Convolution::EpilogueOutputOp::Params const& epilogue, cudaStream_t stream); #pragma GCC diagnostic pop #endif
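// The pair above is a script-generated translation unit whose only job is to explicitly
// instantiate cutlass_convolution_wrapper for one Convolution configuration, presumably so
// each kernel variant compiles in its own .cu. A minimal, self-contained sketch of that
// explicit-instantiation pattern with hypothetical names (run_tile is not the cutlass API):
#include <cstdio>

template <int TileM, int TileN>
void run_tile(const float* in, float* out, int n) {
    // stand-in for the heavy templated wrapper whose definition lives in a shared .inl/.cuinl file
    for (int i = 0; i < n; ++i) out[i] = in[i];
    std::printf("ran a %dx%d tile over %d elements\n", TileM, TileN, n);
}

// Each generated file would contain just one line like this, emitting exactly one
// instantiation of the template for its configuration:
template void run_tile<128, 32>(const float*, float*, int);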
c4bcf507d303729270591b17654c73ecbe160c86.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorRandom.cu" #else #define NUM_BLOCKS min((int)THCCeilDiv(size, (ptrdiff_t) BLOCK_SIZE), MAX_NUM_BLOCKS) #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THC_API void THCTensor_(uniform)(THCState* state, THCTensor *self_, double a, double b) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); real *data = THCTensor_(data)(state, self); hipLaunchKernelGGL(( generate_uniform), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, a, b); THCTensor_(freeCopyTo)(state, self, self_); }; THC_API void THCTensor_(normal)(THCState* state, THCTensor *self_, double mean, double stdv) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); real *data = THCTensor_(data)(state, self); hipLaunchKernelGGL(( generate_normal), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, mean, stdv); THCTensor_(freeCopyTo)(state, self, self_); }; THC_API void THCTensor_(normal_means)(THCState *state, THCTensor *self, THCTensor *means, double stddev) { THCTensor_(resizeAs)(state, self, means); THCTensor_(normal)(state, self, 0, stddev); THCTensor_(cadd)(state, self, self, ScalarConvert<int, real>::to(1), means); } THC_API void THCTensor_(normal_stddevs)(THCState *state, THCTensor *self, double mean, THCTensor *stddevs) { THCTensor_(resizeAs)(state, self, stddevs); THCTensor_(normal)(state, self, 0, 1); THCTensor_(cmul)(state, self, self, stddevs); THCTensor_(add)(state, self, self, ScalarConvert<double, real>::to(mean)); } THC_API void THCTensor_(normal_means_stddevs)(THCState *state, THCTensor *self, THCTensor *means, THCTensor *stddevs) { THCTensor_(resizeAs)(state, self, means); THCTensor_(normal)(state, self, 0, 1); THCTensor_(cmul)(state, self, self, stddevs); THCTensor_(cadd)(state, self, self, ScalarConvert<int, real>::to(1), means); } THC_API void THCTensor_(logNormal)(THCState* state, THCTensor *self_, double mean, double stdv) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); real *data = THCTensor_(data)(state, self); hipLaunchKernelGGL(( generateLogNormal<real>), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, mean, stdv); THCTensor_(freeCopyTo)(state, self, self_); }; THC_API void THCTensor_(exponential)(THCState* state, THCTensor *self_, double lambda) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); real *data = THCTensor_(data)(state, self); hipLaunchKernelGGL(( generate_exponential), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, 
data, lambda); THCTensor_(freeCopyTo)(state, self, self_); }; THC_API void THCTensor_(cauchy)(THCState* state, THCTensor *self_, double median, double sigma) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); real *data = THCTensor_(data)(state, self); hipLaunchKernelGGL(( generate_cauchy), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, median, sigma); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(renormRows)(struct THCState* state, THCTensor* t) { THAssert(THCTensor_(nDimension)(state, t) == 2); int64_t rows = THCTensor_(size)(state, t, 0); int64_t cols = THCTensor_(size)(state, t, 1); hipDeviceProp_t* props = THCState_getCurrentDeviceProperties(state); THAssert(props != NULL); int numSM = props->multiProcessorCount; int maxThreads = props->maxThreadsPerBlock; dim3 grid(rows < numSM * 4 ? rows : numSM * 4); dim3 block(cols < maxThreads ? cols : maxThreads); hipLaunchKernelGGL(( renormRowsL1<real>) , dim3(grid), dim3(block), block.x * sizeof(real), THCState_getCurrentStream(state), THCTensor_(data)(state, t), rows, cols); } THC_API void THCTensor_(multinomial)(struct THCState *state, THCudaLongTensor *self, THCTensor *prob_dist, int n_sample, int with_replacement) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, prob_dist)); THCGenerator* gen = THCRandom_getGenerator(state); int inputSize = THCTensor_(nDimension)(state, prob_dist); THArgCheck(inputSize > 0 && inputSize <= 2, 2, "prob_dist must be 1 or 2 dim"); // Categories are in the innermost dimension int64_t numDist = inputSize == 1 ? 1 : THCTensor_(size)(state, prob_dist, 0); int64_t numCategoriesLong = inputSize == 1 ? THCTensor_(size)(state, prob_dist, 0) : THCTensor_(size)(state, prob_dist, 1); // Since the index tensor is float, numCategories cannot exceed max // float integer precision THArgCheck(numCategoriesLong <= FLOAT32_MAX_CONSECUTIVE_INT, 2, "number of categories cannot exceed 2^24"); int numCategories = (int) numCategoriesLong; THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples"); if (!with_replacement) { THArgCheck(n_sample <= numCategories, 2, "cannot sample n_sample > prob_dist:size(1) samples without " "replacement"); } int free_prob_dist = 0; // Restructure data for 2d if (inputSize == 1) { THCTensor *temp = THCTensor_(new)(state); THCTensor_(unsqueeze1d)(state, temp, prob_dist, 0); prob_dist = temp; free_prob_dist = 1; } THCudaLongTensor_resize2d(state, self, numDist, n_sample); // get current device properties hipDeviceProp_t* props = THCState_getCurrentDeviceProperties(state); THAssert(props != NULL); int numSM = props->multiProcessorCount; int maxThreads = props->maxThreadsPerBlock; int maxShared = props->sharedMemPerBlock; int requiredShared = (numCategories < maxThreads ? numCategories : maxThreads) * (sizeof(real) * sizeof(accreal)); if (n_sample == 1 && maxShared >= requiredShared) { // Optimized allocation-free implementation // To exploit greater parallelism for the sampling, generate the // Uniform random samples in a separate kernel launch, into // temporarily allocated memory. The device RNG is thread-limited THCTensor *sampled = THCTensor_(newWithSize2d)(state, numDist, n_sample); THCTensor_(uniform)(state, sampled, 0.0, 1.0); dim3 block(numCategories < maxThreads ? 
numCategories : maxThreads); dim3 grid(numDist < numSM * 4 ? numDist : numSM * 4); hipLaunchKernelGGL(( sampleMultinomialOnce<real, accreal>) , dim3(grid), dim3(block), requiredShared, THCState_getCurrentStream(state), THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, sampled), THCTensor_(data)(state, prob_dist), THCTensor_(stride)(state, prob_dist, 0), THCTensor_(stride)(state, prob_dist, 1) ); THCTensor_(free)(state, sampled); } else { // Generic, slow implementation with memory allocations // For sampling without replacement, we modify the distribution // for subsequent samples in this space THCTensor* origDist = THCTensor_(new)(state); THCTensor_(resizeAs)(state, origDist, prob_dist); THCTensor_(copy)(state, origDist, prob_dist); THCTensor* normDist = THCTensor_(new)(state); THCTensor_(resizeAs)(state, normDist, prob_dist); THCTensor* prefixSum = THCTensor_(new)(state); // Renorm along rows THCTensor_(copy)(state, normDist, origDist); THCTensor_(renormRows)(state, normDist); // Prefix sum along rows THCTensor_(cumsum)(state, prefixSum, normDist, 1); if (with_replacement) { // Sample with replacement // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. dim3 block(32, 4); // Each warp in a block will generate a sample from one // distribution concurrently. dim3 grid(numDist < MAX_NUM_BLOCKS ? numDist : MAX_NUM_BLOCKS); hipLaunchKernelGGL(( sampleMultinomialWithReplacement) , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), gen->state.gen_states, n_sample, THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, prefixSum)); } else { // Sample without replacement // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. dim3 block(32, 4); // Each warp in a block will generate a sample from a different // distribution concurrently. ptrdiff_t numBlocks = THCCeilDiv(numDist, (int64_t) 4); dim3 grid(numBlocks < MAX_NUM_BLOCKS ? 
numBlocks : MAX_NUM_BLOCKS); for (int sample = 0; sample < n_sample; ++sample) { if (sample > 0) { // Update probabilities // Renorm along rows THCTensor_(copy)(state, normDist, origDist); THCTensor_(renormRows)(state, normDist); // Prefix sum along rows THCTensor_(cumsum)(state, prefixSum, normDist, 1); } // The kernel can only draw one sample before we have to // recalculate our distribution hipLaunchKernelGGL(( sampleMultinomialWithoutReplacement) , dim3(grid), dim3(block), 0, THCState_getCurrentStream(state), gen->state.gen_states, n_sample, sample, THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, origDist), THCTensor_(data)(state, prefixSum)); } } THCTensor_(free)(state, prefixSum); THCTensor_(free)(state, normDist); THCTensor_(free)(state, origDist); } // Revert data restructuring based on input sizes if (inputSize == 1) { THCudaLongTensor_resize1d(state, self, n_sample); } if (free_prob_dist) { THCTensor_(free)(state, prob_dist); } } THC_API void THCTensor_(multinomialAliasSetup)(THCState *state, THCTensor *_probs, THCudaLongTensor *_J, THCTensor *_q){ THAssert(THCTensor_(isContiguous)(state, _q)); THAssert(THCudaLongTensor_isContiguous(state, _J)); THAssert(THCTensor_(isContiguous)(state, _probs)); int64_t inputsize = THCTensor_(nElement)(state, _probs); THCudaLongTensor *smaller = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *larger = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *smaller_short = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *larger_short = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor_resize1d(state, _J, inputsize); THCTensor_(resize1d)(state, _q, inputsize); real one = ScalarConvert<int64_t, real>::to(1); int inputBlockDim = THCCeilDiv((int)inputsize + BLOCK_SIZE - 1, BLOCK_SIZE); hipLaunchKernelGGL(( aliasMultinomialFilter) , dim3(inputBlockDim), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state) , THCTensor_(data)(state, _q), THCTensor_(data)(state, _probs), THCudaLongTensor_data(state, smaller), THCudaLongTensor_data(state, larger), THCudaLongTensor_data(state, _J), THCudaLongTensor_data(state, smaller_short), THCudaLongTensor_data(state, larger_short), one, inputsize ); THCudaLongTensor_nonzero(state, smaller_short, smaller); THCudaLongTensor_nonzero(state, larger_short, larger); int h_large_c = THCudaLongTensor_nElement(state, larger_short); THCudaLongTensor_resize1d(state, smaller_short, inputsize); THCudaLongTensor_resize1d(state, larger_short, inputsize); hipLaunchKernelGGL(( aliasMultinomialSetup) , dim3(1), dim3(1), 0, THCState_getCurrentStream(state), THCudaLongTensor_data(state, _J), THCTensor_(data)(state, _q), inputsize, THCudaLongTensor_data(state, smaller_short), THCudaLongTensor_data(state, larger_short), inputsize - h_large_c, h_large_c ); real q_max = THCTensor_(maxall)(state, _q); hipLaunchKernelGGL(( condDiv), dim3(inputBlockDim), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), THCTensor_(data)(state, _q), THCudaLongTensor_data(state, _J), inputsize, q_max ); THCudaLongTensor_free(state, smaller); THCudaLongTensor_free(state, larger); THCudaLongTensor_free(state, smaller_short); THCudaLongTensor_free(state, larger_short); } THC_API void THCTensor_(multinomialAliasDraw)(THCState *state, THCudaLongTensor *self, THCudaLongTensor *_J, THCTensor *_q){ THAssert(THCTensor_(isContiguous)(state, _q)); THAssert(THCudaLongTensor_isContiguous(state, _J)); THCGenerator* gen = THCRandom_getGenerator(state); int64_t K = 
THCudaLongTensor_nElement(state, _J); int64_t output_nelem = THCudaLongTensor_nElement(state, self); ptrdiff_t size = THCudaLongTensor_nElement(state, self); THCTensor *uniform = THCTensor_(newWithSize1d)(state, output_nelem); THCTensor *bernoulli = THCTensor_(newWithSize1d)(state, output_nelem); THCTensor_(uniform)(state, uniform, 0, K); THCTensor_(uniform)(state, bernoulli, 0, 1); hipLaunchKernelGGL(( multinomialAliasDrawKernel) , dim3(THCCeilDiv((int)output_nelem+BLOCK_SIZE-1, BLOCK_SIZE)), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), size, THCudaLongTensor_data(state, self), THCudaLongTensor_data(state, _J), THCTensor_(data)(state, _q), K, THCTensor_(data)(state, uniform), THCTensor_(data)(state, bernoulli) ); } #endif #if defined(THC_REAL_IS_DOUBLE) GENERATE_KERNEL1(generate_bernoulli, double, double p, double, hiprand_uniform_double, x <= p) #else GENERATE_KERNEL1(generate_bernoulli, real, double p, float, hiprand_uniform, (ScalarConvert<bool, real>::to(x <= p))) #endif THC_API void THCTensor_(bernoulli)(THCState* state, THCTensor *self_, double p) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); real *data = THCTensor_(data)(state, self); hipLaunchKernelGGL(( generate_bernoulli), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, p); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(bernoulli_Tensor)(THCState *state, THCTensor *self, THCTensor* p) { #if defined(THC_REAL_IS_FLOAT) THCTensor_(bernoulli_FloatTensor)(state, self, p); #elif defined(THC_REAL_IS_DOUBLE) THCTensor_(bernoulli_DoubleTensor)(state, self, p); #endif } #define DEFINE_BERNOULLI_TENSOR(NAME, PROB_TYPE, PROB_DATA_TYPE) \ THC_API void THCTensor_(NAME)(THCState* state, \ THCTensor *self_, PROB_TYPE *probs_) \ { \ THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, probs_)); \ ptrdiff_t size = THCTensor_(nElement)(state, self_); \ if (size == 0) return; \ THCGenerator* gen = THCRandom_getGenerator(state); \ THCTensor *self = THCTensor_(newContiguous)(state, self_); \ PROB_TYPE *probs = PROB_TYPE##_newContiguous(state, probs_); \ ptrdiff_t prob_size = PROB_TYPE##_nElement(state, probs); \ real *result_data = THCTensor_(data)(state, self); \ PROB_DATA_TYPE *probs_data = PROB_TYPE##_data(state, probs); \ \ THArgCheck(size == prob_size, 3, "inconsistent tensor size"); \ \ hipLaunchKernelGGL(( generate_bernoulli_tensor), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), \ gen->state.gen_states, size, result_data, probs_data); \ \ PROB_TYPE##_free(state, probs); \ THCTensor_(freeCopyTo)(state, self, self_); \ } DEFINE_BERNOULLI_TENSOR(bernoulli_FloatTensor, THCudaTensor, float) DEFINE_BERNOULLI_TENSOR(bernoulli_DoubleTensor, THCudaDoubleTensor, double) #if defined(THC_REAL_IS_DOUBLE) GENERATE_KERNEL1(generate_geometric, double, double p, double, hiprand_uniform_double, ceil(log(x) / log(1-p))) #else GENERATE_KERNEL1(generate_geometric, real, double p, float, hiprand_uniform, (ScalarConvert<float, real>::to(ceilf(logf(x) / log(1-p))))) #endif #if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT) #define CURAND64(STATE) (((uint64_t)hiprand(STATE)) << 32) | (uint64_t)hiprand(STATE) GENERATE_KERNEL2(generate_random, real, int32_t base, uint32_t range, uint32_t, hiprand, \ 
static_cast<real>(static_cast<int32_t>((x % range) + base))) GENERATE_KERNEL2(generate_random_64, real, int64_t base, uint64_t range, uint64_t, CURAND64, \ static_cast<real>(static_cast<int64_t>((x % range) + base))) #elif defined(THC_REAL_IS_HALF) GENERATE_KERNEL2(generate_random, real, int32_t base, uint32_t range, uint32_t, hiprand, (ScalarConvert<int32_t, real>::to(static_cast<int32_t>(x % range + base)))) #else GENERATE_KERNEL2(generate_random, real, int32_t base, uint32_t range, uint32_t, hiprand, static_cast<real>(static_cast<int32_t>(x % range + base))) #endif THC_API void THCTensor_(geometric)(THCState* state, THCTensor *self_, double p) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); real *data = THCTensor_(data)(state, self); hipLaunchKernelGGL(( generate_geometric), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, p); THCTensor_(freeCopyTo)(state, self, self_); }; THC_API void THCTensor_(clampedRandom)(THCState* state, THCTensor *self_, int64_t min_val, int64_t max_val) { THArgCheck(min_val < max_val, 2, "max must be greater than min, but got: min = %lld, max = %lld", min_val, max_val); THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); real *data = THCTensor_(data)(state, self); uint64_t range = max_val - min_val; #if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT) if (range > 1ULL << 32) { hipLaunchKernelGGL(( generate_random_64), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, min_val, range); } else { #endif hipLaunchKernelGGL(( generate_random), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, min_val, range); #if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT) } #endif THCTensor_(freeCopyTo)(state, self, self_); }; THC_API void THCTensor_(cappedRandom)(THCState* state, THCTensor *self_, int64_t max_val) { THCTensor_(clampedRandom)(state, self_, 0LL, max_val); }; #define HLF_MANT_DIG 11 THC_API void THCTensor_(random)(THCState* state, THCTensor *self_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); real *data = THCTensor_(data)(state, self); #if defined(THC_REAL_IS_HALF) hipLaunchKernelGGL(( generate_random), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, 0UL, (1UL << HLF_MANT_DIG) + 1); #elif defined(THC_REAL_IS_FLOAT) hipLaunchKernelGGL(( generate_random), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, 0UL, (1UL << FLT_MANT_DIG) + 1); #elif defined(THC_REAL_IS_DOUBLE) hipLaunchKernelGGL(( generate_random_64), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, 0ULL, (1ULL << DBL_MANT_DIG) + 1); #elif defined(THC_REAL_IS_LONG) hipLaunchKernelGGL(( generate_random_64), 
dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, 0ULL, static_cast<uint64_t>(std::numeric_limits<real>::max()) + 1); #else hipLaunchKernelGGL(( generate_random), dim3(NUM_BLOCKS), dim3(BLOCK_SIZE), 0, THCState_getCurrentStream(state), gen->state.gen_states, size, data, 0UL, static_cast<uint32_t>(std::numeric_limits<real>::max()) + 1); #endif THCTensor_(freeCopyTo)(state, self, self_); }; #undef HLF_MANT_DIG #undef CURAND64 #undef NUM_BLOCKS #endif
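// The generate_* kernels above are launched with a capped grid (the NUM_BLOCKS macro), so
// each thread generally covers more than one of the 'size' elements. A standalone sketch of
// that shape using the cuRAND device API directly, rather than THC's gen->state.gen_states
// and the GENERATE_KERNEL macros; fill_uniform and the seed handling are illustrative only.
#include <cuda_runtime.h>
#include <curand_kernel.h>

__global__ void fill_uniform(float* out, ptrdiff_t size, unsigned long long seed) {
    const ptrdiff_t tid    = blockIdx.x * blockDim.x + threadIdx.x;
    const ptrdiff_t stride = (ptrdiff_t)gridDim.x * blockDim.x;
    curandStatePhilox4_32_10_t state;
    curand_init(seed, tid, /*offset=*/0, &state);   // one independent subsequence per thread
    for (ptrdiff_t i = tid; i < size; i += stride)
        out[i] = curand_uniform(&state);            // uniform in (0, 1]
}
// Launch sketch with a capped grid, mirroring the NUM_BLOCKS idea:
//   int blocks = (int)min((size + 255) / 256, (ptrdiff_t)(64 * 1024));
//   fill_uniform<<<blocks, 256>>>(d_out, size, 1234ULL);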
c4bcf507d303729270591b17654c73ecbe160c86.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCTensorRandom.cu" #else #define NUM_BLOCKS min((int)THCCeilDiv(size, (ptrdiff_t) BLOCK_SIZE), MAX_NUM_BLOCKS) #if defined(THC_REAL_IS_FLOAT) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_HALF) THC_API void THCTensor_(uniform)(THCState* state, THCTensor *self_, double a, double b) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); real *data = THCTensor_(data)(state, self); generate_uniform<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, a, b); THCTensor_(freeCopyTo)(state, self, self_); }; THC_API void THCTensor_(normal)(THCState* state, THCTensor *self_, double mean, double stdv) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); real *data = THCTensor_(data)(state, self); generate_normal<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, mean, stdv); THCTensor_(freeCopyTo)(state, self, self_); }; THC_API void THCTensor_(normal_means)(THCState *state, THCTensor *self, THCTensor *means, double stddev) { THCTensor_(resizeAs)(state, self, means); THCTensor_(normal)(state, self, 0, stddev); THCTensor_(cadd)(state, self, self, ScalarConvert<int, real>::to(1), means); } THC_API void THCTensor_(normal_stddevs)(THCState *state, THCTensor *self, double mean, THCTensor *stddevs) { THCTensor_(resizeAs)(state, self, stddevs); THCTensor_(normal)(state, self, 0, 1); THCTensor_(cmul)(state, self, self, stddevs); THCTensor_(add)(state, self, self, ScalarConvert<double, real>::to(mean)); } THC_API void THCTensor_(normal_means_stddevs)(THCState *state, THCTensor *self, THCTensor *means, THCTensor *stddevs) { THCTensor_(resizeAs)(state, self, means); THCTensor_(normal)(state, self, 0, 1); THCTensor_(cmul)(state, self, self, stddevs); THCTensor_(cadd)(state, self, self, ScalarConvert<int, real>::to(1), means); } THC_API void THCTensor_(logNormal)(THCState* state, THCTensor *self_, double mean, double stdv) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); real *data = THCTensor_(data)(state, self); generateLogNormal<real><<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, mean, stdv); THCTensor_(freeCopyTo)(state, self, self_); }; THC_API void THCTensor_(exponential)(THCState* state, THCTensor *self_, double lambda) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); real *data = THCTensor_(data)(state, self); generate_exponential<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, lambda); THCTensor_(freeCopyTo)(state, self, self_); }; THC_API void THCTensor_(cauchy)(THCState* state, THCTensor *self_, double median, double sigma) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, 
self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); real *data = THCTensor_(data)(state, self); generate_cauchy<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, median, sigma); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(renormRows)(struct THCState* state, THCTensor* t) { THAssert(THCTensor_(nDimension)(state, t) == 2); int64_t rows = THCTensor_(size)(state, t, 0); int64_t cols = THCTensor_(size)(state, t, 1); cudaDeviceProp* props = THCState_getCurrentDeviceProperties(state); THAssert(props != NULL); int numSM = props->multiProcessorCount; int maxThreads = props->maxThreadsPerBlock; dim3 grid(rows < numSM * 4 ? rows : numSM * 4); dim3 block(cols < maxThreads ? cols : maxThreads); renormRowsL1<real> <<<grid, block, block.x * sizeof(real), THCState_getCurrentStream(state)>>>(THCTensor_(data)(state, t), rows, cols); } THC_API void THCTensor_(multinomial)(struct THCState *state, THCudaLongTensor *self, THCTensor *prob_dist, int n_sample, int with_replacement) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self, prob_dist)); THCGenerator* gen = THCRandom_getGenerator(state); int inputSize = THCTensor_(nDimension)(state, prob_dist); THArgCheck(inputSize > 0 && inputSize <= 2, 2, "prob_dist must be 1 or 2 dim"); // Categories are in the innermost dimension int64_t numDist = inputSize == 1 ? 1 : THCTensor_(size)(state, prob_dist, 0); int64_t numCategoriesLong = inputSize == 1 ? THCTensor_(size)(state, prob_dist, 0) : THCTensor_(size)(state, prob_dist, 1); // Since the index tensor is float, numCategories cannot exceed max // float integer precision THArgCheck(numCategoriesLong <= FLOAT32_MAX_CONSECUTIVE_INT, 2, "number of categories cannot exceed 2^24"); int numCategories = (int) numCategoriesLong; THArgCheck(n_sample > 0, 3, "cannot sample <= 0 samples"); if (!with_replacement) { THArgCheck(n_sample <= numCategories, 2, "cannot sample n_sample > prob_dist:size(1) samples without " "replacement"); } int free_prob_dist = 0; // Restructure data for 2d if (inputSize == 1) { THCTensor *temp = THCTensor_(new)(state); THCTensor_(unsqueeze1d)(state, temp, prob_dist, 0); prob_dist = temp; free_prob_dist = 1; } THCudaLongTensor_resize2d(state, self, numDist, n_sample); // get current device properties cudaDeviceProp* props = THCState_getCurrentDeviceProperties(state); THAssert(props != NULL); int numSM = props->multiProcessorCount; int maxThreads = props->maxThreadsPerBlock; int maxShared = props->sharedMemPerBlock; int requiredShared = (numCategories < maxThreads ? numCategories : maxThreads) * (sizeof(real) * sizeof(accreal)); if (n_sample == 1 && maxShared >= requiredShared) { // Optimized allocation-free implementation // To exploit greater parallelism for the sampling, generate the // Uniform random samples in a separate kernel launch, into // temporarily allocated memory. The device RNG is thread-limited THCTensor *sampled = THCTensor_(newWithSize2d)(state, numDist, n_sample); THCTensor_(uniform)(state, sampled, 0.0, 1.0); dim3 block(numCategories < maxThreads ? numCategories : maxThreads); dim3 grid(numDist < numSM * 4 ? 
numDist : numSM * 4); sampleMultinomialOnce<real, accreal> <<<grid, block, requiredShared, THCState_getCurrentStream(state)>>>( THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, sampled), THCTensor_(data)(state, prob_dist), THCTensor_(stride)(state, prob_dist, 0), THCTensor_(stride)(state, prob_dist, 1) ); THCTensor_(free)(state, sampled); } else { // Generic, slow implementation with memory allocations // For sampling without replacement, we modify the distribution // for subsequent samples in this space THCTensor* origDist = THCTensor_(new)(state); THCTensor_(resizeAs)(state, origDist, prob_dist); THCTensor_(copy)(state, origDist, prob_dist); THCTensor* normDist = THCTensor_(new)(state); THCTensor_(resizeAs)(state, normDist, prob_dist); THCTensor* prefixSum = THCTensor_(new)(state); // Renorm along rows THCTensor_(copy)(state, normDist, origDist); THCTensor_(renormRows)(state, normDist); // Prefix sum along rows THCTensor_(cumsum)(state, prefixSum, normDist, 1); if (with_replacement) { // Sample with replacement // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. dim3 block(32, 4); // Each warp in a block will generate a sample from one // distribution concurrently. dim3 grid(numDist < MAX_NUM_BLOCKS ? numDist : MAX_NUM_BLOCKS); sampleMultinomialWithReplacement <<<grid, block, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, n_sample, THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, prefixSum)); } else { // Sample without replacement // Binary search is warp divergent (so effectively we're running // with just a single thread), but for better utilization, // we need each block to have at least 4 warps. dim3 block(32, 4); // Each warp in a block will generate a sample from a different // distribution concurrently. ptrdiff_t numBlocks = THCCeilDiv(numDist, (int64_t) 4); dim3 grid(numBlocks < MAX_NUM_BLOCKS ? 
numBlocks : MAX_NUM_BLOCKS); for (int sample = 0; sample < n_sample; ++sample) { if (sample > 0) { // Update probabilities // Renorm along rows THCTensor_(copy)(state, normDist, origDist); THCTensor_(renormRows)(state, normDist); // Prefix sum along rows THCTensor_(cumsum)(state, prefixSum, normDist, 1); } // The kernel can only draw one sample before we have to // recalculate our distribution sampleMultinomialWithoutReplacement <<<grid, block, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, n_sample, sample, THCudaLongTensor_data(state, self), numDist, numCategories, THCTensor_(data)(state, origDist), THCTensor_(data)(state, prefixSum)); } } THCTensor_(free)(state, prefixSum); THCTensor_(free)(state, normDist); THCTensor_(free)(state, origDist); } // Revert data restructuring based on input sizes if (inputSize == 1) { THCudaLongTensor_resize1d(state, self, n_sample); } if (free_prob_dist) { THCTensor_(free)(state, prob_dist); } } THC_API void THCTensor_(multinomialAliasSetup)(THCState *state, THCTensor *_probs, THCudaLongTensor *_J, THCTensor *_q){ THAssert(THCTensor_(isContiguous)(state, _q)); THAssert(THCudaLongTensor_isContiguous(state, _J)); THAssert(THCTensor_(isContiguous)(state, _probs)); int64_t inputsize = THCTensor_(nElement)(state, _probs); THCudaLongTensor *smaller = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *larger = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *smaller_short = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor *larger_short = THCudaLongTensor_newWithSize1d(state, inputsize); THCudaLongTensor_resize1d(state, _J, inputsize); THCTensor_(resize1d)(state, _q, inputsize); real one = ScalarConvert<int64_t, real>::to(1); int inputBlockDim = THCCeilDiv((int)inputsize + BLOCK_SIZE - 1, BLOCK_SIZE); aliasMultinomialFilter <<<inputBlockDim, BLOCK_SIZE, 0, THCState_getCurrentStream(state) >>>( THCTensor_(data)(state, _q), THCTensor_(data)(state, _probs), THCudaLongTensor_data(state, smaller), THCudaLongTensor_data(state, larger), THCudaLongTensor_data(state, _J), THCudaLongTensor_data(state, smaller_short), THCudaLongTensor_data(state, larger_short), one, inputsize ); THCudaLongTensor_nonzero(state, smaller_short, smaller); THCudaLongTensor_nonzero(state, larger_short, larger); int h_large_c = THCudaLongTensor_nElement(state, larger_short); THCudaLongTensor_resize1d(state, smaller_short, inputsize); THCudaLongTensor_resize1d(state, larger_short, inputsize); aliasMultinomialSetup <<<1, 1, 0, THCState_getCurrentStream(state)>>>( THCudaLongTensor_data(state, _J), THCTensor_(data)(state, _q), inputsize, THCudaLongTensor_data(state, smaller_short), THCudaLongTensor_data(state, larger_short), inputsize - h_large_c, h_large_c ); real q_max = THCTensor_(maxall)(state, _q); condDiv<<< inputBlockDim, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( THCTensor_(data)(state, _q), THCudaLongTensor_data(state, _J), inputsize, q_max ); THCudaLongTensor_free(state, smaller); THCudaLongTensor_free(state, larger); THCudaLongTensor_free(state, smaller_short); THCudaLongTensor_free(state, larger_short); } THC_API void THCTensor_(multinomialAliasDraw)(THCState *state, THCudaLongTensor *self, THCudaLongTensor *_J, THCTensor *_q){ THAssert(THCTensor_(isContiguous)(state, _q)); THAssert(THCudaLongTensor_isContiguous(state, _J)); THCGenerator* gen = THCRandom_getGenerator(state); int64_t K = THCudaLongTensor_nElement(state, _J); int64_t output_nelem = THCudaLongTensor_nElement(state, self); ptrdiff_t size = 
THCudaLongTensor_nElement(state, self); THCTensor *uniform = THCTensor_(newWithSize1d)(state, output_nelem); THCTensor *bernoulli = THCTensor_(newWithSize1d)(state, output_nelem); THCTensor_(uniform)(state, uniform, 0, K); THCTensor_(uniform)(state, bernoulli, 0, 1); multinomialAliasDrawKernel <<<THCCeilDiv((int)output_nelem+BLOCK_SIZE-1, BLOCK_SIZE), BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( size, THCudaLongTensor_data(state, self), THCudaLongTensor_data(state, _J), THCTensor_(data)(state, _q), K, THCTensor_(data)(state, uniform), THCTensor_(data)(state, bernoulli) ); } #endif #if defined(THC_REAL_IS_DOUBLE) GENERATE_KERNEL1(generate_bernoulli, double, double p, double, curand_uniform_double, x <= p) #else GENERATE_KERNEL1(generate_bernoulli, real, double p, float, curand_uniform, (ScalarConvert<bool, real>::to(x <= p))) #endif THC_API void THCTensor_(bernoulli)(THCState* state, THCTensor *self_, double p) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); real *data = THCTensor_(data)(state, self); generate_bernoulli<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, p); THCTensor_(freeCopyTo)(state, self, self_); }; void THCTensor_(bernoulli_Tensor)(THCState *state, THCTensor *self, THCTensor* p) { #if defined(THC_REAL_IS_FLOAT) THCTensor_(bernoulli_FloatTensor)(state, self, p); #elif defined(THC_REAL_IS_DOUBLE) THCTensor_(bernoulli_DoubleTensor)(state, self, p); #endif } #define DEFINE_BERNOULLI_TENSOR(NAME, PROB_TYPE, PROB_DATA_TYPE) \ THC_API void THCTensor_(NAME)(THCState* state, \ THCTensor *self_, PROB_TYPE *probs_) \ { \ THCAssertSameGPU(THCTensor_(checkGPU)(state, 2, self_, probs_)); \ ptrdiff_t size = THCTensor_(nElement)(state, self_); \ if (size == 0) return; \ THCGenerator* gen = THCRandom_getGenerator(state); \ THCTensor *self = THCTensor_(newContiguous)(state, self_); \ PROB_TYPE *probs = PROB_TYPE##_newContiguous(state, probs_); \ ptrdiff_t prob_size = PROB_TYPE##_nElement(state, probs); \ real *result_data = THCTensor_(data)(state, self); \ PROB_DATA_TYPE *probs_data = PROB_TYPE##_data(state, probs); \ \ THArgCheck(size == prob_size, 3, "inconsistent tensor size"); \ \ generate_bernoulli_tensor<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( \ gen->state.gen_states, size, result_data, probs_data); \ \ PROB_TYPE##_free(state, probs); \ THCTensor_(freeCopyTo)(state, self, self_); \ } DEFINE_BERNOULLI_TENSOR(bernoulli_FloatTensor, THCudaTensor, float) DEFINE_BERNOULLI_TENSOR(bernoulli_DoubleTensor, THCudaDoubleTensor, double) #if defined(THC_REAL_IS_DOUBLE) GENERATE_KERNEL1(generate_geometric, double, double p, double, curand_uniform_double, ceil(log(x) / log(1-p))) #else GENERATE_KERNEL1(generate_geometric, real, double p, float, curand_uniform, (ScalarConvert<float, real>::to(ceilf(logf(x) / log(1-p))))) #endif #if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT) #define CURAND64(STATE) (((uint64_t)curand(STATE)) << 32) | (uint64_t)curand(STATE) GENERATE_KERNEL2(generate_random, real, int32_t base, uint32_t range, uint32_t, curand, \ static_cast<real>(static_cast<int32_t>((x % range) + base))) GENERATE_KERNEL2(generate_random_64, real, int64_t base, uint64_t range, uint64_t, CURAND64, \ static_cast<real>(static_cast<int64_t>((x % range) + base))) #elif 
defined(THC_REAL_IS_HALF) GENERATE_KERNEL2(generate_random, real, int32_t base, uint32_t range, uint32_t, curand, (ScalarConvert<int32_t, real>::to(static_cast<int32_t>(x % range + base)))) #else GENERATE_KERNEL2(generate_random, real, int32_t base, uint32_t range, uint32_t, curand, static_cast<real>(static_cast<int32_t>(x % range + base))) #endif THC_API void THCTensor_(geometric)(THCState* state, THCTensor *self_, double p) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); real *data = THCTensor_(data)(state, self); generate_geometric<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, p); THCTensor_(freeCopyTo)(state, self, self_); }; THC_API void THCTensor_(clampedRandom)(THCState* state, THCTensor *self_, int64_t min_val, int64_t max_val) { THArgCheck(min_val < max_val, 2, "max must be greater than min, but got: min = %lld, max = %lld", min_val, max_val); THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); real *data = THCTensor_(data)(state, self); uint64_t range = max_val - min_val; #if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT) if (range > 1ULL << 32) { generate_random_64<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, min_val, range); } else { #endif generate_random<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, min_val, range); #if defined(THC_REAL_IS_LONG) || defined(THC_REAL_IS_DOUBLE) || defined(THC_REAL_IS_FLOAT) } #endif THCTensor_(freeCopyTo)(state, self, self_); }; THC_API void THCTensor_(cappedRandom)(THCState* state, THCTensor *self_, int64_t max_val) { THCTensor_(clampedRandom)(state, self_, 0LL, max_val); }; #define HLF_MANT_DIG 11 THC_API void THCTensor_(random)(THCState* state, THCTensor *self_) { THCAssertSameGPU(THCTensor_(checkGPU)(state, 1, self_)); ptrdiff_t size = THCTensor_(nElement)(state, self_); if (size == 0) return; THCGenerator* gen = THCRandom_getGenerator(state); THCTensor *self = THCTensor_(newContiguous)(state, self_); real *data = THCTensor_(data)(state, self); #if defined(THC_REAL_IS_HALF) generate_random<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, 0UL, (1UL << HLF_MANT_DIG) + 1); #elif defined(THC_REAL_IS_FLOAT) generate_random<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, 0UL, (1UL << FLT_MANT_DIG) + 1); #elif defined(THC_REAL_IS_DOUBLE) generate_random_64<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, 0ULL, (1ULL << DBL_MANT_DIG) + 1); #elif defined(THC_REAL_IS_LONG) generate_random_64<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, 0ULL, static_cast<uint64_t>(std::numeric_limits<real>::max()) + 1); #else generate_random<<<NUM_BLOCKS, BLOCK_SIZE, 0, THCState_getCurrentStream(state)>>>( gen->state.gen_states, size, data, 0UL, static_cast<uint32_t>(std::numeric_limits<real>::max()) + 1); #endif THCTensor_(freeCopyTo)(state, self, self_); }; #undef 
HLF_MANT_DIG #undef CURAND64 #undef NUM_BLOCKS #endif
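The THC file above builds Walker/Vose alias tables in THCTensor_(multinomialAliasSetup) (the _J alias indices and the _q acceptance probabilities) and consumes them in THCTensor_(multinomialAliasDraw), which pre-draws one uniform in [0, K) and one in [0, 1) per output element before launching multinomialAliasDrawKernel. Below is a minimal sketch of the per-element accept/alias decision that the draw step amounts to; it is an editorial illustration, not the THC kernel's actual signature, and the kernel and parameter names are made up for clarity.

// Hedged sketch of the alias-method draw: pick a column uniformly, accept it
// with probability q[bin], otherwise fall through to its alias J[bin].
// Assumes J and q were produced by a setup pass such as multinomialAliasSetup.
#include <cuda_runtime.h>

__global__ void alias_draw_sketch(int n,
                                  long long *out,        // sampled category ids
                                  const long long *J,    // alias table
                                  const float *q,        // acceptance probabilities in [0,1]
                                  int K,                 // number of categories
                                  const float *u_bin,    // pre-drawn uniforms in [0, K)
                                  const float *u_accept) // pre-drawn uniforms in [0, 1)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) return;
    int bin = (int)u_bin[i];                  // pick a column uniformly
    if (bin >= K) bin = K - 1;                // guard the (rare) upper edge
    out[i] = (u_accept[i] < q[bin]) ? bin     // accept the column itself...
                                    : J[bin]; // ...or take its alias
}
// e.g. alias_draw_sketch<<<(n + 255) / 256, 256>>>(n, d_out, d_J, d_q, K, d_uK, d_u1);

The point of the setup/draw split is that the O(K) table construction is paid once, after which every sample costs O(1) work regardless of the number of categories.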
ea8d9d600d6d2ed420b148e3d38ae679112a99e7.hip
// !!! This is a file automatically generated by hipify!!! /* * compile: * nvcc -o split_threads -O3 -arch=sm_35 split_threads.cu -I /usr/local/cuda/samples/common/inc/ -I. */ #include <iostream> #include <hip/hip_runtime.h> #include <helper_cuda.h> #include <algorithm> #include <hipcub/hipcub.hpp> using namespace std; using namespace cub; #define WARP_SIZE (32) //threads in a warp compute the histogram, each thread only knows the bucket_id of its element __device__ void warp_histogram(int *d_key_in, int bucket_bits, int *histo) { // int localId = threadIdx.x; // int globalId = blockIdx.x * blockDim.x + threadIdx.x; // int globalSize = blockDim.x * gridDim.x; // int warpNum = globalSize / WARP_SIZE; // int warpId = globalId / WARP_SIZE; // int lane = localId & (WARP_SIZE-1); // int buckets = 1<<bucket_bits; // int rounds = (buckets + WARP_SIZE - 1) / WARP_SIZE; //deal with mutliple buckets // unsigned histo_bmp[8]; //deal with at most 256 buckets. // for(int k = 0; k < rounds; k++) histo_bmp[k] = 0xffffffff; // // for(int k = 0; k < bucket_bits; k++) { // unsigned temp_buffer = __ballot(bucket_id & 0x01); // for(int j = 0; j < rounds; j++) { // if (((j*WARP_SIZE+lane)>>k) & 0x01) histo_bmp[j] &= temp_buffer; // else histo_bmp[j] &= (0xffffffff ^ temp_buffer); // } // bucket_id >>= 1; // } // // for(int j = 0; j < rounds; j++) { // int idx = j * WARP_SIZE + lane; // if (idx < buckets) histo[idx * warpNum + warpId] = __popc(histo_bmp[j]); // } //simplified version (<= 32 buckets) unsigned histo_bmp = 0xffffffff; //deal with at most 256 buckets. for(int k = 0; k < bucket_bits; k++) { unsigned temp_buffer = __ballot(bucket_id & 0x01); if ((lane>>k) & 0x01) histo_bmp &= temp_buffer; else histo_bmp &= (0xffffffff ^ temp_buffer); bucket_id >>= 1; } histo[lane * warpNum + warpId] = __popc(histo_bmp); //// histo[warpId*buckets + lane] = __popc(histo_bmp); } __device__ void warp_offset(int *key_in, int *key_out, int bucket_id, int bucket_bits, int *histo) { int localId = threadIdx.x; int globalId = blockIdx.x * blockDim.x + threadIdx.x; int globalSize = blockDim.x * gridDim.x; int warpNum = globalSize / WARP_SIZE; int warpId = globalId / WARP_SIZE; int lane = localId & (WARP_SIZE-1); int bucket_id_fixed = bucket_id; // int buckets = 1<< bucket_bits; unsigned offset_bmp = 0xffffffff; //deal with at most 256 buckets. 
for(int k = 0; k < bucket_bits; k++) { unsigned temp_buffer = __ballot(bucket_id & 0x01); if (bucket_id & 0x01) offset_bmp &= temp_buffer; else offset_bmp &= (0xffffffff ^ temp_buffer); bucket_id >>= 1; } int offset = __popc(offset_bmp & (0xffffffff>>(31-lane)))-1; int pos = histo[bucket_id_fixed*warpNum + warpId]+offset; // int pos = histo[warpId * buckets + bucket_id_fixed] + offset; key_out[pos] = key_in[globalId]; } __global__ void pre_scan(int *key_in, int *histo, int length, int bucket_bits) { int globalId = blockIdx.x * blockDim.x + threadIdx.x; // int bucket_id = key_in[globalId]; warp_histogram(key_in, bucket_bits, histo); } __global__ void post_scan(int *key_in, int *key_out, int *histo_scanned, int length, int bucket_bits) { int globalId = blockIdx.x * blockDim.x + threadIdx.x; int bucket_id = key_in[globalId]; warp_offset(key_in, key_out, bucket_id, bucket_bits, histo_scanned); } //testing //__global__ void transpose1(int *his_in, int *his_out,int buckets, int warp_num) { // int globalId = blockIdx.x * blockDim.x + threadIdx.x; // if (globalId == 0) { // for(int w = 0; w < warp_num; w++) { // for(int b = 0; b < buckets; b++) { // his_out[b*warp_num+w] = his_in[w*buckets+b]; // } // } // } //} // //__global__ void transpose2(int *his_in, int *his_out,int buckets, int warp_num) { // int globalId = blockIdx.x * blockDim.x + threadIdx.x; // if (globalId == 0) { // for(int b = 0; b < buckets; b++) { // for(int w = 0; w < warp_num; w++) { // his_out[w*buckets+b] = his_in[b*warp_num+w]; // } // } // } //} int main() { int length = 1<<25; int local_size = 256; int grid_size = length/local_size; int warp_num = local_size * grid_size / WARP_SIZE; float totalTime = 0; int bucket_bits = 5; //32 buckets int buckets = 1<<bucket_bits; int *key_in = new int[length]; int *key_out = new int[length]; int *histograms = new int[buckets*warp_num]; int *value_in = new int[length]; int *value_out = new int[length]; srand(time(NULL)); for(int i = 0; i <length; i++) { key_in[i] = rand() & (buckets-1); } int *d_key_in, *d_key_out, *d_histograms, *d_histograms_scanned; checkCudaErrors(hipMalloc(&d_key_in,sizeof(int)*length)); checkCudaErrors(hipMalloc(&d_key_out,sizeof(int)*length)); checkCudaErrors(hipMalloc(&d_histograms,sizeof(int)*buckets*warp_num)); checkCudaErrors(hipMalloc(&d_histograms_scanned,sizeof(int)*buckets*warp_num)); hipMemcpy(d_key_in, key_in, sizeof(int) * length, hipMemcpyHostToDevice); hipEvent_t start, end; hipEventCreate(&start); hipEventCreate(&end); float tempTime; //1.pre-scan hipEventRecord(start, 0); hipLaunchKernelGGL(( pre_scan), dim3(grid_size), dim3(local_size), 0, 0, d_key_in, d_histograms, length, bucket_bits); hipEventRecord(end, 0); hipEventSynchronize(end); hipEventElapsedTime(&tempTime, start, end); cout<<"Pre-scan time: "<<tempTime<<" ms."<<endl; totalTime += tempTime; // transpose1<<<1,1>>>(d_histograms, d_histograms_scanned, buckets, warp_num); //2.exclusive scan void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; hipEventRecord(start, 0); CubDebugExit(DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_histograms, d_histograms_scanned, buckets*warp_num)); checkCudaErrors(hipMalloc(&d_temp_storage,temp_storage_bytes)); CubDebugExit(DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_histograms, d_histograms_scanned, buckets*warp_num)); hipEventRecord(end, 0); hipEventSynchronize(end); hipEventElapsedTime(&tempTime, start, end); cout<<"Scan time: "<<tempTime<<" ms."<<endl; totalTime += tempTime; //test // 
transpose2<<<1,1>>>(d_histograms, d_histograms_scanned, buckets, warp_num); //3.post-scan hipEventRecord(start, 0); hipLaunchKernelGGL(( post_scan), dim3(grid_size), dim3(local_size), 0, 0, d_key_in, d_key_out, d_histograms_scanned, length, bucket_bits); hipEventRecord(end, 0); hipEventSynchronize(end); hipEventElapsedTime(&tempTime, start, end); cout<<"Post-scan time: "<<tempTime<<" ms."<<endl; totalTime += tempTime; cout<<"Total time: "<<totalTime<<" ms."<<endl; hipMemcpy(histograms, d_histograms, sizeof(int) * WARP_SIZE, hipMemcpyDeviceToHost); hipMemcpy(key_out, d_key_out, sizeof(int) * length, hipMemcpyDeviceToHost); checkCudaErrors(hipFree(d_key_in)); checkCudaErrors(hipFree(d_key_out)); checkCudaErrors(hipFree(d_histograms)); checkCudaErrors(hipFree(d_histograms_scanned)); //check sort(key_in, key_in+length); bool res = true; for(int i = 0; i <length; i++) { if (key_in[i] != key_out[i]) { res = false; cout<<key_in[i]<<' '<<key_out[i]<<endl; break; } } if (res) cout<<"Res: correct!"<<endl; else cout<<"Res: wrong!"<<endl; delete[] key_in; delete[] key_out; delete[] value_in; delete[] value_out; delete[] histograms; return 0; }
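In both versions of split_threads, the simplified path of warp_histogram reads lane, warpId, warpNum and bucket_id, but their declarations sit only in the commented-out block above it, so the function does not compile as written. The sketch below spells those indices out and shows the same ballot-vote trick: one ballot per bucket bit, after which lane b of each warp holds a bitmask of the lanes whose key falls in bucket b. It is written as CUDA using __ballot_sync (the HIP file above relies on the plain __ballot intrinsic); the kernel and variable names are illustrative, not a drop-in replacement.

// Hedged sketch of the ballot-based warp histogram, with the lane/warp indices
// computed explicitly. Assumes at most 32 buckets and 32-thread warps.
#include <cuda_runtime.h>

#define WARP_SIZE 32

__global__ void warp_histogram_sketch(const int *key_in, int bucket_bits,
                                      int *histo /* laid out [buckets x warpNum] */)
{
    int globalId = blockIdx.x * blockDim.x + threadIdx.x;
    int warpNum  = (blockDim.x * gridDim.x) / WARP_SIZE;
    int warpId   = globalId / WARP_SIZE;
    int lane     = threadIdx.x & (WARP_SIZE - 1);

    int bucket_id  = key_in[globalId];  // this thread's bucket
    unsigned match = 0xffffffffu;       // lanes whose bucket equals `lane`

    for (int k = 0; k < bucket_bits; ++k) {
        // One ballot per bit: which lanes have bit k of their bucket set?
        unsigned vote = __ballot_sync(0xffffffffu, (bucket_id >> k) & 0x1);
        // Keep only lanes whose bit k agrees with bit k of this lane's index.
        match &= (((lane >> k) & 0x1) ? vote : ~vote);
    }
    // Lane b of the warp now owns the count for bucket b.
    int buckets = 1 << bucket_bits;
    if (lane < buckets)
        histo[lane * warpNum + warpId] = __popc(match);
}
// e.g. warp_histogram_sketch<<<length / 256, 256>>>(d_key_in, 5, d_histograms);

For the 32-bucket case used in main (bucket_bits = 5), a warp's whole histogram costs five ballots and one popcount per lane, with no shared memory and no atomics.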
ea8d9d600d6d2ed420b148e3d38ae679112a99e7.cu
/* * compile: * nvcc -o split_threads -O3 -arch=sm_35 split_threads.cu -I /usr/local/cuda/samples/common/inc/ -I. */ #include <iostream> #include <cuda_runtime.h> #include <helper_cuda.h> #include <algorithm> #include <cub/device/device_scan.cuh> using namespace std; using namespace cub; #define WARP_SIZE (32) //threads in a warp compute the histogram, each thread only knows the bucket_id of its element __device__ void warp_histogram(int *d_key_in, int bucket_bits, int *histo) { // int localId = threadIdx.x; // int globalId = blockIdx.x * blockDim.x + threadIdx.x; // int globalSize = blockDim.x * gridDim.x; // int warpNum = globalSize / WARP_SIZE; // int warpId = globalId / WARP_SIZE; // int lane = localId & (WARP_SIZE-1); // int buckets = 1<<bucket_bits; // int rounds = (buckets + WARP_SIZE - 1) / WARP_SIZE; //deal with mutliple buckets // unsigned histo_bmp[8]; //deal with at most 256 buckets. // for(int k = 0; k < rounds; k++) histo_bmp[k] = 0xffffffff; // // for(int k = 0; k < bucket_bits; k++) { // unsigned temp_buffer = __ballot(bucket_id & 0x01); // for(int j = 0; j < rounds; j++) { // if (((j*WARP_SIZE+lane)>>k) & 0x01) histo_bmp[j] &= temp_buffer; // else histo_bmp[j] &= (0xffffffff ^ temp_buffer); // } // bucket_id >>= 1; // } // // for(int j = 0; j < rounds; j++) { // int idx = j * WARP_SIZE + lane; // if (idx < buckets) histo[idx * warpNum + warpId] = __popc(histo_bmp[j]); // } //simplified version (<= 32 buckets) unsigned histo_bmp = 0xffffffff; //deal with at most 256 buckets. for(int k = 0; k < bucket_bits; k++) { unsigned temp_buffer = __ballot(bucket_id & 0x01); if ((lane>>k) & 0x01) histo_bmp &= temp_buffer; else histo_bmp &= (0xffffffff ^ temp_buffer); bucket_id >>= 1; } histo[lane * warpNum + warpId] = __popc(histo_bmp); //// histo[warpId*buckets + lane] = __popc(histo_bmp); } __device__ void warp_offset(int *key_in, int *key_out, int bucket_id, int bucket_bits, int *histo) { int localId = threadIdx.x; int globalId = blockIdx.x * blockDim.x + threadIdx.x; int globalSize = blockDim.x * gridDim.x; int warpNum = globalSize / WARP_SIZE; int warpId = globalId / WARP_SIZE; int lane = localId & (WARP_SIZE-1); int bucket_id_fixed = bucket_id; // int buckets = 1<< bucket_bits; unsigned offset_bmp = 0xffffffff; //deal with at most 256 buckets. 
for(int k = 0; k < bucket_bits; k++) { unsigned temp_buffer = __ballot(bucket_id & 0x01); if (bucket_id & 0x01) offset_bmp &= temp_buffer; else offset_bmp &= (0xffffffff ^ temp_buffer); bucket_id >>= 1; } int offset = __popc(offset_bmp & (0xffffffff>>(31-lane)))-1; int pos = histo[bucket_id_fixed*warpNum + warpId]+offset; // int pos = histo[warpId * buckets + bucket_id_fixed] + offset; key_out[pos] = key_in[globalId]; } __global__ void pre_scan(int *key_in, int *histo, int length, int bucket_bits) { int globalId = blockIdx.x * blockDim.x + threadIdx.x; // int bucket_id = key_in[globalId]; warp_histogram(key_in, bucket_bits, histo); } __global__ void post_scan(int *key_in, int *key_out, int *histo_scanned, int length, int bucket_bits) { int globalId = blockIdx.x * blockDim.x + threadIdx.x; int bucket_id = key_in[globalId]; warp_offset(key_in, key_out, bucket_id, bucket_bits, histo_scanned); } //testing //__global__ void transpose1(int *his_in, int *his_out,int buckets, int warp_num) { // int globalId = blockIdx.x * blockDim.x + threadIdx.x; // if (globalId == 0) { // for(int w = 0; w < warp_num; w++) { // for(int b = 0; b < buckets; b++) { // his_out[b*warp_num+w] = his_in[w*buckets+b]; // } // } // } //} // //__global__ void transpose2(int *his_in, int *his_out,int buckets, int warp_num) { // int globalId = blockIdx.x * blockDim.x + threadIdx.x; // if (globalId == 0) { // for(int b = 0; b < buckets; b++) { // for(int w = 0; w < warp_num; w++) { // his_out[w*buckets+b] = his_in[b*warp_num+w]; // } // } // } //} int main() { int length = 1<<25; int local_size = 256; int grid_size = length/local_size; int warp_num = local_size * grid_size / WARP_SIZE; float totalTime = 0; int bucket_bits = 5; //32 buckets int buckets = 1<<bucket_bits; int *key_in = new int[length]; int *key_out = new int[length]; int *histograms = new int[buckets*warp_num]; int *value_in = new int[length]; int *value_out = new int[length]; srand(time(NULL)); for(int i = 0; i <length; i++) { key_in[i] = rand() & (buckets-1); } int *d_key_in, *d_key_out, *d_histograms, *d_histograms_scanned; checkCudaErrors(cudaMalloc(&d_key_in,sizeof(int)*length)); checkCudaErrors(cudaMalloc(&d_key_out,sizeof(int)*length)); checkCudaErrors(cudaMalloc(&d_histograms,sizeof(int)*buckets*warp_num)); checkCudaErrors(cudaMalloc(&d_histograms_scanned,sizeof(int)*buckets*warp_num)); cudaMemcpy(d_key_in, key_in, sizeof(int) * length, cudaMemcpyHostToDevice); cudaEvent_t start, end; cudaEventCreate(&start); cudaEventCreate(&end); float tempTime; //1.pre-scan cudaEventRecord(start, 0); pre_scan<<<grid_size, local_size>>>(d_key_in, d_histograms, length, bucket_bits); cudaEventRecord(end, 0); cudaEventSynchronize(end); cudaEventElapsedTime(&tempTime, start, end); cout<<"Pre-scan time: "<<tempTime<<" ms."<<endl; totalTime += tempTime; // transpose1<<<1,1>>>(d_histograms, d_histograms_scanned, buckets, warp_num); //2.exclusive scan void *d_temp_storage = NULL; size_t temp_storage_bytes = 0; cudaEventRecord(start, 0); CubDebugExit(DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_histograms, d_histograms_scanned, buckets*warp_num)); checkCudaErrors(cudaMalloc(&d_temp_storage,temp_storage_bytes)); CubDebugExit(DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes, d_histograms, d_histograms_scanned, buckets*warp_num)); cudaEventRecord(end, 0); cudaEventSynchronize(end); cudaEventElapsedTime(&tempTime, start, end); cout<<"Scan time: "<<tempTime<<" ms."<<endl; totalTime += tempTime; //test // transpose2<<<1,1>>>(d_histograms, 
d_histograms_scanned, buckets, warp_num); //3.post-scan cudaEventRecord(start, 0); post_scan<<<grid_size, local_size>>>(d_key_in, d_key_out, d_histograms_scanned, length, bucket_bits); cudaEventRecord(end, 0); cudaEventSynchronize(end); cudaEventElapsedTime(&tempTime, start, end); cout<<"Post-scan time: "<<tempTime<<" ms."<<endl; totalTime += tempTime; cout<<"Total time: "<<totalTime<<" ms."<<endl; cudaMemcpy(histograms, d_histograms, sizeof(int) * WARP_SIZE, cudaMemcpyDeviceToHost); cudaMemcpy(key_out, d_key_out, sizeof(int) * length, cudaMemcpyDeviceToHost); checkCudaErrors(cudaFree(d_key_in)); checkCudaErrors(cudaFree(d_key_out)); checkCudaErrors(cudaFree(d_histograms)); checkCudaErrors(cudaFree(d_histograms_scanned)); //check sort(key_in, key_in+length); bool res = true; for(int i = 0; i <length; i++) { if (key_in[i] != key_out[i]) { res = false; cout<<key_in[i]<<' '<<key_out[i]<<endl; break; } } if (res) cout<<"Res: correct!"<<endl; else cout<<"Res: wrong!"<<endl; delete[] key_in; delete[] key_out; delete[] value_in; delete[] value_out; delete[] histograms; return 0; }
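Both files drive the histogram scan through CUB's two-phase calling convention: the first DeviceScan::ExclusiveSum call is made with a null temporary-storage pointer and only reports the required temp_storage_bytes; the actual prefix sum runs on the second call, after that buffer has been allocated. (In the timed region above, the size query and the cudaMalloc both fall inside the "Scan time" measurement.) A minimal, self-contained sketch of that pattern, with illustrative sizes and names:

// Hedged sketch of the two-phase cub::DeviceScan::ExclusiveSum call pattern.
#include <cub/device/device_scan.cuh>
#include <cuda_runtime.h>

int main() {
    const int n = 1 << 10;
    int *d_in = nullptr, *d_out = nullptr;
    cudaMalloc(&d_in,  n * sizeof(int));
    cudaMalloc(&d_out, n * sizeof(int));
    cudaMemset(d_in, 0, n * sizeof(int));   // stand-in for the per-warp histogram counts

    void  *d_temp_storage = nullptr;
    size_t temp_storage_bytes = 0;

    // Phase 1: d_temp_storage == nullptr, so this only writes the required
    // size into temp_storage_bytes and launches no work.
    cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
                                  d_in, d_out, n);
    cudaMalloc(&d_temp_storage, temp_storage_bytes);

    // Phase 2: the same call with real storage performs the exclusive prefix sum.
    cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
                                  d_in, d_out, n);
    cudaDeviceSynchronize();

    cudaFree(d_temp_storage);
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}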